From e7ef8f5a1013cc32440947a1a98f21be3b2a7ae8 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 8 Feb 2024 18:39:51 +0400 Subject: [PATCH 001/127] pytorch backend from tf backend --- src/qibo/backends/pytorch.py | 310 +++++++++++++++++++++++++++++++++++ 1 file changed, 310 insertions(+) create mode 100644 src/qibo/backends/pytorch.py diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py new file mode 100644 index 0000000000..1368fc9f01 --- /dev/null +++ b/src/qibo/backends/pytorch.py @@ -0,0 +1,310 @@ +import collections + +import numpy as np +import torch + +from qibo import __version__ +from qibo.backends.npmatrices import NumpyMatrices +from qibo.backends.numpy import NumpyBackend +from qibo.config import TF_LOG_LEVEL, log, raise_error + + +class TorchMatrices(NumpyMatrices): + # Redefine parametrized gate matrices for backpropagation to work + + def __init__(self, dtype): + self.np = np + super().__init__(dtype) + + def RX(self, theta): + cos = self.np.cos(theta / 2.0) + 0j + isin = -1j * self.np.sin(theta / 2.0) + return torch.tensor([[cos, isin], [isin, cos]], dtype=self.dtype) + + def RY(self, theta): + cos = self.np.cos(theta / 2.0) + 0j + sin = self.np.sin(theta / 2.0) + 0j + return torch.tensor([[cos, -sin], [sin, cos]], dtype=self.dtype) + + def RZ(self, theta): + phase = self.np.exp(0.5j * theta) + return torch.tensor([[self.np.conj(phase), 0], [0, phase]], dtype=self.dtype) + + def U1(self, theta): + phase = self.np.exp(1j * theta) + return torch.tensor([[1, 0], [0, phase]], dtype=self.dtype) + + def U2(self, phi, lam): + eplus = self.np.exp(1j * (phi + lam) / 2.0) + eminus = self.np.exp(1j * (phi - lam) / 2.0) + return torch.tensor( + [[self.np.conj(eplus), -self.np.conj(eminus)], [eminus, eplus]], + dtype=self.dtype, + ) / self.np.sqrt(2) + + def U3(self, theta, phi, lam): + cost = self.np.cos(theta / 2) + sint = self.np.sin(theta / 2) + eplus = self.np.exp(1j * (phi + lam) / 2.0) + eminus = self.np.exp(1j * (phi - lam) / 2.0) + return torch.tensor( + [ + [self.np.conj(eplus) * cost, -self.np.conj(eminus) * sint], + [eminus * sint, eplus * cost], + ], + dtype=self.dtype, + ) + + def CRX(self, theta): + r = self.RX(theta) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def CRY(self, theta): + r = self.RY(theta) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def CRZ(self, theta): + r = self.RZ(theta) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def CU1(self, theta): + r = self.U1(theta) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def CU2(self, phi, lam): + r = self.U2(phi, lam) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def CU3(self, theta, phi, lam): + r = self.U3(theta, phi, lam) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, r[0, 0], r[0, 1]], + [0, 0, r[1, 0], r[1, 1]], + ], + dtype=self.dtype, + ) + + def fSim(self, theta, phi): + cost = self.np.cos(theta) + 0j + isint = -1j * self.np.sin(theta) + phase = self.np.exp(-1j * phi) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, cost, isint, 0], + [0, isint, cost, 0], + [0, 
0, 0, phase], + ], + dtype=self.dtype, + ) + + def GeneralizedfSim(self, u, phi): + phase = self.np.exp(-1j * phi) + return torch.tensor( + [ + [1, 0, 0, 0], + [0, u[0, 0], u[0, 1], 0], + [0, u[1, 0], u[1, 1], 0], + [0, 0, 0, phase], + ], + dtype=self.dtype, + ) + + def Unitary(self, u): + return torch.tensor(u, dtype=self.dtype) + + +class TorchBackend(NumpyBackend): + def __init__(self): + super().__init__() + self.name = "torch" + + self.versions = { + "qibo": __version__, + "numpy": np.__version__, + "torch": torch.__version__, + } + + self.matrices = TorchMatrices(self.dtype) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.nthreads = 0 + self.tensor_types = (np.ndarray, torch.Tensor) + + def set_device(self, device): # pragma: no cover + self.device = device + + def cast(self, x, dtype=None, copy=False): + if dtype is None: + dtype = self.dtype + x = torch.tensor(x, dtype=dtype) + if copy: + return x.clone() + return x + + def issparse(self, x): + return x.is_sparse + + def to_numpy(self, x): + return x.detach().cpu().numpy() + + def compile(self, func): + return torch.jit.script(func) + + def zero_state(self, nqubits): + state = torch.zeros(2**nqubits, dtype=self.dtype) + state[0] = 1 + return state + + def zero_density_matrix(self, nqubits): + state = torch.zeros(2 * (2**nqubits,), dtype=self.dtype) + state[0, 0] = 1 + return state + + def matrix(self, gate): + npmatrix = super().matrix(gate) + return torch.tensor(npmatrix, dtype=self.dtype) + + def matrix_parametrized(self, gate): + npmatrix = super().matrix_parametrized(gate) + return torch.tensor(npmatrix, dtype=self.dtype) + + def matrix_fused(self, gate): + npmatrix = super().matrix_fused(gate) + return torch.tensor(npmatrix, dtype=self.dtype) + + def execute_circuit(self, circuit, initial_state=None, nshots=1000): + if initial_state is not None: + initial_state = initial_state.to(self.device) + return super().execute_circuit(circuit, initial_state, nshots) + + def execute_circuit_repeated(self, circuit, nshots, initial_state=None): + if initial_state is not None: + initial_state = initial_state.to(self.device) + return super().execute_circuit_repeated(circuit, nshots, initial_state) + + def sample_shots(self, probabilities, nshots): + return torch.multinomial(probabilities, nshots) + + def samples_to_binary(self, samples, nqubits): + qrange = torch.arange(nqubits - 1, -1, -1, dtype=torch.int32) + samples = samples.int() + samples = samples[:, None] >> qrange + return samples % 2 + + def calculate_frequencies(self, samples): + res, counts = torch.unique(samples, return_counts=True) + res, counts = res.tolist(), counts.tolist() + return collections.Counter({k: v for k, v in zip(res, counts)}) + + def update_frequencies(self, frequencies, probabilities, nsamples): + samples = self.sample_shots(probabilities, nsamples) + unique_samples, counts = torch.unique(samples, return_counts=True) + frequencies.index_add_(0, unique_samples, counts) + return frequencies + + def calculate_norm(self, state, order=2): + state = self.cast(state) + return torch.norm(state, p=order) + + def calculate_norm_density_matrix(self, state, order="nuc"): + state = self.cast(state) + if order == "nuc": + return np.trace(state) + return torch.norm(state, p=order) + + def calculate_eigenvalues(self, matrix): + return torch.linalg.eigvalsh(matrix) + + def calculate_eigenvectors(self, matrix): + return torch.linalg.eigh(matrix) + + def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): + if 
eigenvectors is None or self.issparse(matrix): + return torch.linalg.matrix_exp(-1j * a * matrix) + else: + return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) + + def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): + if self.issparse(matrix1) or self.issparse(matrix2): + return torch.sparse.mm(matrix1, matrix2) + return super().calculate_hamiltonian_matrix_product(matrix1, matrix2) + + def calculate_hamiltonian_state_product(self, matrix, state): + rank = len(tuple(state.shape)) + if rank == 1: # vector + return np.matmul(matrix, state[:, np.newaxis])[:, 0] + elif rank == 2: # matrix + return np.matmul(matrix, state) + else: + raise_error( + ValueError, + "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), + ) + + def test_regressions(self, name): + if name == "test_measurementresult_apply_bitflips": + return [ + [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], + [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], + [4, 0, 0, 1, 0, 0, 0, 4, 4, 0], + [4, 0, 0, 0, 0, 0, 0, 4, 4, 0], + ] + elif name == "test_probabilistic_measurement": + if "cuda" in self.device: # pragma: no cover + return {0: 273, 1: 233, 2: 242, 3: 252} + else: + return {0: 271, 1: 239, 2: 242, 3: 248} + elif name == "test_unbalanced_probabilistic_measurement": + if "cuda" in self.device: # pragma: no cover + return {0: 196, 1: 153, 2: 156, 3: 495} + else: + return {0: 168, 1: 188, 2: 154, 3: 490} + elif name == "test_post_measurement_bitflips_on_circuit": + return [ + {5: 30}, + {5: 16, 7: 10, 6: 2, 3: 1, 4: 1}, + {3: 6, 5: 6, 7: 5, 2: 4, 4: 3, 0: 2, 1: 2, 6: 2}, + ] From f03d88f55023436884433ff79dd4b8c93b6dccf3 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 8 Feb 2024 18:48:11 +0400 Subject: [PATCH 002/127] added in backend constructor --- src/qibo/backends/__init__.py | 5 +++++ src/qibo/backends/pytorch.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/__init__.py b/src/qibo/backends/__init__.py index e329c3c8c2..d1c4ab6505 100644 --- a/src/qibo/backends/__init__.py +++ b/src/qibo/backends/__init__.py @@ -4,6 +4,7 @@ from qibo.backends.clifford import CliffordBackend from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend +from qibo.backends.pytorch import PyTorchBackend from qibo.backends.tensorflow import TensorflowBackend from qibo.config import log, raise_error @@ -27,6 +28,9 @@ def construct_backend(backend, platform=None): elif backend == "tensorflow": return TensorflowBackend() + elif backend == "pytorch": + return PyTorchBackend() + elif backend == "numpy": return NumpyBackend() @@ -56,6 +60,7 @@ class GlobalBackend(NumpyBackend): {"backend": "qibojit", "platform": "numba"}, {"backend": "tensorflow"}, {"backend": "numpy"}, + {"backend": "pytorch"}, ] def __new__(cls): diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 1368fc9f01..27244c6421 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -157,10 +157,10 @@ def Unitary(self, u): return torch.tensor(u, dtype=self.dtype) -class TorchBackend(NumpyBackend): +class PyTorchBackend(NumpyBackend): def __init__(self): super().__init__() - self.name = "torch" + self.name = "pytorch" self.versions = { "qibo": __version__, From 752b049bf620ee2be3f350b9605d100e94cf7139 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 8 Feb 2024 18:57:29 +0400 Subject: [PATCH 003/127] update dependencies --- poetry.lock | 1621 ++++++++++++++++++++++++++++-------------------- pyproject.toml | 1 + 2 files changed, 957 
insertions(+), 665 deletions(-) diff --git a/poetry.lock b/poetry.lock index d9f70f1364..33a4ce05e9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,24 +2,24 @@ [[package]] name = "absl-py" -version = "2.0.0" +version = "2.1.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." optional = false python-versions = ">=3.7" files = [ - {file = "absl-py-2.0.0.tar.gz", hash = "sha256:d9690211c5fcfefcdd1a45470ac2b5c5acd45241c3af71eed96bc5441746c0d5"}, - {file = "absl_py-2.0.0-py3-none-any.whl", hash = "sha256:9a28abb62774ae4e8edbe2dd4c49ffcd45a6a848952a5eccc6a49f3f0fc1e2f3"}, + {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"}, + {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"}, ] [[package]] name = "alabaster" -version = "0.7.13" -description = "A configurable sidebar-enabled Sphinx theme" +version = "0.7.16" +description = "A light, configurable Sphinx theme" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" files = [ - {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"}, - {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"}, + {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, + {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, ] [[package]] @@ -46,13 +46,13 @@ trio = ["trio (>=0.23)"] [[package]] name = "astroid" -version = "3.0.2" +version = "3.0.3" description = "An abstract syntax tree for Python with inference support." optional = false python-versions = ">=3.8.0" files = [ - {file = "astroid-3.0.2-py3-none-any.whl", hash = "sha256:d6e62862355f60e716164082d6b4b041d38e2a8cf1c7cd953ded5108bac8ff5c"}, - {file = "astroid-3.0.2.tar.gz", hash = "sha256:4a61cf0a59097c7bb52689b0fd63717cd2a8a14dc9f1eee97b82d814881c8c91"}, + {file = "astroid-3.0.3-py3-none-any.whl", hash = "sha256:92fcf218b89f449cdf9f7b39a269f8d5d617b27be68434912e11e79203963a17"}, + {file = "astroid-3.0.3.tar.gz", hash = "sha256:4148645659b08b70d72460ed1921158027a9e53ae8b7234149b1400eddacbb93"}, ] [package.dependencies] @@ -124,19 +124,22 @@ dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] [[package]] name = "beautifulsoup4" -version = "4.12.2" +version = "4.12.3" description = "Screen-scraping library" optional = false python-versions = ">=3.6.0" files = [ - {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, - {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, ] [package.dependencies] soupsieve = ">1.2" [package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] @@ -171,13 +174,13 @@ files = [ [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -557,13 +560,13 @@ files = [ [[package]] name = "comm" -version = "0.2.0" +version = "0.2.1" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." optional = false python-versions = ">=3.8" files = [ - {file = "comm-0.2.0-py3-none-any.whl", hash = "sha256:2da8d9ebb8dd7bfc247adaff99f24dce705638a8042b85cb995066793e391001"}, - {file = "comm-0.2.0.tar.gz", hash = "sha256:a517ea2ca28931c7007a7a99c562a0fa5883cfb48963140cf642c41c948498be"}, + {file = "comm-0.2.1-py3-none-any.whl", hash = "sha256:87928485c0dfc0e7976fd89fc1e187023cf587e7c353e4a9b417555b44adf021"}, + {file = "comm-0.2.1.tar.gz", hash = "sha256:0bc91edae1344d39d3661dcbc36937181fdaddb304790458f8b044dbc064b89a"}, ] [package.dependencies] @@ -651,63 +654,63 @@ test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] [[package]] name = "coverage" -version = "7.3.4" +version = "7.4.1" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:aff2bd3d585969cc4486bfc69655e862028b689404563e6b549e6a8244f226df"}, - {file = "coverage-7.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4353923f38d752ecfbd3f1f20bf7a3546993ae5ecd7c07fd2f25d40b4e54571"}, - {file = "coverage-7.3.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea473c37872f0159294f7073f3fa72f68b03a129799f3533b2bb44d5e9fa4f82"}, - {file = "coverage-7.3.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5214362abf26e254d749fc0c18af4c57b532a4bfde1a057565616dd3b8d7cc94"}, - {file = "coverage-7.3.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f99b7d3f7a7adfa3d11e3a48d1a91bb65739555dd6a0d3fa68aa5852d962e5b1"}, - {file = "coverage-7.3.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:74397a1263275bea9d736572d4cf338efaade2de9ff759f9c26bcdceb383bb49"}, - {file = "coverage-7.3.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f154bd866318185ef5865ace5be3ac047b6d1cc0aeecf53bf83fe846f4384d5d"}, - {file = "coverage-7.3.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e0d84099ea7cba9ff467f9c6f747e3fc3906e2aadac1ce7b41add72e8d0a3712"}, - {file = "coverage-7.3.4-cp310-cp310-win32.whl", hash = "sha256:3f477fb8a56e0c603587b8278d9dbd32e54bcc2922d62405f65574bd76eba78a"}, - {file = "coverage-7.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:c75738ce13d257efbb6633a049fb2ed8e87e2e6c2e906c52d1093a4d08d67c6b"}, - {file = "coverage-7.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:997aa14b3e014339d8101b9886063c5d06238848905d9ad6c6eabe533440a9a7"}, - {file = "coverage-7.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a9c5bc5db3eb4cd55ecb8397d8e9b70247904f8eca718cc53c12dcc98e59fc8"}, - {file = "coverage-7.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:27ee94f088397d1feea3cb524e4313ff0410ead7d968029ecc4bc5a7e1d34fbf"}, - {file = "coverage-7.3.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ce03e25e18dd9bf44723e83bc202114817f3367789052dc9e5b5c79f40cf59d"}, - {file = "coverage-7.3.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85072e99474d894e5df582faec04abe137b28972d5e466999bc64fc37f564a03"}, - {file = "coverage-7.3.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a877810ef918d0d345b783fc569608804f3ed2507bf32f14f652e4eaf5d8f8d0"}, - {file = "coverage-7.3.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9ac17b94ab4ca66cf803f2b22d47e392f0977f9da838bf71d1f0db6c32893cb9"}, - {file = "coverage-7.3.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:36d75ef2acab74dc948d0b537ef021306796da551e8ac8b467810911000af66a"}, - {file = "coverage-7.3.4-cp311-cp311-win32.whl", hash = "sha256:47ee56c2cd445ea35a8cc3ad5c8134cb9bece3a5cb50bb8265514208d0a65928"}, - {file = "coverage-7.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:11ab62d0ce5d9324915726f611f511a761efcca970bd49d876cf831b4de65be5"}, - {file = "coverage-7.3.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:33e63c578f4acce1b6cd292a66bc30164495010f1091d4b7529d014845cd9bee"}, - {file = "coverage-7.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:782693b817218169bfeb9b9ba7f4a9f242764e180ac9589b45112571f32a0ba6"}, - {file = "coverage-7.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c4277ddaad9293454da19121c59f2d850f16bcb27f71f89a5c4836906eb35ef"}, - {file = "coverage-7.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d892a19ae24b9801771a5a989fb3e850bd1ad2e2b6e83e949c65e8f37bc67a1"}, - {file = "coverage-7.3.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3024ec1b3a221bd10b5d87337d0373c2bcaf7afd86d42081afe39b3e1820323b"}, - {file = "coverage-7.3.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a1c3e9d2bbd6f3f79cfecd6f20854f4dc0c6e0ec317df2b265266d0dc06535f1"}, - {file = "coverage-7.3.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e91029d7f151d8bf5ab7d8bfe2c3dbefd239759d642b211a677bc0709c9fdb96"}, - {file = "coverage-7.3.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6879fe41c60080aa4bb59703a526c54e0412b77e649a0d06a61782ecf0853ee1"}, - {file = "coverage-7.3.4-cp312-cp312-win32.whl", hash = "sha256:fd2f8a641f8f193968afdc8fd1697e602e199931012b574194052d132a79be13"}, - {file = "coverage-7.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:d1d0ce6c6947a3a4aa5479bebceff2c807b9f3b529b637e2b33dea4468d75fc7"}, - {file = "coverage-7.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:36797b3625d1da885b369bdaaa3b0d9fb8865caed3c2b8230afaa6005434aa2f"}, - {file = "coverage-7.3.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfed0ec4b419fbc807dec417c401499ea869436910e1ca524cfb4f81cf3f60e7"}, - {file = "coverage-7.3.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f97ff5a9fc2ca47f3383482858dd2cb8ddbf7514427eecf5aa5f7992d0571429"}, - {file = "coverage-7.3.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:607b6c6b35aa49defaebf4526729bd5238bc36fe3ef1a417d9839e1d96ee1e4c"}, - {file = 
"coverage-7.3.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8e258dcc335055ab59fe79f1dec217d9fb0cdace103d6b5c6df6b75915e7959"}, - {file = "coverage-7.3.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a02ac7c51819702b384fea5ee033a7c202f732a2a2f1fe6c41e3d4019828c8d3"}, - {file = "coverage-7.3.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b710869a15b8caf02e31d16487a931dbe78335462a122c8603bb9bd401ff6fb2"}, - {file = "coverage-7.3.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c6a23ae9348a7a92e7f750f9b7e828448e428e99c24616dec93a0720342f241d"}, - {file = "coverage-7.3.4-cp38-cp38-win32.whl", hash = "sha256:758ebaf74578b73f727acc4e8ab4b16ab6f22a5ffd7dd254e5946aba42a4ce76"}, - {file = "coverage-7.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:309ed6a559bc942b7cc721f2976326efbfe81fc2b8f601c722bff927328507dc"}, - {file = "coverage-7.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aefbb29dc56317a4fcb2f3857d5bce9b881038ed7e5aa5d3bcab25bd23f57328"}, - {file = "coverage-7.3.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:183c16173a70caf92e2dfcfe7c7a576de6fa9edc4119b8e13f91db7ca33a7923"}, - {file = "coverage-7.3.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a4184dcbe4f98d86470273e758f1d24191ca095412e4335ff27b417291f5964"}, - {file = "coverage-7.3.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93698ac0995516ccdca55342599a1463ed2e2d8942316da31686d4d614597ef9"}, - {file = "coverage-7.3.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb220b3596358a86361139edce40d97da7458412d412e1e10c8e1970ee8c09ab"}, - {file = "coverage-7.3.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5b14abde6f8d969e6b9dd8c7a013d9a2b52af1235fe7bebef25ad5c8f47fa18"}, - {file = "coverage-7.3.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:610afaf929dc0e09a5eef6981edb6a57a46b7eceff151947b836d869d6d567c1"}, - {file = "coverage-7.3.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d6ed790728fb71e6b8247bd28e77e99d0c276dff952389b5388169b8ca7b1c28"}, - {file = "coverage-7.3.4-cp39-cp39-win32.whl", hash = "sha256:c15fdfb141fcf6a900e68bfa35689e1256a670db32b96e7a931cab4a0e1600e5"}, - {file = "coverage-7.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:38d0b307c4d99a7aca4e00cad4311b7c51b7ac38fb7dea2abe0d182dd4008e05"}, - {file = "coverage-7.3.4-pp38.pp39.pp310-none-any.whl", hash = "sha256:b1e0f25ae99cf247abfb3f0fac7ae25739e4cd96bf1afa3537827c576b4847e5"}, - {file = "coverage-7.3.4.tar.gz", hash = "sha256:020d56d2da5bc22a0e00a5b0d54597ee91ad72446fa4cf1b97c35022f6b6dbf0"}, + {file = "coverage-7.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7"}, + {file = "coverage-7.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61"}, + {file = "coverage-7.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee"}, + {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25"}, + {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19"}, + {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630"}, + {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c"}, + {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b"}, + {file = "coverage-7.4.1-cp310-cp310-win32.whl", hash = "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016"}, + {file = "coverage-7.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018"}, + {file = "coverage-7.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295"}, + {file = "coverage-7.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c"}, + {file = "coverage-7.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676"}, + {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd"}, + {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011"}, + {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74"}, + {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1"}, + {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6"}, + {file = "coverage-7.4.1-cp311-cp311-win32.whl", hash = "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5"}, + {file = "coverage-7.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968"}, + {file = "coverage-7.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581"}, + {file = "coverage-7.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6"}, + {file = "coverage-7.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66"}, + {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156"}, + {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"}, + {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1"}, + {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1"}, + {file = 
"coverage-7.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc"}, + {file = "coverage-7.4.1-cp312-cp312-win32.whl", hash = "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74"}, + {file = "coverage-7.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448"}, + {file = "coverage-7.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218"}, + {file = "coverage-7.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45"}, + {file = "coverage-7.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d"}, + {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06"}, + {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766"}, + {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75"}, + {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60"}, + {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad"}, + {file = "coverage-7.4.1-cp38-cp38-win32.whl", hash = "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042"}, + {file = "coverage-7.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d"}, + {file = "coverage-7.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54"}, + {file = "coverage-7.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70"}, + {file = "coverage-7.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628"}, + {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950"}, + {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1"}, + {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7"}, + {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756"}, + {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35"}, + {file = "coverage-7.4.1-cp39-cp39-win32.whl", hash = "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c"}, + {file = "coverage-7.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a"}, + {file = 
"coverage-7.4.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166"}, + {file = "coverage-7.4.1.tar.gz", hash = "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04"}, ] [package.dependencies] @@ -854,7 +857,6 @@ python-versions = "*" files = [ {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:c5598670f4f31906d725f5ea852f0df675522e3ff5a7bf886057eab36497062d"}, {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:67b6c7427d9ab50cb82e01360948bd1b23d73775b5767ab92071c7afcfec4b8b"}, - {file = "cutensor_cu11-1.7.0-py3-none-win_amd64.whl", hash = "sha256:d173b3d0fd51cf761b371a4d4be9a3afd3ef230a55ae4336ae31e905336480e1"}, ] [[package]] @@ -866,7 +868,6 @@ python-versions = "*" files = [ {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:515caa2406e09ffe9c6524328b7da2106169811665f7684836052753a30dda27"}, {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:29bdde551788fd3a611992026a5bb422831069e38fd44ab920af5aa00cffa12c"}, - {file = "cutensor_cu12-1.7.0-py3-none-win_amd64.whl", hash = "sha256:e1a9a759a615a64d1b8c6d2b8ffd925deb805750c28481b1a8310d05f35ce229"}, ] [[package]] @@ -899,32 +900,32 @@ cutensor-cu12 = ">=1.6.1,<2" [[package]] name = "cvxpy" -version = "1.4.1" +version = "1.4.2" description = "A domain-specific language for modeling convex optimization problems in Python." optional = false python-versions = ">=3.8" files = [ - {file = "cvxpy-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:03588055b660c043848f5281fe24dbd21f005b34bd8bd3b56906d8ad457c14ae"}, - {file = "cvxpy-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:315609ff96adeda4970471b349bc19d44ff4043e15630cf5ac70c029658fe8fc"}, - {file = "cvxpy-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55e08ffb973d62b3fabc675ad464cb6013ea5ce69799f330b33a084a2e580d8d"}, - {file = "cvxpy-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f1482558b785f2db51c76b9c6e91cc85dbd146675b126a799e7d7aab5b15354"}, - {file = "cvxpy-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:2f84687d15d11f9b49ca902f20103a2076efd47773c399cace71237ef53cdadc"}, - {file = "cvxpy-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d6bfbd535fdaabc5fa55f28de7a1d40f3a803a27fe3fec86e90700fa159a3afc"}, - {file = "cvxpy-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:71a95aaccf22431fd25a63bcb12d583e1b0baeaeb4fafa3e25857cec03b9e2f3"}, - {file = "cvxpy-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bae3bf31e4eb6ed6407f78c6bc3c7bc4b4145cdbbb9ba8c61c3fc541d7067"}, - {file = "cvxpy-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41cfaecf86f85162ca53c7be7377b4143e316204fb9b6a7df8b7a08c826e3806"}, - {file = "cvxpy-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:edf66010e49b64d3f2dd1a7abde8fa3e615ce7a2b3eb185ab744b0beb3a6adb9"}, - {file = "cvxpy-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6b0f17dca85b2a410e73f5d84b28f35f57a20cfec1b0adc9b16f0f8aabff9961"}, - {file = "cvxpy-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9318c4e679b3db470e76e7f23cce362b038bd2d68c4a7326a7c21577ddbdc542"}, - {file = "cvxpy-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a46ef722c8d1590875e86360d5781703dfcbd08be73eb98a2fc91a280870064"}, - {file = 
"cvxpy-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57593a852c563ce77bdb075a3e75f23d36d4b3162ebf3199b54cc7fe75088ef2"}, - {file = "cvxpy-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:db89b55025514bad821b1f1781bed373cbb6aa22fe84420431efd510dbe7f858"}, - {file = "cvxpy-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:372c0825cc6e6bb03ecc550d83718761a1bbdbbb48010fec6f9718581ebd45b5"}, - {file = "cvxpy-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163caffd7f7f27b6cb151f4ccff283068e063c3673158793048761690cbe4bbe"}, - {file = "cvxpy-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f24067c54979b09910aea0a03256247121d8a8169538facf087c1923e9e2701a"}, - {file = "cvxpy-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a3ec054279880a9ebf5fd9d2ac4109acf944b8c45ea8b24e461680e34f3d7b5"}, - {file = "cvxpy-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:d220a7ee55907da9b55b98e5238d03735118d03b82855ba87b872cb2e6977367"}, - {file = "cvxpy-1.4.1.tar.gz", hash = "sha256:7a9ef34e3c57ff8c844d86f0a3834fb5575af19233947639de0ba577c6122e3e"}, + {file = "cvxpy-1.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:06231c0b2a65f7c8ba32c2772576c24e93e1ca964444b90c6bad366b9c0a5bdc"}, + {file = "cvxpy-1.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f257971b007261d53ec7f50618f0c6a511387dd7df6cd686d2647c3fa91da0eb"}, + {file = "cvxpy-1.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38c2191d4142baac206ac590ba9e5cb1c6e025ac95d0a746692c9cf8d1afd46e"}, + {file = "cvxpy-1.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9d006f76925127cd42b80e2d98c950a8339f8204b4c23fa25af83d895e95fa"}, + {file = "cvxpy-1.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:2a09ebd8f7a8b6b5d026d03295daee0780e2f6847fbe6f207e9764045ffbbfc9"}, + {file = "cvxpy-1.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:079fe6aeaeec2ddf6163ff8ca6510afd5c2b66ea391605791a77b51e534b935e"}, + {file = "cvxpy-1.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f8419dffcadefc16e6fcbe8a088068c29edb1f28ea90582f075a96f21ae7ff11"}, + {file = "cvxpy-1.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6551ef3b325d707e98f920dd120ebaa968f3ac3484c21f8567f2081967d26f0"}, + {file = "cvxpy-1.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea513f4bf83491a1c9e5366faa4ca9fc21ec9522c30bcd55e49de9bb85fe9a2"}, + {file = "cvxpy-1.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:78560a02607d16fbb26db6306e7ce6d8e4fcda49cf04578d199ac050c2e74daa"}, + {file = "cvxpy-1.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9817cf8da86641e2d322911844e86b8e7b1d93d9b2d57ae6d33e84be430e1e04"}, + {file = "cvxpy-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:32999d550a923c9448d973ef9d3ab75d73e1bdf56102fc32fe7ccb5e0cb5d7a3"}, + {file = "cvxpy-1.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213b465450f4254226e6c18c70e25e911ae2c60176621f1bc2d9a0eb874288db"}, + {file = "cvxpy-1.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec30efa81d1f79f668b0fa6e8ac654047db7a3e844ab16022e1b5dcf52177192"}, + {file = "cvxpy-1.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:779c19be964f7a586337fd4d017c7a0202bf845e08b04a174850f962b45b2a00"}, + {file = "cvxpy-1.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:bb1d6af8406efa1de0408d0a76c248da3185cade49f45c443239772830b7d6bb"}, + {file = "cvxpy-1.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:63102885fdfd3eae716c042ee7aad9439d0b71ba22e5432c85f0e35056fcb159"}, + {file = "cvxpy-1.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20015b82117c0253ca803c4e174010067bda0eedb539503ba58b98e00acdd0f2"}, + {file = "cvxpy-1.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f73ff4f0e7bff1e438dc2b02490d7a8e1027c421057a7971b4ca4982c28d60"}, + {file = "cvxpy-1.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:b7cfc6be34b288acade31b58a1e88b119487165d0ed877db9decf7fd676502f6"}, + {file = "cvxpy-1.4.2.tar.gz", hash = "sha256:0a386a5788dbd78b7b20dd071524ec636c8fa72b3628e69f1abc714c8f9811e5"}, ] [package.dependencies] @@ -1011,17 +1012,18 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "dill" -version = "0.3.7" +version = "0.3.8" description = "serialize all of Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, - {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, ] [package.extras] graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] [[package]] name = "docutils" @@ -1050,27 +1052,30 @@ dev-env = ["black (==22.3.0)", "isort (==5.7.*)", "mypy (==0.931.*)", "pylint (= [[package]] name = "ecos" -version = "2.0.12" +version = "2.0.13" description = "This is the Python package for ECOS: Embedded Cone Solver. See Github page for more information." 
optional = false python-versions = "*" files = [ - {file = "ecos-2.0.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:835298a299c88c207b3402fba60ad9b5688b59bbbf2ac34a46de5b37165d773a"}, - {file = "ecos-2.0.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608bc822ee8e070927ab3519169b13a1a0fe88f3d562212d6b5dbb1039776360"}, - {file = "ecos-2.0.12-cp310-cp310-win_amd64.whl", hash = "sha256:5184a9d8521ad1af90ffcd9902a6fa75c7bc473f37d30d86f97beda1033dfca2"}, - {file = "ecos-2.0.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eba07599084724eedc20b2862d5580eebebb09609f4740baadc78401cb99827c"}, - {file = "ecos-2.0.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4979dc2d1cb6667e371a45a61887068505c1305437eef104ed6ef16f4b6aa0e3"}, - {file = "ecos-2.0.12-cp311-cp311-win_amd64.whl", hash = "sha256:da8fbbca3feb83a9e27075d29b3765417d0c80af8ea83cbdc4a558cae7b564af"}, - {file = "ecos-2.0.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f70e4547966f530fd7715756f7a65d5b9b90b312b9d37f243ef9356c05e7d74c"}, - {file = "ecos-2.0.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:617be25d74222849622b0f82b94a11abcf1fae78ccaf69977b328321ee6ffa0b"}, - {file = "ecos-2.0.12-cp37-cp37m-win_amd64.whl", hash = "sha256:29d00164eaea66ed54697a3b361c575284a8bca54f2623381a0635806c7303a7"}, - {file = "ecos-2.0.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e86671397d1d2cd7cccff8a9c45be0541b0c60af8b92a0ff3581c9ed869db67"}, - {file = "ecos-2.0.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:858a4dd3177bdc8cc6e362031732f5177b62138a1e4ef91c0dc3c6bd7d2d1248"}, - {file = "ecos-2.0.12-cp38-cp38-win_amd64.whl", hash = "sha256:528b02f53835bd1baeb2e23f8153b8d6cc2b3704e1768be6a1a972f542241670"}, - {file = "ecos-2.0.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e42bd4c19af6e04f76ccc85d941b1f1adc7faeee4d06d482395a6beb7bec895"}, - {file = "ecos-2.0.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6def54336a15b5a49bc3bfcaa36035e8557cae8a4853b17ca84f5a29c93bcaea"}, - {file = "ecos-2.0.12-cp39-cp39-win_amd64.whl", hash = "sha256:7af08941552fce108bd80145cdb6be7fa74477a20bacdac170800442cc7027d4"}, - {file = "ecos-2.0.12.tar.gz", hash = "sha256:f48816d73b87ae325556ea537b7c8743187311403c80e3832035224156337c4e"}, + {file = "ecos-2.0.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1c1acf33b70f8657c25f07ec8d7b59bb01dbad39f072fa61fc956c2166ed979"}, + {file = "ecos-2.0.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ea88ee2c94192004d6be9c55a3c79f184eaba3bbf31474229045a1b0a8a1536"}, + {file = "ecos-2.0.13-cp310-cp310-win_amd64.whl", hash = "sha256:df8ae7fce79be9e5f79f0511c51a4824795de5154847fabe1a0288bc2ea349d3"}, + {file = "ecos-2.0.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88dd628bc6e77a069165fa5f50340e2856795c28e00e3fce213a04d7c41c584a"}, + {file = "ecos-2.0.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b2c969c7e22fd8a1d1cd0a90f4325d90572da23e2e923b0da6138ce62503d0"}, + {file = "ecos-2.0.13-cp311-cp311-win_amd64.whl", hash = "sha256:936890fb85a186360a5c8f228dd19acb760e234b38c598d0b46ab29644e31dfc"}, + {file = "ecos-2.0.13-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:62ed497ab56017f1d7264eb56223826a984462b1d84fb850d10f0bec3490877d"}, + {file = "ecos-2.0.13-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:cf2b1384012bee9e58e5a2373905d3644f74a0ea000b307a239366fe7850c29c"}, + {file = "ecos-2.0.13-cp312-cp312-win_amd64.whl", hash = "sha256:2c1ea09069e32185912506f946bb6d1f144841ba1d1cd0217c67f72cbdf7a8fd"}, + {file = "ecos-2.0.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b9f4a76a3e1165359e1704ec6b1b89d487858ec0d838d62a7268133d88221914"}, + {file = "ecos-2.0.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c2d4e0d3ada1a619ddd62fbf48ccbe9b738fdbef119945fe2a05566d03b85a"}, + {file = "ecos-2.0.13-cp37-cp37m-win_amd64.whl", hash = "sha256:84c72e1e5ffa41cd38352dcf0a8c25418f5bf04ed76a576db0daaf9a69f5568f"}, + {file = "ecos-2.0.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1979f1f17ec7f1a0fc45964d02d762393f9f427d965fe8a893e7b1476a9023c3"}, + {file = "ecos-2.0.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:059e6c29c89f47a490353e4f9336e96350a5102a97e1d8a2aaff796bcbe50058"}, + {file = "ecos-2.0.13-cp38-cp38-win_amd64.whl", hash = "sha256:30c7d0cce6c830da5b9ea25af0d47b203255639524eb4d03d1331c600958c834"}, + {file = "ecos-2.0.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42c15f1d79eb2ada532e9781b4aeb3ed84b1c7e38239ba4d6502c6a092d5b1"}, + {file = "ecos-2.0.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32fb33185f6dd94a1c798bc481eb86c9f4e832efec91f6ab0584e2fc26fd375e"}, + {file = "ecos-2.0.13-cp39-cp39-win_amd64.whl", hash = "sha256:68995ab12d363576dddb2d1f91ead3b9c8a8ca61f29000f0b1daef1b4e7b5b64"}, + {file = "ecos-2.0.13.tar.gz", hash = "sha256:f2a9dc108ade7faf6f6f4fad245f4714b7293c8767d2a351ead59428a94a98b9"}, ] [package.dependencies] @@ -1122,13 +1127,13 @@ pyrepl = ">=0.8.2" [[package]] name = "fastjsonschema" -version = "2.19.0" +version = "2.19.1" description = "Fastest Python implementation of JSON schema" optional = false python-versions = "*" files = [ - {file = "fastjsonschema-2.19.0-py3-none-any.whl", hash = "sha256:b9fd1a2dd6971dbc7fee280a95bd199ae0dd9ce22beb91cc75e9c1c528a5170e"}, - {file = "fastjsonschema-2.19.0.tar.gz", hash = "sha256:e25df6647e1bc4a26070b700897b07b542ec898dd4f1f6ea013e7f6a88417225"}, + {file = "fastjsonschema-2.19.1-py3-none-any.whl", hash = "sha256:3672b47bc94178c9f23dbb654bf47440155d4db9df5f7bc47643315f9c405cd0"}, + {file = "fastjsonschema-2.19.1.tar.gz", hash = "sha256:e3126a94bdc4623d3de4485f8d468a12f02a67921315ddc87836d6e456dc789d"}, ] [package.extras] @@ -1218,6 +1223,22 @@ files = [ {file = "fastrlock-0.8.2.tar.gz", hash = "sha256:644ec9215cf9c4df8028d8511379a15d9c1af3e16d80e47f1b6fdc6ba118356a"}, ] +[[package]] +name = "filelock" +version = "3.13.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + [[package]] name = "flatbuffers" version = "23.5.26" @@ -1231,60 +1252,60 @@ files = [ [[package]] name = "fonttools" -version = "4.47.0" +version = "4.48.1" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d2404107626f97a221dc1a65b05396d2bb2ce38e435f64f26ed2369f68675d9"}, - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c01f409be619a9a0f5590389e37ccb58b47264939f0e8d58bfa1f3ba07d22671"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d986b66ff722ef675b7ee22fbe5947a41f60a61a4da15579d5e276d897fbc7fa"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8acf6dd0434b211b3bd30d572d9e019831aae17a54016629fa8224783b22df8"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:495369c660e0c27233e3c572269cbe520f7f4978be675f990f4005937337d391"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59227d7ba5b232281c26ae04fac2c73a79ad0e236bca5c44aae904a18f14faf"}, - {file = "fonttools-4.47.0-cp310-cp310-win32.whl", hash = "sha256:59a6c8b71a245800e923cb684a2dc0eac19c56493e2f896218fcf2571ed28984"}, - {file = "fonttools-4.47.0-cp310-cp310-win_amd64.whl", hash = "sha256:52c82df66201f3a90db438d9d7b337c7c98139de598d0728fb99dab9fd0495ca"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:854421e328d47d70aa5abceacbe8eef231961b162c71cbe7ff3f47e235e2e5c5"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:511482df31cfea9f697930f61520f6541185fa5eeba2fa760fe72e8eee5af88b"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0e2c88c8c985b7b9a7efcd06511fb0a1fe3ddd9a6cd2895ef1dbf9059719d7"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7a0a8848726956e9d9fb18c977a279013daadf0cbb6725d2015a6dd57527992"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e869da810ae35afb3019baa0d0306cdbab4760a54909c89ad8904fa629991812"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dd23848f877c3754f53a4903fb7a593ed100924f9b4bff7d5a4e2e8a7001ae11"}, - {file = "fonttools-4.47.0-cp311-cp311-win32.whl", hash = "sha256:bf1810635c00f7c45d93085611c995fc130009cec5abdc35b327156aa191f982"}, - {file = "fonttools-4.47.0-cp311-cp311-win_amd64.whl", hash = "sha256:61df4dee5d38ab65b26da8efd62d859a1eef7a34dcbc331299a28e24d04c59a7"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e3f4d61f3a8195eac784f1d0c16c0a3105382c1b9a74d99ac4ba421da39a8826"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:174995f7b057e799355b393e97f4f93ef1f2197cbfa945e988d49b2a09ecbce8"}, - {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea592e6a09b71cb7a7661dd93ac0b877a6228e2d677ebacbad0a4d118494c86d"}, - {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40bdbe90b33897d9cc4a39f8e415b0fcdeae4c40a99374b8a4982f127ff5c767"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:843509ae9b93db5aaf1a6302085e30bddc1111d31e11d724584818f5b698f500"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9acfa1cdc479e0dde528b61423855913d949a7f7fe09e276228298fef4589540"}, - {file = "fonttools-4.47.0-cp312-cp312-win32.whl", hash = "sha256:66c92ec7f95fd9732550ebedefcd190a8d81beaa97e89d523a0d17198a8bda4d"}, - {file = "fonttools-4.47.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8fa20748de55d0021f83754b371432dca0439e02847962fc4c42a0e444c2d78"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c75e19971209fbbce891ebfd1b10c37320a5a28e8d438861c21d35305aedb81c"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e79f1a3970d25f692bbb8c8c2637e621a66c0d60c109ab48d4a160f50856deff"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:562681188c62c024fe2c611b32e08b8de2afa00c0c4e72bed47c47c318e16d5c"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a77a60315c33393b2bd29d538d1ef026060a63d3a49a9233b779261bad9c3f71"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4fabb8cc9422efae1a925160083fdcbab8fdc96a8483441eb7457235df625bd"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a78dba8c2a1e9d53a0fb5382979f024200dc86adc46a56cbb668a2249862fda"}, - {file = "fonttools-4.47.0-cp38-cp38-win32.whl", hash = "sha256:e6b968543fde4119231c12c2a953dcf83349590ca631ba8216a8edf9cd4d36a9"}, - {file = "fonttools-4.47.0-cp38-cp38-win_amd64.whl", hash = "sha256:4a9a51745c0439516d947480d4d884fa18bd1458e05b829e482b9269afa655bc"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:62d8ddb058b8e87018e5dc26f3258e2c30daad4c87262dfeb0e2617dd84750e6"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dde0eab40faaa5476133123f6a622a1cc3ac9b7af45d65690870620323308b4"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4da089f6dfdb822293bde576916492cd708c37c2501c3651adde39804630538"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:253bb46bab970e8aae254cebf2ae3db98a4ef6bd034707aa68a239027d2b198d"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1193fb090061efa2f9e2d8d743ae9850c77b66746a3b32792324cdce65784154"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:084511482dd265bce6dca24c509894062f0117e4e6869384d853f46c0e6d43be"}, - {file = "fonttools-4.47.0-cp39-cp39-win32.whl", hash = "sha256:97620c4af36e4c849e52661492e31dc36916df12571cb900d16960ab8e92a980"}, - {file = "fonttools-4.47.0-cp39-cp39-win_amd64.whl", hash = "sha256:e77bdf52185bdaf63d39f3e1ac3212e6cfa3ab07d509b94557a8902ce9c13c82"}, - {file = "fonttools-4.47.0-py3-none-any.whl", hash = 
"sha256:d6477ba902dd2d7adda7f0fd3bfaeb92885d45993c9e1928c9f28fc3961415f7"}, - {file = "fonttools-4.47.0.tar.gz", hash = "sha256:ec13a10715eef0e031858c1c23bfaee6cba02b97558e4a7bfa089dba4a8c2ebf"}, + {file = "fonttools-4.48.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:702ae93058c81f46461dc4b2c79f11d3c3d8fd7296eaf8f75b4ba5bbf813cd5f"}, + {file = "fonttools-4.48.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97f0a49fa6aa2d6205c6f72f4f98b74ef4b9bfdcb06fd78e6fe6c7af4989b63e"}, + {file = "fonttools-4.48.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3260db55f1843e57115256e91247ad9f68cb02a434b51262fe0019e95a98738"}, + {file = "fonttools-4.48.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e740a7602c2bb71e1091269b5dbe89549749a8817dc294b34628ffd8b2bf7124"}, + {file = "fonttools-4.48.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4108b1d247953dd7c90ec8f457a2dec5fceb373485973cc852b14200118a51ee"}, + {file = "fonttools-4.48.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56339ec557f0c342bddd7c175f5e41c45fc21282bee58a86bd9aa322bec715f2"}, + {file = "fonttools-4.48.1-cp310-cp310-win32.whl", hash = "sha256:bff5b38d0e76eb18e0b8abbf35d384e60b3371be92f7be36128ee3e67483b3ec"}, + {file = "fonttools-4.48.1-cp310-cp310-win_amd64.whl", hash = "sha256:f7449493886da6a17472004d3818cc050ba3f4a0aa03fb47972e4fa5578e6703"}, + {file = "fonttools-4.48.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:18b35fd1a850ed7233a99bbd6774485271756f717dac8b594958224b54118b61"}, + {file = "fonttools-4.48.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cad5cfd044ea2e306fda44482b3dd32ee47830fa82dfa4679374b41baa294f5f"}, + {file = "fonttools-4.48.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f30e605c7565d0da6f0aec75a30ec372072d016957cd8fc4469721a36ea59b7"}, + {file = "fonttools-4.48.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aee76fd81a8571c68841d6ef0da750d5ff08ff2c5f025576473016f16ac3bcf7"}, + {file = "fonttools-4.48.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5057ade278e67923000041e2b195c9ea53e87f227690d499b6a4edd3702f7f01"}, + {file = "fonttools-4.48.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b10633aafc5932995a391ec07eba5e79f52af0003a1735b2306b3dab8a056d48"}, + {file = "fonttools-4.48.1-cp311-cp311-win32.whl", hash = "sha256:0d533f89819f9b3ee2dbedf0fed3825c425850e32bdda24c558563c71be0064e"}, + {file = "fonttools-4.48.1-cp311-cp311-win_amd64.whl", hash = "sha256:d20588466367f05025bb1efdf4e5d498ca6d14bde07b6928b79199c588800f0a"}, + {file = "fonttools-4.48.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0a2417547462e468edf35b32e3dd06a6215ac26aa6316b41e03b8eeaf9f079ea"}, + {file = "fonttools-4.48.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cf5a0cd974f85a80b74785db2d5c3c1fd6cc09a2ba3c837359b2b5da629ee1b0"}, + {file = "fonttools-4.48.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0452fcbfbce752ba596737a7c5ec5cf76bc5f83847ce1781f4f90eab14ece252"}, + {file = "fonttools-4.48.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578c00f93868f64a4102ecc5aa600a03b49162c654676c3fadc33de2ddb88a81"}, + {file = "fonttools-4.48.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:63dc592a16cd08388d8c4c7502b59ac74190b23e16dfc863c69fe1ea74605b68"}, + {file = "fonttools-4.48.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:9b58638d8a85e3a1b32ec0a91d9f8171a877b4b81c408d4cb3257d0dee63e092"}, + {file = "fonttools-4.48.1-cp312-cp312-win32.whl", hash = "sha256:d10979ef14a8beaaa32f613bb698743f7241d92f437a3b5e32356dfb9769c65d"}, + {file = "fonttools-4.48.1-cp312-cp312-win_amd64.whl", hash = "sha256:cdfd7557d1bd294a200bd211aa665ca3b02998dcc18f8211a5532da5b8fad5c5"}, + {file = "fonttools-4.48.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3cdb9a92521b81bf717ebccf592bd0292e853244d84115bfb4db0c426de58348"}, + {file = "fonttools-4.48.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b4ec6d42a7555f5ae35f3b805482f0aad0f1baeeef54859492ea3b782959d4a"}, + {file = "fonttools-4.48.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:902e9c4e9928301912f34a6638741b8ae0b64824112b42aaf240e06b735774b1"}, + {file = "fonttools-4.48.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8c8b54bd1420c184a995f980f1a8076f87363e2bb24239ef8c171a369d85a31"}, + {file = "fonttools-4.48.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:12ee86abca46193359ea69216b3a724e90c66ab05ab220d39e3fc068c1eb72ac"}, + {file = "fonttools-4.48.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6978bade7b6c0335095bdd0bd97f8f3d590d2877b370f17e03e0865241694eb5"}, + {file = "fonttools-4.48.1-cp38-cp38-win32.whl", hash = "sha256:bcd77f89fc1a6b18428e7a55dde8ef56dae95640293bfb8f4e929929eba5e2a2"}, + {file = "fonttools-4.48.1-cp38-cp38-win_amd64.whl", hash = "sha256:f40441437b039930428e04fb05ac3a132e77458fb57666c808d74a556779e784"}, + {file = "fonttools-4.48.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0d2b01428f7da26f229a5656defc824427b741e454b4e210ad2b25ed6ea2aed4"}, + {file = "fonttools-4.48.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:df48798f9a4fc4c315ab46e17873436c8746f5df6eddd02fad91299b2af7af95"}, + {file = "fonttools-4.48.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2eb4167bde04e172a93cf22c875d8b0cff76a2491f67f5eb069566215302d45d"}, + {file = "fonttools-4.48.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c900508c46274d32d308ae8e82335117f11aaee1f7d369ac16502c9a78930b0a"}, + {file = "fonttools-4.48.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:594206b31c95fcfa65f484385171fabb4ec69f7d2d7f56d27f17db26b7a31814"}, + {file = "fonttools-4.48.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:292922dc356d7f11f5063b4111a8b719efb8faea92a2a88ed296408d449d8c2e"}, + {file = "fonttools-4.48.1-cp39-cp39-win32.whl", hash = "sha256:4709c5bf123ba10eac210d2d5c9027d3f472591d9f1a04262122710fa3d23199"}, + {file = "fonttools-4.48.1-cp39-cp39-win_amd64.whl", hash = "sha256:63c73b9dd56a94a3cbd2f90544b5fca83666948a9e03370888994143b8d7c070"}, + {file = "fonttools-4.48.1-py3-none-any.whl", hash = "sha256:e3e33862fc5261d46d9aae3544acb36203b1a337d00bdb5d3753aae50dac860e"}, + {file = "fonttools-4.48.1.tar.gz", hash = "sha256:8b8a45254218679c7f1127812761e7854ed5c8e34349aebf581e8c9204e7495a"}, ] [package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli 
(>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0,<5)"] +lxml = ["lxml (>=4.0)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] repacker = ["uharfbuzz (>=0.23.0)"] @@ -1294,6 +1315,41 @@ ufo = ["fs (>=2.2.0,<3)"] unicode = ["unicodedata2 (>=15.1.0)"] woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] +[[package]] +name = "fsspec" +version = "2024.2.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + [[package]] name = "furo" version = "2022.12.7" @@ -1334,13 +1390,13 @@ files = [ [[package]] name = "google-api-core" -version = "2.15.0" +version = "2.16.2" description = "Google API client core library" optional = false python-versions = ">=3.7" files = [ - {file = "google-api-core-2.15.0.tar.gz", hash = "sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca"}, - {file = "google_api_core-2.15.0-py3-none-any.whl", hash = "sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a"}, + {file = "google-api-core-2.16.2.tar.gz", hash = "sha256:032d37b45d1d6bdaf68fb11ff621e2593263a239fa9246e2e94325f9c47876d2"}, + {file = "google_api_core-2.16.2-py3-none-any.whl", hash = "sha256:449ca0e3f14c179b4165b664256066c7861610f70b6ffe54bb01a04e9b466929"}, ] [package.dependencies] @@ -1364,13 +1420,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-auth" -version = "2.25.2" +version = "2.27.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"}, - {file = "google_auth-2.25.2-py2.py3-none-any.whl", hash = "sha256:473a8dfd0135f75bb79d878436e568f2695dce456764bf3a02b6f8c540b1d256"}, + {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, + {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, ] [package.dependencies] @@ -1437,84 +1493,84 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "grpcio" -version = "1.60.0" +version = "1.60.1" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"}, - {file = 
"grpcio-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae"}, - {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508"}, - {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b"}, - {file = "grpcio-1.60.0-cp310-cp310-win32.whl", hash = "sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d"}, - {file = "grpcio-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df"}, - {file = "grpcio-1.60.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd"}, - {file = "grpcio-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253"}, - {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444"}, - {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d"}, - {file = "grpcio-1.60.0-cp311-cp311-win32.whl", hash = "sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320"}, - {file = "grpcio-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b"}, - {file = "grpcio-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18"}, - {file = "grpcio-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55"}, - {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca"}, - {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5"}, - {file = "grpcio-1.60.0-cp312-cp312-win32.whl", hash = "sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951"}, - {file = "grpcio-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a"}, - {file = "grpcio-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415"}, - {file = "grpcio-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6"}, - {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619"}, - {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179"}, - {file = "grpcio-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b"}, - {file = "grpcio-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e"}, - {file = "grpcio-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0"}, - {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390"}, - {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629"}, - {file = "grpcio-1.60.0-cp38-cp38-win32.whl", hash = "sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860"}, - {file = "grpcio-1.60.0-cp38-cp38-win_amd64.whl", hash = "sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08"}, - {file = "grpcio-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968"}, - {file = "grpcio-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328"}, - {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf"}, - {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6"}, - {file = "grpcio-1.60.0-cp39-cp39-win32.whl", hash = "sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03"}, - {file = "grpcio-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353"}, - {file = "grpcio-1.60.0.tar.gz", hash = "sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96"}, + {file = "grpcio-1.60.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:14e8f2c84c0832773fb3958240c69def72357bc11392571f87b2d7b91e0bb092"}, + {file = "grpcio-1.60.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:33aed0a431f5befeffd9d346b0fa44b2c01aa4aeae5ea5b2c03d3e25e0071216"}, + {file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:fead980fbc68512dfd4e0c7b1f5754c2a8e5015a04dea454b9cada54a8423525"}, + {file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:082081e6a36b6eb5cf0fd9a897fe777dbb3802176ffd08e3ec6567edd85bc104"}, + {file = "grpcio-1.60.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55ccb7db5a665079d68b5c7c86359ebd5ebf31a19bc1a91c982fd622f1e31ff2"}, + {file = "grpcio-1.60.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b54577032d4f235452f77a83169b6527bf4b77d73aeada97d45b2aaf1bf5ce0"}, + {file = "grpcio-1.60.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7d142bcd604166417929b071cd396aa13c565749a4c840d6c702727a59d835eb"}, + {file = "grpcio-1.60.1-cp310-cp310-win32.whl", hash = "sha256:2a6087f234cb570008a6041c8ffd1b7d657b397fdd6d26e83d72283dae3527b1"}, + {file = "grpcio-1.60.1-cp310-cp310-win_amd64.whl", hash = "sha256:f2212796593ad1d0235068c79836861f2201fc7137a99aa2fea7beeb3b101177"}, + {file = "grpcio-1.60.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:79ae0dc785504cb1e1788758c588c711f4e4a0195d70dff53db203c95a0bd303"}, + {file = "grpcio-1.60.1-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4eec8b8c1c2c9b7125508ff7c89d5701bf933c99d3910e446ed531cd16ad5d87"}, + {file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:8c9554ca8e26241dabe7951aa1fa03a1ba0856688ecd7e7bdbdd286ebc272e4c"}, + {file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91422ba785a8e7a18725b1dc40fbd88f08a5bb4c7f1b3e8739cab24b04fa8a03"}, + {file = "grpcio-1.60.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba6209c96828711cb7c8fcb45ecef8c8859238baf15119daa1bef0f6c84bfe7"}, + {file = "grpcio-1.60.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c71be3f86d67d8d1311c6076a4ba3b75ba5703c0b856b4e691c9097f9b1e8bd2"}, + {file = "grpcio-1.60.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5ef6cfaf0d023c00002ba25d0751e5995fa0e4c9eec6cd263c30352662cbce"}, + {file = "grpcio-1.60.1-cp311-cp311-win32.whl", hash = "sha256:a09506eb48fa5493c58f946c46754ef22f3ec0df64f2b5149373ff31fb67f3dd"}, + {file = "grpcio-1.60.1-cp311-cp311-win_amd64.whl", hash = "sha256:49c9b6a510e3ed8df5f6f4f3c34d7fbf2d2cae048ee90a45cd7415abab72912c"}, + {file = "grpcio-1.60.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:b58b855d0071575ea9c7bc0d84a06d2edfbfccec52e9657864386381a7ce1ae9"}, + {file 
= "grpcio-1.60.1-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:a731ac5cffc34dac62053e0da90f0c0b8560396a19f69d9703e88240c8f05858"}, + {file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:cf77f8cf2a651fbd869fbdcb4a1931464189cd210abc4cfad357f1cacc8642a6"}, + {file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c557e94e91a983e5b1e9c60076a8fd79fea1e7e06848eb2e48d0ccfb30f6e073"}, + {file = "grpcio-1.60.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:069fe2aeee02dfd2135d562d0663fe70fbb69d5eed6eb3389042a7e963b54de8"}, + {file = "grpcio-1.60.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb0af13433dbbd1c806e671d81ec75bd324af6ef75171fd7815ca3074fe32bfe"}, + {file = "grpcio-1.60.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2f44c32aef186bbba254129cea1df08a20be414144ac3bdf0e84b24e3f3b2e05"}, + {file = "grpcio-1.60.1-cp312-cp312-win32.whl", hash = "sha256:a212e5dea1a4182e40cd3e4067ee46be9d10418092ce3627475e995cca95de21"}, + {file = "grpcio-1.60.1-cp312-cp312-win_amd64.whl", hash = "sha256:6e490fa5f7f5326222cb9f0b78f207a2b218a14edf39602e083d5f617354306f"}, + {file = "grpcio-1.60.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:4216e67ad9a4769117433814956031cb300f85edc855252a645a9a724b3b6594"}, + {file = "grpcio-1.60.1-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:73e14acd3d4247169955fae8fb103a2b900cfad21d0c35f0dcd0fdd54cd60367"}, + {file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:6ecf21d20d02d1733e9c820fb5c114c749d888704a7ec824b545c12e78734d1c"}, + {file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33bdea30dcfd4f87b045d404388469eb48a48c33a6195a043d116ed1b9a0196c"}, + {file = "grpcio-1.60.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53b69e79d00f78c81eecfb38f4516080dc7f36a198b6b37b928f1c13b3c063e9"}, + {file = "grpcio-1.60.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:39aa848794b887120b1d35b1b994e445cc028ff602ef267f87c38122c1add50d"}, + {file = "grpcio-1.60.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:72153a0d2e425f45b884540a61c6639436ddafa1829a42056aa5764b84108b8e"}, + {file = "grpcio-1.60.1-cp37-cp37m-win_amd64.whl", hash = "sha256:50d56280b482875d1f9128ce596e59031a226a8b84bec88cb2bf76c289f5d0de"}, + {file = "grpcio-1.60.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:6d140bdeb26cad8b93c1455fa00573c05592793c32053d6e0016ce05ba267549"}, + {file = "grpcio-1.60.1-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:bc808924470643b82b14fe121923c30ec211d8c693e747eba8a7414bc4351a23"}, + {file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:70c83bb530572917be20c21f3b6be92cd86b9aecb44b0c18b1d3b2cc3ae47df0"}, + {file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9b106bc52e7f28170e624ba61cc7dc6829566e535a6ec68528f8e1afbed1c41f"}, + {file = "grpcio-1.60.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e980cd6db1088c144b92fe376747328d5554bc7960ce583ec7b7d81cd47287"}, + {file = "grpcio-1.60.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0c5807e9152eff15f1d48f6b9ad3749196f79a4a050469d99eecb679be592acc"}, + {file = "grpcio-1.60.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f1c3dc536b3ee124e8b24feb7533e5c70b9f2ef833e3b2e5513b2897fd46763a"}, + {file = "grpcio-1.60.1-cp38-cp38-win32.whl", hash = 
"sha256:d7404cebcdb11bb5bd40bf94131faf7e9a7c10a6c60358580fe83913f360f929"}, + {file = "grpcio-1.60.1-cp38-cp38-win_amd64.whl", hash = "sha256:c8754c75f55781515a3005063d9a05878b2cfb3cb7e41d5401ad0cf19de14872"}, + {file = "grpcio-1.60.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:0250a7a70b14000fa311de04b169cc7480be6c1a769b190769d347939d3232a8"}, + {file = "grpcio-1.60.1-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:660fc6b9c2a9ea3bb2a7e64ba878c98339abaf1811edca904ac85e9e662f1d73"}, + {file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:76eaaba891083fcbe167aa0f03363311a9f12da975b025d30e94b93ac7a765fc"}, + {file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d97c65ea7e097056f3d1ead77040ebc236feaf7f71489383d20f3b4c28412a"}, + {file = "grpcio-1.60.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb2a2911b028f01c8c64d126f6b632fcd8a9ac975aa1b3855766c94e4107180"}, + {file = "grpcio-1.60.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:5a1ebbae7e2214f51b1f23b57bf98eeed2cf1ba84e4d523c48c36d5b2f8829ff"}, + {file = "grpcio-1.60.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a66f4d2a005bc78e61d805ed95dedfcb35efa84b7bba0403c6d60d13a3de2d6"}, + {file = "grpcio-1.60.1-cp39-cp39-win32.whl", hash = "sha256:8d488fbdbf04283f0d20742b64968d44825617aa6717b07c006168ed16488804"}, + {file = "grpcio-1.60.1-cp39-cp39-win_amd64.whl", hash = "sha256:61b7199cd2a55e62e45bfb629a35b71fc2c0cb88f686a047f25b1112d3810904"}, + {file = "grpcio-1.60.1.tar.gz", hash = "sha256:dd1d3a8d1d2e50ad9b59e10aa7f07c7d1be2b367f3f2d33c5fade96ed5460962"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.60.0)"] +protobuf = ["grpcio-tools (>=1.60.1)"] [[package]] name = "grpcio-status" -version = "1.60.0" +version = "1.60.1" description = "Status proto mapping for gRPC" optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-status-1.60.0.tar.gz", hash = "sha256:f10e0b6db3adc0fdc244b71962814ee982996ef06186446b5695b9fa635aa1ab"}, - {file = "grpcio_status-1.60.0-py3-none-any.whl", hash = "sha256:7d383fa36e59c1e61d380d91350badd4d12ac56e4de2c2b831b050362c3c572e"}, + {file = "grpcio-status-1.60.1.tar.gz", hash = "sha256:61b5aab8989498e8aa142c20b88829ea5d90d18c18c853b9f9e6d407d37bf8b4"}, + {file = "grpcio_status-1.60.1-py3-none-any.whl", hash = "sha256:3034fdb239185b6e0f3169d08c268c4507481e4b8a434c21311a03d9eb5889a0"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.60.0" +grpcio = ">=1.60.1" protobuf = ">=4.21.6" [[package]] @@ -1660,13 +1716,13 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.0" +version = "7.0.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"}, - {file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"}, + {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, + {file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, ] [package.dependencies] @@ -1880,13 +1936,13 @@ test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pyt [[package]] name = "jupyter-core" -version = "5.5.1" +version = "5.7.1" description = "Jupyter core package. 
A base package on which Jupyter projects rely." optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_core-5.5.1-py3-none-any.whl", hash = "sha256:220dfb00c45f0d780ce132bb7976b58263f81a3ada6e90a9b6823785a424f739"}, - {file = "jupyter_core-5.5.1.tar.gz", hash = "sha256:1553311a97ccd12936037f36b9ab4d6ae8ceea6ad2d5c90d94a909e752178e40"}, + {file = "jupyter_core-5.7.1-py3-none-any.whl", hash = "sha256:c65c82126453a723a2804aa52409930434598fd9d35091d63dfb919d2b765bb7"}, + {file = "jupyter_core-5.7.1.tar.gz", hash = "sha256:de61a9d7fc71240f688b2fb5ab659fbb56979458dc66a71decd098e03c79e218"}, ] [package.dependencies] @@ -2095,46 +2151,43 @@ files = [ [[package]] name = "llvmlite" -version = "0.41.1" +version = "0.42.0" description = "lightweight wrapper around basic LLVM functionality" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "llvmlite-0.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1e1029d47ee66d3a0c4d6088641882f75b93db82bd0e6178f7bd744ebce42b9"}, - {file = "llvmlite-0.41.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:150d0bc275a8ac664a705135e639178883293cf08c1a38de3bbaa2f693a0a867"}, - {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eee5cf17ec2b4198b509272cf300ee6577229d237c98cc6e63861b08463ddc6"}, - {file = "llvmlite-0.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd0338da625346538f1173a17cabf21d1e315cf387ca21b294ff209d176e244"}, - {file = "llvmlite-0.41.1-cp310-cp310-win32.whl", hash = "sha256:fa1469901a2e100c17eb8fe2678e34bd4255a3576d1a543421356e9c14d6e2ae"}, - {file = "llvmlite-0.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:2b76acee82ea0e9304be6be9d4b3840208d050ea0dcad75b1635fa06e949a0ae"}, - {file = "llvmlite-0.41.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:210e458723436b2469d61b54b453474e09e12a94453c97ea3fbb0742ba5a83d8"}, - {file = "llvmlite-0.41.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:855f280e781d49e0640aef4c4af586831ade8f1a6c4df483fb901cbe1a48d127"}, - {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b67340c62c93a11fae482910dc29163a50dff3dfa88bc874872d28ee604a83be"}, - {file = "llvmlite-0.41.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2181bb63ef3c607e6403813421b46982c3ac6bfc1f11fa16a13eaafb46f578e6"}, - {file = "llvmlite-0.41.1-cp311-cp311-win_amd64.whl", hash = "sha256:9564c19b31a0434f01d2025b06b44c7ed422f51e719ab5d24ff03b7560066c9a"}, - {file = "llvmlite-0.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5940bc901fb0325970415dbede82c0b7f3e35c2d5fd1d5e0047134c2c46b3281"}, - {file = "llvmlite-0.41.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8b0a9a47c28f67a269bb62f6256e63cef28d3c5f13cbae4fab587c3ad506778b"}, - {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8afdfa6da33f0b4226af8e64cfc2b28986e005528fbf944d0a24a72acfc9432"}, - {file = "llvmlite-0.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8454c1133ef701e8c050a59edd85d238ee18bb9a0eb95faf2fca8b909ee3c89a"}, - {file = "llvmlite-0.41.1-cp38-cp38-win32.whl", hash = "sha256:2d92c51e6e9394d503033ffe3292f5bef1566ab73029ec853861f60ad5c925d0"}, - {file = "llvmlite-0.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:df75594e5a4702b032684d5481db3af990b69c249ccb1d32687b8501f0689432"}, - {file = "llvmlite-0.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:04725975e5b2af416d685ea0769f4ecc33f97be541e301054c9f741003085802"}, - {file = "llvmlite-0.41.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bf14aa0eb22b58c231243dccf7e7f42f7beec48970f2549b3a6acc737d1a4ba4"}, - {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c32356f669e036eb01016e883b22add883c60739bc1ebee3a1cc0249a50828"}, - {file = "llvmlite-0.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24091a6b31242bcdd56ae2dbea40007f462260bc9bdf947953acc39dffd54f8f"}, - {file = "llvmlite-0.41.1-cp39-cp39-win32.whl", hash = "sha256:880cb57ca49e862e1cd077104375b9d1dfdc0622596dfa22105f470d7bacb309"}, - {file = "llvmlite-0.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:92f093986ab92e71c9ffe334c002f96defc7986efda18397d0f08534f3ebdc4d"}, - {file = "llvmlite-0.41.1.tar.gz", hash = "sha256:f19f767a018e6ec89608e1f6b13348fa2fcde657151137cb64e56d48598a92db"}, + {file = "llvmlite-0.42.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3366938e1bf63d26c34fbfb4c8e8d2ded57d11e0567d5bb243d89aab1eb56098"}, + {file = "llvmlite-0.42.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c35da49666a21185d21b551fc3caf46a935d54d66969d32d72af109b5e7d2b6f"}, + {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70f44ccc3c6220bd23e0ba698a63ec2a7d3205da0d848804807f37fc243e3f77"}, + {file = "llvmlite-0.42.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f8d8717a9073b9e0246998de89929071d15b47f254c10eef2310b9aac033d"}, + {file = "llvmlite-0.42.0-cp310-cp310-win_amd64.whl", hash = "sha256:8d90edf400b4ceb3a0e776b6c6e4656d05c7187c439587e06f86afceb66d2be5"}, + {file = "llvmlite-0.42.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ae511caed28beaf1252dbaf5f40e663f533b79ceb408c874c01754cafabb9cbf"}, + {file = "llvmlite-0.42.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81e674c2fe85576e6c4474e8c7e7aba7901ac0196e864fe7985492b737dbab65"}, + {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb3975787f13eb97629052edb5017f6c170eebc1c14a0433e8089e5db43bcce6"}, + {file = "llvmlite-0.42.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5bece0cdf77f22379f19b1959ccd7aee518afa4afbd3656c6365865f84903f9"}, + {file = "llvmlite-0.42.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e0c4c11c8c2aa9b0701f91b799cb9134a6a6de51444eff5a9087fc7c1384275"}, + {file = "llvmlite-0.42.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:08fa9ab02b0d0179c688a4216b8939138266519aaa0aa94f1195a8542faedb56"}, + {file = "llvmlite-0.42.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b2fce7d355068494d1e42202c7aff25d50c462584233013eb4470c33b995e3ee"}, + {file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebe66a86dc44634b59a3bc860c7b20d26d9aaffcd30364ebe8ba79161a9121f4"}, + {file = "llvmlite-0.42.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d47494552559e00d81bfb836cf1c4d5a5062e54102cc5767d5aa1e77ccd2505c"}, + {file = "llvmlite-0.42.0-cp312-cp312-win_amd64.whl", hash = "sha256:05cb7e9b6ce69165ce4d1b994fbdedca0c62492e537b0cc86141b6e2c78d5888"}, + {file = "llvmlite-0.42.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdd3888544538a94d7ec99e7c62a0cdd8833609c85f0c23fcb6c5c591aec60ad"}, + {file = "llvmlite-0.42.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:d0936c2067a67fb8816c908d5457d63eba3e2b17e515c5fe00e5ee2bace06040"}, + {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a78ab89f1924fc11482209f6799a7a3fc74ddc80425a7a3e0e8174af0e9e2301"}, + {file = "llvmlite-0.42.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7599b65c7af7abbc978dbf345712c60fd596aa5670496561cc10e8a71cebfb2"}, + {file = "llvmlite-0.42.0-cp39-cp39-win_amd64.whl", hash = "sha256:43d65cc4e206c2e902c1004dd5418417c4efa6c1d04df05c6c5675a27e8ca90e"}, + {file = "llvmlite-0.42.0.tar.gz", hash = "sha256:f92b09243c0cc3f457da8b983f67bd8e1295d0f5b3746c7a1861d7a99403854a"}, ] [[package]] name = "markdown" -version = "3.5.1" +version = "3.5.2" description = "Python implementation of John Gruber's Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "Markdown-3.5.1-py3-none-any.whl", hash = "sha256:5874b47d4ee3f0b14d764324d2c94c03ea66bee56f2d929da9f2508d65e722dc"}, - {file = "Markdown-3.5.1.tar.gz", hash = "sha256:b65d7beb248dc22f2e8a31fb706d93798093c308dc1aba295aedeb9d41a813bd"}, + {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"}, + {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"}, ] [package.dependencies] @@ -2146,71 +2199,71 @@ testing = ["coverage", "pyyaml"] [[package]] name = "markupsafe" -version = "2.1.3" +version = "2.1.5" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = 
"MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, - {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = 
"MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = 
"MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] [[package]] @@ -2440,13 +2493,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= [[package]] name = "nbconvert" -version = "7.13.0" +version = "7.16.0" description = "Converting Jupyter Notebooks" optional = false python-versions = ">=3.8" files = [ - {file = "nbconvert-7.13.0-py3-none-any.whl", hash = "sha256:22521cfcc10ba5755e44acb6a70d2bd8a891ce7aed6746481e10cd548b169e19"}, - {file = "nbconvert-7.13.0.tar.gz", hash = "sha256:c6f61c86fca5b28bd17f4f9a308248e59fa2b54919e1589f6cc3575c5dfec2bd"}, + {file = "nbconvert-7.16.0-py3-none-any.whl", hash = "sha256:ad3dc865ea6e2768d31b7eb6c7ab3be014927216a5ece3ef276748dd809054c7"}, + {file = "nbconvert-7.16.0.tar.gz", hash = "sha256:813e6553796362489ae572e39ba1bff978536192fb518e10826b0e8cadf03ec8"}, ] [package.dependencies] @@ -2536,81 +2589,222 @@ test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] 
[[package]] name = "numba" -version = "0.58.1" +version = "0.59.0" description = "compiling Python code using LLVM" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "numba-0.58.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07f2fa7e7144aa6f275f27260e73ce0d808d3c62b30cff8906ad1dec12d87bbe"}, - {file = "numba-0.58.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7bf1ddd4f7b9c2306de0384bf3854cac3edd7b4d8dffae2ec1b925e4c436233f"}, - {file = "numba-0.58.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bc2d904d0319d7a5857bd65062340bed627f5bfe9ae4a495aef342f072880d50"}, - {file = "numba-0.58.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e79b6cc0d2bf064a955934a2e02bf676bc7995ab2db929dbbc62e4c16551be6"}, - {file = "numba-0.58.1-cp310-cp310-win_amd64.whl", hash = "sha256:81fe5b51532478149b5081311b0fd4206959174e660c372b94ed5364cfb37c82"}, - {file = "numba-0.58.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bcecd3fb9df36554b342140a4d77d938a549be635d64caf8bd9ef6c47a47f8aa"}, - {file = "numba-0.58.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1eaa744f518bbd60e1f7ccddfb8002b3d06bd865b94a5d7eac25028efe0e0ff"}, - {file = "numba-0.58.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bf68df9c307fb0aa81cacd33faccd6e419496fdc621e83f1efce35cdc5e79cac"}, - {file = "numba-0.58.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:55a01e1881120e86d54efdff1be08381886fe9f04fc3006af309c602a72bc44d"}, - {file = "numba-0.58.1-cp311-cp311-win_amd64.whl", hash = "sha256:811305d5dc40ae43c3ace5b192c670c358a89a4d2ae4f86d1665003798ea7a1a"}, - {file = "numba-0.58.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea5bfcf7d641d351c6a80e8e1826eb4a145d619870016eeaf20bbd71ef5caa22"}, - {file = "numba-0.58.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e63d6aacaae1ba4ef3695f1c2122b30fa3d8ba039c8f517784668075856d79e2"}, - {file = "numba-0.58.1-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6fe7a9d8e3bd996fbe5eac0683227ccef26cba98dae6e5cee2c1894d4b9f16c1"}, - {file = "numba-0.58.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:898af055b03f09d33a587e9425500e5be84fc90cd2f80b3fb71c6a4a17a7e354"}, - {file = "numba-0.58.1-cp38-cp38-win_amd64.whl", hash = "sha256:d3e2fe81fe9a59fcd99cc572002101119059d64d31eb6324995ee8b0f144a306"}, - {file = "numba-0.58.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c765aef472a9406a97ea9782116335ad4f9ef5c9f93fc05fd44aab0db486954"}, - {file = "numba-0.58.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9356e943617f5e35a74bf56ff6e7cc83e6b1865d5e13cee535d79bf2cae954"}, - {file = "numba-0.58.1-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:240e7a1ae80eb6b14061dc91263b99dc8d6af9ea45d310751b780888097c1aaa"}, - {file = "numba-0.58.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:45698b995914003f890ad839cfc909eeb9c74921849c712a05405d1a79c50f68"}, - {file = "numba-0.58.1-cp39-cp39-win_amd64.whl", hash = "sha256:bd3dda77955be03ff366eebbfdb39919ce7c2620d86c906203bed92124989032"}, - {file = "numba-0.58.1.tar.gz", hash = "sha256:487ded0633efccd9ca3a46364b40006dbdaca0f95e99b8b83e778d1195ebcbaa"}, + {file = "numba-0.59.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d061d800473fb8fef76a455221f4ad649a53f5e0f96e3f6c8b8553ee6fa98fa"}, + {file = "numba-0.59.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:c086a434e7d3891ce5dfd3d1e7ee8102ac1e733962098578b507864120559ceb"}, + {file = "numba-0.59.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9e20736bf62e61f8353fb71b0d3a1efba636c7a303d511600fc57648b55823ed"}, + {file = "numba-0.59.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e86e6786aec31d2002122199486e10bbc0dc40f78d76364cded375912b13614c"}, + {file = "numba-0.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:0307ee91b24500bb7e64d8a109848baf3a3905df48ce142b8ac60aaa406a0400"}, + {file = "numba-0.59.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d540f69a8245fb714419c2209e9af6104e568eb97623adc8943642e61f5d6d8e"}, + {file = "numba-0.59.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1192d6b2906bf3ff72b1d97458724d98860ab86a91abdd4cfd9328432b661e31"}, + {file = "numba-0.59.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:90efb436d3413809fcd15298c6d395cb7d98184350472588356ccf19db9e37c8"}, + {file = "numba-0.59.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd3dac45e25d927dcb65d44fb3a973994f5add2b15add13337844afe669dd1ba"}, + {file = "numba-0.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:753dc601a159861808cc3207bad5c17724d3b69552fd22768fddbf302a817a4c"}, + {file = "numba-0.59.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ce62bc0e6dd5264e7ff7f34f41786889fa81a6b860662f824aa7532537a7bee0"}, + {file = "numba-0.59.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8cbef55b73741b5eea2dbaf1b0590b14977ca95a13a07d200b794f8f6833a01c"}, + {file = "numba-0.59.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:70d26ba589f764be45ea8c272caa467dbe882b9676f6749fe6f42678091f5f21"}, + {file = "numba-0.59.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e125f7d69968118c28ec0eed9fbedd75440e64214b8d2eac033c22c04db48492"}, + {file = "numba-0.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:4981659220b61a03c1e557654027d271f56f3087448967a55c79a0e5f926de62"}, + {file = "numba-0.59.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe4d7562d1eed754a7511ed7ba962067f198f86909741c5c6e18c4f1819b1f47"}, + {file = "numba-0.59.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6feb1504bb432280f900deaf4b1dadcee68812209500ed3f81c375cbceab24dc"}, + {file = "numba-0.59.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:944faad25ee23ea9dda582bfb0189fb9f4fc232359a80ab2a028b94c14ce2b1d"}, + {file = "numba-0.59.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5516a469514bfae52a9d7989db4940653a5cbfac106f44cb9c50133b7ad6224b"}, + {file = "numba-0.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:32bd0a41525ec0b1b853da244808f4e5333867df3c43c30c33f89cf20b9c2b63"}, + {file = "numba-0.59.0.tar.gz", hash = "sha256:12b9b064a3e4ad00e2371fc5212ef0396c80f41caec9b5ec391c8b04b6eaf2a8"}, ] [package.dependencies] -llvmlite = "==0.41.*" +llvmlite = "==0.42.*" numpy = ">=1.22,<1.27" [[package]] name = "numpy" -version = "1.26.2" +version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, - {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, - {file = 
"numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, - {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, - {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, - {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, - {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, - {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, - {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, - {file = 
"numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, - {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, - {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, - {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, 
+ {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = 
"nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = 
"nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.19.3" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] [[package]] @@ -2649,42 +2843,46 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] [[package]] name = "osqp" -version = "0.6.3" +version = "0.6.5" description = "OSQP: The Operator Splitting QP Solver" optional = false python-versions = "*" files = [ - {file = "osqp-0.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7d923c836f1d07115057e595245ccc1694ecae730a1affda78fc6f3c8d239"}, - {file = "osqp-0.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dfda08c38c3521012740a73ef782f97dfc54a41deae4b0bc4afd18d0e74da0"}, - {file = "osqp-0.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7eafa3f3e82dd36c52f3f4ef19a95142405c807c272c4b53c5971c53535d7804"}, - {file = "osqp-0.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:3cbb6efdaffb7387dc0037dfe3259d4803e5ad7217e6f20fb605c92953214b9d"}, - {file = "osqp-0.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1b2049b2c42565dcaa63ddca1c4028b1fb20aab141453f5d77e8ff5b1a99a2cf"}, - {file = "osqp-0.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:146b89f2cfbf59eaeb2c47e3a312f2034138df78d80ce052364810dc0ef70fc4"}, - {file = "osqp-0.6.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0084e3d733c75687d68bc133bc380ce471dfe6f7724af2718a43491782eec8d6"}, - {file = "osqp-0.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:1b573fe1cd0e82239a279c58817c1d365187ef862e928b2b9c828c3c516ad3c2"}, - {file = "osqp-0.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c3951ef505177b858c6cd34de980346014cae3d2234c93db960b12c5885f9a2"}, - {file = 
"osqp-0.6.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc18f87c9549032c163ce590a5e32079df94ee656c8fb357ba607aa9d78fab81"}, - {file = "osqp-0.6.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b1a4b538aab629b0fae69f644b7e76f81f94d65230014d482e296dacd046b"}, - {file = "osqp-0.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:60abec3593870990b16f00bd5017096a7091fb00b68d0db3383fc048ca8e55c9"}, - {file = "osqp-0.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b73bdd9589901841af83c5ed6a4092b4fac5a0beff9e32682d8526d1f16a728c"}, - {file = "osqp-0.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d9f611823af4a8b241c86805920e5382cd65c7f94fd3615b4eef999ed94c7c"}, - {file = "osqp-0.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30fbc3b3c028c06a6c5f1e66be7b7106ad48a29e0dc5bd82393f82dd68235ef8"}, - {file = "osqp-0.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fe57e4bde071b388518ecb068f26319506dd9cb107363d3d80c12d2e59fc1e81"}, - {file = "osqp-0.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:41f304d1d7f91af07d8f0b01e5af29ec3bb8824f0102c7fd8b13b497be120da4"}, - {file = "osqp-0.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea7d8c92bcdf4fef98d777f13d39060d425ef2e8778ed487c96a6fa10848cdea"}, - {file = "osqp-0.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f3a3c6d2708868e5e3fe2da300d6523cbf68a3d8734ce9c5043db37391969f5"}, - {file = "osqp-0.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:1c548a0b3691850e7e22f3624a128d8af33416d70a9b5976a47d4d832028dcd8"}, - {file = "osqp-0.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:387e7abd737dfe32c9ec00ad74af25328cdd0d0f634d79530655c040a5cb9590"}, - {file = "osqp-0.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1445e10a94e01698e13c87a7debf6ac1a15f3acd1f8f6340cb1ad945db4732b"}, - {file = "osqp-0.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0441c10f7fe5f46692a9b44a57138977bb112ae3f8127151671968c5d9ec5dbb"}, - {file = "osqp-0.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:b15e65a307fbbabf60248bb9bc204e61d5d4ae64e00427a69e2dad9622f4c29d"}, - {file = "osqp-0.6.3.tar.gz", hash = "sha256:03e460e683ec2ce0f839353ddfa3c4c8ffa509ab8cf6a2b2afbb586fa453e180"}, + {file = "osqp-0.6.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e8024dba07281111af39e71bff6449fb22a37bf3358aa0c7fd1daa6bca692c99"}, + {file = "osqp-0.6.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e247f2bbb53e87f1c1ca80ff3fc86b781f771d6da2a2ecd2f6e7492c802f3"}, + {file = "osqp-0.6.5-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81e299637eb2342e30eb2df0ec45dc243683af0a71676c9b45b9337bb05da97"}, + {file = "osqp-0.6.5-cp310-cp310-win_amd64.whl", hash = "sha256:42425632927d983cbe935067783b944ebd4959e9eb6611da8401007b66a0c841"}, + {file = "osqp-0.6.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a7b180db09be1c3e3cb4109396b894f481ca9c6e160a530acd71f1769610f96c"}, + {file = "osqp-0.6.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:648f4beff10c16620f3b95e86dee702052d587b847ddbd5d8f71ad39ac36db3a"}, + {file = 
"osqp-0.6.5-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7649d56d775662e0a5d1665ed220d585f904d14a49cc6931bf27725bb9c4b2e0"}, + {file = "osqp-0.6.5-cp311-cp311-win_amd64.whl", hash = "sha256:b033b7aec973a655cfec4558e0c4fc92ee9f914bcb0a669e0156398d8ddbef8f"}, + {file = "osqp-0.6.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5c344619465e625aac6d13812d442dd31d4a9ab243e39abb5938c3f6116409b0"}, + {file = "osqp-0.6.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000ad48aa071ecc4c75ebc39d1291752fe3a9937a30d00fff5dc61663ec67eeb"}, + {file = "osqp-0.6.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36a40df69db5195fba613341663db2c7dcf977eb75b9578a8fd7682bbe02324"}, + {file = "osqp-0.6.5-cp312-cp312-win_amd64.whl", hash = "sha256:3d8212db7c55af1961ccce4a32fd382bfe34e2198664ea3f81cc47eef8d0f288"}, + {file = "osqp-0.6.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ca7d80c0767b1350cd74e4f1446ec51661152690d38b1382ceccdfccd757afce"}, + {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b15e2b96d4d9b2eff37a05405372c69cf17ada3d1e42c5e28cbdbd053189ab5"}, + {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a41600e34ece7156606fd3620987fdf224b0a35c857540cb5bf45072f5c022b"}, + {file = "osqp-0.6.5-cp36-cp36m-win_amd64.whl", hash = "sha256:8c38574b35a3ddfb794aafee9bc5a74635160b9fc52bbc89ae6164fe207556de"}, + {file = "osqp-0.6.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d06f614e3be1b1f3cd68569b2dc3628c2fdef1e7c4b992672fe05efb1add9801"}, + {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a6b995e0a022bd1c33d20d8846d9a068df89cec288b905b5cdfdb98a2ffae8"}, + {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09de9b53e7513ee4ade3024ce9f36ef993d916118d0927cce740d086882ea92c"}, + {file = "osqp-0.6.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1f80f85d515ef29b90fb34f137857e75d4fcf21a715d644f54d2cf9494567fab"}, + {file = "osqp-0.6.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de9b9e96001e8f0b2e474106ac75e220fd9279e1635b107b836a6035795e8d07"}, + {file = "osqp-0.6.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe545d7a87a46cfc57dfb9f0aa2788d2f29e0c71dc1ac57e92f9c9d93064753"}, + {file = "osqp-0.6.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49ab020b5fd7abb5da99e01e47bf81f817ba1df6895e3d3ba4893722cc24d9b6"}, + {file = "osqp-0.6.5-cp38-cp38-win_amd64.whl", hash = "sha256:5d1b5ed6fc4faea94117a0abe140fefe980449b29d3907bd2e6ec1c18eca3d43"}, + {file = "osqp-0.6.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dca127b7a333ce53fb430fc441b2e0aee2df619693d967277a8f8fd095e95007"}, + {file = "osqp-0.6.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ec902844defedf7c5a5ed482b93286d1735a65b71bb27c93e18c929f313c93d"}, + {file = "osqp-0.6.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25a9e1e8f1db38094dc7ee544e603e31fe7bf1b2a3fc75c78c1d39a727e2540"}, + {file = "osqp-0.6.5-cp39-cp39-win_amd64.whl", hash = "sha256:6dce90d8c4ad551489a452573ea819e089e1e1c3b23bbd8f155bb6059ce8ef36"}, + {file = 
"osqp-0.6.5.tar.gz", hash = "sha256:b2810aee7be2373add8b6c0be5ad99b810288774abca421751cb032d6a5aedef"}, ] [package.dependencies] numpy = ">=1.7" qdldl = "*" -scipy = ">=0.13.2" +scipy = ">=0.13.2,<1.12.0" [[package]] name = "packaging" @@ -2699,36 +2897,40 @@ files = [ [[package]] name = "pandas" -version = "2.1.4" +version = "2.2.0" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.1.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bdec823dc6ec53f7a6339a0e34c68b144a7a1fd28d80c260534c39c62c5bf8c9"}, - {file = "pandas-2.1.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:294d96cfaf28d688f30c918a765ea2ae2e0e71d3536754f4b6de0ea4a496d034"}, - {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b728fb8deba8905b319f96447a27033969f3ea1fea09d07d296c9030ab2ed1d"}, - {file = "pandas-2.1.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00028e6737c594feac3c2df15636d73ace46b8314d236100b57ed7e4b9ebe8d9"}, - {file = "pandas-2.1.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:426dc0f1b187523c4db06f96fb5c8d1a845e259c99bda74f7de97bd8a3bb3139"}, - {file = "pandas-2.1.4-cp310-cp310-win_amd64.whl", hash = "sha256:f237e6ca6421265643608813ce9793610ad09b40154a3344a088159590469e46"}, - {file = "pandas-2.1.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b7d852d16c270e4331f6f59b3e9aa23f935f5c4b0ed2d0bc77637a8890a5d092"}, - {file = "pandas-2.1.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7d5f2f54f78164b3d7a40f33bf79a74cdee72c31affec86bfcabe7e0789821"}, - {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0aa6e92e639da0d6e2017d9ccff563222f4eb31e4b2c3cf32a2a392fc3103c0d"}, - {file = "pandas-2.1.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d797591b6846b9db79e65dc2d0d48e61f7db8d10b2a9480b4e3faaddc421a171"}, - {file = "pandas-2.1.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2d3e7b00f703aea3945995ee63375c61b2e6aa5aa7871c5d622870e5e137623"}, - {file = "pandas-2.1.4-cp311-cp311-win_amd64.whl", hash = "sha256:dc9bf7ade01143cddc0074aa6995edd05323974e6e40d9dbde081021ded8510e"}, - {file = "pandas-2.1.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:482d5076e1791777e1571f2e2d789e940dedd927325cc3cb6d0800c6304082f6"}, - {file = "pandas-2.1.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8a706cfe7955c4ca59af8c7a0517370eafbd98593155b48f10f9811da440248b"}, - {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0513a132a15977b4a5b89aabd304647919bc2169eac4c8536afb29c07c23540"}, - {file = "pandas-2.1.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9f17f2b6fc076b2a0078862547595d66244db0f41bf79fc5f64a5c4d635bead"}, - {file = "pandas-2.1.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:45d63d2a9b1b37fa6c84a68ba2422dc9ed018bdaa668c7f47566a01188ceeec1"}, - {file = "pandas-2.1.4-cp312-cp312-win_amd64.whl", hash = "sha256:f69b0c9bb174a2342818d3e2778584e18c740d56857fc5cdb944ec8bbe4082cf"}, - {file = "pandas-2.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3f06bda01a143020bad20f7a85dd5f4a1600112145f126bc9e3e42077c24ef34"}, - {file = "pandas-2.1.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab5796839eb1fd62a39eec2916d3e979ec3130509930fea17fe6f81e18108f6a"}, - {file = 
"pandas-2.1.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbaf9e8d3a63a9276d707b4d25930a262341bca9874fcb22eff5e3da5394732"}, - {file = "pandas-2.1.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ebfd771110b50055712b3b711b51bee5d50135429364d0498e1213a7adc2be8"}, - {file = "pandas-2.1.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8ea107e0be2aba1da619cc6ba3f999b2bfc9669a83554b1904ce3dd9507f0860"}, - {file = "pandas-2.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:d65148b14788b3758daf57bf42725caa536575da2b64df9964c563b015230984"}, - {file = "pandas-2.1.4.tar.gz", hash = "sha256:fcb68203c833cc735321512e13861358079a96c174a61f5116a1de89c58c0ef7"}, + {file = "pandas-2.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8108ee1712bb4fa2c16981fba7e68b3f6ea330277f5ca34fa8d557e986a11670"}, + {file = "pandas-2.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:736da9ad4033aeab51d067fc3bd69a0ba36f5a60f66a527b3d72e2030e63280a"}, + {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e0b4fc3ddceb56ec8a287313bc22abe17ab0eb184069f08fc6a9352a769b18"}, + {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20404d2adefe92aed3b38da41d0847a143a09be982a31b85bc7dd565bdba0f4e"}, + {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ea3ee3f125032bfcade3a4cf85131ed064b4f8dd23e5ce6fa16473e48ebcaf5"}, + {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9670b3ac00a387620489dfc1bca66db47a787f4e55911f1293063a78b108df1"}, + {file = "pandas-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a946f210383c7e6d16312d30b238fd508d80d927014f3b33fb5b15c2f895430"}, + {file = "pandas-2.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a1b438fa26b208005c997e78672f1aa8138f67002e833312e6230f3e57fa87d5"}, + {file = "pandas-2.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ce2fbc8d9bf303ce54a476116165220a1fedf15985b09656b4b4275300e920b"}, + {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2707514a7bec41a4ab81f2ccce8b382961a29fbe9492eab1305bb075b2b1ff4f"}, + {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85793cbdc2d5bc32620dc8ffa715423f0c680dacacf55056ba13454a5be5de88"}, + {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cfd6c2491dc821b10c716ad6776e7ab311f7df5d16038d0b7458bc0b67dc10f3"}, + {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a146b9dcacc3123aa2b399df1a284de5f46287a4ab4fbfc237eac98a92ebcb71"}, + {file = "pandas-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbc1b53c0e1fdf16388c33c3cca160f798d38aea2978004dd3f4d3dec56454c9"}, + {file = "pandas-2.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a41d06f308a024981dcaa6c41f2f2be46a6b186b902c94c2674e8cb5c42985bc"}, + {file = "pandas-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:159205c99d7a5ce89ecfc37cb08ed179de7783737cea403b295b5eda8e9c56d1"}, + {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1e1f3861ea9132b32f2133788f3b14911b68102d562715d71bd0013bc45440"}, + {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:761cb99b42a69005dec2b08854fb1d4888fdf7b05db23a8c5a099e4b886a2106"}, + {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:a20628faaf444da122b2a64b1e5360cde100ee6283ae8effa0d8745153809a2e"}, + {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f5be5d03ea2073627e7111f61b9f1f0d9625dc3c4d8dda72cc827b0c58a1d042"}, + {file = "pandas-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:a626795722d893ed6aacb64d2401d017ddc8a2341b49e0384ab9bf7112bdec30"}, + {file = "pandas-2.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f66419d4a41132eb7e9a73dcec9486cf5019f52d90dd35547af11bc58f8637d"}, + {file = "pandas-2.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57abcaeda83fb80d447f28ab0cc7b32b13978f6f733875ebd1ed14f8fbc0f4ab"}, + {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60f1f7dba3c2d5ca159e18c46a34e7ca7247a73b5dd1a22b6d59707ed6b899a"}, + {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb61dc8567b798b969bcc1fc964788f5a68214d333cade8319c7ab33e2b5d88a"}, + {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:52826b5f4ed658fa2b729264d63f6732b8b29949c7fd234510d57c61dbeadfcd"}, + {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bde2bc699dbd80d7bc7f9cab1e23a95c4375de615860ca089f34e7c64f4a8de7"}, + {file = "pandas-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:3de918a754bbf2da2381e8a3dcc45eede8cd7775b047b923f9006d5f876802ae"}, + {file = "pandas-2.2.0.tar.gz", hash = "sha256:30b83f7c3eb217fb4d1b494a57a2fda5444f17834f5df2de6b2ffff68dc3c8e2"}, ] [package.dependencies] @@ -2738,41 +2940,41 @@ numpy = [ ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" -tzdata = ">=2022.1" +tzdata = ">=2022.7" [package.extras] -all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] -aws = ["s3fs (>=2022.05.0)"] -clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"] -compression = ["zstandard (>=0.17.0)"] -computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = 
["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"] -feather = ["pyarrow (>=7.0.0)"] -fss = ["fsspec (>=2022.05.0)"] -gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"] -hdf5 = ["tables (>=3.7.0)"] -html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"] -mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"] -parquet = ["pyarrow (>=7.0.0)"] -performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"] -plot = ["matplotlib (>=3.6.1)"] -postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"] -spss = ["pyreadstat (>=1.1.5)"] -sql-other = ["SQLAlchemy (>=1.4.36)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.8.0)"] +xml = ["lxml (>=4.9.2)"] [[package]] name = "pandocfilters" -version = "1.5.0" +version = "1.5.1" description = "Utilities for writing pandoc filters in python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ - {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, - {file = "pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"}, + {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, + {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, ] [[package]] @@ -2911,28 +3113,28 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.1.0" +version = "4.2.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, - {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, + {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, + {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, ] [package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] @@ -2983,47 +3185,47 @@ testing = ["google-api-core[grpc] (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.1" +version = "4.25.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, - {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"}, - {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"}, - {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"}, - {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"}, - {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"}, - {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"}, - {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"}, - {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, + {file = "protobuf-4.25.2-cp310-abi3-win32.whl", hash = 
"sha256:b50c949608682b12efb0b2717f53256f03636af5f60ac0c1d900df6213910fd6"}, + {file = "protobuf-4.25.2-cp310-abi3-win_amd64.whl", hash = "sha256:8f62574857ee1de9f770baf04dde4165e30b15ad97ba03ceac65f760ff018ac9"}, + {file = "protobuf-4.25.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2db9f8fa64fbdcdc93767d3cf81e0f2aef176284071507e3ede160811502fd3d"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:10894a2885b7175d3984f2be8d9850712c57d5e7587a2410720af8be56cdaf62"}, + {file = "protobuf-4.25.2-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fc381d1dd0516343f1440019cedf08a7405f791cd49eef4ae1ea06520bc1c020"}, + {file = "protobuf-4.25.2-cp38-cp38-win32.whl", hash = "sha256:33a1aeef4b1927431d1be780e87b641e322b88d654203a9e9d93f218ee359e61"}, + {file = "protobuf-4.25.2-cp38-cp38-win_amd64.whl", hash = "sha256:47f3de503fe7c1245f6f03bea7e8d3ec11c6c4a2ea9ef910e3221c8a15516d62"}, + {file = "protobuf-4.25.2-cp39-cp39-win32.whl", hash = "sha256:5e5c933b4c30a988b52e0b7c02641760a5ba046edc5e43d3b94a74c9fc57c1b3"}, + {file = "protobuf-4.25.2-cp39-cp39-win_amd64.whl", hash = "sha256:d66a769b8d687df9024f2985d5137a337f957a0916cf5464d1513eee96a63ff0"}, + {file = "protobuf-4.25.2-py3-none-any.whl", hash = "sha256:a8b7a98d4ce823303145bf3c1a8bdb0f2f4642a414b196f04ad9853ed0c8f830"}, + {file = "protobuf-4.25.2.tar.gz", hash = "sha256:fe599e175cb347efc8ee524bcd4b902d11f7262c0e569ececcb89995c15f0a5e"}, ] [[package]] name = "psutil" -version = "5.9.7" +version = "5.9.8" description = "Cross-platform lib for process and system monitoring in Python." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "psutil-5.9.7-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:0bd41bf2d1463dfa535942b2a8f0e958acf6607ac0be52265ab31f7923bcd5e6"}, - {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:5794944462509e49d4d458f4dbfb92c47539e7d8d15c796f141f474010084056"}, - {file = "psutil-5.9.7-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:fe361f743cb3389b8efda21980d93eb55c1f1e3898269bc9a2a1d0bb7b1f6508"}, - {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:e469990e28f1ad738f65a42dcfc17adaed9d0f325d55047593cb9033a0ab63df"}, - {file = "psutil-5.9.7-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:3c4747a3e2ead1589e647e64aad601981f01b68f9398ddf94d01e3dc0d1e57c7"}, - {file = "psutil-5.9.7-cp27-none-win32.whl", hash = "sha256:1d4bc4a0148fdd7fd8f38e0498639ae128e64538faa507df25a20f8f7fb2341c"}, - {file = "psutil-5.9.7-cp27-none-win_amd64.whl", hash = "sha256:4c03362e280d06bbbfcd52f29acd79c733e0af33d707c54255d21029b8b32ba6"}, - {file = "psutil-5.9.7-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ea36cc62e69a13ec52b2f625c27527f6e4479bca2b340b7a452af55b34fcbe2e"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1132704b876e58d277168cd729d64750633d5ff0183acf5b3c986b8466cd0284"}, - {file = "psutil-5.9.7-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8b7f07948f1304497ce4f4684881250cd859b16d06a1dc4d7941eeb6233bfe"}, - {file = "psutil-5.9.7-cp36-cp36m-win32.whl", hash = "sha256:b27f8fdb190c8c03914f908a4555159327d7481dac2f01008d483137ef3311a9"}, - {file = "psutil-5.9.7-cp36-cp36m-win_amd64.whl", hash = "sha256:44969859757f4d8f2a9bd5b76eba8c3099a2c8cf3992ff62144061e39ba8568e"}, - {file = "psutil-5.9.7-cp37-abi3-win32.whl", hash = 
"sha256:c727ca5a9b2dd5193b8644b9f0c883d54f1248310023b5ad3e92036c5e2ada68"}, - {file = "psutil-5.9.7-cp37-abi3-win_amd64.whl", hash = "sha256:f37f87e4d73b79e6c5e749440c3113b81d1ee7d26f21c19c47371ddea834f414"}, - {file = "psutil-5.9.7-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:032f4f2c909818c86cea4fe2cc407f1c0f0cde8e6c6d702b28b8ce0c0d143340"}, - {file = "psutil-5.9.7.tar.gz", hash = "sha256:3f02134e82cfb5d089fddf20bb2e03fd5cd52395321d1c8458a9e58500ff417c"}, + {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"}, + {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"}, + {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"}, + {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, + {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"}, + {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"}, + {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, + {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, + {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, + {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, ] [package.extras] @@ -3151,47 +3353,47 @@ files = [ [[package]] name = "pydantic" -version = "1.10.13" +version = "1.10.14" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"}, - {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"}, - {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"}, - {file = 
"pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"}, - {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"}, - {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"}, - {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"}, - {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"}, - {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"}, - {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"}, - {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"}, - {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"}, - {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"}, - {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"}, - {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"}, - {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"}, - {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"}, - {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"}, - {file = 
"pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"}, - {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"}, - {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"}, - {file = "pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"}, - {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"}, - {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"}, - {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"}, - {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"}, - {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"}, + {file = "pydantic-1.10.14-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7f4fcec873f90537c382840f330b90f4715eebc2bc9925f04cb92de593eae054"}, + {file = "pydantic-1.10.14-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e3a76f571970fcd3c43ad982daf936ae39b3e90b8a2e96c04113a369869dc87"}, + {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d886bd3c3fbeaa963692ef6b643159ccb4b4cefaf7ff1617720cbead04fd1d"}, + {file = "pydantic-1.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:798a3d05ee3b71967844a1164fd5bdb8c22c6d674f26274e78b9f29d81770c4e"}, + {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:23d47a4b57a38e8652bcab15a658fdb13c785b9ce217cc3a729504ab4e1d6bc9"}, + {file = "pydantic-1.10.14-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9f674b5c3bebc2eba401de64f29948ae1e646ba2735f884d1594c5f675d6f2a"}, + {file = "pydantic-1.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:24a7679fab2e0eeedb5a8924fc4a694b3bcaac7d305aeeac72dd7d4e05ecbebf"}, + {file = "pydantic-1.10.14-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9d578ac4bf7fdf10ce14caba6f734c178379bd35c486c6deb6f49006e1ba78a7"}, + {file = "pydantic-1.10.14-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fa7790e94c60f809c95602a26d906eba01a0abee9cc24150e4ce2189352deb1b"}, + {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad4e10efa5474ed1a611b6d7f0d130f4aafadceb73c11d9e72823e8f508e663"}, + {file = "pydantic-1.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1245f4f61f467cb3dfeced2b119afef3db386aec3d24a22a1de08c65038b255f"}, + {file = "pydantic-1.10.14-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:21efacc678a11114c765eb52ec0db62edffa89e9a562a94cbf8fa10b5db5c046"}, + {file = 
"pydantic-1.10.14-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:412ab4a3f6dbd2bf18aefa9f79c7cca23744846b31f1d6555c2ee2b05a2e14ca"}, + {file = "pydantic-1.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:e897c9f35281f7889873a3e6d6b69aa1447ceb024e8495a5f0d02ecd17742a7f"}, + {file = "pydantic-1.10.14-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d604be0f0b44d473e54fdcb12302495fe0467c56509a2f80483476f3ba92b33c"}, + {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42c7d17706911199798d4c464b352e640cab4351efe69c2267823d619a937e5"}, + {file = "pydantic-1.10.14-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:596f12a1085e38dbda5cbb874d0973303e34227b400b6414782bf205cc14940c"}, + {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bfb113860e9288d0886e3b9e49d9cf4a9d48b441f52ded7d96db7819028514cc"}, + {file = "pydantic-1.10.14-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc3ed06ab13660b565eed80887fcfbc0070f0aa0691fbb351657041d3e874efe"}, + {file = "pydantic-1.10.14-cp37-cp37m-win_amd64.whl", hash = "sha256:ad8c2bc677ae5f6dbd3cf92f2c7dc613507eafe8f71719727cbc0a7dec9a8c01"}, + {file = "pydantic-1.10.14-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c37c28449752bb1f47975d22ef2882d70513c546f8f37201e0fec3a97b816eee"}, + {file = "pydantic-1.10.14-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49a46a0994dd551ec051986806122767cf144b9702e31d47f6d493c336462597"}, + {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53e3819bd20a42470d6dd0fe7fc1c121c92247bca104ce608e609b59bc7a77ee"}, + {file = "pydantic-1.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0fbb503bbbbab0c588ed3cd21975a1d0d4163b87e360fec17a792f7d8c4ff29f"}, + {file = "pydantic-1.10.14-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:336709883c15c050b9c55a63d6c7ff09be883dbc17805d2b063395dd9d9d0022"}, + {file = "pydantic-1.10.14-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4ae57b4d8e3312d486e2498d42aed3ece7b51848336964e43abbf9671584e67f"}, + {file = "pydantic-1.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:dba49d52500c35cfec0b28aa8b3ea5c37c9df183ffc7210b10ff2a415c125c4a"}, + {file = "pydantic-1.10.14-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c66609e138c31cba607d8e2a7b6a5dc38979a06c900815495b2d90ce6ded35b4"}, + {file = "pydantic-1.10.14-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d986e115e0b39604b9eee3507987368ff8148222da213cd38c359f6f57b3b347"}, + {file = "pydantic-1.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:646b2b12df4295b4c3148850c85bff29ef6d0d9621a8d091e98094871a62e5c7"}, + {file = "pydantic-1.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282613a5969c47c83a8710cc8bfd1e70c9223feb76566f74683af889faadc0ea"}, + {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:466669501d08ad8eb3c4fecd991c5e793c4e0bbd62299d05111d4f827cded64f"}, + {file = "pydantic-1.10.14-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:13e86a19dca96373dcf3190fcb8797d40a6f12f154a244a8d1e8e03b8f280593"}, + {file = "pydantic-1.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:08b6ec0917c30861e3fe71a93be1648a2aa4f62f866142ba21670b24444d7fd8"}, + {file = "pydantic-1.10.14-py3-none-any.whl", hash = "sha256:8ee853cd12ac2ddbf0ecbac1c289f95882b2d4482258048079d13be700aa114c"}, + 
{file = "pydantic-1.10.14.tar.gz", hash = "sha256:46f17b832fe27de7850896f3afee50ea682220dd218f7e9c88d436788419dca6"}, ] [package.dependencies] @@ -3367,13 +3569,13 @@ files = [ [[package]] name = "pytest" -version = "7.4.3" +version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, - {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, ] [package.dependencies] @@ -3491,13 +3693,13 @@ files = [ [[package]] name = "pytz" -version = "2023.3.post1" +version = "2024.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, - {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, ] [[package]] @@ -3548,7 +3750,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -3761,7 +3962,7 @@ scipy = "^1.10.1" type = "git" url = "https://github.com/qiboteam/qibojit.git" reference = "HEAD" -resolved_reference = "b32f503452127ac915b20af773e29694b5b64dd4" +resolved_reference = "48b51665775a70dfd4ab577864f57623f15e0073" [[package]] name = "recommonmark" @@ -3892,13 +4093,13 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruamel-yaml" -version = "0.18.5" +version = "0.18.6" description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order" optional = false python-versions = ">=3.7" files = [ - {file = "ruamel.yaml-0.18.5-py3-none-any.whl", hash = "sha256:a013ac02f99a69cdd6277d9664689eb1acba07069f912823177c5eced21a6ada"}, - {file = "ruamel.yaml-0.18.5.tar.gz", hash = "sha256:61917e3a35a569c1133a8f772e1226961bf5a1198bea7e23f06a0841dea1ab0e"}, + {file = "ruamel.yaml-0.18.6-py3-none-any.whl", 
hash = "sha256:57b53ba33def16c4f3d807c0ccbc00f8a6081827e81ba2491691b76882d0c636"}, + {file = "ruamel.yaml-0.18.6.tar.gz", hash = "sha256:8b27e6a217e786c6fbe5634d8f3f11bc63e0f80f6a5890f28863d9c45aac311b"}, ] [package.dependencies] @@ -3917,24 +4118,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = 
"sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -3942,7 +4143,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -3950,7 +4151,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -3958,7 +4159,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = 
"sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -3969,50 +4170,65 @@ files = [ [[package]] name = "scikit-learn" -version = "1.3.2" +version = "1.4.0" description = "A set of python modules for machine learning and data mining" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, - {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, - {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, - {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, - {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, - {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, - {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, - {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, - {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, - {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, - {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, - {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, - {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, - {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, - {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, - {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, - {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, - {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, - {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, - {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, - {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, - {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, - {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, - {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, - {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, - {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, + {file = "scikit-learn-1.4.0.tar.gz", hash = "sha256:d4373c984eba20e393216edd51a3e3eede56cbe93d4247516d205643c3b93121"}, + {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fce93a7473e2f4ee4cc280210968288d6a7d7ad8dc6fa7bb7892145e407085f9"}, + {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d77df3d1e15fc37a9329999979fa7868ba8655dbab21fe97fc7ddabac9e08cc7"}, + {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2404659fedec40eeafa310cd14d613e564d13dbf8f3c752d31c095195ec05de6"}, + {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e98632da8f6410e6fb6bf66937712c949b4010600ccd3f22a5388a83e610cc3c"}, + {file = "scikit_learn-1.4.0-1-cp310-cp310-win_amd64.whl", hash = "sha256:11b3b140f70fbc9f6a08884631ae8dd60a4bb2d7d6d1de92738ea42b740d8992"}, + {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8341eabdc754d5ab91641a7763243845e96b6d68e03e472531e88a4f1b09f21"}, + {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d1f6bce875ac2bb6b52514f67c185c564ccd299a05b65b7bab091a4c13dde12d"}, + {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c408b46b2fd61952d519ea1af2f8f0a7a703e1433923ab1704c4131520b2083b"}, + {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b465dd1dcd237b7b1dcd1a9048ccbf70a98c659474324fa708464c3a2533fad"}, + {file = 
"scikit_learn-1.4.0-1-cp311-cp311-win_amd64.whl", hash = "sha256:0db8e22c42f7980fe5eb22069b1f84c48966f3e0d23a01afde5999e3987a2501"}, + {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7eef6ea2ed289af40e88c0be9f7704ca8b5de18508a06897c3fe21e0905efdf"}, + {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:349669b01435bc4dbf25c6410b0892073befdaec52637d1a1d1ff53865dc8db3"}, + {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d439c584e58434d0350701bd33f6c10b309e851fccaf41c121aed55f6851d8cf"}, + {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0e2427d9ef46477625ab9b55c1882844fe6fc500f418c3f8e650200182457bc"}, + {file = "scikit_learn-1.4.0-1-cp312-cp312-win_amd64.whl", hash = "sha256:d3d75343940e7bf9b85c830c93d34039fa015eeb341c5c0b4cd7a90dadfe00d4"}, + {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:76986d22e884ab062b1beecdd92379656e9d3789ecc1f9870923c178de55f9fe"}, + {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:e22446ad89f1cb7657f0d849dcdc345b48e2d10afa3daf2925fdb740f85b714c"}, + {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74812c9eabb265be69d738a8ea8d4884917a59637fcbf88a5f0e9020498bc6b3"}, + {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad2a63e0dd386b92da3270887a29b308af4d7c750d8c4995dfd9a4798691bcc"}, + {file = "scikit_learn-1.4.0-1-cp39-cp39-win_amd64.whl", hash = "sha256:53b9e29177897c37e2ff9d4ba6ca12fdb156e22523e463db05def303f5c72b5c"}, + {file = "scikit_learn-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb8f044a8f5962613ce1feb4351d66f8d784bd072d36393582f351859b065f7d"}, + {file = "scikit_learn-1.4.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a6372c90bbf302387792108379f1ec77719c1618d88496d0df30cb8e370b4661"}, + {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:785ce3c352bf697adfda357c3922c94517a9376002971bc5ea50896144bc8916"}, + {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0aba2a20d89936d6e72d95d05e3bf1db55bca5c5920926ad7b92c34f5e7d3bbe"}, + {file = "scikit_learn-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2bac5d56b992f8f06816f2cd321eb86071c6f6d44bb4b1cb3d626525820d754b"}, + {file = "scikit_learn-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27ae4b0f1b2c77107c096a7e05b33458354107b47775428d1f11b23e30a73e8a"}, + {file = "scikit_learn-1.4.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5c5c62ffb52c3ffb755eb21fa74cc2cbf2c521bd53f5c04eaa10011dbecf5f80"}, + {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0d2018ac6fa055dab65fe8a485967990d33c672d55bc254c56c35287b02fab"}, + {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a8918c415c4b4bf1d60c38d32958849a9191c2428ab35d30b78354085c7c7a"}, + {file = "scikit_learn-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:80a21de63275f8bcd7877b3e781679d2ff1eddfed515a599f95b2502a3283d42"}, + {file = "scikit_learn-1.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0f33bbafb310c26b81c4d41ecaebdbc1f63498a3f13461d50ed9a2e8f24d28e4"}, + {file = "scikit_learn-1.4.0-cp312-cp312-macosx_12_0_arm64.whl", hash = 
"sha256:8b6ac1442ec714b4911e5aef8afd82c691b5c88b525ea58299d455acc4e8dcec"}, + {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05fc5915b716c6cc60a438c250108e9a9445b522975ed37e416d5ea4f9a63381"}, + {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:842b7d6989f3c574685e18da6f91223eb32301d0f93903dd399894250835a6f7"}, + {file = "scikit_learn-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:88bcb586fdff865372df1bc6be88bb7e6f9e0aa080dab9f54f5cac7eca8e2b6b"}, + {file = "scikit_learn-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f77674647dd31f56cb12ed13ed25b6ed43a056fffef051715022d2ebffd7a7d1"}, + {file = "scikit_learn-1.4.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:833999872e2920ce00f3a50839946bdac7539454e200eb6db54898a41f4bfd43"}, + {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:970ec697accaef10fb4f51763f3a7b1250f9f0553cf05514d0e94905322a0172"}, + {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923d778f378ebacca2c672ab1740e5a413e437fb45ab45ab02578f8b689e5d43"}, + {file = "scikit_learn-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d041bc95006b545b59e458399e3175ab11ca7a03dc9a74a573ac891f5df1489"}, ] [package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3,<2.0" -scipy = ">=1.5.0" +joblib = ">=1.2.0" +numpy = ">=1.19.5" +scipy = ">=1.6.0" threadpoolctl = ">=2.0.0" [package.extras] -benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] +benchmark = ["matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "pandas (>=1.1.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.19.12)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.17.2)"] [[package]] name = "scipy" @@ -4091,13 +4307,13 @@ scipy = "*" [[package]] name = "setuptools" -version = "69.0.2" +version = "69.0.3" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = 
"setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, + {file = "setuptools-69.0.3-py3-none-any.whl", hash = "sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05"}, + {file = "setuptools-69.0.3.tar.gz", hash = "sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78"}, ] [package.extras] @@ -4246,20 +4462,18 @@ markdown = ">=3.4" [[package]] name = "sphinxcontrib-applehelp" -version = "1.0.7" +version = "1.0.8" description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_applehelp-1.0.7-py3-none-any.whl", hash = "sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d"}, - {file = "sphinxcontrib_applehelp-1.0.7.tar.gz", hash = "sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa"}, + {file = "sphinxcontrib_applehelp-1.0.8-py3-none-any.whl", hash = "sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4"}, + {file = "sphinxcontrib_applehelp-1.0.8.tar.gz", hash = "sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] @@ -4282,38 +4496,34 @@ Sphinx = ">=2.1" [[package]] name = "sphinxcontrib-devhelp" -version = "1.0.5" +version = "1.0.6" description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_devhelp-1.0.5-py3-none-any.whl", hash = "sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f"}, - {file = "sphinxcontrib_devhelp-1.0.5.tar.gz", hash = "sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212"}, + {file = "sphinxcontrib_devhelp-1.0.6-py3-none-any.whl", hash = "sha256:6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f"}, + {file = "sphinxcontrib_devhelp-1.0.6.tar.gz", hash = "sha256:9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-htmlhelp" -version = "2.0.4" +version = "2.0.5" description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_htmlhelp-2.0.4-py3-none-any.whl", hash = "sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9"}, - {file = "sphinxcontrib_htmlhelp-2.0.4.tar.gz", hash = "sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a"}, + {file = "sphinxcontrib_htmlhelp-2.0.5-py3-none-any.whl", hash = "sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04"}, + {file = "sphinxcontrib_htmlhelp-2.0.5.tar.gz", hash = "sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] +standalone = ["Sphinx (>=5)"] test = ["html5lib", "pytest"] [[package]] @@ -4332,38 +4542,34 @@ test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" -version = "1.0.6" +version = 
"1.0.7" description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_qthelp-1.0.6-py3-none-any.whl", hash = "sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4"}, - {file = "sphinxcontrib_qthelp-1.0.6.tar.gz", hash = "sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d"}, + {file = "sphinxcontrib_qthelp-1.0.7-py3-none-any.whl", hash = "sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182"}, + {file = "sphinxcontrib_qthelp-1.0.7.tar.gz", hash = "sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] name = "sphinxcontrib-serializinghtml" -version = "1.1.9" +version = "1.1.10" description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" optional = false python-versions = ">=3.9" files = [ - {file = "sphinxcontrib_serializinghtml-1.1.9-py3-none-any.whl", hash = "sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1"}, - {file = "sphinxcontrib_serializinghtml-1.1.9.tar.gz", hash = "sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54"}, + {file = "sphinxcontrib_serializinghtml-1.1.10-py3-none-any.whl", hash = "sha256:326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7"}, + {file = "sphinxcontrib_serializinghtml-1.1.10.tar.gz", hash = "sha256:93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f"}, ] -[package.dependencies] -Sphinx = ">=5" - [package.extras] lint = ["docutils-stubs", "flake8", "mypy"] +standalone = ["Sphinx (>=5)"] test = ["pytest"] [[package]] @@ -4526,29 +4732,32 @@ files = [ [[package]] name = "tensorflow-io-gcs-filesystem" -version = "0.35.0" +version = "0.36.0" description = "TensorFlow IO" optional = false python-versions = ">=3.7, <3.12" files = [ - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:5521721b38105496d4b43a4ffb0af5b04cc4873d464f26fbceddf8d63815ce98"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd8f30908bf8b7b2a017d6b145720d105aff7f998422671b71729708ec7b2fe4"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac8f1de60fdf9c734aea967b98555e366ac8743f77bca15c49eff023f587076b"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:35b6eca7225c815d962254327195f191d88c3c9c2278a5ab23e0ac834acbadbb"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e997389bfe008210cbd97c0c738d64282a2f03ad4d0536013bb0a9efde0c283"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8fb3402fb1457482c386ea19371bc76383412ae9ea4396edb1e8adb4ba76f21"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb6bf8f5b40207ecb17e7fdc3b4fc824a8361267c14e9528c1688e16de135cb7"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:c4f786eebd98d401565374722f2e67f3878675b0d87489cbaa13c70ee6ac370a"}, - {file = 
"tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fce1466bdb91096b6d22e7df17358ba228bcb92db5cff83f2f9f1c68eb26788"}, - {file = "tensorflow_io_gcs_filesystem-0.35.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1856fe321fdb75f3386d92109c60db6ef097f610b450f9cc69d76444fb9980d1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:702c6df62b38095ff613c433546d9424d4f33902a5ab26b00fd26457e27a99fa"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e9b8aaca2789af356c42afda0f52380f82e5abb2f3c0b85087833fcfe03875d8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c477aed96864ceae77d7051c3b687f28813aba7320fc5dd552164fad6ec8d1a1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be1ff92559dfa23048b01179a1827081947583f5c6f9986ccac471df8a29322a"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:72c3ca4b8c0d8dbdd970699d05a100107cf200317ad8e6a8373e2c37225cd552"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:848e8e89a0f49258c7782189c938d8d1162d989da1a80c79f95c7af3ef6006c8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d72db1ab03edb65fa1e98d06e504ccbc64282d38ab3589afb6db66dc448d1c1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd4d946b5fa23220daa473a80e511a5fb27493d7e49d17dff0bb43bb0a31f32"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa346fd1dd9f57848b73874007440504f060fadd689fa1cc29cc49817d0eeaf3"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0a4437824424a4423cf86162cb8b21b1bec24698194332748b50bb952e62ab9f"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:31806bd7ac2db789161bc720747de22947063265561a4c17be54698fd9780b03"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc0e57976c1aa035af6281f0330cfb8dd50eee2f63412ecc84d60ff5075d29b7"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97ff5c280eb10f699098ae21057be2b146d39e8a906cd5db91f2ea6c34e47d0"}, ] [package.extras] -tensorflow = ["tensorflow (>=2.14.0,<2.15.0)"] -tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.14.0,<2.15.0)"] -tensorflow-cpu = ["tensorflow-cpu (>=2.14.0,<2.15.0)"] -tensorflow-gpu = ["tensorflow-gpu (>=2.14.0,<2.15.0)"] -tensorflow-rocm = ["tensorflow-rocm (>=2.14.0,<2.15.0)"] +tensorflow = ["tensorflow (>=2.15.0,<2.16.0)"] +tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.15.0,<2.16.0)"] +tensorflow-cpu = ["tensorflow-cpu (>=2.15.0,<2.16.0)"] +tensorflow-gpu = ["tensorflow-gpu (>=2.15.0,<2.16.0)"] +tensorflow-rocm = ["tensorflow-rocm (>=2.15.0,<2.16.0)"] [[package]] name = "termcolor" @@ -4626,6 +4835,64 @@ files = [ {file = "tomlkit-0.12.3.tar.gz", hash = "sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4"}, ] +[[package]] +name = "torch" +version = "2.2.0" +description = "Tensors and 
Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.2.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d366158d6503a3447e67f8c0ad1328d54e6c181d88572d688a625fac61b13a97"}, + {file = "torch-2.2.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:707f2f80402981e9f90d0038d7d481678586251e6642a7a6ef67fc93511cb446"}, + {file = "torch-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:15c8f0a105c66b28496092fca1520346082e734095f8eaf47b5786bac24b8a31"}, + {file = "torch-2.2.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:0ca4df4b728515ad009b79f5107b00bcb2c63dc202d991412b9eb3b6a4f24349"}, + {file = "torch-2.2.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:3d3eea2d5969b9a1c9401429ca79efc668120314d443d3463edc3289d7f003c7"}, + {file = "torch-2.2.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0d1c580e379c0d48f0f0a08ea28d8e373295aa254de4f9ad0631f9ed8bc04c24"}, + {file = "torch-2.2.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9328e3c1ce628a281d2707526b4d1080eae7c4afab4f81cea75bde1f9441dc78"}, + {file = "torch-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:03c8e660907ac1b8ee07f6d929c4e15cd95be2fb764368799cca02c725a212b8"}, + {file = "torch-2.2.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:da0cefe7f84ece3e3b56c11c773b59d1cb2c0fd83ddf6b5f7f1fd1a987b15c3e"}, + {file = "torch-2.2.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f81d23227034221a4a4ff8ef24cc6cec7901edd98d9e64e32822778ff01be85e"}, + {file = "torch-2.2.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:dcbfb2192ac41ca93c756ebe9e2af29df0a4c14ee0e7a0dd78f82c67a63d91d4"}, + {file = "torch-2.2.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:9eeb42971619e24392c9088b5b6d387d896e267889d41d267b1fec334f5227c5"}, + {file = "torch-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:c718b2ca69a6cac28baa36d86d8c0ec708b102cebd1ceb1b6488e404cd9be1d1"}, + {file = "torch-2.2.0-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:f11d18fceb4f9ecb1ac680dde7c463c120ed29056225d75469c19637e9f98d12"}, + {file = "torch-2.2.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:ee1da852bfd4a7e674135a446d6074c2da7194c1b08549e31eae0b3138c6b4d2"}, + {file = "torch-2.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0d819399819d0862268ac531cf12a501c253007df4f9e6709ede8a0148f1a7b8"}, + {file = "torch-2.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:08f53ccc38c49d839bc703ea1b20769cc8a429e0c4b20b56921a9f64949bf325"}, + {file = "torch-2.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:93bffe3779965a71dab25fc29787538c37c5d54298fd2f2369e372b6fb137d41"}, + {file = "torch-2.2.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c17ec323da778efe8dad49d8fb534381479ca37af1bfc58efdbb8607a9d263a3"}, + {file = "torch-2.2.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c02685118008834e878f676f81eab3a952b7936fa31f474ef8a5ff4b5c78b36d"}, + {file = "torch-2.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:d9f39d6f53cec240a0e3baa82cb697593340f9d4554cee6d3d6ca07925c2fac0"}, + {file = "torch-2.2.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:51770c065206250dc1222ea7c0eff3f88ab317d3e931cca2aee461b85fbc2472"}, + {file = "torch-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:008e4c6ad703de55af760c73bf937ecdd61a109f9b08f2bbb9c17e7c7017f194"}, + {file = "torch-2.2.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:de8680472dd14e316f42ceef2a18a301461a9058cd6e99a1f1b20f78f11412f1"}, + {file = "torch-2.2.0-cp39-none-macosx_11_0_arm64.whl", hash = 
"sha256:99e1dcecb488e3fd25bcaac56e48cdb3539842904bdc8588b0b255fde03a254c"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + [[package]] name = "tornado" version = "6.4" @@ -4668,39 +4935,62 @@ telegram = ["requests"] [[package]] name = "traitlets" -version = "5.14.0" +version = "5.14.1" description = "Traitlets Python configuration system" optional = false python-versions = ">=3.8" files = [ - {file = "traitlets-5.14.0-py3-none-any.whl", hash = "sha256:f14949d23829023013c47df20b4a76ccd1a85effb786dc060f34de7948361b33"}, - {file = "traitlets-5.14.0.tar.gz", hash = "sha256:fcdaa8ac49c04dfa0ed3ee3384ef6dfdb5d6f3741502be247279407679296772"}, + {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, + {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, ] [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "triton" +version = "2.2.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, + {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, + {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"}, + {file = "triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"}, + {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] + [[package]] name = "types-deprecated" -version = "1.2.9.3" +version = "1.2.9.20240106" description = "Typing stubs for Deprecated" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "types-Deprecated-1.2.9.3.tar.gz", hash = "sha256:ef87327adf3e3c4a4c7d8e06e58f6476710d3466ecfb53c49efb080804a70ef3"}, - {file = "types_Deprecated-1.2.9.3-py3-none-any.whl", hash = "sha256:24da9210763e5e1b3d0d4f6f8bba9ad3bb6af3fe7f6815fc37e3ede4681704f5"}, + {file = "types-Deprecated-1.2.9.20240106.tar.gz", hash = "sha256:afeb819e9a03d0a5795f18c88fe6207c48ed13c639e93281bd9d9b7bb6d34310"}, + {file = "types_Deprecated-1.2.9.20240106-py3-none-any.whl", hash = "sha256:9dcb258493b5be407574ee21e50ddac9e429072d39b576126bf1ac00764fb9a8"}, ] [[package]] name = "types-python-dateutil" -version = "2.8.19.14" +version = "2.8.19.20240106" description = "Typing stubs for python-dateutil" optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.8.19.14.tar.gz", hash = "sha256:1f4f10ac98bb8b16ade9dbee3518d9ace017821d94b057a425b069f834737f4b"}, - {file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"}, + {file = "types-python-dateutil-2.8.19.20240106.tar.gz", hash = "sha256:1f8db221c3b98e6ca02ea83a58371b22c374f42ae5bbdf186db9c9a76581459f"}, + {file = "types_python_dateutil-2.8.19.20240106-py3-none-any.whl", hash = "sha256:efbbdc54590d0f16152fa103c9879c7d4a00e82078f6e2cf01769042165acaa2"}, ] [[package]] @@ -4727,40 +5017,41 @@ files = [ [[package]] name = "tzdata" -version = "2023.3" +version = "2023.4" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, + {file = "tzdata-2023.4-py2.py3-none-any.whl", hash = "sha256:aa3ace4329eeacda5b7beb7ea08ece826c28d761cda36e747cfbf97996d39bf3"}, + {file = "tzdata-2023.4.tar.gz", hash = "sha256:dd54c94f294765522c77399649b4fefd95522479a664a0cec87f41bebc6148c9"}, ] [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.0-py3-none-any.whl", hash = "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224"}, + {file = "urllib3-2.2.0.tar.gz", hash = "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [[package]] name = "wcwidth" -version = "0.2.12" +version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" optional = false python-versions = "*" files = [ - {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"}, - {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"}, + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, ] [[package]] @@ -4934,4 +5225,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "fb9a22edabc2aa704e5ba3bcbf60123f4b6c5e929a248b717b962f78c64c49f1" +content-hash = "8912aacfe92b963c9753b1e47e7fa0b281d1b6e77bb59592f70e251c0ab61a2d" diff --git a/pyproject.toml b/pyproject.toml index bb2b539695..61eb632e3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,7 @@ pylint = "^3.0.3" matplotlib = "^3.7.0" qibojit = { git = "https://github.com/qiboteam/qibojit.git" } tensorflow = { version = "^2.14.1", markers = "sys_platform == 'linux'" } +torch = "^2.1.1" [tool.poe.tasks] test = "pytest" From e735a80a7230f303259f12ad41a44e8301dc5d36 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 8 Feb 2024 19:16:25 +0400 Subject: [PATCH 004/127] disable pylint error --- src/qibo/backends/pytorch.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 27244c6421..aabd603ade 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -6,7 +6,7 @@ from qibo import __version__ from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend -from qibo.config import TF_LOG_LEVEL, log, raise_error +from qibo.config import raise_error class TorchMatrices(NumpyMatrices): @@ -256,20 +256,22 @@ def calculate_norm_density_matrix(self, state, order="nuc"): return torch.norm(state, p=order) def calculate_eigenvalues(self, matrix): - return torch.linalg.eigvalsh(matrix) + return torch.linalg.eigvalsh(matrix) # pylint: disable=not-callable def calculate_eigenvectors(self, matrix): - return torch.linalg.eigh(matrix) + return torch.linalg.eigh(matrix) # pylint: disable=not-callable def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): if eigenvectors is None or self.issparse(matrix): - return torch.linalg.matrix_exp(-1j * a * matrix) + return torch.linalg.matrix_exp( + -1j * a * matrix + ) # pylint: disable=not-callable else: return 
super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): - return torch.sparse.mm(matrix1, matrix2) + return torch.sparse.mm(matrix1, matrix2) # pylint: disable=not-callable return super().calculate_hamiltonian_matrix_product(matrix1, matrix2) def calculate_hamiltonian_state_product(self, matrix, state): From c70bfb5b83b06adaf4c223a0c595d3557eddb062 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 8 Feb 2024 19:40:13 +0400 Subject: [PATCH 005/127] disable pylint --- src/qibo/backends/pytorch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index aabd603ade..e70417201e 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -263,9 +263,9 @@ def calculate_eigenvectors(self, matrix): def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): if eigenvectors is None or self.issparse(matrix): - return torch.linalg.matrix_exp( + return torch.linalg.matrix_exp( # pylint: disable=not-callable -1j * a * matrix - ) # pylint: disable=not-callable + ) else: return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) From 59e6fec325bc61e62c7817fd3cdf50c3786e556b Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 9 Feb 2024 08:35:36 +0400 Subject: [PATCH 006/127] update `conftest` --- tests/conftest.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 618423bbb6..c6fae6298c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,7 +11,14 @@ from qibo.backends import construct_backend # backends to be tested -BACKENDS = ["numpy", "tensorflow", "qibojit-numba", "qibojit-cupy", "qibojit-cuquantum"] +BACKENDS = [ + "numpy", + "pytorch", + "tensorflow", + "qibojit-numba", + "qibojit-cupy", + "qibojit-cuquantum", +] # multigpu configurations to be tested (only with qibojit-cupy) ACCELERATORS = [ {"/GPU:0": 1, "/GPU:1": 1}, From 15f9c1653d3d7dad77859dc9ddefd57863542f79 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 12:17:47 +0400 Subject: [PATCH 007/127] renato suggested improvement and torch.dtype --- src/qibo/backends/pytorch.py | 167 +++++++++++------------------------ 1 file changed, 50 insertions(+), 117 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index e70417201e..1b8d876386 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -8,153 +8,85 @@ from qibo.backends.numpy import NumpyBackend from qibo.config import raise_error +torch_dtype_dict = { + "int": torch.int32, + "float": torch.float32, + "complex": torch.complex64, + "int32": torch.int32, + "int64": torch.int64, + "float32": torch.float32, + "float64": torch.float64, + "complex64": torch.complex64, + "complex128": torch.complex128, +} + class TorchMatrices(NumpyMatrices): # Redefine parametrized gate matrices for backpropagation to work def __init__(self, dtype): - self.np = np super().__init__(dtype) + self.np = np + self.torch_dtype = torch_dtype_dict[dtype] def RX(self, theta): - cos = self.np.cos(theta / 2.0) + 0j - isin = -1j * self.np.sin(theta / 2.0) - return torch.tensor([[cos, isin], [isin, cos]], dtype=self.dtype) + matrix = getattr(NumpyMatrices(self.dtype), "RX")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def RY(self, theta): - cos = self.np.cos(theta / 2.0) + 0j - sin = 
self.np.sin(theta / 2.0) + 0j - return torch.tensor([[cos, -sin], [sin, cos]], dtype=self.dtype) + matrix = getattr(NumpyMatrices(self.dtype), "RY")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def RZ(self, theta): - phase = self.np.exp(0.5j * theta) - return torch.tensor([[self.np.conj(phase), 0], [0, phase]], dtype=self.dtype) + matrix = getattr(NumpyMatrices(self.dtype), "RZ")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def U1(self, theta): - phase = self.np.exp(1j * theta) - return torch.tensor([[1, 0], [0, phase]], dtype=self.dtype) + matrix = getattr(NumpyMatrices(self.dtype), "U1")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def U2(self, phi, lam): - eplus = self.np.exp(1j * (phi + lam) / 2.0) - eminus = self.np.exp(1j * (phi - lam) / 2.0) - return torch.tensor( - [[self.np.conj(eplus), -self.np.conj(eminus)], [eminus, eplus]], - dtype=self.dtype, - ) / self.np.sqrt(2) + matrix = getattr(NumpyMatrices(self.dtype), "U2")(phi, lam) + return torch.tensor(matrix, dtype=self.torch_dtype) def U3(self, theta, phi, lam): - cost = self.np.cos(theta / 2) - sint = self.np.sin(theta / 2) - eplus = self.np.exp(1j * (phi + lam) / 2.0) - eminus = self.np.exp(1j * (phi - lam) / 2.0) - return torch.tensor( - [ - [self.np.conj(eplus) * cost, -self.np.conj(eminus) * sint], - [eminus * sint, eplus * cost], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "U3")(theta, phi, lam) + return torch.tensor(matrix, dtype=self.torch_dtype) def CRX(self, theta): - r = self.RX(theta) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CRX")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def CRY(self, theta): - r = self.RY(theta) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CRY")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def CRZ(self, theta): - r = self.RZ(theta) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CRZ")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def CU1(self, theta): - r = self.U1(theta) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CU1")(theta) + return torch.tensor(matrix, dtype=self.torch_dtype) def CU2(self, phi, lam): - r = self.U2(phi, lam) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CU2")(phi, lam) + return torch.tensor(matrix, dtype=self.torch_dtype) def CU3(self, theta, phi, lam): - r = self.U3(theta, phi, lam) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, r[0, 0], r[0, 1]], - [0, 0, r[1, 0], r[1, 1]], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "CU3")(theta, phi, lam) + return torch.tensor(matrix, dtype=self.torch_dtype) def fSim(self, theta, phi): - cost = self.np.cos(theta) + 0j - isint = -1j * self.np.sin(theta) - phase = self.np.exp(-1j * phi) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, cost, isint, 
0], - [0, isint, cost, 0], - [0, 0, 0, phase], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "fSim")(theta, phi) + return torch.tensor(matrix, dtype=self.torch_dtype) def GeneralizedfSim(self, u, phi): - phase = self.np.exp(-1j * phi) - return torch.tensor( - [ - [1, 0, 0, 0], - [0, u[0, 0], u[0, 1], 0], - [0, u[1, 0], u[1, 1], 0], - [0, 0, 0, phase], - ], - dtype=self.dtype, - ) + matrix = getattr(NumpyMatrices(self.dtype), "GeneralizedfSim")(u, phi) + return torch.tensor(matrix, dtype=self.torch_dtype) def Unitary(self, u): - return torch.tensor(u, dtype=self.dtype) + return torch.tensor(u, dtype=self.torch_dtype) class PyTorchBackend(NumpyBackend): @@ -172,6 +104,7 @@ def __init__(self): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.nthreads = 0 self.tensor_types = (np.ndarray, torch.Tensor) + self.torch_dtype = torch_dtype_dict[self.dtype] def set_device(self, device): # pragma: no cover self.device = device @@ -179,7 +112,7 @@ def set_device(self, device): # pragma: no cover def cast(self, x, dtype=None, copy=False): if dtype is None: dtype = self.dtype - x = torch.tensor(x, dtype=dtype) + x = torch.tensor(x, dtype=self.torch_dtype) if copy: return x.clone() return x @@ -194,26 +127,26 @@ def compile(self, func): return torch.jit.script(func) def zero_state(self, nqubits): - state = torch.zeros(2**nqubits, dtype=self.dtype) + state = torch.zeros(2**nqubits, dtype=self.torch_dtype) state[0] = 1 return state def zero_density_matrix(self, nqubits): - state = torch.zeros(2 * (2**nqubits,), dtype=self.dtype) + state = torch.zeros(2 * (2**nqubits,), dtype=self.torch_dtype) state[0, 0] = 1 return state def matrix(self, gate): npmatrix = super().matrix(gate) - return torch.tensor(npmatrix, dtype=self.dtype) + return torch.tensor(npmatrix, dtype=self.torch_dtype) def matrix_parametrized(self, gate): npmatrix = super().matrix_parametrized(gate) - return torch.tensor(npmatrix, dtype=self.dtype) + return torch.tensor(npmatrix, dtype=self.torch_dtype) def matrix_fused(self, gate): npmatrix = super().matrix_fused(gate) - return torch.tensor(npmatrix, dtype=self.dtype) + return torch.tensor(npmatrix, dtype=self.torch_dtype) def execute_circuit(self, circuit, initial_state=None, nshots=1000): if initial_state is not None: From 23106ae8fd9c64a4ca016f6bd174791cb544b406 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 12:41:48 +0400 Subject: [PATCH 008/127] error in to_numpy method --- src/qibo/backends/pytorch.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 1b8d876386..138b42b51d 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -26,7 +26,6 @@ class TorchMatrices(NumpyMatrices): def __init__(self, dtype): super().__init__(dtype) - self.np = np self.torch_dtype = torch_dtype_dict[dtype] def RX(self, theta): @@ -92,8 +91,8 @@ def Unitary(self, u): class PyTorchBackend(NumpyBackend): def __init__(self): super().__init__() - self.name = "pytorch" + self.name = "pytorch" self.versions = { "qibo": __version__, "numpy": np.__version__, @@ -103,7 +102,6 @@ def __init__(self): self.matrices = TorchMatrices(self.dtype) self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.nthreads = 0 - self.tensor_types = (np.ndarray, torch.Tensor) self.torch_dtype = torch_dtype_dict[self.dtype] def set_device(self, device): # pragma: no cover @@ -121,7 +119,15 @@ def issparse(self, x): 
return x.is_sparse def to_numpy(self, x): - return x.detach().cpu().numpy() + if type(x) is torch.Tensor: + return x.detach().cpu().numpy() + elif type(x) is np.ndarray: + return x + else: + raise_error( + ValueError, + "Input must be a torch.Tensor or np.ndarray, not {}.".format(type(x)), + ) def compile(self, func): return torch.jit.script(func) From 4676d8d18e491b9e9a3c08d92d6dd613003dd0d0 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 13:36:05 +0400 Subject: [PATCH 009/127] improve cast function to accept also a list of tensors --- src/qibo/backends/pytorch.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 138b42b51d..f0d0018c1c 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,4 +1,4 @@ -import collections +from typing import Union import numpy as np import torch @@ -107,10 +107,33 @@ def __init__(self): def set_device(self, device): # pragma: no cover self.device = device - def cast(self, x, dtype=None, copy=False): + def cast( + self, + x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], + dtype=None, + copy=False, + ): + """Casts input as a Torch tensor of the specified dtype. + This method supports casting of single tensors or lists of tensors as for the Tensoflow backend. + + Args: + x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): Input to be casted. + dtype (Optional[str]): Target data type. If None, the default dtype of the backend is used. + copy (bool): If True, the input tensor is copied before casting. + """ if dtype is None: - dtype = self.dtype - x = torch.tensor(x, dtype=self.torch_dtype) + dtype = self.torch_dtype + else: + dtype = torch_dtype_dict[str(dtype)] + if isinstance(x, torch.Tensor): + x = x.to(dtype) + elif isinstance(x, list): + if all(isinstance(i, torch.Tensor) for i in x): + x = [i.to(dtype) for i in x] + else: + x = [torch.tensor(i, dtype=dtype) for i in x] + else: + x = torch.tensor(x, dtype=dtype) if copy: return x.clone() return x From 2d2e859d65e6451de380b142ea750a4a931be706 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 14:28:44 +0400 Subject: [PATCH 010/127] partial error correction --- src/qibo/backends/pytorch.py | 26 +++++++++++++------------- tests/test_gates_gates.py | 20 ++++++++++---------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index f0d0018c1c..33425d056b 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,3 +1,4 @@ +import collections from typing import Union import numpy as np @@ -103,6 +104,7 @@ def __init__(self): self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.nthreads = 0 self.torch_dtype = torch_dtype_dict[self.dtype] + self.tensor_types = (torch.Tensor, np.ndarray) def set_device(self, device): # pragma: no cover self.device = device @@ -110,7 +112,7 @@ def set_device(self, device): # pragma: no cover def cast( self, x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], - dtype=None, + dtype: Union[str, torch.dtype, np.dtype, type] = None, copy=False, ): """Casts input as a Torch tensor of the specified dtype. @@ -118,11 +120,15 @@ def cast( Args: x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): Input to be casted. - dtype (Optional[str]): Target data type. 
If None, the default dtype of the backend is used. + dtype (Union[str, torch.dtype, np.dtype, type]): Target data type. If None, the default dtype of the backend is used. copy (bool): If True, the input tensor is copied before casting. """ if dtype is None: dtype = self.torch_dtype + elif isinstance(dtype, torch.dtype): + dtype = dtype + elif isinstance(dtype, type): + dtype = torch_dtype_dict[dtype.__name__] else: dtype = torch_dtype_dict[str(dtype)] if isinstance(x, torch.Tensor): @@ -144,13 +150,7 @@ def issparse(self, x): def to_numpy(self, x): if type(x) is torch.Tensor: return x.detach().cpu().numpy() - elif type(x) is np.ndarray: - return x - else: - raise_error( - ValueError, - "Input must be a torch.Tensor or np.ndarray, not {}.".format(type(x)), - ) + return x def compile(self, func): return torch.jit.script(func) @@ -179,12 +179,12 @@ def matrix_fused(self, gate): def execute_circuit(self, circuit, initial_state=None, nshots=1000): if initial_state is not None: - initial_state = initial_state.to(self.device) + initial_state = self.cast(initial_state) return super().execute_circuit(circuit, initial_state, nshots) def execute_circuit_repeated(self, circuit, nshots, initial_state=None): if initial_state is not None: - initial_state = initial_state.to(self.device) + initial_state = self.cast(initial_state) return super().execute_circuit_repeated(circuit, nshots, initial_state) def sample_shots(self, probabilities, nshots): @@ -217,10 +217,10 @@ def calculate_norm_density_matrix(self, state, order="nuc"): return np.trace(state) return torch.norm(state, p=order) - def calculate_eigenvalues(self, matrix): + def calculate_eigenvalues(self, matrix, k=6): return torch.linalg.eigvalsh(matrix) # pylint: disable=not-callable - def calculate_eigenvectors(self, matrix): + def calculate_eigenvectors(self, matrix, k=6): return torch.linalg.eigh(matrix) # pylint: disable=not-callable def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index caabc73e57..d4f40a3232 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -427,10 +427,10 @@ def test_u3(backend, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) assert gates.U3(0, theta, phi, lam).qasm_label == "u3" @@ -518,10 +518,10 @@ def test_cy(backend, controlled_by, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) assert gates.CY(0, 1).qasm_label == "cy" @@ -562,10 +562,10 @@ def test_cz(backend, controlled_by, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, 
seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) assert gates.CZ(0, 1).qasm_label == "cz" @@ -1000,10 +1000,10 @@ def test_rxxyy(backend): backend.assert_allclose(final_state, target_state) # testing random expectation value due to global phase difference backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) with pytest.raises(NotImplementedError): @@ -1163,10 +1163,10 @@ def test_ecr(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) with pytest.raises(NotImplementedError): From 3d511209902113aa940ec70856b4ccb6a31f833d Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 15:01:10 +0400 Subject: [PATCH 011/127] solved compatibility in callbacks --- src/qibo/backends/pytorch.py | 29 ++++++++++++++++++----------- tests/test_callbacks.py | 12 ++++++++++-- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 33425d056b..e23bc75525 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -231,22 +231,29 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): else: return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) + def calculate_expectation_state(self, hamiltonian, state, normalize): + statec = torch.conj(state) + hstate = hamiltonian @ state + ev = torch.real(torch.sum(statec * hstate)) + if normalize: + ev = ev / torch.sum(torch.square(torch.abs(state))) + return ev + def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): return torch.sparse.mm(matrix1, matrix2) # pylint: disable=not-callable - return super().calculate_hamiltonian_matrix_product(matrix1, matrix2) + return torch.matmul(matrix1, matrix2) def calculate_hamiltonian_state_product(self, matrix, state): - rank = len(tuple(state.shape)) - if rank == 1: # vector - return np.matmul(matrix, state[:, np.newaxis])[:, 0] - elif rank == 2: # matrix - return np.matmul(matrix, state) - else: - raise_error( - ValueError, - "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), - ) + return torch.matmul(matrix, state) + + def calculate_overlap(self, state1, state2): + return torch.abs(torch.sum(torch.conj(self.cast(state1)) * self.cast(state2))) + + def calculate_overlap_density_matrix(self, state1, state2): + return torch.trace( + torch.matmul(torch.conj(self.cast(state1)).T, self.cast(state2)) + ) def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": diff --git a/tests/test_callbacks.py 
b/tests/test_callbacks.py index 4c19b859a9..5f3347eba6 100644 --- a/tests/test_callbacks.py +++ b/tests/test_callbacks.py @@ -289,6 +289,12 @@ def test_norm(backend, density_matrix, seed): backend.assert_allclose(final_norm, target_norm) +def to_numpy(x): + if isinstance(x, np.ndarray): + return x + return x.detach().cpu().numpy() + + @pytest.mark.parametrize("seed", list(range(1, 5 + 1))) @pytest.mark.parametrize("density_matrix", [False, True]) @pytest.mark.parametrize("nqubits", list(range(2, 6 + 1, 2))) @@ -306,10 +312,12 @@ def test_overlap(backend, nqubits, density_matrix, seed): if density_matrix: final_overlap = overlap.apply_density_matrix(backend, state1) - target_overlap = np.trace(np.transpose(np.conj(state0)) @ state1) + target_overlap = np.trace( + np.transpose(np.conj(to_numpy(state0))) @ to_numpy(state1) + ) else: final_overlap = overlap.apply(backend, state1) - target_overlap = np.abs(np.sum(np.conj(state0) * state1)) + target_overlap = np.abs(np.sum(np.conj(to_numpy(state0)) * to_numpy(state1))) backend.assert_allclose(final_overlap, target_overlap) From b69eaab487d69c0b48e95ff49730807ee005dca1 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 17:17:15 +0400 Subject: [PATCH 012/127] pytorch backend not supported for clifford --- src/qibo/backends/clifford.py | 7 +++++++ tests/test_backends_clifford.py | 8 ++++++++ 2 files changed, 15 insertions(+) diff --git a/src/qibo/backends/clifford.py b/src/qibo/backends/clifford.py index 8189287f82..20bd695a53 100644 --- a/src/qibo/backends/clifford.py +++ b/src/qibo/backends/clifford.py @@ -8,6 +8,7 @@ from qibo import gates from qibo.backends.numpy import NumpyBackend +from qibo.backends.pytorch import PyTorchBackend from qibo.backends.tensorflow import TensorflowBackend from qibo.config import raise_error @@ -548,6 +549,12 @@ def __init__(self, engine=None): "TensorflowBackend for Clifford Simulation is not supported.", ) + if isinstance(engine, PyTorchBackend): + raise_error( + NotImplementedError, + "PyTorchBackend for Clifford Simulation is not supported.", + ) + self.engine = engine self.np = _calculation_engine(engine) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 13d5ef3aa9..1a1ad46027 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -8,6 +8,7 @@ CliffordBackend, GlobalBackend, NumpyBackend, + PyTorchBackend, TensorflowBackend, ) from qibo.noise import DepolarizingError, NoiseModel, PauliError @@ -24,6 +25,13 @@ def construct_clifford_backend(backend): str(excinfo.value) == "TensorflowBackend for Clifford Simulation is not supported yet." ) + if isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError) as excinfo: + clifford_backend = CliffordBackend(backend) + assert ( + str(excinfo.value) + == "PyTorchBackend for Clifford Simulation is not supported yet." 
+ ) else: return CliffordBackend(backend) From 8385ffadced4337e758cb6cb23da475263dbe11d Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 20:22:50 +0400 Subject: [PATCH 013/127] other tests solved --- src/qibo/backends/pytorch.py | 12 +++++++++--- tests/test_quantum_info_clifford.py | 21 ++++++++++++++++++++- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index e23bc75525..e90d882167 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -188,7 +188,9 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): return super().execute_circuit_repeated(circuit, nshots, initial_state) def sample_shots(self, probabilities, nshots): - return torch.multinomial(probabilities, nshots) + return torch.multinomial( + self.cast(probabilities, dtype="float"), nshots, replacement=True + ) def samples_to_binary(self, samples, nqubits): qrange = torch.arange(nqubits - 1, -1, -1, dtype=torch.int32) @@ -202,9 +204,12 @@ def calculate_frequencies(self, samples): return collections.Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): + frequencies = self.cast(frequencies, dtype="int") samples = self.sample_shots(probabilities, nsamples) unique_samples, counts = torch.unique(samples, return_counts=True) - frequencies.index_add_(0, unique_samples, counts) + frequencies.index_add_( + 0, self.cast(unique_samples, dtype="int"), self.cast(counts, dtype="int") + ) return frequencies def calculate_norm(self, state, order=2): @@ -232,8 +237,9 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) def calculate_expectation_state(self, hamiltonian, state, normalize): + state = self.cast(state) statec = torch.conj(state) - hstate = hamiltonian @ state + hstate = self.cast(hamiltonian @ state) ev = torch.real(torch.sum(statec * hstate)) if normalize: ev = ev / torch.sum(torch.square(torch.abs(state))) diff --git a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index 3290aefc31..b0865decb0 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -5,7 +5,7 @@ import pytest from qibo import Circuit, gates -from qibo.backends import CliffordBackend, TensorflowBackend +from qibo.backends import CliffordBackend, PyTorchBackend, TensorflowBackend from qibo.quantum_info._clifford_utils import ( _cnot_cost, _one_qubit_paulis_string_product, @@ -23,6 +23,13 @@ def construct_clifford_backend(backend): str(excinfo.value) == "TensorflowBackend for Clifford Simulation is not supported yet." ) + elif isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError) as excinfo: + clifford_backend = CliffordBackend(backend) + assert ( + str(excinfo.value) + == "PyTorchBackend for Clifford Simulation is not supported." 
+ ) else: return CliffordBackend(backend) @@ -32,6 +39,9 @@ def test_clifford_from_symplectic_matrix(backend, nqubits): if isinstance(backend, TensorflowBackend): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend) + elif isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError): + clifford_backend = CliffordBackend(backend) else: clifford_backend = CliffordBackend(backend) symplectic_matrix = clifford_backend.zero_state(nqubits) @@ -67,6 +77,8 @@ def test_clifford_from_circuit(backend, measurement): def test_clifford_to_circuit(backend, nqubits, algorithm): if backend.__class__.__name__ == "TensorflowBackend": pytest.skip("CliffordBackend not defined for Tensorflow engine.") + elif backend.__class__.__name__ == "PyTorchBackend": + pytest.skip("CliffordBackend not defined for PyTorch engine.") clifford = random_clifford(nqubits, backend=backend) @@ -117,6 +129,8 @@ def test_clifford_to_circuit(backend, nqubits, algorithm): def test_clifford_initialization(backend, nqubits): if backend.__class__.__name__ == "TensorflowBackend": pytest.skip("CliffordBackend not defined for Tensorflow engine.") + elif backend.__class__.__name__ == "PyTorchBackend": + pytest.skip("CliffordBackend not defined for PyTorch engine.") clifford_backend = construct_clifford_backend(backend) @@ -315,6 +329,9 @@ def test_clifford_samples_error(backend): if isinstance(backend, TensorflowBackend): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend) + elif isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError): + clifford_backend = CliffordBackend(backend) else: obj = Clifford.from_circuit(c, engine=backend) with pytest.raises(RuntimeError) as excinfo: @@ -327,6 +344,8 @@ def test_clifford_samples_error(backend): def test_clifford_copy(backend, nqubits, deep): if backend.__class__.__name__ == "TensorflowBackend": pytest.skip("CliffordBackend not defined for Tensorflow engine.") + elif backend.__class__.__name__ == "PyTorchBackend": + pytest.skip("CliffordBackend not defined for PyTorch engine.") circuit = random_clifford(nqubits, backend=backend) clifford = Clifford.from_circuit(circuit, engine=backend) From 791b45828e0c960dfff88f26bed96145e9205021 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 9 Feb 2024 21:03:21 +0400 Subject: [PATCH 014/127] solved errors in gates --- src/qibo/gates/gates.py | 5 +++++ tests/test_gates_gates.py | 30 ++++++++++++++++++++---------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index ae56a2723b..92d7d84bd0 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -2329,7 +2329,12 @@ def __init__( } # checking unitarity without invoking any backend + # maybe here having the backend would be useful? 
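+ # NOTE: `unitary` may be a torch.Tensor when the PyTorch backend is active; it is detached to a NumPy array below so the U^dagger @ U check stays backend-independent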
if check_unitary: + import torch + + if isinstance(unitary, torch.Tensor): + unitary = unitary.detach().cpu().numpy() product = np.transpose(np.conj(unitary)) @ unitary sums = all(np.abs(1 - np.sum(product, axis=1)) < PRECISION_TOL) diagonal = all(np.abs(1 - np.diag(product)) < PRECISION_TOL) diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index d4f40a3232..d495abb0a3 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -68,6 +68,12 @@ def test_z(backend): assert gates.Z(0).unitary +def to_numpy(array): + if isinstance(array, np.ndarray): + return array + return array.detach().cpu().numpy() + + def test_sx(backend): nqubits = 1 initial_state = random_statevector(2**nqubits, backend=backend) @@ -94,10 +100,12 @@ def test_sx(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) - @ observable - @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + np.transpose(np.conj(to_numpy(final_state_decompose))) + @ to_numpy(observable) + @ to_numpy(final_state_decompose), + np.transpose(np.conj(to_numpy(target_state))) + @ to_numpy(observable) + @ to_numpy(target_state), ) assert gates.SX(0).qasm_label == "sx" @@ -131,10 +139,12 @@ def test_sxdg(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) - @ observable - @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + np.transpose(np.conj(to_numpy(final_state_decompose))) + @ to_numpy(observable) + @ to_numpy(final_state_decompose), + np.transpose(np.conj(to_numpy(target_state))) + @ to_numpy(observable) + @ to_numpy(target_state), ) assert gates.SXDG(0).qasm_label == "sxdg" @@ -280,8 +290,8 @@ def test_ry(backend, theta): phase = np.exp(1j * theta / 2.0) gate = np.array([[phase.real, -phase.imag], [phase.imag, phase.real]]) - gate = backend.cast(gate, dtype=gate.dtype) - target_state = gate @ initial_state + gate = backend.cast(gate, dtype="complex128") + target_state = gate @ backend.cast(initial_state, dtype="complex128") backend.assert_allclose(final_state, target_state) From f067c9a93ff36a6d63380d9b01053f124d14bab4 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Mon, 12 Feb 2024 12:02:41 +0400 Subject: [PATCH 015/127] solved many errors --- src/qibo/backends/pytorch.py | 335 ++++++++++++++++++++++++++++++++++- src/qibo/derivative.py | 7 + tests/test_derivative.py | 67 ++++--- tests/test_gates_channels.py | 2 +- 4 files changed, 380 insertions(+), 31 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index e90d882167..9748c3b4db 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -5,9 +5,11 @@ import torch from qibo import __version__ +from qibo.backends import einsum_utils from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend from qibo.config import raise_error +from qibo.result import CircuitResult, MeasurementOutcomes, QuantumState torch_dtype_dict = { "int": torch.int32, @@ -144,6 +146,28 @@ def cast( return x.clone() return x + def apply_gate(self, gate, state, nqubits): + state = self.cast(state) + state = torch.reshape(state, nqubits * (2,)) + matrix = gate.matrix(self) + if gate.is_controlled_by: + matrix = 
torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + ncontrol = len(gate.control_qubits) + nactive = nqubits - ncontrol + order, targets = einsum_utils.control_order(gate, nqubits) + state = state.permute(*order) + state = torch.reshape(state, (2**ncontrol,) + nactive * (2,)) + opstring = einsum_utils.apply_gate_string(targets, nactive) + updates = torch.einsum(opstring, state[-1], matrix) + state = torch.cat([state[:-1], updates[None]], axis=0) + state = torch.reshape(state, nqubits * (2,)) + state = state.permute(*einsum_utils.reverse_order(order)) + else: + matrix = torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) + opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) + state = torch.einsum(opstring, state, matrix) + return torch.reshape(state, (2**nqubits,)) + def issparse(self, x): return x.is_sparse @@ -183,9 +207,99 @@ def execute_circuit(self, circuit, initial_state=None, nshots=1000): return super().execute_circuit(circuit, initial_state, nshots) def execute_circuit_repeated(self, circuit, nshots, initial_state=None): - if initial_state is not None: - initial_state = self.cast(initial_state) - return super().execute_circuit_repeated(circuit, nshots, initial_state) + """ + Execute the circuit `nshots` times to retrieve probabilities, frequencies + and samples. Note that this method is called only if a unitary channel + is present in the circuit (i.e. noisy simulation) and `density_matrix=False`, or + if some collapsing measuremnt is performed. + """ + + if ( + circuit.has_collapse + and not circuit.measurements + and not circuit.density_matrix + ): + raise RuntimeError( + "The circuit contains only collapsing measurements (`collapse=True`) but `density_matrix=False`. Please set `density_matrix=True` to retrieve the final state after execution." 
+ ) + + results, final_states = [], [] + nqubits = circuit.nqubits + + if not circuit.density_matrix: + samples = [] + target_qubits = [ + measurement.target_qubits for measurement in circuit.measurements + ] + target_qubits = sum(target_qubits, tuple()) + + for _ in range(nshots): + if circuit.density_matrix: + if initial_state is None: + state = self.zero_density_matrix(nqubits) + else: + state = self.cast(initial_state, copy=True) + + for gate in circuit.queue: + if gate.symbolic_parameters: + gate.substitute_symbols() + state = gate.apply_density_matrix(self, state, nqubits) + else: + if circuit.accelerators: # pragma: no cover + # pylint: disable=E1111 + state = self.execute_distributed_circuit(circuit, initial_state) + else: + if initial_state is None: + state = self.zero_state(nqubits) + else: + state = self.cast(initial_state, copy=True) + + for gate in circuit.queue: + if gate.symbolic_parameters: + gate.substitute_symbols() + state = gate.apply(self, state, nqubits) + + if circuit.density_matrix: + final_states.append(state) + if circuit.measurements: + result = CircuitResult( + state, circuit.measurements, backend=self, nshots=1 + ) + sample = result.samples()[0] + results.append(sample) + if not circuit.density_matrix: + samples.append("".join([str(s) for s in sample])) + for gate in circuit.measurements: + gate.result.reset() + + if circuit.density_matrix: # this implies also it has_collapse + assert circuit.has_collapse + final_state = torch.mean(torch.stack(final_states), 0) + if circuit.measurements: + qubits = [q for m in circuit.measurements for q in m.target_qubits] + final_result = CircuitResult( + final_state, + circuit.measurements, + backend=self, + samples=self.aggregate_shots(results), + nshots=nshots, + ) + else: + final_result = QuantumState(final_state, backend=self) + circuit._final_state = final_result + return final_result + else: + final_result = MeasurementOutcomes( + circuit.measurements, + backend=self, + samples=self.aggregate_shots(results), + nshots=nshots, + ) + final_result._repeated_execution_frequencies = self.calculate_frequencies( + samples + ) + circuit._final_state = final_result + return final_result def sample_shots(self, probabilities, nshots): return torch.multinomial( @@ -198,6 +312,50 @@ def samples_to_binary(self, samples, nqubits): samples = samples[:, None] >> qrange return samples % 2 + def _order_probabilities(self, probs, qubits, nqubits): + """Arrange probabilities according to the given ``qubits`` ordering.""" + if probs.dim() == 0: + return probs + unmeasured, reduced = [], {} + for i in range(nqubits): + if i in qubits: + reduced[i] = i - len(unmeasured) + else: + unmeasured.append(i) + return probs.permute(*[reduced.get(i) for i in qubits]) + + def calculate_probabilities(self, state, qubits, nqubits): + rtype = state.real.dtype + unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) + state = torch.reshape(torch.abs(state) ** 2, nqubits * (2,)) + probs = torch.sum(state.type(rtype), dim=unmeasured_qubits) + return self._order_probabilities(probs, qubits, nqubits).view(-1) + + def calculate_probabilities_density_matrix(self, state, qubits, nqubits): + order = tuple(sorted(qubits)) + order += tuple(i for i in range(nqubits) if i not in qubits) + order = order + tuple(i + nqubits for i in order) + shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) + state = torch.reshape(state, 2 * nqubits * (2,)) + state = torch.reshape(state.permute(*order), shape) + probs = torch.abs(torch.einsum("abab->a", state)) 
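+ # the einsum "abab->a" above keeps the diagonal entries rho[(a,b),(a,b)] and sums over the unmeasured subsystem b, i.e. p(a) = sum_b <a,b| rho |a,b>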
+ probs = torch.reshape(probs, len(qubits) * (2,)) + return self._order_probabilities(probs, qubits, nqubits).view(-1) + + def sample_frequencies(self, probabilities, nshots): + from qibo.config import SHOT_BATCH_SIZE + + nprobs = probabilities / torch.sum(probabilities) + frequencies = torch.zeros(len(nprobs), dtype=torch.int64) + for _ in range(nshots // SHOT_BATCH_SIZE): + frequencies = self.update_frequencies(frequencies, nprobs, SHOT_BATCH_SIZE) + frequencies = self.update_frequencies( + frequencies, nprobs, nshots % SHOT_BATCH_SIZE + ) + return collections.Counter( + {i: f.item() for i, f in enumerate(frequencies) if f > 0} + ) + def calculate_frequencies(self, samples): res, counts = torch.unique(samples, return_counts=True) res, counts = res.tolist(), counts.tolist() @@ -261,6 +419,177 @@ def calculate_overlap_density_matrix(self, state1, state2): torch.matmul(torch.conj(self.cast(state1)).T, self.cast(state2)) ) + def apply_gate_density_matrix(self, gate, state, nqubits): + state = self.cast(state) + state = torch.reshape(state, 2 * nqubits * (2,)) + matrix = gate.matrix(self) + if gate.is_controlled_by: + matrix = torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + matrixc = torch.conj(matrix) + ncontrol = len(gate.control_qubits) + nactive = nqubits - ncontrol + n = 2**ncontrol + + order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) + state = torch.transpose(state, order) + state = torch.reshape(state, 2 * (n,) + 2 * nactive * (2,)) + + leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( + targets, nactive + ) + state01 = state[: n - 1, n - 1] + state01 = torch.einsum(rightc, state01, matrixc) + state10 = state[n - 1, : n - 1] + state10 = torch.einsum(leftc, state10, matrix) + + left, right = einsum_utils.apply_gate_density_matrix_string( + targets, nactive + ) + state11 = state[n - 1, n - 1] + state11 = torch.einsum(right, state11, matrixc) + state11 = torch.einsum(left, state11, matrix) + + state00 = state[range(n - 1)] + state00 = state00[:, range(n - 1)] + state01 = torch.cat([state00, state01[:, None]], dim=1) + state10 = torch.cat([state10, state11[None]], dim=0) + state = torch.cat([state01, state10[None]], dim=0) + state = torch.reshape(state, 2 * nqubits * (2,)) + state = torch.transpose(state, einsum_utils.reverse_order(order)) + else: + matrix = torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrixc = torch.conj(matrix) + left, right = einsum_utils.apply_gate_density_matrix_string( + gate.qubits, nqubits + ) + state = torch.einsum(right, state, matrixc) + state = torch.einsum(left, state, matrix) + return torch.reshape(state, 2 * (2**nqubits,)) + + def partial_trace(self, state, qubits, nqubits): + state = self.cast(state) + state = torch.reshape(state, nqubits * (2,)) + axes = 2 * [list(qubits)] + rho = torch.tensordot(state, torch.conj(state), dims=axes) + shape = 2 * (2 ** (nqubits - len(qubits)),) + return torch.reshape(rho, shape) + + def partial_trace_density_matrix(self, state, qubits, nqubits): + state = self.cast(state) + state = torch.reshape(state, 2 * nqubits * (2,)) + order = list(sorted(qubits)) + order += [i for i in range(nqubits) if i not in qubits] + order += [i + nqubits for i in order] + state = state.permute(*order) + shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) + state = torch.reshape(state, shape) + return torch.einsum("abac->bc", state) + + def _append_zeros(self, state, qubits, results): + """Helper method for collapse.""" + for q, r in zip(qubits, results): + state = 
torch.unsqueeze(state, dim=q) + if r: + state = torch.cat([torch.zeros_like(state), state], dim=q) + else: + state = torch.cat([state, torch.zeros_like(state)], dim=q) + return state + + def collapse_state(self, state, qubits, shot, nqubits, normalize=True): + state = self.cast(state) + shape = state.shape + binshot = self.samples_to_binary(shot, len(qubits))[0] + state = torch.reshape(state, nqubits * (2,)) + order = list(qubits) + [q for q in range(nqubits) if q not in qubits] + state = state.permute(*order) + subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) + state = torch.reshape(state, subshape)[int(shot)] + if normalize: + norm = torch.sqrt(torch.sum(torch.abs(state) ** 2)) + state = state / norm + state = self._append_zeros(state, qubits, binshot) + return torch.reshape(state, shape) + + def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): + state = self.cast(state) + shape = state.shape + binshot = list(self.samples_to_binary(shot, len(qubits))[0]) + order = list(qubits) + [q + nqubits for q in qubits] + order.extend(q for q in range(nqubits) if q not in qubits) + order.extend(q + nqubits for q in range(nqubits) if q not in qubits) + state = torch.reshape(state, 2 * nqubits * (2,)) + state = state.permute(*order) + subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) + state = torch.reshape(state, subshape)[int(shot), int(shot)] + n = 2 ** (len(state.shape) // 2) + if normalize: + norm = torch.trace(torch.reshape(state, (n, n))) + state = state / norm + qubits = qubits + [q + nqubits for q in qubits] + state = self._append_zeros(state, qubits, 2 * binshot) + return torch.reshape(state, shape) + + def reset_error_density_matrix(self, gate, state, nqubits): + from qibo.gates import X + + state = self.cast(state) + shape = state.shape + q = gate.target_qubits[0] + p_0, p_1 = gate.init_kwargs["p_0"], gate.init_kwargs["p_1"] + trace = self.partial_trace_density_matrix(state, (q,), nqubits) + trace = torch.reshape(trace, 2 * (nqubits - 1) * (2,)) + zero = self.zero_density_matrix(1) + zero = torch.tensordot(trace, zero, dims=0) + order = list(range(2 * nqubits - 2)) + order.insert(q, 2 * nqubits - 2) + order.insert(q + nqubits, 2 * nqubits - 1) + zero = torch.reshape(zero.permute(*order), shape) + state = (1 - p_0 - p_1) * state + p_0 * zero + return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) + + def thermal_error_density_matrix(self, gate, state, nqubits): + state = self.cast(state) + shape = state.shape + state = self.apply_gate(gate, state.view(-1), 2 * nqubits) + return torch.reshape(state, shape) + + def identity_density_matrix(self, nqubits, normalize: bool = True): + state = torch.eye(2**nqubits, dtype=torch.complex128) + if normalize is True: + state /= 2**nqubits + return state + + def depolarizing_error_density_matrix(self, gate, state, nqubits): + state = self.cast(state) + shape = state.shape + q = gate.target_qubits + lam = gate.init_kwargs["lam"] + trace = self.partial_trace_density_matrix(state, q, nqubits) + trace = torch.reshape(trace, 2 * (nqubits - len(q)) * (2,)) + identity = self.identity_density_matrix(len(q)) + identity = torch.reshape(identity, 2 * len(q) * (2,)) + identity = torch.tensordot(trace, identity, dims=0) + qubits = list(range(nqubits)) + for j in q: + qubits.pop(qubits.index(j)) + qubits.sort() + qubits += list(q) + qubit_1 = list(range(nqubits - len(q))) + list( + range(2 * (nqubits - len(q)), 2 * nqubits - len(q)) + ) + qubit_2 = list(range(nqubits - len(q), 2 * 
(nqubits - len(q)))) + list( + range(2 * nqubits - len(q), 2 * nqubits) + ) + qs = [qubit_1, qubit_2] + order = [] + for qj in qs: + qj = [qj[qubits.index(i)] for i in range(len(qubits))] + order += qj + identity = identity.permute(*order) + identity = torch.reshape(identity, shape) + state = (1 - lam) * state + lam * identity + return state + def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ diff --git a/src/qibo/derivative.py b/src/qibo/derivative.py index 65d4ef87b0..72772cc006 100644 --- a/src/qibo/derivative.py +++ b/src/qibo/derivative.py @@ -1,5 +1,6 @@ import numpy as np +from qibo.backends.pytorch import PyTorchBackend from qibo.config import raise_error from qibo.hamiltonians.abstract import AbstractHamiltonian @@ -102,6 +103,12 @@ def circuit(nqubits = 1): # inheriting hamiltonian's backend backend = hamiltonian.backend + # TODO: make this work wih pytorch backend + if isinstance(backend, PyTorchBackend): + raise_error( + NotImplementedError, + "PyTorchBackend for the parameter shift rule is not supported.", + ) # getting the gate's type gate = circuit.associate_gates_with_parameters()[parameter_index] diff --git a/tests/test_derivative.py b/tests/test_derivative.py index 60ea06244e..a1c6e97e0e 100644 --- a/tests/test_derivative.py +++ b/tests/test_derivative.py @@ -2,6 +2,7 @@ import pytest from qibo import Circuit, gates, hamiltonians +from qibo.backends.pytorch import PyTorchBackend from qibo.derivative import finite_differences, parameter_shift from qibo.symbols import Z @@ -31,6 +32,7 @@ def circuit(nqubits=1): [(1, [-8.51104358e-02, -5.20075970e-01, 0]), (0.5, [-0.02405061, -0.13560379, 0])], ) def test_standard_parameter_shift(backend, nshots, atol, scale_factor, grads): + # initializing the circuit c = circuit(nqubits=1) backend.set_seed(42) @@ -55,34 +57,45 @@ def test_standard_parameter_shift(backend, nshots, atol, scale_factor, grads): circuit=c, hamiltonian=c, parameter_index=0, nshots=nshots ) - # executing all the procedure - grad_0 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=0, - scale_factor=scale_factor, - nshots=nshots, - ) - grad_1 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=1, - scale_factor=scale_factor, - nshots=nshots, - ) - grad_2 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=2, - scale_factor=scale_factor, - nshots=nshots, - ) + if isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError) as excinfo: + grad = parameter_shift( + circuit=c, hamiltonian=test_hamiltonian, parameter_index=0 + ) + assert ( + str(excinfo.value) + == "PyTorchBackend for the parameter shift rule is not supported." 
+ ) + + else: + # executing all the procedure + grad_0 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=0, + scale_factor=scale_factor, + nshots=nshots, + ) + grad_1 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=1, + scale_factor=scale_factor, + nshots=nshots, + ) + grad_2 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=2, + scale_factor=scale_factor, + nshots=nshots, + ) - # check of known values - # calculated using tf.GradientTape - backend.assert_allclose(grad_0, grads[0], atol=atol) - backend.assert_allclose(grad_1, grads[1], atol=atol) - backend.assert_allclose(grad_2, grads[2], atol=atol) + # check of known values + # calculated using tf.GradientTape + backend.assert_allclose(grad_0, grads[0], atol=atol) + backend.assert_allclose(grad_1, grads[1], atol=atol) + backend.assert_allclose(grad_2, grads[2], atol=atol) @pytest.mark.parametrize("step_size", [10**-i for i in range(5, 10, 1)]) diff --git a/tests/test_gates_channels.py b/tests/test_gates_channels.py index a512b83831..322bc05bb0 100644 --- a/tests/test_gates_channels.py +++ b/tests/test_gates_channels.py @@ -209,7 +209,7 @@ def test_pauli_noise_channel(backend, pauli_order): ) gate = gates.X(1) target_state = backend.apply_gate_density_matrix(gate, np.copy(initial_state), 2) - target_state = 0.3 * target_state + 0.7 * initial_state + target_state = 0.3 * target_state + 0.7 * backend.cast(initial_state) backend.assert_allclose(final_state, target_state) basis = ["X", "Y", "Z"] From a5e81e8c9dbc2d3b0f03c07d7ea5fafc607d6394 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Mon, 12 Feb 2024 18:16:05 +0400 Subject: [PATCH 016/127] muted tests of hamiltonians with pytorch backend --- pyproject.toml | 2 +- src/qibo/backends/pytorch.py | 12 ++++++++--- src/qibo/hamiltonians/hamiltonians.py | 1 + tests/test_gates_channels.py | 2 +- tests/test_hamiltonians.py | 30 +++++++++++++++++++++++++++ tests/test_hamiltonians_symbolic.py | 2 +- tests/test_hamiltonians_trotter.py | 4 ++-- 7 files changed, 45 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 61eb632e3c..afc0d6d613 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,7 +64,7 @@ pylint = "^3.0.3" matplotlib = "^3.7.0" qibojit = { git = "https://github.com/qiboteam/qibojit.git" } tensorflow = { version = "^2.14.1", markers = "sys_platform == 'linux'" } -torch = "^2.1.1" +torch = "^2.2.0" [tool.poe.tasks] test = "pytest" diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 9748c3b4db..817f26d5af 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -3,6 +3,7 @@ import numpy as np import torch +from scipy.sparse import spmatrix from qibo import __version__ from qibo.backends import einsum_utils @@ -140,6 +141,8 @@ def cast( x = [i.to(dtype) for i in x] else: x = [torch.tensor(i, dtype=dtype) for i in x] + elif isinstance(x, spmatrix): + x = torch.tensor(x.toarray(), dtype=dtype) else: x = torch.tensor(x, dtype=dtype) if copy: @@ -169,7 +172,10 @@ def apply_gate(self, gate, state, nqubits): return torch.reshape(state, (2**nqubits,)) def issparse(self, x): - return x.is_sparse + if isinstance(x, torch.Tensor): + return x.is_sparse + else: + return super().issparse(x) def to_numpy(self, x): if type(x) is torch.Tensor: @@ -431,7 +437,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): n = 2**ncontrol order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) - state = 
torch.transpose(state, order) + state = state.permute(*order) state = torch.reshape(state, 2 * (n,) + 2 * nactive * (2,)) leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( @@ -455,7 +461,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state10 = torch.cat([state10, state11[None]], dim=0) state = torch.cat([state01, state10[None]], dim=0) state = torch.reshape(state, 2 * nqubits * (2,)) - state = torch.transpose(state, einsum_utils.reverse_order(order)) + state = state.permute(*einsum_utils.reverse_order(order)) else: matrix = torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) matrixc = torch.conj(matrix) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 10b9419580..71f71f49ed 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -6,6 +6,7 @@ import numpy as np import sympy +from qibo.backends import PyTorchBackend from qibo.config import EINSUM_CHARS, log, raise_error from qibo.hamiltonians.abstract import AbstractHamiltonian from qibo.symbols import Z diff --git a/tests/test_gates_channels.py b/tests/test_gates_channels.py index 322bc05bb0..a512b83831 100644 --- a/tests/test_gates_channels.py +++ b/tests/test_gates_channels.py @@ -209,7 +209,7 @@ def test_pauli_noise_channel(backend, pauli_order): ) gate = gates.X(1) target_state = backend.apply_gate_density_matrix(gate, np.copy(initial_state), 2) - target_state = 0.3 * target_state + 0.7 * backend.cast(initial_state) + target_state = 0.3 * target_state + 0.7 * initial_state backend.assert_allclose(final_state, target_state) basis = ["X", "Y", "Z"] diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index a00bb53828..e19220d183 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -37,6 +37,8 @@ def test_hamiltonian_init(backend): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_algebraic_operations(backend, dtype, sparse_type): """Test basic hamiltonian overloading.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") def transformation_a(a, b): c1 = dtype(0.1) @@ -69,6 +71,7 @@ def transformation_d(a, b, use_eye=False): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + mH1 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) mH2 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) H1 = hamiltonians.Hamiltonian(6, mH1, backend=backend) @@ -92,6 +95,8 @@ def transformation_d(a, b, use_eye=False): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_addition(backend, sparse_type): + if backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: H1 = hamiltonians.Y(nqubits=3, backend=backend) H2 = hamiltonians.TFIM(nqubits=3, h=1.0, backend=backend) @@ -126,6 +131,8 @@ def test_hamiltonian_addition(backend, sparse_type): def test_hamiltonian_operation_errors(backend): """Testing hamiltonian not implemented errors.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, backend=backend) H2 = hamiltonians.XXZ(nqubits=2, delta=0.1, backend=backend) @@ -142,6 +149,8 @@ def test_hamiltonian_operation_errors(backend): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_matmul(backend, sparse_type): """Test matrix multiplication between Hamiltonians.""" + if 
backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: nqubits = 3 H1 = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) @@ -178,6 +187,8 @@ def test_hamiltonian_matmul(backend, sparse_type): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_matmul_states(backend, sparse_type): """Test matrix multiplication between Hamiltonian and states.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: nqubits = 3 H = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) @@ -215,6 +226,8 @@ def test_hamiltonian_matmul_states(backend, sparse_type): ) def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): """Test Hamiltonian expectation value calculation.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: h = hamiltonians.XXZ(nqubits=3, delta=0.5, dense=dense, backend=backend) else: @@ -240,6 +253,8 @@ def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): def test_hamiltonian_expectation_errors(backend): + if backend.name == "pytorch": + pytest.skip("Not implemented.") h = hamiltonians.XXZ(nqubits=3, delta=0.5, backend=backend) state = random_complex((4, 4, 4)) with pytest.raises(ValueError): @@ -250,6 +265,8 @@ def test_hamiltonian_expectation_errors(backend): def test_hamiltonian_expectation_from_samples(backend): """Test Hamiltonian expectation value calculation.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") backend.set_seed(12) obs0 = 2 * Z(0) * Z(1) + Z(0) * Z(2) obs1 = 2 * Z(0) * Z(1) + Z(0) * Z(2) * I(3) @@ -279,6 +296,8 @@ def test_hamiltonian_expectation_from_samples(backend): def test_hamiltonian_expectation_from_samples_errors(backend): + if backend.name == "pytorch": + pytest.skip("Not implemented.") obs = random_complex((4, 4)) h = hamiltonians.Hamiltonian(2, obs, backend=backend) with pytest.raises(NotImplementedError): @@ -299,6 +318,8 @@ def test_hamiltonian_expectation_from_samples_errors(backend): ) def test_hamiltonian_eigenvalues(backend, dtype, sparse_type, dense): """Testing hamiltonian eigenvalues scaling.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) else: @@ -336,6 +357,8 @@ def test_hamiltonian_eigenvalues(backend, dtype, sparse_type, dense): @pytest.mark.parametrize("dense", [True, False]) def test_hamiltonian_eigenvectors(backend, dtype, dense): """Testing hamiltonian eigenvectors scaling.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) V1 = backend.to_numpy(H1.eigenvectors()) @@ -374,6 +397,8 @@ def test_hamiltonian_eigenvectors(backend, dtype, dense): ) def test_hamiltonian_ground_state(backend, sparse_type, dense): """Test Hamiltonian ground state.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") if sparse_type is None: H = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) else: @@ -403,6 +428,9 @@ def test_hamiltonian_exponentiation(backend, sparse_type, dense): """Test matrix exponentiation of Hamiltonians ``exp(1j * t * H)``.""" from scipy.linalg import expm + if backend.name == "pytorch": + pytest.skip("Not implemented.") + def construct_hamiltonian(): if sparse_type is None: return hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) @@ -428,6 +456,8 @@ def 
construct_hamiltonian(): def test_hamiltonian_energy_fluctuation(backend): """Test energy fluctuation.""" + if backend.name == "pytorch": + pytest.skip("Not implemented.") # define hamiltonian ham = hamiltonians.XXZ(nqubits=2, backend=backend) # take ground state and zero state diff --git a/tests/test_hamiltonians_symbolic.py b/tests/test_hamiltonians_symbolic.py index e2b214108f..1be1bc2744 100644 --- a/tests/test_hamiltonians_symbolic.py +++ b/tests/test_hamiltonians_symbolic.py @@ -245,7 +245,7 @@ def test_symbolic_hamiltonian_matmul(backend, nqubits, density_matrix, calcterms if calcterms: _ = local_ham.terms local_matmul = local_ham @ state - target_matmul = dense_ham @ state + target_matmul = dense_ham @ backend.cast(state) backend.assert_allclose(local_matmul, target_matmul) diff --git a/tests/test_hamiltonians_trotter.py b/tests/test_hamiltonians_trotter.py index a3e22551d3..04a77ff4f0 100644 --- a/tests/test_hamiltonians_trotter.py +++ b/tests/test_hamiltonians_trotter.py @@ -93,8 +93,8 @@ def test_trotter_hamiltonian_matmul(backend, nqubits, normalize): target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) - trotter_matmul = local_ham @ state - target_matmul = dense_ham @ state + trotter_matmul = local_ham @ backend.cast(state) + target_matmul = dense_ham @ backend.cast(state) backend.assert_allclose(trotter_matmul, target_matmul) From 8459afa4c986726fc690b2b5a049a6fa9f09bf60 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 13 Feb 2024 15:36:49 +0400 Subject: [PATCH 017/127] parameterized matrices redefined and self.torch --- src/qibo/backends/pytorch.py | 254 ++++++++++++++--------------------- 1 file changed, 102 insertions(+), 152 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 817f26d5af..98978fcb2d 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -3,7 +3,6 @@ import numpy as np import torch -from scipy.sparse import spmatrix from qibo import __version__ from qibo.backends import einsum_utils @@ -30,66 +29,14 @@ class TorchMatrices(NumpyMatrices): def __init__(self, dtype): super().__init__(dtype) + self.torch = torch self.torch_dtype = torch_dtype_dict[dtype] - def RX(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "RX")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def RY(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "RY")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def RZ(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "RZ")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def U1(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "U1")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def U2(self, phi, lam): - matrix = getattr(NumpyMatrices(self.dtype), "U2")(phi, lam) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def U3(self, theta, phi, lam): - matrix = getattr(NumpyMatrices(self.dtype), "U3")(theta, phi, lam) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CRX(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "CRX")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CRY(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "CRY")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CRZ(self, theta): - matrix = getattr(NumpyMatrices(self.dtype), "CRZ")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CU1(self, theta): - matrix = 
getattr(NumpyMatrices(self.dtype), "CU1")(theta) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CU2(self, phi, lam): - matrix = getattr(NumpyMatrices(self.dtype), "CU2")(phi, lam) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def CU3(self, theta, phi, lam): - matrix = getattr(NumpyMatrices(self.dtype), "CU3")(theta, phi, lam) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def fSim(self, theta, phi): - matrix = getattr(NumpyMatrices(self.dtype), "fSim")(theta, phi) - return torch.tensor(matrix, dtype=self.torch_dtype) - - def GeneralizedfSim(self, u, phi): - matrix = getattr(NumpyMatrices(self.dtype), "GeneralizedfSim")(u, phi) - return torch.tensor(matrix, dtype=self.torch_dtype) + def _cast(self, x, dtype): + return self.torch.tensor(x, dtype=dtype) def Unitary(self, u): - return torch.tensor(u, dtype=self.torch_dtype) + return self._cast(u, dtype=self.torch_dtype) class PyTorchBackend(NumpyBackend): @@ -106,8 +53,9 @@ def __init__(self): self.matrices = TorchMatrices(self.dtype) self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.nthreads = 0 + self.torch = torch self.torch_dtype = torch_dtype_dict[self.dtype] - self.tensor_types = (torch.Tensor, np.ndarray) + self.tensor_types = (self.torch.Tensor, np.ndarray) def set_device(self, device): # pragma: no cover self.device = device @@ -128,84 +76,82 @@ def cast( """ if dtype is None: dtype = self.torch_dtype - elif isinstance(dtype, torch.dtype): + elif isinstance(dtype, self.torch.dtype): dtype = dtype elif isinstance(dtype, type): dtype = torch_dtype_dict[dtype.__name__] else: dtype = torch_dtype_dict[str(dtype)] - if isinstance(x, torch.Tensor): + if isinstance(x, self.torch.Tensor): x = x.to(dtype) elif isinstance(x, list): - if all(isinstance(i, torch.Tensor) for i in x): + if all(isinstance(i, self.torch.Tensor) for i in x): x = [i.to(dtype) for i in x] else: - x = [torch.tensor(i, dtype=dtype) for i in x] - elif isinstance(x, spmatrix): - x = torch.tensor(x.toarray(), dtype=dtype) + x = [self.torch.tensor(i, dtype=dtype) for i in x] else: - x = torch.tensor(x, dtype=dtype) + x = self.torch.tensor(x, dtype=dtype) if copy: return x.clone() return x def apply_gate(self, gate, state, nqubits): state = self.cast(state) - state = torch.reshape(state, nqubits * (2,)) + state = self.torch.reshape(state, nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: - matrix = torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + matrix = self.torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol order, targets = einsum_utils.control_order(gate, nqubits) state = state.permute(*order) - state = torch.reshape(state, (2**ncontrol,) + nactive * (2,)) + state = self.torch.reshape(state, (2**ncontrol,) + nactive * (2,)) opstring = einsum_utils.apply_gate_string(targets, nactive) - updates = torch.einsum(opstring, state[-1], matrix) - state = torch.cat([state[:-1], updates[None]], axis=0) - state = torch.reshape(state, nqubits * (2,)) + updates = self.torch.einsum(opstring, state[-1], matrix) + state = self.torch.cat([state[:-1], updates[None]], axis=0) + state = self.torch.reshape(state, nqubits * (2,)) state = state.permute(*einsum_utils.reverse_order(order)) else: - matrix = torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrix = self.torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) - state = torch.einsum(opstring, state, 
matrix) - return torch.reshape(state, (2**nqubits,)) + state = self.torch.einsum(opstring, state, matrix) + return self.torch.reshape(state, (2**nqubits,)) def issparse(self, x): - if isinstance(x, torch.Tensor): + if isinstance(x, self.torch.Tensor): return x.is_sparse else: return super().issparse(x) def to_numpy(self, x): - if type(x) is torch.Tensor: + if type(x) is self.torch.Tensor: return x.detach().cpu().numpy() return x def compile(self, func): - return torch.jit.script(func) + return self.torch.jit.script(func) def zero_state(self, nqubits): - state = torch.zeros(2**nqubits, dtype=self.torch_dtype) + state = self.torch.zeros(2**nqubits, dtype=self.torch_dtype) state[0] = 1 return state def zero_density_matrix(self, nqubits): - state = torch.zeros(2 * (2**nqubits,), dtype=self.torch_dtype) + state = self.torch.zeros(2 * (2**nqubits,), dtype=self.torch_dtype) state[0, 0] = 1 return state def matrix(self, gate): npmatrix = super().matrix(gate) - return torch.tensor(npmatrix, dtype=self.torch_dtype) + return self.torch.tensor(npmatrix, dtype=self.torch_dtype) def matrix_parametrized(self, gate): npmatrix = super().matrix_parametrized(gate) - return torch.tensor(npmatrix, dtype=self.torch_dtype) + return self.torch.tensor(npmatrix, dtype=self.torch_dtype) def matrix_fused(self, gate): npmatrix = super().matrix_fused(gate) - return torch.tensor(npmatrix, dtype=self.torch_dtype) + return self.torch.tensor(npmatrix, dtype=self.torch_dtype) def execute_circuit(self, circuit, initial_state=None, nshots=1000): if initial_state is not None: @@ -280,7 +226,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): if circuit.density_matrix: # this implies also it has_collapse assert circuit.has_collapse - final_state = torch.mean(torch.stack(final_states), 0) + final_state = self.torch.mean(self.torch.stack(final_states), 0) if circuit.measurements: qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( @@ -308,12 +254,12 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): return final_result def sample_shots(self, probabilities, nshots): - return torch.multinomial( + return self.torch.multinomial( self.cast(probabilities, dtype="float"), nshots, replacement=True ) def samples_to_binary(self, samples, nqubits): - qrange = torch.arange(nqubits - 1, -1, -1, dtype=torch.int32) + qrange = self.torch.arange(nqubits - 1, -1, -1, dtype=self.torch.int32) samples = samples.int() samples = samples[:, None] >> qrange return samples % 2 @@ -333,8 +279,8 @@ def _order_probabilities(self, probs, qubits, nqubits): def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) - state = torch.reshape(torch.abs(state) ** 2, nqubits * (2,)) - probs = torch.sum(state.type(rtype), dim=unmeasured_qubits) + state = self.torch.reshape(self.torch.abs(state) ** 2, nqubits * (2,)) + probs = self.torch.sum(state.type(rtype), dim=unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).view(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -342,17 +288,17 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order += tuple(i for i in range(nqubits) if i not in qubits) order = order + tuple(i + nqubits for i in order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = torch.reshape(state, 2 * nqubits * (2,)) - state = torch.reshape(state.permute(*order), 
shape) - probs = torch.abs(torch.einsum("abab->a", state)) - probs = torch.reshape(probs, len(qubits) * (2,)) + state = self.torch.reshape(state, 2 * nqubits * (2,)) + state = self.torch.reshape(state.permute(*order), shape) + probs = self.torch.abs(self.torch.einsum("abab->a", state)) + probs = self.torch.reshape(probs, len(qubits) * (2,)) return self._order_probabilities(probs, qubits, nqubits).view(-1) def sample_frequencies(self, probabilities, nshots): from qibo.config import SHOT_BATCH_SIZE - nprobs = probabilities / torch.sum(probabilities) - frequencies = torch.zeros(len(nprobs), dtype=torch.int64) + nprobs = probabilities / self.torch.sum(probabilities) + frequencies = self.torch.zeros(len(nprobs), dtype=self.torch.int64) for _ in range(nshots // SHOT_BATCH_SIZE): frequencies = self.update_frequencies(frequencies, nprobs, SHOT_BATCH_SIZE) frequencies = self.update_frequencies( @@ -363,14 +309,14 @@ def sample_frequencies(self, probabilities, nshots): ) def calculate_frequencies(self, samples): - res, counts = torch.unique(samples, return_counts=True) + res, counts = self.torch.unique(samples, return_counts=True) res, counts = res.tolist(), counts.tolist() return collections.Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): frequencies = self.cast(frequencies, dtype="int") samples = self.sample_shots(probabilities, nsamples) - unique_samples, counts = torch.unique(samples, return_counts=True) + unique_samples, counts = self.torch.unique(samples, return_counts=True) frequencies.index_add_( 0, self.cast(unique_samples, dtype="int"), self.cast(counts, dtype="int") ) @@ -378,23 +324,23 @@ def update_frequencies(self, frequencies, probabilities, nsamples): def calculate_norm(self, state, order=2): state = self.cast(state) - return torch.norm(state, p=order) + return self.torch.norm(state, p=order) def calculate_norm_density_matrix(self, state, order="nuc"): state = self.cast(state) if order == "nuc": return np.trace(state) - return torch.norm(state, p=order) + return self.torch.norm(state, p=order) def calculate_eigenvalues(self, matrix, k=6): - return torch.linalg.eigvalsh(matrix) # pylint: disable=not-callable + return self.torch.linalg.eigvalsh(matrix) # pylint: disable=not-callable def calculate_eigenvectors(self, matrix, k=6): - return torch.linalg.eigh(matrix) # pylint: disable=not-callable + return self.torch.linalg.eigh(matrix) # pylint: disable=not-callable def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): if eigenvectors is None or self.issparse(matrix): - return torch.linalg.matrix_exp( # pylint: disable=not-callable + return self.torch.linalg.matrix_exp( # pylint: disable=not-callable -1j * a * matrix ) else: @@ -402,119 +348,123 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): def calculate_expectation_state(self, hamiltonian, state, normalize): state = self.cast(state) - statec = torch.conj(state) + statec = self.torch.conj(state) hstate = self.cast(hamiltonian @ state) - ev = torch.real(torch.sum(statec * hstate)) + ev = self.torch.real(self.torch.sum(statec * hstate)) if normalize: - ev = ev / torch.sum(torch.square(torch.abs(state))) + ev = ev / self.torch.sum(self.torch.square(self.torch.abs(state))) return ev def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): - return torch.sparse.mm(matrix1, matrix2) # pylint: disable=not-callable - return torch.matmul(matrix1, matrix2) + 
return self.torch.sparse.mm( + matrix1, matrix2 + ) # pylint: disable=not-callable + return self.torch.matmul(matrix1, matrix2) def calculate_hamiltonian_state_product(self, matrix, state): - return torch.matmul(matrix, state) + return self.torch.matmul(matrix, state) def calculate_overlap(self, state1, state2): - return torch.abs(torch.sum(torch.conj(self.cast(state1)) * self.cast(state2))) + return self.torch.abs( + self.torch.sum(self.torch.conj(self.cast(state1)) * self.cast(state2)) + ) def calculate_overlap_density_matrix(self, state1, state2): - return torch.trace( - torch.matmul(torch.conj(self.cast(state1)).T, self.cast(state2)) + return self.torch.trace( + self.torch.matmul(self.torch.conj(self.cast(state1)).T, self.cast(state2)) ) def apply_gate_density_matrix(self, gate, state, nqubits): state = self.cast(state) - state = torch.reshape(state, 2 * nqubits * (2,)) + state = self.torch.reshape(state, 2 * nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: - matrix = torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - matrixc = torch.conj(matrix) + matrix = self.torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + matrixc = self.torch.conj(matrix) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol n = 2**ncontrol order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) state = state.permute(*order) - state = torch.reshape(state, 2 * (n,) + 2 * nactive * (2,)) + state = self.torch.reshape(state, 2 * (n,) + 2 * nactive * (2,)) leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( targets, nactive ) state01 = state[: n - 1, n - 1] - state01 = torch.einsum(rightc, state01, matrixc) + state01 = self.torch.einsum(rightc, state01, matrixc) state10 = state[n - 1, : n - 1] - state10 = torch.einsum(leftc, state10, matrix) + state10 = self.torch.einsum(leftc, state10, matrix) left, right = einsum_utils.apply_gate_density_matrix_string( targets, nactive ) state11 = state[n - 1, n - 1] - state11 = torch.einsum(right, state11, matrixc) - state11 = torch.einsum(left, state11, matrix) + state11 = self.torch.einsum(right, state11, matrixc) + state11 = self.torch.einsum(left, state11, matrix) state00 = state[range(n - 1)] state00 = state00[:, range(n - 1)] - state01 = torch.cat([state00, state01[:, None]], dim=1) - state10 = torch.cat([state10, state11[None]], dim=0) - state = torch.cat([state01, state10[None]], dim=0) - state = torch.reshape(state, 2 * nqubits * (2,)) + state01 = self.torch.cat([state00, state01[:, None]], dim=1) + state10 = self.torch.cat([state10, state11[None]], dim=0) + state = self.torch.cat([state01, state10[None]], dim=0) + state = self.torch.reshape(state, 2 * nqubits * (2,)) state = state.permute(*einsum_utils.reverse_order(order)) else: - matrix = torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) - matrixc = torch.conj(matrix) + matrix = self.torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrixc = self.torch.conj(matrix) left, right = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) - state = torch.einsum(right, state, matrixc) - state = torch.einsum(left, state, matrix) - return torch.reshape(state, 2 * (2**nqubits,)) + state = self.torch.einsum(right, state, matrixc) + state = self.torch.einsum(left, state, matrix) + return self.torch.reshape(state, 2 * (2**nqubits,)) def partial_trace(self, state, qubits, nqubits): state = self.cast(state) - state = torch.reshape(state, nqubits * (2,)) + state = self.torch.reshape(state, nqubits * (2,)) axes = 2 * 
[list(qubits)] - rho = torch.tensordot(state, torch.conj(state), dims=axes) + rho = self.torch.tensordot(state, self.torch.conj(state), dims=axes) shape = 2 * (2 ** (nqubits - len(qubits)),) - return torch.reshape(rho, shape) + return self.torch.reshape(rho, shape) def partial_trace_density_matrix(self, state, qubits, nqubits): state = self.cast(state) - state = torch.reshape(state, 2 * nqubits * (2,)) + state = self.torch.reshape(state, 2 * nqubits * (2,)) order = list(sorted(qubits)) order += [i for i in range(nqubits) if i not in qubits] order += [i + nqubits for i in order] state = state.permute(*order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = torch.reshape(state, shape) - return torch.einsum("abac->bc", state) + state = self.torch.reshape(state, shape) + return self.torch.einsum("abac->bc", state) def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): - state = torch.unsqueeze(state, dim=q) + state = self.torch.unsqueeze(state, dim=q) if r: - state = torch.cat([torch.zeros_like(state), state], dim=q) + state = self.torch.cat([self.torch.zeros_like(state), state], dim=q) else: - state = torch.cat([state, torch.zeros_like(state)], dim=q) + state = self.torch.cat([state, self.torch.zeros_like(state)], dim=q) return state def collapse_state(self, state, qubits, shot, nqubits, normalize=True): state = self.cast(state) shape = state.shape binshot = self.samples_to_binary(shot, len(qubits))[0] - state = torch.reshape(state, nqubits * (2,)) + state = self.torch.reshape(state, nqubits * (2,)) order = list(qubits) + [q for q in range(nqubits) if q not in qubits] state = state.permute(*order) subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) - state = torch.reshape(state, subshape)[int(shot)] + state = self.torch.reshape(state, subshape)[int(shot)] if normalize: - norm = torch.sqrt(torch.sum(torch.abs(state) ** 2)) + norm = self.torch.sqrt(self.torch.sum(self.torch.abs(state) ** 2)) state = state / norm state = self._append_zeros(state, qubits, binshot) - return torch.reshape(state, shape) + return self.torch.reshape(state, shape) def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): state = self.cast(state) @@ -523,17 +473,17 @@ def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): order = list(qubits) + [q + nqubits for q in qubits] order.extend(q for q in range(nqubits) if q not in qubits) order.extend(q + nqubits for q in range(nqubits) if q not in qubits) - state = torch.reshape(state, 2 * nqubits * (2,)) + state = self.torch.reshape(state, 2 * nqubits * (2,)) state = state.permute(*order) subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) - state = torch.reshape(state, subshape)[int(shot), int(shot)] + state = self.torch.reshape(state, subshape)[int(shot), int(shot)] n = 2 ** (len(state.shape) // 2) if normalize: - norm = torch.trace(torch.reshape(state, (n, n))) + norm = self.torch.trace(self.torch.reshape(state, (n, n))) state = state / norm qubits = qubits + [q + nqubits for q in qubits] state = self._append_zeros(state, qubits, 2 * binshot) - return torch.reshape(state, shape) + return self.torch.reshape(state, shape) def reset_error_density_matrix(self, gate, state, nqubits): from qibo.gates import X @@ -543,13 +493,13 @@ def reset_error_density_matrix(self, gate, state, nqubits): q = gate.target_qubits[0] p_0, p_1 = gate.init_kwargs["p_0"], gate.init_kwargs["p_1"] trace = 
self.partial_trace_density_matrix(state, (q,), nqubits) - trace = torch.reshape(trace, 2 * (nqubits - 1) * (2,)) + trace = self.torch.reshape(trace, 2 * (nqubits - 1) * (2,)) zero = self.zero_density_matrix(1) - zero = torch.tensordot(trace, zero, dims=0) + zero = self.torch.tensordot(trace, zero, dims=0) order = list(range(2 * nqubits - 2)) order.insert(q, 2 * nqubits - 2) order.insert(q + nqubits, 2 * nqubits - 1) - zero = torch.reshape(zero.permute(*order), shape) + zero = self.torch.reshape(zero.permute(*order), shape) state = (1 - p_0 - p_1) * state + p_0 * zero return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) @@ -557,10 +507,10 @@ def thermal_error_density_matrix(self, gate, state, nqubits): state = self.cast(state) shape = state.shape state = self.apply_gate(gate, state.view(-1), 2 * nqubits) - return torch.reshape(state, shape) + return self.torch.reshape(state, shape) def identity_density_matrix(self, nqubits, normalize: bool = True): - state = torch.eye(2**nqubits, dtype=torch.complex128) + state = self.torch.eye(2**nqubits, dtype=self.torch.complex128) if normalize is True: state /= 2**nqubits return state @@ -571,10 +521,10 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): q = gate.target_qubits lam = gate.init_kwargs["lam"] trace = self.partial_trace_density_matrix(state, q, nqubits) - trace = torch.reshape(trace, 2 * (nqubits - len(q)) * (2,)) + trace = self.torch.reshape(trace, 2 * (nqubits - len(q)) * (2,)) identity = self.identity_density_matrix(len(q)) - identity = torch.reshape(identity, 2 * len(q) * (2,)) - identity = torch.tensordot(trace, identity, dims=0) + identity = self.torch.reshape(identity, 2 * len(q) * (2,)) + identity = self.torch.tensordot(trace, identity, dims=0) qubits = list(range(nqubits)) for j in q: qubits.pop(qubits.index(j)) @@ -592,7 +542,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): qj = [qj[qubits.index(i)] for i in range(len(qubits))] order += qj identity = identity.permute(*order) - identity = torch.reshape(identity, shape) + identity = self.torch.reshape(identity, shape) state = (1 - lam) * state + lam * identity return state From 1980f4560038d803df89fedaf5e91764d5c2520a Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 13 Feb 2024 16:02:38 +0400 Subject: [PATCH 018/127] improve code --- src/qibo/backends/pytorch.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 98978fcb2d..830c82f723 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -120,8 +120,7 @@ def apply_gate(self, gate, state, nqubits): def issparse(self, x): if isinstance(x, self.torch.Tensor): return x.is_sparse - else: - return super().issparse(x) + return super().issparse(x) def to_numpy(self, x): if type(x) is self.torch.Tensor: @@ -343,8 +342,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.torch.linalg.matrix_exp( # pylint: disable=not-callable -1j * a * matrix ) - else: - return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) + return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) def calculate_expectation_state(self, hamiltonian, state, normalize): state = self.cast(state) From ba45624df6a0aba8ea961dcd749661674ba1dcc3 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 16 Feb 2024 13:07:04 +0400 Subject: [PATCH 019/127] fix `poetry.lock` --- poetry.lock | 341 
+++++++++++++++++++++++++++++----------------------- 1 file changed, 192 insertions(+), 149 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1d066357ff..8cc81ad6fb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "absl-py" @@ -1302,10 +1302,10 @@ files = [ ] [package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0)"] +lxml = ["lxml (>=4.0,<5)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] repacker = ["uharfbuzz (>=0.23.0)"] @@ -1403,11 +1403,11 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" @@ -2136,8 +2136,6 @@ description = "Clang Python Bindings, mirrored from the official LLVM repo: http optional = false python-versions = "*" files = [ - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -2379,9 +2377,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.23.3", markers = "python_version > \"3.10\""}, - {version = ">=1.21.2", markers = "python_version > \"3.9\" and python_version <= \"3.10\""}, {version = ">1.20", markers = "python_version <= \"3.9\""}, + {version = ">=1.23.3", markers = "python_version > \"3.10\""}, + {version = ">=1.21.2", markers = "python_version > \"3.9\""}, ] [package.extras] @@ -2628,42 
+2626,183 @@ description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, - {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, - {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, - {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, - {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, - {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, - {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, - {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, - {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, - {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, - {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, - {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, - {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, - {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, - {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, - 
{file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, - {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, - {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, - {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, - {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, - {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, - {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, - {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = 
"numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = 
"numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file 
= "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.19.3" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, ] [[package]] @@ -3605,13 +3744,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = 
"sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3974,24 +4106,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = 
"sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -3999,7 +4131,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -4007,7 +4139,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -4015,7 +4147,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -4174,7 +4306,7 @@ files = [ [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] @@ -5006,16 +5138,6 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, - {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, - {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -5063,85 +5185,6 @@ files = [ {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, ] -[[package]] -name = "wrapt" -version = "1.16.0" -description = "Module for decorators, wrappers and monkey patching." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, - {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, - {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, - {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, - {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, - {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, - {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, - {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, - {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, - {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, - {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, - {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, - {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, - {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, - {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, - {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, - {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, - {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, - {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, - {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, - {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, - {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, - {file = 
"wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, - {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, - {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, - {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, - {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, - {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, - {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, - {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, - {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, - {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, - {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, - {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, - {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, - {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, - {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, -] - [[package]] name = "zipp" version = "3.17.0" @@ -5160,4 +5203,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "fb9a22edabc2aa704e5ba3bcbf60123f4b6c5e929a248b717b962f78c64c49f1" +content-hash = "158813a7b2687578a77f7140d547dd2dd69f53689f5eb1b6cf70e6711d54ba4a" From fcb99371696fe90aa21bb5ab744dc69e7b041973 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 13 Feb 2024 16:26:46 +0400 Subject: [PATCH 020/127] dtype --- src/qibo/backends/pytorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 830c82f723..1289f118fe 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -33,7 +33,7 @@ def __init__(self, dtype): self.torch_dtype = torch_dtype_dict[dtype] def _cast(self, x, dtype): - return self.torch.tensor(x, dtype=dtype) + return self.torch.tensor(x, dtype=self.torch_dtype) def Unitary(self, u): return self._cast(u, dtype=self.torch_dtype) From 977afe5ce6574787830c25fd5285b621e3e854e9 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 16 Feb 2024 13:09:08 +0400 Subject: [PATCH 021/127] use _cast in torchmatrices --- src/qibo/backends/pytorch.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 1289f118fe..5b6868b80a 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -30,13 +30,13 @@ class TorchMatrices(NumpyMatrices): def __init__(self, dtype): super().__init__(dtype) self.torch = torch - self.torch_dtype = torch_dtype_dict[dtype] + self.dtype = torch_dtype_dict[dtype] def _cast(self, x, dtype): - return self.torch.tensor(x, dtype=self.torch_dtype) + return self.torch.tensor(x, dtype=dtype) def Unitary(self, u): - return self._cast(u, dtype=self.torch_dtype) + return self._cast(u, dtype=self.dtype) class PyTorchBackend(NumpyBackend): From f0c129754f977af8d422054eb4e4466a42c5cf19 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 16 Feb 2024 13:15:51 +0400 Subject: [PATCH 022/127] minor syntax --- tests/test_backends_clifford.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 1a1ad46027..071d99efd3 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -25,6 +25,7 @@ def construct_clifford_backend(backend): str(excinfo.value) == "TensorflowBackend for Clifford Simulation is not supported yet." ) + if isinstance(backend, PyTorchBackend): with pytest.raises(NotImplementedError) as excinfo: clifford_backend = CliffordBackend(backend) @@ -32,8 +33,8 @@ def construct_clifford_backend(backend): str(excinfo.value) == "PyTorchBackend for Clifford Simulation is not supported yet." 
) - else: - return CliffordBackend(backend) + + return CliffordBackend(backend) THETAS_1Q = [ From bbffcee16bfec4c83ebac1070a00cdcd8d56ae06 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 16 Feb 2024 13:21:46 +0400 Subject: [PATCH 023/127] minor syntax fix --- tests/test_quantum_info_clifford.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index f4c6a00a77..57d4df94f3 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -23,15 +23,16 @@ def construct_clifford_backend(backend): str(excinfo.value) == "TensorflowBackend for Clifford Simulation is not supported yet." ) - elif isinstance(backend, PyTorchBackend): + + if isinstance(backend, PyTorchBackend): with pytest.raises(NotImplementedError) as excinfo: clifford_backend = CliffordBackend(backend) assert ( str(excinfo.value) == "PyTorchBackend for Clifford Simulation is not supported." ) - else: - return CliffordBackend(backend) + + return CliffordBackend(backend) @pytest.mark.parametrize("nqubits", [2, 10, 50, 100]) From bd353e596afb25ff1b587e73ab4f81bf8cd919fb Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 16 Feb 2024 16:47:39 +0400 Subject: [PATCH 024/127] corrections by renato and fixed error with check_unitary --- src/qibo/backends/npmatrices.py | 4 +++- src/qibo/backends/pytorch.py | 2 +- src/qibo/gates/gates.py | 8 +------- tests/test_backends_clifford.py | 11 +++++------ tests/test_callbacks.py | 12 ++++-------- tests/test_gates_gates.py | 33 +++++++++++++++------------------ 6 files changed, 29 insertions(+), 41 deletions(-) diff --git a/src/qibo/backends/npmatrices.py b/src/qibo/backends/npmatrices.py index 4f180ef4c3..c6e447e9ea 100644 --- a/src/qibo/backends/npmatrices.py +++ b/src/qibo/backends/npmatrices.py @@ -60,7 +60,9 @@ def TDG(self): ) def I(self, n=2): - return self._cast(self.np.eye(n, dtype=self.dtype), dtype=self.dtype) + # dtype=complex is necessary for pytorch backend, + # _cast will take care of casting in the right dtype for all the backends + return self._cast(self.np.eye(n, dtype=complex), dtype=self.dtype) def Align(self, n=2): return self._cast(self.I(n), dtype=self.dtype) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 5b6868b80a..b7c35fe0dd 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -123,7 +123,7 @@ def issparse(self, x): return super().issparse(x) def to_numpy(self, x): - if type(x) is self.torch.Tensor: + if isinstance(x, self.torch.Tensor): return x.detach().cpu().numpy() return x diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index 92d7d84bd0..05f91abc52 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -2328,15 +2328,9 @@ def __init__( "trainable": trainable, } - # checking unitarity without invoking any backend - # maybe here having the backend would be useful? 
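The `NumpyMatrices.I` change above is easier to see in isolation: a torch dtype cannot be passed straight to `np.eye`, so the identity is first built with Python's plain `complex` dtype and only cast to the backend dtype afterwards. A minimal standalone sketch of the failure mode and of the workaround (not part of the patch; `torch.complex128` stands in for the `self.dtype` held by `TorchMatrices`):

import numpy as np
import torch

backend_dtype = torch.complex128  # what TorchMatrices stores in `self.dtype`

# Handing the torch dtype directly to numpy typically raises TypeError,
# because numpy cannot interpret torch dtype objects.
try:
    np.eye(2, dtype=backend_dtype)
except TypeError as error:
    print(f"np.eye rejected the torch dtype: {error}")

# Building the matrix with a plain `complex` dtype and casting afterwards
# works for every backend, which is what `_cast` takes care of.
identity = torch.tensor(np.eye(2, dtype=complex), dtype=backend_dtype)
print(identity.dtype)  # torch.complex128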
if check_unitary: - import torch - - if isinstance(unitary, torch.Tensor): - unitary = unitary.detach().cpu().numpy() product = np.transpose(np.conj(unitary)) @ unitary - sums = all(np.abs(1 - np.sum(product, axis=1)) < PRECISION_TOL) + sums = all(np.abs(1 - product.sum(axis=1)) < PRECISION_TOL) diagonal = all(np.abs(1 - np.diag(product)) < PRECISION_TOL) self.unitary = True if sums and diagonal else False diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 071d99efd3..75666d7b52 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -23,18 +23,17 @@ def construct_clifford_backend(backend): clifford_backend = CliffordBackend(backend) assert ( str(excinfo.value) - == "TensorflowBackend for Clifford Simulation is not supported yet." + == "TensorflowBackend for Clifford Simulation is not supported." ) - - if isinstance(backend, PyTorchBackend): + elif isinstance(backend, PyTorchBackend): with pytest.raises(NotImplementedError) as excinfo: clifford_backend = CliffordBackend(backend) assert ( str(excinfo.value) - == "PyTorchBackend for Clifford Simulation is not supported yet." + == "PyTorchBackend for Clifford Simulation is not supported." ) - - return CliffordBackend(backend) + else: + return CliffordBackend(backend) THETAS_1Q = [ diff --git a/tests/test_callbacks.py b/tests/test_callbacks.py index 5f3347eba6..5ff73f66a5 100644 --- a/tests/test_callbacks.py +++ b/tests/test_callbacks.py @@ -289,12 +289,6 @@ def test_norm(backend, density_matrix, seed): backend.assert_allclose(final_norm, target_norm) -def to_numpy(x): - if isinstance(x, np.ndarray): - return x - return x.detach().cpu().numpy() - - @pytest.mark.parametrize("seed", list(range(1, 5 + 1))) @pytest.mark.parametrize("density_matrix", [False, True]) @pytest.mark.parametrize("nqubits", list(range(2, 6 + 1, 2))) @@ -313,11 +307,13 @@ def test_overlap(backend, nqubits, density_matrix, seed): if density_matrix: final_overlap = overlap.apply_density_matrix(backend, state1) target_overlap = np.trace( - np.transpose(np.conj(to_numpy(state0))) @ to_numpy(state1) + np.transpose(np.conj(backend.to_numpy(state0))) @ backend.to_numpy(state1) ) else: final_overlap = overlap.apply(backend, state1) - target_overlap = np.abs(np.sum(np.conj(to_numpy(state0)) * to_numpy(state1))) + target_overlap = np.abs( + np.sum(np.conj(backend.to_numpy(state0)) * backend.to_numpy(state1)) + ) backend.assert_allclose(final_overlap, target_overlap) diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index d495abb0a3..09560f4d20 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -68,12 +68,6 @@ def test_z(backend): assert gates.Z(0).unitary -def to_numpy(array): - if isinstance(array, np.ndarray): - return array - return array.detach().cpu().numpy() - - def test_sx(backend): nqubits = 1 initial_state = random_statevector(2**nqubits, backend=backend) @@ -99,13 +93,14 @@ def test_sx(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) + np_final_state_decompose = backend.to_numpy(final_state_decompose) + np_obs = backend.to_numpy(observable) + np_target_state = backend.to_numpy(target_state) backend.assert_allclose( - np.transpose(np.conj(to_numpy(final_state_decompose))) - @ to_numpy(observable) - @ to_numpy(final_state_decompose), - np.transpose(np.conj(to_numpy(target_state))) - @ to_numpy(observable) - @ to_numpy(target_state), + np.transpose(np.conj(np_final_state_decompose)) + @ 
np_obs + @ np_final_state_decompose, + np.transpose(np.conj(np_target_state)) @ np_obs @ np_target_state, ) assert gates.SX(0).qasm_label == "sx" @@ -138,13 +133,14 @@ def test_sxdg(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) + np_final_state_decompose = backend.to_numpy(final_state_decompose) + np_obs = backend.to_numpy(observable) + np_target_state = backend.to_numpy(target_state) backend.assert_allclose( - np.transpose(np.conj(to_numpy(final_state_decompose))) - @ to_numpy(observable) - @ to_numpy(final_state_decompose), - np.transpose(np.conj(to_numpy(target_state))) - @ to_numpy(observable) - @ to_numpy(target_state), + np.transpose(np.conj(np_final_state_decompose)) + @ np_obs + @ np_final_state_decompose, + np.transpose(np.conj(np_target_state)) @ np_obs @ np_target_state, ) assert gates.SXDG(0).qasm_label == "sxdg" @@ -1255,6 +1251,7 @@ def test_unitary(backend, nqubits): def test_unitary_initialization(backend): + matrix = np.random.random((4, 4)) gate = gates.Unitary(matrix, 0, 1) backend.assert_allclose(gate.parameters[0], matrix) From b105cec93c28a2fa23c1753c1f05f6643a177c54 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 08:53:17 +0400 Subject: [PATCH 025/127] test disabling lint --- src/qibo/backends/pytorch.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index b7c35fe0dd..713b04b175 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -355,9 +355,7 @@ def calculate_expectation_state(self, hamiltonian, state, normalize): def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): - return self.torch.sparse.mm( - matrix1, matrix2 - ) # pylint: disable=not-callable + return self.torch.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 return self.torch.matmul(matrix1, matrix2) def calculate_hamiltonian_state_product(self, matrix, state): From 2ee5003cc55f692e0a6d6d8c24e91bb8eee13462 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 09:34:38 +0400 Subject: [PATCH 026/127] fix `cast` --- src/qibo/backends/pytorch.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 713b04b175..41d760bb2f 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -82,17 +82,19 @@ def cast( dtype = torch_dtype_dict[dtype.__name__] else: dtype = torch_dtype_dict[str(dtype)] + if isinstance(x, self.torch.Tensor): x = x.to(dtype) - elif isinstance(x, list): - if all(isinstance(i, self.torch.Tensor) for i in x): - x = [i.to(dtype) for i in x] - else: - x = [self.torch.tensor(i, dtype=dtype) for i in x] + elif isinstance(x, list) and all( + isinstance(row, self.torch.Tensor) for row in x + ): + x = self.torch.stack(x) else: x = self.torch.tensor(x, dtype=dtype) + if copy: return x.clone() + return x def apply_gate(self, gate, state, nqubits): From 56b43911b1d265eebc19b70f089dc2871b9a3716 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 09:41:36 +0400 Subject: [PATCH 027/127] fixing `cast` issues --- src/qibo/quantum_info/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index ec772168e0..a6bd1ce6ee 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -237,10 
+237,10 @@ def hellinger_distance(prob_dist_p, prob_dist_q, validate: bool = False, backend ValueError, "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + if np.abs(np.sum(backend.to_numpy(prob_dist_p)) - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + if np.abs(np.sum(backend.to_numpy(prob_dist_q)) - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") distance = float( From a6e30d436eb638e83107f2b43102c9ceb6a7fb40 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 09:59:07 +0400 Subject: [PATCH 028/127] rewrite `to_numpy` --- src/qibo/backends/pytorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 41d760bb2f..bdd3de7752 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -126,7 +126,7 @@ def issparse(self, x): def to_numpy(self, x): if isinstance(x, self.torch.Tensor): - return x.detach().cpu().numpy() + return x.numpy(force=True) return x def compile(self, func): From bfcbd219544bfb094a70b2f06c74dab7cb1ea1a4 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 10:57:52 +0400 Subject: [PATCH 029/127] skip `clifford` tests --- tests/test_quantum_info_clifford.py | 83 +++++++++++++---------------- 1 file changed, 36 insertions(+), 47 deletions(-) diff --git a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index 57d4df94f3..dff105285a 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -16,44 +16,43 @@ def construct_clifford_backend(backend): - if isinstance(backend, TensorflowBackend): + if isinstance(backend, (TensorflowBackend, PyTorchBackend)): with pytest.raises(NotImplementedError) as excinfo: clifford_backend = CliffordBackend(backend) assert ( str(excinfo.value) - == "TensorflowBackend for Clifford Simulation is not supported yet." + == f"{backend.__class__.__name__} for Clifford Simulation is not supported yet." ) - if isinstance(backend, PyTorchBackend): - with pytest.raises(NotImplementedError) as excinfo: - clifford_backend = CliffordBackend(backend) - assert ( - str(excinfo.value) - == "PyTorchBackend for Clifford Simulation is not supported." - ) + pytest.skip( + f"CliffordBackend not defined for {backend.__class__.__name__} engine." 
+ ) return CliffordBackend(backend) @pytest.mark.parametrize("nqubits", [2, 10, 50, 100]) def test_clifford_from_symplectic_matrix(backend, nqubits): - if isinstance(backend, TensorflowBackend): - with pytest.raises(NotImplementedError): - clifford_backend = CliffordBackend(backend) - elif isinstance(backend, PyTorchBackend): - with pytest.raises(NotImplementedError): - clifford_backend = CliffordBackend(backend) - else: - clifford_backend = CliffordBackend(backend) - symplectic_matrix = clifford_backend.zero_state(nqubits) - clifford_1 = Clifford(symplectic_matrix, engine=backend) - clifford_2 = Clifford(symplectic_matrix[:-1], engine=backend) - - for clifford in [clifford_1, clifford_2]: - backend.assert_allclose( - clifford.symplectic_matrix.shape, - (2 * nqubits + 1, 2 * nqubits + 1), - ) + # if isinstance(backend, TensorflowBackend): + # with pytest.raises(NotImplementedError): + # clifford_backend = CliffordBackend(backend) + # elif isinstance(backend, PyTorchBackend): + # with pytest.raises(NotImplementedError): + # clifford_backend = CliffordBackend(backend) + # else: + + clifford_backend = construct_clifford_backend(backend) + + # clifford_backend = CliffordBackend(backend) + symplectic_matrix = clifford_backend.zero_state(nqubits) + clifford_1 = Clifford(symplectic_matrix, engine=backend) + clifford_2 = Clifford(symplectic_matrix[:-1], engine=backend) + + for clifford in [clifford_1, clifford_2]: + backend.assert_allclose( + clifford.symplectic_matrix.shape, + (2 * nqubits + 1, 2 * nqubits + 1), + ) @pytest.mark.parametrize("measurement", [False, True]) @@ -77,10 +76,7 @@ def test_clifford_from_circuit(backend, measurement): @pytest.mark.parametrize("algorithm", ["AG04", "BM20"]) @pytest.mark.parametrize("nqubits", [1, 2, 3, 10, 50]) def test_clifford_to_circuit(backend, nqubits, algorithm, seed): - if backend.__class__.__name__ == "TensorflowBackend": - pytest.skip("CliffordBackend not defined for Tensorflow engine.") - elif backend.__class__.__name__ == "PyTorchBackend": - pytest.skip("CliffordBackend not defined for PyTorch engine.") + clifford_backend = construct_clifford_backend(backend) clifford = random_clifford(nqubits, seed=seed, backend=backend) @@ -328,26 +324,19 @@ def test_clifford_samples_frequencies(backend, binary): def test_clifford_samples_error(backend): c = random_clifford(1, backend=backend) - if isinstance(backend, TensorflowBackend): - with pytest.raises(NotImplementedError): - clifford_backend = CliffordBackend(backend) - elif isinstance(backend, PyTorchBackend): - with pytest.raises(NotImplementedError): - clifford_backend = CliffordBackend(backend) - else: - obj = Clifford.from_circuit(c, engine=backend) - with pytest.raises(RuntimeError) as excinfo: - obj.samples() - assert str(excinfo.value) == "No measurement provided." + + clifford_backend = construct_clifford_backend(backend) + + obj = Clifford.from_circuit(c, engine=backend) + with pytest.raises(RuntimeError) as excinfo: + obj.samples() + assert str(excinfo.value) == "No measurement provided." 
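The refactor above relies on `pytest.skip` raising instead of returning, so a single helper either hands back a Clifford backend or aborts the calling test, and the individual tests no longer need their own Tensorflow/PyTorch branches. A self-contained sketch of that pattern (the dummy class is only a stand-in for `CliffordBackend`, so the snippet runs on its own):

import pytest


class _DummyCliffordBackend:
    """Stand-in for qibo's CliffordBackend, used only to keep the sketch self-contained."""

    def __init__(self, engine):
        self.engine = engine


def construct_or_skip(backend):
    """Return a Clifford backend, or skip the calling test for unsupported engines."""
    if backend.__class__.__name__ in ("TensorflowBackend", "PyTorchBackend"):
        # pytest.skip raises pytest's Skipped exception, so control never comes back
        # here and the test that invoked the helper is reported as skipped.
        pytest.skip(
            f"CliffordBackend not defined for {backend.__class__.__name__} engine."
        )
    return _DummyCliffordBackend(backend)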
@pytest.mark.parametrize("deep", [False, True]) @pytest.mark.parametrize("nqubits", [1, 10, 100]) def test_clifford_copy(backend, nqubits, deep): - if backend.__class__.__name__ == "TensorflowBackend": - pytest.skip("CliffordBackend not defined for Tensorflow engine.") - elif backend.__class__.__name__ == "PyTorchBackend": - pytest.skip("CliffordBackend not defined for PyTorch engine.") + clifford_backend = construct_clifford_backend(backend) circuit = random_clifford(nqubits, backend=backend) clifford = Clifford.from_circuit(circuit, engine=backend) @@ -366,7 +355,7 @@ def test_clifford_copy(backend, nqubits, deep): @pytest.mark.parametrize("pauli_2", ["Z", "Y", "Y"]) @pytest.mark.parametrize("pauli_1", ["X", "Y", "Z"]) -def test_one_qubit_paulis_string_product(backend, pauli_1, pauli_2): +def test_one_qubit_paulis_string_product(pauli_1, pauli_2): products = { "XY": "iZ", "YZ": "iX", @@ -401,7 +390,7 @@ def test_one_qubit_paulis_string_product(backend, pauli_1, pauli_2): [["iY", "iX"], "iZ"], ], ) -def test_string_product(backend, operators, target): +def test_string_product(operators, target): product = _string_product(operators) assert product == target From d782ee4a37afae7ac2be87970dedaf00df53b6e6 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 12:45:55 +0400 Subject: [PATCH 030/127] fix `quantum_info.entropies` --- src/qibo/quantum_info/entropies.py | 91 ++++++++++++++++++++++++---- tests/test_quantum_info_entropies.py | 26 +++++++- 2 files changed, 101 insertions(+), 16 deletions(-) diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py index 716bdda3fd..6eab69848a 100644 --- a/src/qibo/quantum_info/entropies.py +++ b/src/qibo/quantum_info/entropies.py @@ -54,12 +54,20 @@ def shannon_entropy(prob_dist, base: float = 2, backend=None): "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = ( + backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) + ) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") log_prob = np.where(prob_dist != 0, np.log2(prob_dist) / np.log2(base), 0.0) - shan_entropy = -np.sum(prob_dist * log_prob) + shan_entropy = ( + -backend.torch.sum(prob_dist * log_prob) + if backend.name == "pytorch" + else -np.sum(prob_dist * log_prob) + ) # absolute value if entropy == 0.0 to avoid returning -0.0 shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy @@ -119,10 +127,20 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen ValueError, "All elements of the probability array must be between 0. 
and 1..", ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + total_sum_p = ( + backend.torch.sum(prob_dist_p) + if backend.name == "pytorch" + else np.sum(prob_dist_p) + ) + total_sum_q = ( + backend.torch.sum(prob_dist_q) + if backend.name == "pytorch" + else np.sum(prob_dist_q) + ) + if np.abs(total_sum_p - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + if np.abs(total_sum_q - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") entropy_p = -1 * shannon_entropy(prob_dist_p, base=base, backend=backend) @@ -133,7 +151,11 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0) - relative = np.sum(prob_dist_p * log_prob) + relative = ( + backend.torch.sum(prob_dist_p * log_prob) + if backend.name == "pytorch" + else np.sum(prob_dist_p * log_prob) + ) return entropy_p - relative @@ -206,7 +228,11 @@ def classical_renyi_entropy( "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = ( + backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) + ) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") if alpha == 0.0: @@ -218,7 +244,13 @@ def classical_renyi_entropy( if alpha == np.inf: return -1 * np.log2(max(prob_dist)) / np.log2(base) - renyi_ent = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base) + total_sum = ( + backend.torch.sum(prob_dist**alpha) + if backend.name == "pytorch" + else np.sum(prob_dist**alpha) + ) + + renyi_ent = (1 / (1 - alpha)) * np.log2(total_sum) / np.log2(base) return renyi_ent @@ -299,14 +331,32 @@ def classical_relative_renyi_entropy( ValueError, "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + + total_sum_p = ( + backend.torch.sum(prob_dist_p) + if backend.name == "pytorch" + else np.sum(prob_dist_p) + ) + total_sum_q = ( + backend.torch.sum(prob_dist_q) + if backend.name == "pytorch" + else np.sum(prob_dist_q) + ) + + if np.abs(total_sum_p - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + if np.abs(total_sum_q - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") if alpha == 0.5: - return -2 * np.log2(np.sum(np.sqrt(prob_dist_p * prob_dist_q))) / np.log2(base) + total_sum = np.sqrt(prob_dist_p * prob_dist_q) + total_sum = ( + backend.torch.sum(total_sum) + if backend.name == "pytorch" + else np.sum(total_sum) + ) + return -2 * np.log2(total_sum) / np.log2(base) if alpha == 1.0: return classical_relative_entropy( @@ -319,7 +369,13 @@ def classical_relative_renyi_entropy( prob_p = prob_dist_p**alpha prob_q = prob_dist_q ** (1 - alpha) - return (1 / (alpha - 1)) * np.log2(np.sum(prob_p * prob_q)) / np.log2(base) + total_sum = ( + backend.torch.sum(prob_p * prob_q) + if backend.name == "pytorch" + else np.sum(prob_p * prob_q) + ) + + return (1 / (alpha - 1)) * np.log2(total_sum) / np.log2(base) def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend=None): @@ -375,13 +431,22 @@ def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend= "All elements of the probability array must be between 0. 
and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = ( + backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) + ) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") if alpha == 1.0: return shannon_entropy(prob_dist, base=base, backend=backend) - return (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) + total_sum = prob_dist**alpha + total_sum = ( + backend.torch.sum(total_sum) if backend.name == "pytorch" else np.sum(total_sum) + ) + + return (1 / (1 - alpha)) * (total_sum - 1) def von_neumann_entropy( diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py index 3cf8b2d721..5013f4f70d 100644 --- a/tests/test_quantum_info_entropies.py +++ b/tests/test_quantum_info_entropies.py @@ -54,6 +54,7 @@ def test_shannon_entropy_errors(backend): @pytest.mark.parametrize("base", [2, 10, np.e, 5]) def test_shannon_entropy(backend, base): prob_array = [1.0, 0.0] + prob_array = backend.cast(prob_array, dtype=np.float64) result = shannon_entropy(prob_array, base, backend=backend) backend.assert_allclose(result, 0.0) @@ -115,6 +116,9 @@ def test_classical_relative_entropy(backend, base, kind): if kind is not None: prob_p, prob_q = kind(prob_p), kind(prob_q) + else: + prob_p = np.real(backend.cast(prob_p)) + prob_q = np.real(backend.cast(prob_q)) divergence = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) @@ -168,7 +172,9 @@ def test_classical_renyi_entropy(backend, alpha, base, kind): if alpha == 0.0: target = np.log2(len(prob_dist)) / np.log2(base) elif alpha == 1: - target = shannon_entropy(prob_dist, base=base, backend=backend) + target = shannon_entropy( + backend.cast(prob_dist, dtype=np.float64), base=base, backend=backend + ) elif alpha == 2: target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base) elif alpha == np.inf: @@ -178,6 +184,8 @@ def test_classical_renyi_entropy(backend, alpha, base, kind): if kind is not None: prob_dist = kind(prob_dist) + else: + prob_dist = np.real(backend.cast(prob_dist)) renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend) @@ -261,7 +269,12 @@ def test_classical_relative_renyi_entropy(backend, alpha, base, kind): if alpha == 0.5: target = -2 * np.log2(np.sum(np.sqrt(prob_p * prob_q))) / np.log2(base) elif alpha == 1.0: - target = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) + target = classical_relative_entropy( + np.real(backend.cast(prob_p)), + np.real(backend.cast(prob_q)), + base=base, + backend=backend, + ) elif alpha == np.inf: target = np.log2(max(prob_p / prob_q)) / np.log2(base) else: @@ -273,6 +286,9 @@ def test_classical_relative_renyi_entropy(backend, alpha, base, kind): if kind is not None: prob_p, prob_q = kind(prob_p), kind(prob_q) + else: + prob_p = np.real(backend.cast(prob_p)) + prob_q = np.real(backend.cast(prob_q)) divergence = classical_relative_renyi_entropy( prob_p, prob_q, alpha=alpha, base=base, backend=backend @@ -326,12 +342,16 @@ def test_classical_tsallis_entropy(backend, alpha, base, kind): prob_dist /= np.sum(prob_dist) if alpha == 1.0: - target = shannon_entropy(prob_dist, base=base, backend=backend) + target = shannon_entropy( + np.real(backend.cast(prob_dist)), base=base, backend=backend + ) else: target = (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) if kind is not None: prob_dist = kind(prob_dist) + else: + prob_dist = np.real(backend.cast(prob_dist)) backend.assert_allclose( 
classical_tsallis_entropy(prob_dist, alpha=alpha, base=base, backend=backend), From c417311d7d7d320d16f56471c8b43fb005db2db4 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 14:40:03 +0400 Subject: [PATCH 031/127] unitary check --- src/qibo/gates/gates.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index 05f91abc52..f68fb58fec 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -2329,12 +2329,26 @@ def __init__( } if check_unitary: + if unitary.__class__.__name__ == "Tensor": + import torch # pylint: disable=C0145 + + diag_function = torch.diag + all_function = torch.all + else: + diag_function = np.diag + all_function = np.all + product = np.transpose(np.conj(unitary)) @ unitary - sums = all(np.abs(1 - product.sum(axis=1)) < PRECISION_TOL) - diagonal = all(np.abs(1 - np.diag(product)) < PRECISION_TOL) + diagonals = all(np.abs(1 - diag_function(product)) < PRECISION_TOL) + off_diagonals = bool( + all_function( + np.abs(product - diag_function(diag_function(product))) + < PRECISION_TOL + ) + ) - self.unitary = True if sums and diagonal else False - del sums, diagonal, product + self.unitary = True if diagonals and off_diagonals else False + del diagonals, off_diagonals, product @Gate.parameters.setter def parameters(self, x): From 1001f2ca21c7742f7e3866788eed89a0fda620d3 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 14:40:16 +0400 Subject: [PATCH 032/127] random ensembles --- src/qibo/quantum_info/random_ensembles.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/qibo/quantum_info/random_ensembles.py b/src/qibo/quantum_info/random_ensembles.py index 94159cf1b2..915c929994 100644 --- a/src/qibo/quantum_info/random_ensembles.py +++ b/src/qibo/quantum_info/random_ensembles.py @@ -226,7 +226,8 @@ def random_unitary(dims: int, measure: Optional[str] = None, seed=None, backend= H = random_hermitian(dims, seed=seed, backend=NumpyBackend()) unitary = expm(-1.0j * H / 2) - unitary = backend.cast(unitary, dtype=unitary.dtype) + + unitary = backend.cast(unitary, dtype=unitary.dtype) return unitary @@ -1177,12 +1178,14 @@ def _super_op_from_bcsz_measure(dims: int, rank: int, order: str, seed, backend) for eigenvalue, eigenvector in zip(eigenvalues, np.transpose(eigenvectors)): operator += eigenvalue * np.outer(eigenvector, np.conj(eigenvector)) + kron = backend.torch.kron if backend.name == "pytorch" else np.kron + if order == "row": - operator = np.kron( + operator = kron( backend.identity_density_matrix(nqubits, normalize=False), operator ) if order == "column": - operator = np.kron( + operator = kron( operator, backend.identity_density_matrix(nqubits, normalize=False) ) From 140df11f66b417a1de7ce09f364475b84a63507f Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 14:40:25 +0400 Subject: [PATCH 033/127] fix tests --- tests/test_quantum_info_random.py | 62 +++++++++++++++++-------------- 1 file changed, 34 insertions(+), 28 deletions(-) diff --git a/tests/test_quantum_info_random.py b/tests/test_quantum_info_random.py index 0749723637..645df2eb43 100644 --- a/tests/test_quantum_info_random.py +++ b/tests/test_quantum_info_random.py @@ -56,7 +56,10 @@ def test_uniform_sampling_U3(backend, seed): ] ) expectation_values = backend.cast(expectation_values) - expectation_values = np.mean(expectation_values, axis=0) + + mean_function = backend.torch.mean if backend.name == "pytorch" else np.mean + + 
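This hunk, like several of the diffs above (`kron`, `sum`, `diag`, `mean`), uses the same dispatch idiom: pick the torch function when the active backend is pytorch and fall back to numpy otherwise. A self-contained sketch of the idiom, assuming a recent torch whose reductions accept the numpy-style `axis` keyword (as the test above does); `_FakeBackend` is a stand-in for a qibo backend object:

import numpy as np
import torch


class _FakeBackend:
    """Minimal stand-in exposing the two attributes the idiom relies on."""

    name = "pytorch"
    torch = torch


backend = _FakeBackend()
values = torch.rand(5, 3, dtype=torch.float64)

# Same selection pattern as in the patches above.
mean_function = backend.torch.mean if backend.name == "pytorch" else np.mean
kron = backend.torch.kron if backend.name == "pytorch" else np.kron

print(mean_function(values, axis=0).shape)     # torch.Size([3])
print(kron(torch.eye(2), torch.eye(2)).shape)  # torch.Size([4, 4])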
expectation_values = mean_function(expectation_values, axis=0) backend.assert_allclose(expectation_values[0], expectation_values[1], atol=1e-1) backend.assert_allclose(expectation_values[0], expectation_values[2], atol=1e-1) @@ -150,42 +153,38 @@ def test_random_hermitian(backend): backend.assert_allclose(all(eigenvalues <= 1), True) -def test_random_unitary(backend): +@pytest.mark.parametrize("measure", [None, "haar"]) +def test_random_unitary(backend, measure): with pytest.raises(TypeError): dims = np.array([1]) - random_unitary(dims, backend=backend) + random_unitary(dims, measure=measure, backend=backend) with pytest.raises(TypeError): dims = 2 - measure = 1 - random_unitary(dims, measure, backend=backend) + random_unitary(dims, measure=1, backend=backend) with pytest.raises(ValueError): dims = 0 - random_unitary(dims, backend=backend) + random_unitary(dims, measure=measure, backend=backend) with pytest.raises(ValueError): dims = 2 random_unitary(dims, measure="gaussian", backend=backend) with pytest.raises(TypeError): dims = 2 - random_unitary(dims=2, seed=0.1, backend=backend) + random_unitary(dims=2, measure=measure, seed=0.1, backend=backend) # tests if operator is unitary (measure == "haar") dims = 4 - matrix = random_unitary(dims, backend=backend) + matrix = random_unitary(dims, measure=measure, backend=backend) matrix_dagger = np.transpose(np.conj(matrix)) - matrix_inv = np.linalg.inv(matrix) + matrix_inv = ( + backend.torch.inverse(matrix) + if backend.name == "pytorch" + else np.linalg.inv(matrix) + ) norm = float( backend.calculate_norm_density_matrix(matrix_inv - matrix_dagger, order=2) ) backend.assert_allclose(norm < PRECISION_TOL, True) - # tests if operator is unitary (measure == None) - dims, measure = 4, None - matrix = random_unitary(dims, measure, backend=backend) - matrix_dagger = np.transpose(np.conj(matrix)) - matrix_inv = np.linalg.inv(matrix) - norm = float(backend.calculate_norm(matrix_inv - matrix_dagger, order=2)) - backend.assert_allclose(norm < PRECISION_TOL, True) - @pytest.mark.parametrize("order", ["row", "column"]) @pytest.mark.parametrize("rank", [None, 4]) @@ -465,8 +464,10 @@ def test_random_pauli( ) else: matrix = np.transpose(matrix, (1, 0, 2, 3)) - matrix = [reduce(np.kron, row) for row in matrix] - matrix = reduce(np.dot, matrix) + kron = backend.torch.kron if backend.name == "pytorch" else np.kron + matrix = [reduce(kron, row) for row in matrix] + dot = backend.torch.matmul if backend.name == "pytorch" else np.dot + matrix = reduce(dot, matrix) if subset is None: backend.assert_allclose( @@ -557,10 +558,13 @@ def test_random_stochastic_matrix(backend): dims = 4 random_stochastic_matrix(dims, seed=0.1, backend=backend) + sum_function = backend.torch.sum if backend.name == "pytorch" else np.sum + diag = backend.torch.diag if backend.name == "pytorch" else np.diag + # tests if matrix is row-stochastic dims = 4 matrix = random_stochastic_matrix(dims, backend=backend) - sum_rows = np.sum(matrix, axis=1) + sum_rows = sum_function(matrix, axis=1) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -570,18 +574,20 @@ def test_random_stochastic_matrix(backend): matrix = random_stochastic_matrix( dims, diagonally_dominant=True, max_iterations=1000, backend=backend ) - sum_rows = np.sum(matrix, axis=1) + + sum_function = backend.torch.sum if backend.name == "pytorch" else np.sum + sum_rows = sum_function(matrix, axis=1) backend.assert_allclose(all(sum_rows < 1 + 
PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) - backend.assert_allclose(all(2 * np.diag(matrix) - sum_rows > 0), True) + backend.assert_allclose(all(2 * diag(matrix) - sum_rows > 0), True) # tests if matrix is bistochastic dims = 4 matrix = random_stochastic_matrix(dims, bistochastic=True, backend=backend) - sum_rows = np.sum(matrix, axis=1) - column_rows = np.sum(matrix, axis=0) + sum_rows = sum_function(matrix, axis=1) + column_rows = sum_function(matrix, axis=0) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -598,8 +604,8 @@ def test_random_stochastic_matrix(backend): max_iterations=1000, backend=backend, ) - sum_rows = np.sum(matrix, axis=1) - column_rows = np.sum(matrix, axis=0) + sum_rows = sum_function(matrix, axis=1) + column_rows = sum_function(matrix, axis=0) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -607,8 +613,8 @@ def test_random_stochastic_matrix(backend): backend.assert_allclose(all(column_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(column_rows > 1 - PRECISION_TOL), True) - backend.assert_allclose(all(2 * np.diag(matrix) - sum_rows > 0), True) - backend.assert_allclose(all(2 * np.diag(matrix) - column_rows > 0), True) + backend.assert_allclose(all(2 * diag(matrix) - sum_rows > 0), True) + backend.assert_allclose(all(2 * diag(matrix) - column_rows > 0), True) # tests warning for max_iterations dims = 4 From 8ade07220a21b4b18b5a1bae9856f75d58a0d147 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 15:04:35 +0400 Subject: [PATCH 034/127] superoperator transformations --- src/qibo/backends/pytorch.py | 12 ++++-- src/qibo/quantum_info/basis.py | 2 +- .../superoperator_transformations.py | 2 + ...ntum_info_superoperator_transformations.py | 40 +++++++++---------- 4 files changed, 31 insertions(+), 25 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index bdd3de7752..97967e3f23 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -64,15 +64,19 @@ def cast( self, x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], dtype: Union[str, torch.dtype, np.dtype, type] = None, - copy=False, + copy: bool = False, ): """Casts input as a Torch tensor of the specified dtype. This method supports casting of single tensors or lists of tensors as for the Tensoflow backend. Args: - x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): Input to be casted. - dtype (Union[str, torch.dtype, np.dtype, type]): Target data type. If None, the default dtype of the backend is used. - copy (bool): If True, the input tensor is copied before casting. + x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): + Input to be casted. + dtype (Union[str, torch.dtype, np.dtype, type]): Target data type. + If ``None``, the default dtype of the backend is used. + Defaults to ``None``. + copy (bool, optional): If ``True``, the input tensor is copied before casting. + Defaults to ``False``. 
""" if dtype is None: dtype = self.torch_dtype diff --git a/src/qibo/quantum_info/basis.py b/src/qibo/quantum_info/basis.py index dcb5d7ab84..24b2a17507 100644 --- a/src/qibo/quantum_info/basis.py +++ b/src/qibo/quantum_info/basis.py @@ -115,7 +115,7 @@ def pauli_basis( else: basis = basis_full - basis = backend.cast(basis) + basis = backend.cast(basis, dtype=backend.dtype) if normalize: basis /= np.sqrt(2**nqubits) diff --git a/src/qibo/quantum_info/superoperator_transformations.py b/src/qibo/quantum_info/superoperator_transformations.py index c888ed5b18..05c8622428 100644 --- a/src/qibo/quantum_info/superoperator_transformations.py +++ b/src/qibo/quantum_info/superoperator_transformations.py @@ -488,6 +488,8 @@ def choi_to_kraus( kraus_right.append( coeff * unvectorization(eigenvector_right, order=order, backend=backend) ) + kraus_left = backend.cast(kraus_left) + kraus_right = backend.cast(kraus_right) kraus_ops = backend.cast([kraus_left, kraus_right]) else: # when choi_super_op is CP diff --git a/tests/test_quantum_info_superoperator_transformations.py b/tests/test_quantum_info_superoperator_transformations.py index 5923181db9..9cb316e1ae 100644 --- a/tests/test_quantum_info_superoperator_transformations.py +++ b/tests/test_quantum_info_superoperator_transformations.py @@ -396,8 +396,8 @@ def test_choi_to_kraus( test_a0 = backend.cast(test_a0, dtype=test_a0.dtype) test_a1 = backend.cast(test_a1, dtype=test_a1.dtype) - test_kraus_left = backend.cast(test_kraus_left, dtype=test_kraus_left.dtype) - test_kraus_right = backend.cast(test_kraus_right, dtype=test_kraus_right.dtype) + test_kraus_left = backend.cast(test_kraus_left, dtype=backend.dtype) + test_kraus_right = backend.cast(test_kraus_right, dtype=backend.dtype) state = random_density_matrix(2, backend=backend) @@ -790,8 +790,8 @@ def test_pauli_to_choi(backend, normalize, order, pauli_order, test_superop): dim = int(np.sqrt(test_pauli.shape[0])) aux = dim**2 if normalize == False else dim - test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype) - test_superop = backend.cast(test_superop, dtype=test_superop.dtype) + test_pauli = backend.cast(test_pauli, dtype=backend.dtype) + test_superop = backend.cast(test_superop, dtype=backend.dtype) choi_super_op = pauli_to_choi( test_pauli / aux, normalize, order, pauli_order, backend=backend ) @@ -813,7 +813,7 @@ def test_pauli_to_kraus(backend, normalize, order, pauli_order, test_a0, test_a1 dim = int(np.sqrt(test_pauli.shape[0])) aux = dim**2 if normalize == False else dim - test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype) + test_pauli = backend.cast(test_pauli, dtype=backend.dtype) kraus_ops, _ = pauli_to_kraus( test_pauli / aux, @@ -852,8 +852,8 @@ def test_pauli_to_chi(backend, normalize, order, pauli_order): dim = int(np.sqrt(test_pauli.shape[0])) aux = dim**2 if normalize == False else dim - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) - test_pauli = backend.cast(test_pauli / aux, dtype=test_pauli.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) + test_pauli = backend.cast(test_pauli / aux, dtype=backend.dtype) chi_matrix = pauli_to_chi( test_pauli, normalize, order, pauli_order, backend=backend @@ -881,12 +881,12 @@ def test_pauli_to_stinespring( test_a1, ): test_pauli = pauli_superop(pauli_order) - test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype) + test_pauli = backend.cast(test_pauli, dtype=backend.dtype) dim = 2**nqubits aux = dim**2 if normalize is False else dim - test_a0 = backend.cast(test_a0, dtype=test_a0.dtype) - 
test_a1 = backend.cast(test_a1, dtype=test_a1.dtype) + test_a0 = backend.cast(test_a0, dtype=backend.dtype) + test_a1 = backend.cast(test_a1, dtype=backend.dtype) stinespring = pauli_to_stinespring( test_pauli, @@ -936,8 +936,8 @@ def test_chi_to_choi(backend, normalize, order, pauli_order, test_superop): dim = int(np.sqrt(test_chi.shape[0])) aux = dim**2 if normalize == False else dim - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) - test_superop = backend.cast(test_superop, dtype=test_superop.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) + test_superop = backend.cast(test_superop, dtype=backend.dtype) axes = [1, 2] if order == "row" else [0, 3] test_choi = np.swapaxes(np.reshape(test_superop, [2] * 4), *axes).reshape([4, 4]) @@ -958,8 +958,8 @@ def test_chi_to_liouville(backend, normalize, order, pauli_order, test_superop): dim = int(np.sqrt(test_chi.shape[0])) aux = dim**2 if normalize == False else dim - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) - test_superop = backend.cast(test_superop, dtype=test_superop.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) + test_superop = backend.cast(test_superop, dtype=backend.dtype) super_op = chi_to_liouville( test_chi / aux, normalize, order, pauli_order, backend=backend @@ -977,8 +977,8 @@ def test_chi_to_pauli(backend, normalize, order, pauli_order): dim = int(np.sqrt(test_pauli.shape[0])) aux = 1.0 if normalize else dim**2 - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) - test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) + test_pauli = backend.cast(test_pauli, dtype=backend.dtype) pauli_op = chi_to_pauli( test_chi / aux, normalize, order, pauli_order, backend=backend @@ -997,7 +997,7 @@ def test_chi_to_kraus(backend, normalize, order, pauli_order, test_a0, test_a1): dim = int(np.sqrt(test_chi.shape[0])) aux = dim**2 if normalize == False else dim - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) kraus_ops, _ = chi_to_kraus( test_chi / aux, normalize, order=order, pauli_order=pauli_order, backend=backend @@ -1034,12 +1034,12 @@ def test_chi_to_stinespring( backend, normalize, order, pauli_order, validate_cp, nqubits, test_a0, test_a1 ): test_chi = chi_superop(pauli_order) - test_chi = backend.cast(test_chi, dtype=test_chi.dtype) + test_chi = backend.cast(test_chi, dtype=backend.dtype) dim = int(np.sqrt(test_chi.shape[0])) aux = dim**2 if normalize == False else dim - test_a0 = backend.cast(test_a0, dtype=test_a0.dtype) - test_a1 = backend.cast(test_a1, dtype=test_a1.dtype) + test_a0 = backend.cast(test_a0, dtype=backend.dtype) + test_a1 = backend.cast(test_a1, dtype=backend.dtype) stinespring = chi_to_stinespring( test_chi, From b075eea1859749cf270b295559c363d929ee0a0f Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 19 Feb 2024 15:21:59 +0400 Subject: [PATCH 035/127] quantum networks --- src/qibo/quantum_info/quantum_networks.py | 27 +++++++++++++---------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/src/qibo/quantum_info/quantum_networks.py b/src/qibo/quantum_info/quantum_networks.py index 8992169ae5..d1dd92f1be 100644 --- a/src/qibo/quantum_info/quantum_networks.py +++ b/src/qibo/quantum_info/quantum_networks.py @@ -164,7 +164,7 @@ def is_unital( self._matrix = self._full() self._pure = False - partial_trace = np.einsum("jkjl -> kl", self._matrix) + partial_trace = self._einsum("jkjl -> kl", self._matrix) identity = 
self._backend.cast( np.eye(partial_trace.shape[0]), dtype=partial_trace.dtype ) @@ -212,7 +212,7 @@ def is_causal( self._matrix = self._full() self._pure = False - partial_trace = np.einsum("jklk -> jl", self._matrix) + partial_trace = self._einsum("jklk -> jl", self._matrix) identity = self._backend.cast( np.eye(partial_trace.shape[0]), dtype=partial_trace.dtype ) @@ -292,12 +292,12 @@ def apply(self, state): Returns: ndarray: Resulting state :math:`\\mathcal{E}(\\varrho)`. """ - matrix = np.copy(self._matrix) + matrix = self._backend.cast(self._matrix, copy=True) if self.is_pure(): - return np.einsum("kj,ml,jl -> km", matrix, np.conj(matrix), state) + return self._einsum("kj,ml,jl -> km", matrix, np.conj(matrix), state) - return np.einsum("jklm,km -> jl", matrix, state) + return self._einsum("jklm,km -> jl", matrix, state) def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): """Link product between two quantum networks. @@ -353,7 +353,7 @@ def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): if super_subscripts: cexpr = "jklmnopq,klop->jmnq" return QuantumNetwork( - np.einsum(cexpr, first_matrix, second_matrix), + self._einsum(cexpr, first_matrix, second_matrix), [self.partition[0] + self.partition[-1]], ) @@ -361,12 +361,12 @@ def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): if inv_subscripts: return QuantumNetwork( - np.einsum(cexpr, second_matrix, first_matrix), + self._einsum(cexpr, second_matrix, first_matrix), [second_network.partition[0], self.partition[1]], ) return QuantumNetwork( - np.einsum(cexpr, first_matrix, second_matrix), + self._einsum(cexpr, first_matrix, second_matrix), [self.partition[0], second_network.partition[1]], ) @@ -633,6 +633,10 @@ def _set_tensor_and_parameters(self): """Sets tensor based on inputs.""" self._backend = _check_backend(self._backend) + self._einsum = ( + self._backend.torch.einsum if self._backend.name == "pytorch" else np.einsum + ) + if isinstance(self.partition, list): self.partition = tuple(self.partition) @@ -661,11 +665,10 @@ def _set_tensor_and_parameters(self): def _full(self): """Reshapes input matrix based on purity.""" - matrix = np.copy(self._matrix) - if self.is_pure(): - matrix = np.einsum("jk,lm -> kjml", matrix, np.conj(matrix)) + matrix = self._backend.cast(self._matrix, copy=True) - return matrix + if self.is_pure(): + matrix = self._einsum("jk,lm -> kjml", matrix, np.conj(matrix)) return matrix From 259e6bcfff2bb056025c68d7c2352e9182dcea54 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 21 Feb 2024 10:44:15 +0400 Subject: [PATCH 036/127] fix `CSXDG` issue by updating `poetry.lock` --- poetry.lock | 454 +++++++++++++++++++++++++--------------------------- 1 file changed, 217 insertions(+), 237 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8cc81ad6fb..af71a691fa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -24,13 +24,13 @@ files = [ [[package]] name = "anyio" -version = "4.2.0" +version = "4.3.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, - {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, + {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, + {file = "anyio-4.3.0.tar.gz", hash = 
"sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, ] [package.dependencies] @@ -654,63 +654,63 @@ test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] [[package]] name = "coverage" -version = "7.4.1" +version = "7.4.2" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.8" files = [ - {file = "coverage-7.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7"}, - {file = "coverage-7.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61"}, - {file = "coverage-7.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee"}, - {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25"}, - {file = "coverage-7.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c"}, - {file = "coverage-7.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b"}, - {file = "coverage-7.4.1-cp310-cp310-win32.whl", hash = "sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016"}, - {file = "coverage-7.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018"}, - {file = "coverage-7.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295"}, - {file = "coverage-7.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd"}, - {file = "coverage-7.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1"}, - {file = "coverage-7.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6"}, - {file = "coverage-7.4.1-cp311-cp311-win32.whl", hash = "sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5"}, - {file = "coverage-7.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968"}, - {file = 
"coverage-7.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581"}, - {file = "coverage-7.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156"}, - {file = "coverage-7.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1"}, - {file = "coverage-7.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc"}, - {file = "coverage-7.4.1-cp312-cp312-win32.whl", hash = "sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74"}, - {file = "coverage-7.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448"}, - {file = "coverage-7.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218"}, - {file = "coverage-7.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06"}, - {file = "coverage-7.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60"}, - {file = "coverage-7.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad"}, - {file = "coverage-7.4.1-cp38-cp38-win32.whl", hash = "sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042"}, - {file = "coverage-7.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d"}, - {file = "coverage-7.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54"}, - {file = "coverage-7.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70"}, - {file = "coverage-7.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628"}, - {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950"}, - {file = "coverage-7.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756"}, - {file = "coverage-7.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35"}, - {file = "coverage-7.4.1-cp39-cp39-win32.whl", hash = "sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c"}, - {file = "coverage-7.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a"}, - {file = "coverage-7.4.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166"}, - {file = "coverage-7.4.1.tar.gz", hash = "sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04"}, + {file = "coverage-7.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf54c3e089179d9d23900e3efc86d46e4431188d9a657f345410eecdd0151f50"}, + {file = "coverage-7.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fe6e43c8b510719b48af7db9631b5fbac910ade4bd90e6378c85ac5ac706382c"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b98c89db1b150d851a7840142d60d01d07677a18f0f46836e691c38134ed18b"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5f9683be6a5b19cd776ee4e2f2ffb411424819c69afab6b2db3a0a364ec6642"}, + {file = "coverage-7.4.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cdcbf7b9cb83fe047ee09298e25b1cd1636824067166dc97ad0543b079d22f"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2599972b21911111114100d362aea9e70a88b258400672626efa2b9e2179609c"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ef00d31b7569ed3cb2036f26565f1984b9fc08541731ce01012b02a4c238bf03"}, + {file = "coverage-7.4.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:20a875bfd8c282985c4720c32aa05056f77a68e6d8bbc5fe8632c5860ee0b49b"}, + {file = "coverage-7.4.2-cp310-cp310-win32.whl", hash = "sha256:b3f2b1eb229f23c82898eedfc3296137cf1f16bb145ceab3edfd17cbde273fb7"}, + {file = "coverage-7.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:7df95fdd1432a5d2675ce630fef5f239939e2b3610fe2f2b5bf21fa505256fa3"}, + {file = "coverage-7.4.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8ddbd158e069dded57738ea69b9744525181e99974c899b39f75b2b29a624e2"}, + {file = "coverage-7.4.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81a5fb41b0d24447a47543b749adc34d45a2cf77b48ca74e5bf3de60a7bd9edc"}, + {file = "coverage-7.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2412e98e70f16243be41d20836abd5f3f32edef07cbf8f407f1b6e1ceae783ac"}, + {file = 
"coverage-7.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb79414c15c6f03f56cc68fa06994f047cf20207c31b5dad3f6bab54a0f66ef"}, + {file = "coverage-7.4.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf89ab85027427d351f1de918aff4b43f4eb5f33aff6835ed30322a86ac29c9e"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a178b7b1ac0f1530bb28d2e51f88c0bab3e5949835851a60dda80bff6052510c"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:06fe398145a2e91edaf1ab4eee66149c6776c6b25b136f4a86fcbbb09512fd10"}, + {file = "coverage-7.4.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:18cac867950943fe93d6cd56a67eb7dcd2d4a781a40f4c1e25d6f1ed98721a55"}, + {file = "coverage-7.4.2-cp311-cp311-win32.whl", hash = "sha256:f72cdd2586f9a769570d4b5714a3837b3a59a53b096bb954f1811f6a0afad305"}, + {file = "coverage-7.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:d779a48fac416387dd5673fc5b2d6bd903ed903faaa3247dc1865c65eaa5a93e"}, + {file = "coverage-7.4.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:adbdfcda2469d188d79771d5696dc54fab98a16d2ef7e0875013b5f56a251047"}, + {file = "coverage-7.4.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ac4bab32f396b03ebecfcf2971668da9275b3bb5f81b3b6ba96622f4ef3f6e17"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:006d220ba2e1a45f1de083d5022d4955abb0aedd78904cd5a779b955b019ec73"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3733545eb294e5ad274abe131d1e7e7de4ba17a144505c12feca48803fea5f64"}, + {file = "coverage-7.4.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a9e754aa250fe61f0f99986399cec086d7e7a01dd82fd863a20af34cbce962"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2ed37e16cf35c8d6e0b430254574b8edd242a367a1b1531bd1adc99c6a5e00fe"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b953275d4edfab6cc0ed7139fa773dfb89e81fee1569a932f6020ce7c6da0e8f"}, + {file = "coverage-7.4.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32b4ab7e6c924f945cbae5392832e93e4ceb81483fd6dc4aa8fb1a97b9d3e0e1"}, + {file = "coverage-7.4.2-cp312-cp312-win32.whl", hash = "sha256:f5df76c58977bc35a49515b2fbba84a1d952ff0ec784a4070334dfbec28a2def"}, + {file = "coverage-7.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:34423abbaad70fea9d0164add189eabaea679068ebdf693baa5c02d03e7db244"}, + {file = "coverage-7.4.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b11f9c6587668e495cc7365f85c93bed34c3a81f9f08b0920b87a89acc13469"}, + {file = "coverage-7.4.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:51593a1f05c39332f623d64d910445fdec3d2ac2d96b37ce7f331882d5678ddf"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69f1665165ba2fe7614e2f0c1aed71e14d83510bf67e2ee13df467d1c08bf1e8"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c8bbb95a699c80a167478478efe5e09ad31680931ec280bf2087905e3b95ec"}, + {file = "coverage-7.4.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:175f56572f25e1e1201d2b3e07b71ca4d201bf0b9cb8fad3f1dfae6a4188de86"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8562ca91e8c40864942615b1d0b12289d3e745e6b2da901d133f52f2d510a1e3"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a1ef0f173e1a19738f154fb3644f90d0ada56fe6c9b422f992b04266c55d5a"}, + {file = "coverage-7.4.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f40ac873045db4fd98a6f40387d242bde2708a3f8167bd967ccd43ad46394ba2"}, + {file = "coverage-7.4.2-cp38-cp38-win32.whl", hash = "sha256:d1b750a8409bec61caa7824bfd64a8074b6d2d420433f64c161a8335796c7c6b"}, + {file = "coverage-7.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:b4ae777bebaed89e3a7e80c4a03fac434a98a8abb5251b2a957d38fe3fd30088"}, + {file = "coverage-7.4.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ff7f92ae5a456101ca8f48387fd3c56eb96353588e686286f50633a611afc95"}, + {file = "coverage-7.4.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:861d75402269ffda0b33af94694b8e0703563116b04c681b1832903fac8fd647"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3507427d83fa961cbd73f11140f4a5ce84208d31756f7238d6257b2d3d868405"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf711d517e21fb5bc429f5c4308fbc430a8585ff2a43e88540264ae87871e36a"}, + {file = "coverage-7.4.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c00e54f0bd258ab25e7f731ca1d5144b0bf7bec0051abccd2bdcff65fa3262c9"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f8e845d894e39fb53834da826078f6dc1a933b32b1478cf437007367efaf6f6a"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:840456cb1067dc350af9080298c7c2cfdddcedc1cb1e0b30dceecdaf7be1a2d3"}, + {file = "coverage-7.4.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c11ca2df2206a4e3e4c4567f52594637392ed05d7c7fb73b4ea1c658ba560265"}, + {file = "coverage-7.4.2-cp39-cp39-win32.whl", hash = "sha256:3ff5bdb08d8938d336ce4088ca1a1e4b6c8cd3bef8bb3a4c0eb2f37406e49643"}, + {file = "coverage-7.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:ac9e95cefcf044c98d4e2c829cd0669918585755dd9a92e28a1a7012322d0a95"}, + {file = "coverage-7.4.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:f593a4a90118d99014517c2679e04a4ef5aee2d81aa05c26c734d271065efcb6"}, + {file = "coverage-7.4.2.tar.gz", hash = "sha256:1a5ee18e3a8d766075ce9314ed1cb695414bae67df6a4b0805f5137d93d6f1cb"}, ] [package.dependencies] @@ -1252,60 +1252,60 @@ files = [ [[package]] name = "fonttools" -version = "4.47.0" +version = "4.49.0" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2d2404107626f97a221dc1a65b05396d2bb2ce38e435f64f26ed2369f68675d9"}, - {file = "fonttools-4.47.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c01f409be619a9a0f5590389e37ccb58b47264939f0e8d58bfa1f3ba07d22671"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d986b66ff722ef675b7ee22fbe5947a41f60a61a4da15579d5e276d897fbc7fa"}, - {file = "fonttools-4.47.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8acf6dd0434b211b3bd30d572d9e019831aae17a54016629fa8224783b22df8"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:495369c660e0c27233e3c572269cbe520f7f4978be675f990f4005937337d391"}, - {file = "fonttools-4.47.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c59227d7ba5b232281c26ae04fac2c73a79ad0e236bca5c44aae904a18f14faf"}, - {file = "fonttools-4.47.0-cp310-cp310-win32.whl", hash = "sha256:59a6c8b71a245800e923cb684a2dc0eac19c56493e2f896218fcf2571ed28984"}, - {file = "fonttools-4.47.0-cp310-cp310-win_amd64.whl", hash = "sha256:52c82df66201f3a90db438d9d7b337c7c98139de598d0728fb99dab9fd0495ca"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:854421e328d47d70aa5abceacbe8eef231961b162c71cbe7ff3f47e235e2e5c5"}, - {file = "fonttools-4.47.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:511482df31cfea9f697930f61520f6541185fa5eeba2fa760fe72e8eee5af88b"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0e2c88c8c985b7b9a7efcd06511fb0a1fe3ddd9a6cd2895ef1dbf9059719d7"}, - {file = "fonttools-4.47.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7a0a8848726956e9d9fb18c977a279013daadf0cbb6725d2015a6dd57527992"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e869da810ae35afb3019baa0d0306cdbab4760a54909c89ad8904fa629991812"}, - {file = "fonttools-4.47.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dd23848f877c3754f53a4903fb7a593ed100924f9b4bff7d5a4e2e8a7001ae11"}, - {file = "fonttools-4.47.0-cp311-cp311-win32.whl", hash = "sha256:bf1810635c00f7c45d93085611c995fc130009cec5abdc35b327156aa191f982"}, - {file = "fonttools-4.47.0-cp311-cp311-win_amd64.whl", hash = "sha256:61df4dee5d38ab65b26da8efd62d859a1eef7a34dcbc331299a28e24d04c59a7"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:e3f4d61f3a8195eac784f1d0c16c0a3105382c1b9a74d99ac4ba421da39a8826"}, - {file = "fonttools-4.47.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:174995f7b057e799355b393e97f4f93ef1f2197cbfa945e988d49b2a09ecbce8"}, - {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea592e6a09b71cb7a7661dd93ac0b877a6228e2d677ebacbad0a4d118494c86d"}, - {file = "fonttools-4.47.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40bdbe90b33897d9cc4a39f8e415b0fcdeae4c40a99374b8a4982f127ff5c767"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:843509ae9b93db5aaf1a6302085e30bddc1111d31e11d724584818f5b698f500"}, - {file = "fonttools-4.47.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9acfa1cdc479e0dde528b61423855913d949a7f7fe09e276228298fef4589540"}, - {file = "fonttools-4.47.0-cp312-cp312-win32.whl", hash = "sha256:66c92ec7f95fd9732550ebedefcd190a8d81beaa97e89d523a0d17198a8bda4d"}, - {file = "fonttools-4.47.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8fa20748de55d0021f83754b371432dca0439e02847962fc4c42a0e444c2d78"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c75e19971209fbbce891ebfd1b10c37320a5a28e8d438861c21d35305aedb81c"}, - {file = "fonttools-4.47.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e79f1a3970d25f692bbb8c8c2637e621a66c0d60c109ab48d4a160f50856deff"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:562681188c62c024fe2c611b32e08b8de2afa00c0c4e72bed47c47c318e16d5c"}, - {file = "fonttools-4.47.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a77a60315c33393b2bd29d538d1ef026060a63d3a49a9233b779261bad9c3f71"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4fabb8cc9422efae1a925160083fdcbab8fdc96a8483441eb7457235df625bd"}, - {file = "fonttools-4.47.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a78dba8c2a1e9d53a0fb5382979f024200dc86adc46a56cbb668a2249862fda"}, - {file = "fonttools-4.47.0-cp38-cp38-win32.whl", hash = "sha256:e6b968543fde4119231c12c2a953dcf83349590ca631ba8216a8edf9cd4d36a9"}, - {file = "fonttools-4.47.0-cp38-cp38-win_amd64.whl", hash = "sha256:4a9a51745c0439516d947480d4d884fa18bd1458e05b829e482b9269afa655bc"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:62d8ddb058b8e87018e5dc26f3258e2c30daad4c87262dfeb0e2617dd84750e6"}, - {file = "fonttools-4.47.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dde0eab40faaa5476133123f6a622a1cc3ac9b7af45d65690870620323308b4"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4da089f6dfdb822293bde576916492cd708c37c2501c3651adde39804630538"}, - {file = "fonttools-4.47.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:253bb46bab970e8aae254cebf2ae3db98a4ef6bd034707aa68a239027d2b198d"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1193fb090061efa2f9e2d8d743ae9850c77b66746a3b32792324cdce65784154"}, - {file = "fonttools-4.47.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:084511482dd265bce6dca24c509894062f0117e4e6869384d853f46c0e6d43be"}, - {file = "fonttools-4.47.0-cp39-cp39-win32.whl", hash = "sha256:97620c4af36e4c849e52661492e31dc36916df12571cb900d16960ab8e92a980"}, - {file = "fonttools-4.47.0-cp39-cp39-win_amd64.whl", hash = "sha256:e77bdf52185bdaf63d39f3e1ac3212e6cfa3ab07d509b94557a8902ce9c13c82"}, - {file = "fonttools-4.47.0-py3-none-any.whl", hash = "sha256:d6477ba902dd2d7adda7f0fd3bfaeb92885d45993c9e1928c9f28fc3961415f7"}, - {file = "fonttools-4.47.0.tar.gz", hash = "sha256:ec13a10715eef0e031858c1c23bfaee6cba02b97558e4a7bfa089dba4a8c2ebf"}, + {file = "fonttools-4.49.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d970ecca0aac90d399e458f0b7a8a597e08f95de021f17785fb68e2dc0b99717"}, + {file = "fonttools-4.49.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac9a745b7609f489faa65e1dc842168c18530874a5f5b742ac3dd79e26bca8bc"}, + {file = "fonttools-4.49.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ba0e00620ca28d4ca11fc700806fd69144b463aa3275e1b36e56c7c09915559"}, + {file = "fonttools-4.49.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdee3ab220283057e7840d5fb768ad4c2ebe65bdba6f75d5d7bf47f4e0ed7d29"}, + {file = "fonttools-4.49.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ce7033cb61f2bb65d8849658d3786188afd80f53dad8366a7232654804529532"}, + {file = "fonttools-4.49.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:07bc5ea02bb7bc3aa40a1eb0481ce20e8d9b9642a9536cde0218290dd6085828"}, + {file = "fonttools-4.49.0-cp310-cp310-win32.whl", hash = "sha256:86eef6aab7fd7c6c8545f3ebd00fd1d6729ca1f63b0cb4d621bccb7d1d1c852b"}, + {file = "fonttools-4.49.0-cp310-cp310-win_amd64.whl", hash = "sha256:1fac1b7eebfce75ea663e860e7c5b4a8831b858c17acd68263bc156125201abf"}, + {file = "fonttools-4.49.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:edc0cce355984bb3c1d1e89d6a661934d39586bb32191ebff98c600f8957c63e"}, + {file = "fonttools-4.49.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:83a0d9336de2cba86d886507dd6e0153df333ac787377325a39a2797ec529814"}, + {file = "fonttools-4.49.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36c8865bdb5cfeec88f5028e7e592370a0657b676c6f1d84a2108e0564f90e22"}, + {file = "fonttools-4.49.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33037d9e56e2562c710c8954d0f20d25b8386b397250d65581e544edc9d6b942"}, + {file = "fonttools-4.49.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8fb022d799b96df3eaa27263e9eea306bd3d437cc9aa981820850281a02b6c9a"}, + {file = "fonttools-4.49.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33c584c0ef7dc54f5dd4f84082eabd8d09d1871a3d8ca2986b0c0c98165f8e86"}, + {file = "fonttools-4.49.0-cp311-cp311-win32.whl", hash = "sha256:cbe61b158deb09cffdd8540dc4a948d6e8f4d5b4f3bf5cd7db09bd6a61fee64e"}, + {file = "fonttools-4.49.0-cp311-cp311-win_amd64.whl", hash = "sha256:fc11e5114f3f978d0cea7e9853627935b30d451742eeb4239a81a677bdee6bf6"}, + {file = "fonttools-4.49.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d647a0e697e5daa98c87993726da8281c7233d9d4ffe410812a4896c7c57c075"}, + {file = "fonttools-4.49.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f3bbe672df03563d1f3a691ae531f2e31f84061724c319652039e5a70927167e"}, + {file = "fonttools-4.49.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bebd91041dda0d511b0d303180ed36e31f4f54b106b1259b69fade68413aa7ff"}, + {file = "fonttools-4.49.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4145f91531fd43c50f9eb893faa08399816bb0b13c425667c48475c9f3a2b9b5"}, + {file = "fonttools-4.49.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea329dafb9670ffbdf4dbc3b0e5c264104abcd8441d56de77f06967f032943cb"}, + {file = "fonttools-4.49.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c076a9e548521ecc13d944b1d261ff3d7825048c338722a4bd126d22316087b7"}, + {file = "fonttools-4.49.0-cp312-cp312-win32.whl", hash = "sha256:b607ea1e96768d13be26d2b400d10d3ebd1456343eb5eaddd2f47d1c4bd00880"}, + {file = "fonttools-4.49.0-cp312-cp312-win_amd64.whl", hash = "sha256:a974c49a981e187381b9cc2c07c6b902d0079b88ff01aed34695ec5360767034"}, + {file = "fonttools-4.49.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b85ec0bdd7bdaa5c1946398cbb541e90a6dfc51df76dfa88e0aaa41b335940cb"}, + {file = "fonttools-4.49.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:af20acbe198a8a790618ee42db192eb128afcdcc4e96d99993aca0b60d1faeb4"}, + {file = "fonttools-4.49.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d418b1fee41a1d14931f7ab4b92dc0bc323b490e41d7a333eec82c9f1780c75"}, + {file = "fonttools-4.49.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b44a52b8e6244b6548851b03b2b377a9702b88ddc21dcaf56a15a0393d425cb9"}, + {file = "fonttools-4.49.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7c7125068e04a70739dad11857a4d47626f2b0bd54de39e8622e89701836eabd"}, + {file = "fonttools-4.49.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29e89d0e1a7f18bc30f197cfadcbef5a13d99806447c7e245f5667579a808036"}, + {file = "fonttools-4.49.0-cp38-cp38-win32.whl", hash = "sha256:9d95fa0d22bf4f12d2fb7b07a46070cdfc19ef5a7b1c98bc172bfab5bf0d6844"}, + {file = "fonttools-4.49.0-cp38-cp38-win_amd64.whl", hash = "sha256:768947008b4dc552d02772e5ebd49e71430a466e2373008ce905f953afea755a"}, + {file = "fonttools-4.49.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:08877e355d3dde1c11973bb58d4acad1981e6d1140711230a4bfb40b2b937ccc"}, + {file = "fonttools-4.49.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fdb54b076f25d6b0f0298dc706acee5052de20c83530fa165b60d1f2e9cbe3cb"}, + {file = "fonttools-4.49.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0af65c720520710cc01c293f9c70bd69684365c6015cc3671db2b7d807fe51f2"}, + {file = "fonttools-4.49.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f255ce8ed7556658f6d23f6afd22a6d9bbc3edb9b96c96682124dc487e1bf42"}, + {file = "fonttools-4.49.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d00af0884c0e65f60dfaf9340e26658836b935052fdd0439952ae42e44fdd2be"}, + {file = "fonttools-4.49.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:263832fae27481d48dfafcc43174644b6706639661e242902ceb30553557e16c"}, + {file = "fonttools-4.49.0-cp39-cp39-win32.whl", hash = "sha256:0404faea044577a01bb82d47a8fa4bc7a54067fa7e324785dd65d200d6dd1133"}, + {file = "fonttools-4.49.0-cp39-cp39-win_amd64.whl", hash = "sha256:b050d362df50fc6e38ae3954d8c29bf2da52be384649ee8245fdb5186b620836"}, + {file = "fonttools-4.49.0-py3-none-any.whl", hash = "sha256:af281525e5dd7fa0b39fb1667b8d5ca0e2a9079967e14c4bfe90fd1cd13e0f18"}, + {file = "fonttools-4.49.0.tar.gz", hash = "sha256:ebf46e7f01b7af7861310417d7c49591a85d99146fc23a5ba82fdb28af156321"}, ] [package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0,<5)"] +lxml = ["lxml (>=4.0)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] repacker = ["uharfbuzz (>=0.23.0)"] @@ -1390,13 +1390,13 @@ files = [ [[package]] name = "google-api-core" -version = "2.15.0" +version = "2.17.1" description = "Google API client core library" optional = false python-versions = ">=3.7" files = [ - {file = "google-api-core-2.15.0.tar.gz", hash = "sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca"}, - {file = "google_api_core-2.15.0-py3-none-any.whl", hash = "sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a"}, + {file = "google-api-core-2.17.1.tar.gz", hash = "sha256:9df18a1f87ee0df0bc4eea2770ebc4228392d8cc4066655b320e2cfccb15db95"}, + {file = "google_api_core-2.17.1-py3-none-any.whl", hash = "sha256:610c5b90092c360736baccf17bd3efbcb30dd380e7a6dc28a71059edb8bd0d8e"}, ] [package.dependencies] @@ -1420,13 +1420,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-auth" -version = "2.25.2" +version = "2.28.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"}, - {file = "google_auth-2.25.2-py2.py3-none-any.whl", hash = "sha256:473a8dfd0135f75bb79d878436e568f2695dce456764bf3a02b6f8c540b1d256"}, + {file = "google-auth-2.28.0.tar.gz", hash = "sha256:3cfc1b6e4e64797584fb53fc9bd0b7afa9b7c0dba2004fa7dcc9349e58cc3195"}, + {file 
= "google_auth-2.28.0-py2.py3-none-any.whl", hash = "sha256:7634d29dcd1e101f5226a23cbc4a0c6cda6394253bf80e281d9c5c6797869c53"}, ] [package.dependencies] @@ -2491,13 +2491,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= [[package]] name = "nbconvert" -version = "7.16.0" -description = "Converting Jupyter Notebooks" +version = "7.16.1" +description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." optional = false python-versions = ">=3.8" files = [ - {file = "nbconvert-7.16.0-py3-none-any.whl", hash = "sha256:ad3dc865ea6e2768d31b7eb6c7ab3be014927216a5ece3ef276748dd809054c7"}, - {file = "nbconvert-7.16.0.tar.gz", hash = "sha256:813e6553796362489ae572e39ba1bff978536192fb518e10826b0e8cadf03ec8"}, + {file = "nbconvert-7.16.1-py3-none-any.whl", hash = "sha256:3188727dffadfdc9c6a1c7250729063d7bc78b355ad7aa023138afa030d1cd07"}, + {file = "nbconvert-7.16.1.tar.gz", hash = "sha256:e79e6a074f49ba3ed29428ed86487bf51509d9aab613bd8522ac08f6d28fd7fd"}, ] [package.dependencies] @@ -2841,46 +2841,46 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] [[package]] name = "osqp" -version = "0.6.5" +version = "0.6.4" description = "OSQP: The Operator Splitting QP Solver" optional = false python-versions = "*" files = [ - {file = "osqp-0.6.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e8024dba07281111af39e71bff6449fb22a37bf3358aa0c7fd1daa6bca692c99"}, - {file = "osqp-0.6.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e247f2bbb53e87f1c1ca80ff3fc86b781f771d6da2a2ecd2f6e7492c802f3"}, - {file = "osqp-0.6.5-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81e299637eb2342e30eb2df0ec45dc243683af0a71676c9b45b9337bb05da97"}, - {file = "osqp-0.6.5-cp310-cp310-win_amd64.whl", hash = "sha256:42425632927d983cbe935067783b944ebd4959e9eb6611da8401007b66a0c841"}, - {file = "osqp-0.6.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a7b180db09be1c3e3cb4109396b894f481ca9c6e160a530acd71f1769610f96c"}, - {file = "osqp-0.6.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:648f4beff10c16620f3b95e86dee702052d587b847ddbd5d8f71ad39ac36db3a"}, - {file = "osqp-0.6.5-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7649d56d775662e0a5d1665ed220d585f904d14a49cc6931bf27725bb9c4b2e0"}, - {file = "osqp-0.6.5-cp311-cp311-win_amd64.whl", hash = "sha256:b033b7aec973a655cfec4558e0c4fc92ee9f914bcb0a669e0156398d8ddbef8f"}, - {file = "osqp-0.6.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5c344619465e625aac6d13812d442dd31d4a9ab243e39abb5938c3f6116409b0"}, - {file = "osqp-0.6.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000ad48aa071ecc4c75ebc39d1291752fe3a9937a30d00fff5dc61663ec67eeb"}, - {file = "osqp-0.6.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36a40df69db5195fba613341663db2c7dcf977eb75b9578a8fd7682bbe02324"}, - {file = "osqp-0.6.5-cp312-cp312-win_amd64.whl", hash = "sha256:3d8212db7c55af1961ccce4a32fd382bfe34e2198664ea3f81cc47eef8d0f288"}, - {file = "osqp-0.6.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:ca7d80c0767b1350cd74e4f1446ec51661152690d38b1382ceccdfccd757afce"}, - {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b15e2b96d4d9b2eff37a05405372c69cf17ada3d1e42c5e28cbdbd053189ab5"}, - {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a41600e34ece7156606fd3620987fdf224b0a35c857540cb5bf45072f5c022b"}, - {file = "osqp-0.6.5-cp36-cp36m-win_amd64.whl", hash = "sha256:8c38574b35a3ddfb794aafee9bc5a74635160b9fc52bbc89ae6164fe207556de"}, - {file = "osqp-0.6.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d06f614e3be1b1f3cd68569b2dc3628c2fdef1e7c4b992672fe05efb1add9801"}, - {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a6b995e0a022bd1c33d20d8846d9a068df89cec288b905b5cdfdb98a2ffae8"}, - {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09de9b53e7513ee4ade3024ce9f36ef993d916118d0927cce740d086882ea92c"}, - {file = "osqp-0.6.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1f80f85d515ef29b90fb34f137857e75d4fcf21a715d644f54d2cf9494567fab"}, - {file = "osqp-0.6.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de9b9e96001e8f0b2e474106ac75e220fd9279e1635b107b836a6035795e8d07"}, - {file = "osqp-0.6.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe545d7a87a46cfc57dfb9f0aa2788d2f29e0c71dc1ac57e92f9c9d93064753"}, - {file = "osqp-0.6.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49ab020b5fd7abb5da99e01e47bf81f817ba1df6895e3d3ba4893722cc24d9b6"}, - {file = "osqp-0.6.5-cp38-cp38-win_amd64.whl", hash = "sha256:5d1b5ed6fc4faea94117a0abe140fefe980449b29d3907bd2e6ec1c18eca3d43"}, - {file = "osqp-0.6.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dca127b7a333ce53fb430fc441b2e0aee2df619693d967277a8f8fd095e95007"}, - {file = "osqp-0.6.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ec902844defedf7c5a5ed482b93286d1735a65b71bb27c93e18c929f313c93d"}, - {file = "osqp-0.6.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25a9e1e8f1db38094dc7ee544e603e31fe7bf1b2a3fc75c78c1d39a727e2540"}, - {file = "osqp-0.6.5-cp39-cp39-win_amd64.whl", hash = "sha256:6dce90d8c4ad551489a452573ea819e089e1e1c3b23bbd8f155bb6059ce8ef36"}, - {file = "osqp-0.6.5.tar.gz", hash = "sha256:b2810aee7be2373add8b6c0be5ad99b810288774abca421751cb032d6a5aedef"}, + {file = "osqp-0.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c34dc340b4dc46ed86f811b1015bb2ece444d310b4bb638e509a02df88594c1"}, + {file = "osqp-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7fb1ae278d14b7080acfe4d252c4f6df563dd8622847e73f8e5d1f2e027db41"}, + {file = "osqp-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2488dc19d48fbb46118312cf1a1292942ab41cd5588cf6c75ff1b521afb99ce3"}, + {file = "osqp-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:adaf59b134745aec21409e698dcd72d8997be2652e35ed1f5302aaba69654831"}, + {file = "osqp-0.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:20aa182b23ca5d433d1b8144d46296304a493d1cc1712cf45c591e5dd7a19436"}, + {file = "osqp-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:21c79624c831e6070b3b1ca1df34032c222cc87e467def5e038713d20c9ffb5c"}, + {file = "osqp-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eeb4a3982929f5ea89fc2cc0cef238c489020b02671012f0b60a7a7c1df5093"}, + {file = "osqp-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:b62631f7388cdc49619e256110595fe741afab4d779fcc2b2ab55922cc93367f"}, + {file = "osqp-0.6.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7d8dc0a5459558d3f2f975110e21e2292558c943047f09fb51ebc62d07a164c"}, + {file = "osqp-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89f1b270ed46a92384daa022ed336d58b5f06bdc49abe9684d41aaec02717895"}, + {file = "osqp-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f7d8b91b0248beb95abda710bbf28ee98d5675dc9f77df7b5412da222e4f5c"}, + {file = "osqp-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:ff72fc0cec63965979e86bc99bec1658b85c3e6d8e9f95c37cc5c531fa48aabe"}, + {file = "osqp-0.6.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b7dbc83605a68703f8e509f590ab71f0f6d6992443ae534a8d99d8878bfabd73"}, + {file = "osqp-0.6.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1603ff6d699adcbf7628dadfa54b566023412b60f04f6dda36fc81cf59a678c"}, + {file = "osqp-0.6.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:866b16ea55a7ec831ed4fce3c5c812a6fcb84d8b0016a858f1ecc9bf63dcbb00"}, + {file = "osqp-0.6.4-cp36-cp36m-win_amd64.whl", hash = "sha256:5764886a48fc670370283cb7b004cbd5b570967bde3ecf2905e7662d6223c5bc"}, + {file = "osqp-0.6.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f606cce8f8b5bd9a6a80e3c25e2ffc0180a9da9b550731c0440b1de10565b89e"}, + {file = "osqp-0.6.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0faf25c10b84cd4005b24b290e0b6d885c3e30d01fc065f930a46c8da5401f49"}, + {file = "osqp-0.6.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac61b42c1944522bc2db6d38a55cc4b569c98c4e1e512a73d202af578d678f0f"}, + {file = "osqp-0.6.4-cp37-cp37m-win_amd64.whl", hash = "sha256:4f2f7fd96582a69c030d883b9f701028a6df690637d4a122e9043d3062e5e776"}, + {file = "osqp-0.6.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c80a308d12c4f065ae069060d6ff1b64624d03f832221f073ddaef0ce387cfa"}, + {file = "osqp-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa763c67c3ba5ce1191e4ce4dfc54c6b5fc96e794ea5bae6b03793897af93cf"}, + {file = "osqp-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b57785b2ed7928b2535978fc862b5d6826a1db69d8d21151630f654d42d7c829"}, + {file = "osqp-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:681e8881f71a997a1506ddb8631daa3207d03f59ac929987103f4289287c8065"}, + {file = "osqp-0.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdbf25b567b53192a82a6495979d7714198a1500ca5339c55d851c8d5c7cb8e7"}, + {file = "osqp-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72efd10d855c3ed5773ff7f72c76dcddff6bb2454149b27e262d611ba6fb2f28"}, + {file = "osqp-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11bc1c5877610afae71ebff5b69325a5a4fc68b155613e454c793a66c5a11bd"}, + {file = "osqp-0.6.4-cp39-cp39-win_amd64.whl", hash = 
"sha256:702a33c736603e8457acb7512d706bf1d6903f6a75ad140f6c8d14a234cd3f35"}, + {file = "osqp-0.6.4.tar.gz", hash = "sha256:cfa33e0be422ee5d3e792e7c081bcbf6fa222fc2175b6fdde4c4a219354c5e42"}, ] [package.dependencies] numpy = ">=1.7" qdldl = "*" -scipy = ">=0.13.2,<1.12.0" +scipy = ">=0.13.2" [[package]] name = "packaging" @@ -3183,22 +3183,22 @@ testing = ["google-api-core[grpc] (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.1" +version = "4.25.3" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, - {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"}, - {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"}, - {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"}, - {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"}, - {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"}, - {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"}, - {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"}, - {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"}, - {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, + {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, + {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, + {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, + {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, + {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, + {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, + {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = "sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, + {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, + {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, + {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, ] [[package]] @@ -3936,7 
+3936,7 @@ name = "qibojit" version = "0.1.3" description = "Simulation tools based on numba and cupy." optional = false -python-versions = ">=3.9.0,<3.12" +python-versions = "^3.9,<3.12" files = [] develop = false @@ -3950,7 +3950,7 @@ scipy = "^1.10.1" type = "git" url = "https://github.com/qiboteam/qibojit.git" reference = "HEAD" -resolved_reference = "b32f503452127ac915b20af773e29694b5b64dd4" +resolved_reference = "ce537c898e7d1a98329eaaaf359db01ccb60499d" [[package]] name = "recommonmark" @@ -4158,57 +4158,37 @@ files = [ [[package]] name = "scikit-learn" -version = "1.4.0" +version = "1.4.1.post1" description = "A set of python modules for machine learning and data mining" optional = false python-versions = ">=3.9" files = [ - {file = "scikit-learn-1.4.0.tar.gz", hash = "sha256:d4373c984eba20e393216edd51a3e3eede56cbe93d4247516d205643c3b93121"}, - {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fce93a7473e2f4ee4cc280210968288d6a7d7ad8dc6fa7bb7892145e407085f9"}, - {file = "scikit_learn-1.4.0-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d77df3d1e15fc37a9329999979fa7868ba8655dbab21fe97fc7ddabac9e08cc7"}, - {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2404659fedec40eeafa310cd14d613e564d13dbf8f3c752d31c095195ec05de6"}, - {file = "scikit_learn-1.4.0-1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e98632da8f6410e6fb6bf66937712c949b4010600ccd3f22a5388a83e610cc3c"}, - {file = "scikit_learn-1.4.0-1-cp310-cp310-win_amd64.whl", hash = "sha256:11b3b140f70fbc9f6a08884631ae8dd60a4bb2d7d6d1de92738ea42b740d8992"}, - {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8341eabdc754d5ab91641a7763243845e96b6d68e03e472531e88a4f1b09f21"}, - {file = "scikit_learn-1.4.0-1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d1f6bce875ac2bb6b52514f67c185c564ccd299a05b65b7bab091a4c13dde12d"}, - {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c408b46b2fd61952d519ea1af2f8f0a7a703e1433923ab1704c4131520b2083b"}, - {file = "scikit_learn-1.4.0-1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b465dd1dcd237b7b1dcd1a9048ccbf70a98c659474324fa708464c3a2533fad"}, - {file = "scikit_learn-1.4.0-1-cp311-cp311-win_amd64.whl", hash = "sha256:0db8e22c42f7980fe5eb22069b1f84c48966f3e0d23a01afde5999e3987a2501"}, - {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7eef6ea2ed289af40e88c0be9f7704ca8b5de18508a06897c3fe21e0905efdf"}, - {file = "scikit_learn-1.4.0-1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:349669b01435bc4dbf25c6410b0892073befdaec52637d1a1d1ff53865dc8db3"}, - {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d439c584e58434d0350701bd33f6c10b309e851fccaf41c121aed55f6851d8cf"}, - {file = "scikit_learn-1.4.0-1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0e2427d9ef46477625ab9b55c1882844fe6fc500f418c3f8e650200182457bc"}, - {file = "scikit_learn-1.4.0-1-cp312-cp312-win_amd64.whl", hash = "sha256:d3d75343940e7bf9b85c830c93d34039fa015eeb341c5c0b4cd7a90dadfe00d4"}, - {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:76986d22e884ab062b1beecdd92379656e9d3789ecc1f9870923c178de55f9fe"}, - {file = "scikit_learn-1.4.0-1-cp39-cp39-macosx_12_0_arm64.whl", hash = 
"sha256:e22446ad89f1cb7657f0d849dcdc345b48e2d10afa3daf2925fdb740f85b714c"}, - {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74812c9eabb265be69d738a8ea8d4884917a59637fcbf88a5f0e9020498bc6b3"}, - {file = "scikit_learn-1.4.0-1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad2a63e0dd386b92da3270887a29b308af4d7c750d8c4995dfd9a4798691bcc"}, - {file = "scikit_learn-1.4.0-1-cp39-cp39-win_amd64.whl", hash = "sha256:53b9e29177897c37e2ff9d4ba6ca12fdb156e22523e463db05def303f5c72b5c"}, - {file = "scikit_learn-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cb8f044a8f5962613ce1feb4351d66f8d784bd072d36393582f351859b065f7d"}, - {file = "scikit_learn-1.4.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a6372c90bbf302387792108379f1ec77719c1618d88496d0df30cb8e370b4661"}, - {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:785ce3c352bf697adfda357c3922c94517a9376002971bc5ea50896144bc8916"}, - {file = "scikit_learn-1.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0aba2a20d89936d6e72d95d05e3bf1db55bca5c5920926ad7b92c34f5e7d3bbe"}, - {file = "scikit_learn-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:2bac5d56b992f8f06816f2cd321eb86071c6f6d44bb4b1cb3d626525820d754b"}, - {file = "scikit_learn-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:27ae4b0f1b2c77107c096a7e05b33458354107b47775428d1f11b23e30a73e8a"}, - {file = "scikit_learn-1.4.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5c5c62ffb52c3ffb755eb21fa74cc2cbf2c521bd53f5c04eaa10011dbecf5f80"}, - {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0d2018ac6fa055dab65fe8a485967990d33c672d55bc254c56c35287b02fab"}, - {file = "scikit_learn-1.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a8918c415c4b4bf1d60c38d32958849a9191c2428ab35d30b78354085c7c7a"}, - {file = "scikit_learn-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:80a21de63275f8bcd7877b3e781679d2ff1eddfed515a599f95b2502a3283d42"}, - {file = "scikit_learn-1.4.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0f33bbafb310c26b81c4d41ecaebdbc1f63498a3f13461d50ed9a2e8f24d28e4"}, - {file = "scikit_learn-1.4.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:8b6ac1442ec714b4911e5aef8afd82c691b5c88b525ea58299d455acc4e8dcec"}, - {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05fc5915b716c6cc60a438c250108e9a9445b522975ed37e416d5ea4f9a63381"}, - {file = "scikit_learn-1.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:842b7d6989f3c574685e18da6f91223eb32301d0f93903dd399894250835a6f7"}, - {file = "scikit_learn-1.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:88bcb586fdff865372df1bc6be88bb7e6f9e0aa080dab9f54f5cac7eca8e2b6b"}, - {file = "scikit_learn-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f77674647dd31f56cb12ed13ed25b6ed43a056fffef051715022d2ebffd7a7d1"}, - {file = "scikit_learn-1.4.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:833999872e2920ce00f3a50839946bdac7539454e200eb6db54898a41f4bfd43"}, - {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:970ec697accaef10fb4f51763f3a7b1250f9f0553cf05514d0e94905322a0172"}, - {file = "scikit_learn-1.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:923d778f378ebacca2c672ab1740e5a413e437fb45ab45ab02578f8b689e5d43"}, - {file = "scikit_learn-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d041bc95006b545b59e458399e3175ab11ca7a03dc9a74a573ac891f5df1489"}, + {file = "scikit-learn-1.4.1.post1.tar.gz", hash = "sha256:93d3d496ff1965470f9977d05e5ec3376fb1e63b10e4fda5e39d23c2d8969a30"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c540aaf44729ab5cd4bd5e394f2b375e65ceaea9cdd8c195788e70433d91bbc5"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4310bff71aa98b45b46cd26fa641309deb73a5d1c0461d181587ad4f30ea3c36"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f43dd527dabff5521af2786a2f8de5ba381e182ec7292663508901cf6ceaf6e"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c02e27d65b0c7dc32f2c5eb601aaf5530b7a02bfbe92438188624524878336f2"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-win_amd64.whl", hash = "sha256:629e09f772ad42f657ca60a1a52342eef786218dd20cf1369a3b8d085e55ef8f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6145dfd9605b0b50ae72cdf72b61a2acd87501369a763b0d73d004710ebb76b5"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1afed6951bc9d2053c6ee9a518a466cbc9b07c6a3f9d43bfe734192b6125d508"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce03506ccf5f96b7e9030fea7eb148999b254c44c10182ac55857bc9b5d4815f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ba516fcdc73d60e7f48cbb0bccb9acbdb21807de3651531208aac73c758e3ab"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-win_amd64.whl", hash = "sha256:78cd27b4669513b50db4f683ef41ea35b5dddc797bd2bbd990d49897fd1c8a46"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a1e289f33f613cefe6707dead50db31930530dc386b6ccff176c786335a7b01c"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0df87de9ce1c0140f2818beef310fb2e2afdc1e66fc9ad587965577f17733649"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:712c1c69c45b58ef21635360b3d0a680ff7d83ac95b6f9b82cf9294070cda710"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1754b0c2409d6ed5a3380512d0adcf182a01363c669033a2b55cca429ed86a81"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-win_amd64.whl", hash = "sha256:1d491ef66e37f4e812db7e6c8286520c2c3fc61b34bf5e59b67b4ce528de93af"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa0029b78ef59af22cfbd833e8ace8526e4df90212db7ceccbea582ebb5d6794"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e4c88436ac96bf69eb6d746ac76a574c314a23c6961b7d344b38877f20fee1"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7cd3a77c32879311f2aa93466d3c288c955ef71d191503cf0677c3340ae8ae0"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a3ee19211ded1a52ee37b0a7b373a8bfc66f95353af058a210b692bd4cda0dd"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-win_amd64.whl", hash = 
"sha256:234b6bda70fdcae9e4abbbe028582ce99c280458665a155eed0b820599377d25"}, ] [package.dependencies] joblib = ">=1.2.0" -numpy = ">=1.19.5" +numpy = ">=1.19.5,<2.0" scipy = ">=1.6.0" threadpoolctl = ">=2.0.0" @@ -4220,45 +4200,45 @@ tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc ( [[package]] name = "scipy" -version = "1.11.4" +version = "1.12.0" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, - {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, - {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, - {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, - {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, - {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, - {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, - {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, - {file = 
"scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, - {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, - {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, - {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"}, + {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"}, + {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"}, + {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"}, + {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"}, + {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"}, + {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"}, + {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"}, + {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"}, + {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"}, ] [package.dependencies] -numpy = ">=1.21.6,<1.28.0" +numpy = ">=1.22.4,<1.29.0" [package.extras] dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "scs" @@ -4295,18 +4275,18 @@ scipy = "*" [[package]] name = "setuptools" -version = "69.0.2" +version = "69.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, - {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, + {file = "setuptools-69.1.0-py3-none-any.whl", hash = "sha256:c054629b81b946d63a9c6e732bc8b2513a7c3ea645f11d0139a2191d735c60c6"}, + {file = "setuptools-69.1.0.tar.gz", hash = "sha256:850894c4195f09c4ed30dba56213bf7c3f21d86ed6bdaafb5df5972593bfc401"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", 
"pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] [[package]] @@ -5005,24 +4985,24 @@ files = [ [[package]] name = "tzdata" -version = "2023.3" +version = "2024.1" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, ] [[package]] name = "urllib3" -version = "2.2.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.0-py3-none-any.whl", hash = "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224"}, - {file = "urllib3-2.2.0.tar.gz", hash = "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] From 0f2804db41731b2b216777e77eb12c5cba9cd6f5 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 23 Feb 2024 13:59:36 +0400 Subject: [PATCH 037/127] change self.np to torch and eliminate redundant functions --- src/qibo/backends/numpy.py | 10 +- src/qibo/backends/pytorch.py | 314 +++++++++++------------------------ 2 files changed, 100 insertions(+), 224 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 6f7c6215eb..fec993bdb4 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -369,6 +369,8 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): return state def execute_circuit(self, circuit, initial_state=None, nshots=1000): + if initial_state is not None: + initial_state = self.cast(initial_state) if isinstance(initial_state, type(circuit)): if not initial_state.density_matrix == circuit.density_matrix: raise_error( @@ -528,7 +530,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): if circuit.density_matrix: # this implies also it has_collapse assert circuit.has_collapse - final_state = np.mean(self.to_numpy(final_states), 0) + final_state = self.np.mean(self.to_numpy(final_states), 0) if circuit.measurements: qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( @@ -655,13 +657,15 @@ def sample_frequencies(self, probabilities, nshots): from qibo.config import SHOT_BATCH_SIZE nprobs = probabilities / self.np.sum(probabilities) - frequencies = self.np.zeros(len(nprobs), dtype="int64") + frequencies = self.np.zeros(len(nprobs), dtype=self.np.int64) for _ in range(nshots // SHOT_BATCH_SIZE): frequencies = self.update_frequencies(frequencies, nprobs, SHOT_BATCH_SIZE) frequencies = self.update_frequencies( frequencies, nprobs, nshots % 
SHOT_BATCH_SIZE ) - return collections.Counter({i: f for i, f in enumerate(frequencies) if f > 0}) + return collections.Counter( + {i: f.item() for i, f in enumerate(frequencies) if f > 0} + ) def apply_bitflips(self, noiseless_samples, bitflip_probabilities): fprobs = self.np.array(bitflip_probabilities, dtype="float64") diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 97967e3f23..0f3f781046 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -8,7 +8,6 @@ from qibo.backends import einsum_utils from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend -from qibo.config import raise_error from qibo.result import CircuitResult, MeasurementOutcomes, QuantumState torch_dtype_dict = { @@ -25,7 +24,6 @@ class TorchMatrices(NumpyMatrices): - # Redefine parametrized gate matrices for backpropagation to work def __init__(self, dtype): super().__init__(dtype) @@ -53,9 +51,9 @@ def __init__(self): self.matrices = TorchMatrices(self.dtype) self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.nthreads = 0 - self.torch = torch - self.torch_dtype = torch_dtype_dict[self.dtype] - self.tensor_types = (self.torch.Tensor, np.ndarray) + self.np = torch + self.dtype = torch_dtype_dict[self.dtype] + self.tensor_types = (self.np.Tensor, np.ndarray) def set_device(self, device): # pragma: no cover self.device = device @@ -79,22 +77,20 @@ def cast( Defaults to ``False``. """ if dtype is None: - dtype = self.torch_dtype - elif isinstance(dtype, self.torch.dtype): + dtype = self.dtype + elif isinstance(dtype, self.np.dtype): dtype = dtype elif isinstance(dtype, type): dtype = torch_dtype_dict[dtype.__name__] else: dtype = torch_dtype_dict[str(dtype)] - if isinstance(x, self.torch.Tensor): + if isinstance(x, self.np.Tensor): x = x.to(dtype) - elif isinstance(x, list) and all( - isinstance(row, self.torch.Tensor) for row in x - ): - x = self.torch.stack(x) + elif isinstance(x, list) and all(isinstance(row, self.np.Tensor) for row in x): + x = self.np.stack(x) else: - x = self.torch.tensor(x, dtype=dtype) + x = self.np.tensor(x, dtype=dtype) if copy: return x.clone() @@ -103,168 +99,58 @@ def cast( def apply_gate(self, gate, state, nqubits): state = self.cast(state) - state = self.torch.reshape(state, nqubits * (2,)) + state = self.np.reshape(state, nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: - matrix = self.torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol order, targets = einsum_utils.control_order(gate, nqubits) state = state.permute(*order) - state = self.torch.reshape(state, (2**ncontrol,) + nactive * (2,)) + state = self.np.reshape(state, (2**ncontrol,) + nactive * (2,)) opstring = einsum_utils.apply_gate_string(targets, nactive) - updates = self.torch.einsum(opstring, state[-1], matrix) - state = self.torch.cat([state[:-1], updates[None]], axis=0) - state = self.torch.reshape(state, nqubits * (2,)) + updates = self.np.einsum(opstring, state[-1], matrix) + state = self.np.cat([state[:-1], updates[None]], axis=0) + state = self.np.reshape(state, nqubits * (2,)) state = state.permute(*einsum_utils.reverse_order(order)) else: - matrix = self.torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) - state = 
self.torch.einsum(opstring, state, matrix) - return self.torch.reshape(state, (2**nqubits,)) + state = self.np.einsum(opstring, state, matrix) + return self.np.reshape(state, (2**nqubits,)) def issparse(self, x): - if isinstance(x, self.torch.Tensor): + if isinstance(x, self.np.Tensor): return x.is_sparse return super().issparse(x) def to_numpy(self, x): - if isinstance(x, self.torch.Tensor): + if isinstance(x, self.np.Tensor): return x.numpy(force=True) return x def compile(self, func): - return self.torch.jit.script(func) - - def zero_state(self, nqubits): - state = self.torch.zeros(2**nqubits, dtype=self.torch_dtype) - state[0] = 1 - return state - - def zero_density_matrix(self, nqubits): - state = self.torch.zeros(2 * (2**nqubits,), dtype=self.torch_dtype) - state[0, 0] = 1 - return state + return self.np.jit.script(func) def matrix(self, gate): npmatrix = super().matrix(gate) - return self.torch.tensor(npmatrix, dtype=self.torch_dtype) + return self.np.tensor(npmatrix, dtype=self.dtype) def matrix_parametrized(self, gate): npmatrix = super().matrix_parametrized(gate) - return self.torch.tensor(npmatrix, dtype=self.torch_dtype) + return self.np.tensor(npmatrix, dtype=self.dtype) def matrix_fused(self, gate): npmatrix = super().matrix_fused(gate) - return self.torch.tensor(npmatrix, dtype=self.torch_dtype) - - def execute_circuit(self, circuit, initial_state=None, nshots=1000): - if initial_state is not None: - initial_state = self.cast(initial_state) - return super().execute_circuit(circuit, initial_state, nshots) - - def execute_circuit_repeated(self, circuit, nshots, initial_state=None): - """ - Execute the circuit `nshots` times to retrieve probabilities, frequencies - and samples. Note that this method is called only if a unitary channel - is present in the circuit (i.e. noisy simulation) and `density_matrix=False`, or - if some collapsing measuremnt is performed. - """ - - if ( - circuit.has_collapse - and not circuit.measurements - and not circuit.density_matrix - ): - raise RuntimeError( - "The circuit contains only collapsing measurements (`collapse=True`) but `density_matrix=False`. Please set `density_matrix=True` to retrieve the final state after execution." 
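The deletions in this hunk lean on the alias set in __init__ (self.np = torch), so the inherited NumpyBackend implementations dispatch to torch and the torch-specific copies become redundant. A minimal standalone sketch of that pattern, assuming only torch is installed; GenericBackend and TorchLikeBackend are illustrative names, not qibo classes:

import torch

class GenericBackend:
    # generic code written against whatever array module is stored in self.np
    dtype = torch.complex128  # illustrative; the real backend maps dtype strings

    def zero_state(self, nqubits):
        state = self.np.zeros(2**nqubits, dtype=self.dtype)
        state[0] = 1
        return state

class TorchLikeBackend(GenericBackend):
    def __init__(self):
        self.np = torch  # torch stands in for numpy, as in the diff above

print(TorchLikeBackend().zero_state(2))  # complex tensor with amplitude 1 on the first entry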
- ) - - results, final_states = [], [] - nqubits = circuit.nqubits - - if not circuit.density_matrix: - samples = [] - target_qubits = [ - measurement.target_qubits for measurement in circuit.measurements - ] - target_qubits = sum(target_qubits, tuple()) - - for _ in range(nshots): - if circuit.density_matrix: - if initial_state is None: - state = self.zero_density_matrix(nqubits) - else: - state = self.cast(initial_state, copy=True) - - for gate in circuit.queue: - if gate.symbolic_parameters: - gate.substitute_symbols() - state = gate.apply_density_matrix(self, state, nqubits) - else: - if circuit.accelerators: # pragma: no cover - # pylint: disable=E1111 - state = self.execute_distributed_circuit(circuit, initial_state) - else: - if initial_state is None: - state = self.zero_state(nqubits) - else: - state = self.cast(initial_state, copy=True) - - for gate in circuit.queue: - if gate.symbolic_parameters: - gate.substitute_symbols() - state = gate.apply(self, state, nqubits) - - if circuit.density_matrix: - final_states.append(state) - if circuit.measurements: - result = CircuitResult( - state, circuit.measurements, backend=self, nshots=1 - ) - sample = result.samples()[0] - results.append(sample) - if not circuit.density_matrix: - samples.append("".join([str(s) for s in sample])) - for gate in circuit.measurements: - gate.result.reset() - - if circuit.density_matrix: # this implies also it has_collapse - assert circuit.has_collapse - final_state = self.torch.mean(self.torch.stack(final_states), 0) - if circuit.measurements: - qubits = [q for m in circuit.measurements for q in m.target_qubits] - final_result = CircuitResult( - final_state, - circuit.measurements, - backend=self, - samples=self.aggregate_shots(results), - nshots=nshots, - ) - else: - final_result = QuantumState(final_state, backend=self) - circuit._final_state = final_result - return final_result - else: - final_result = MeasurementOutcomes( - circuit.measurements, - backend=self, - samples=self.aggregate_shots(results), - nshots=nshots, - ) - final_result._repeated_execution_frequencies = self.calculate_frequencies( - samples - ) - circuit._final_state = final_result - return final_result + return self.np.tensor(npmatrix, dtype=self.dtype) def sample_shots(self, probabilities, nshots): - return self.torch.multinomial( + return self.np.multinomial( self.cast(probabilities, dtype="float"), nshots, replacement=True ) def samples_to_binary(self, samples, nqubits): - qrange = self.torch.arange(nqubits - 1, -1, -1, dtype=self.torch.int32) + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) samples = samples.int() samples = samples[:, None] >> qrange return samples % 2 @@ -284,8 +170,8 @@ def _order_probabilities(self, probs, qubits, nqubits): def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) - state = self.torch.reshape(self.torch.abs(state) ** 2, nqubits * (2,)) - probs = self.torch.sum(state.type(rtype), dim=unmeasured_qubits) + state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) + probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).view(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -293,35 +179,21 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order += tuple(i for i in range(nqubits) if i not in qubits) order = order + tuple(i + nqubits for i in order) shape = 2 
* (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.torch.reshape(state, 2 * nqubits * (2,)) - state = self.torch.reshape(state.permute(*order), shape) - probs = self.torch.abs(self.torch.einsum("abab->a", state)) - probs = self.torch.reshape(probs, len(qubits) * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state.permute(*order), shape) + probs = self.np.abs(self.np.einsum("abab->a", state)) + probs = self.np.reshape(probs, len(qubits) * (2,)) return self._order_probabilities(probs, qubits, nqubits).view(-1) - def sample_frequencies(self, probabilities, nshots): - from qibo.config import SHOT_BATCH_SIZE - - nprobs = probabilities / self.torch.sum(probabilities) - frequencies = self.torch.zeros(len(nprobs), dtype=self.torch.int64) - for _ in range(nshots // SHOT_BATCH_SIZE): - frequencies = self.update_frequencies(frequencies, nprobs, SHOT_BATCH_SIZE) - frequencies = self.update_frequencies( - frequencies, nprobs, nshots % SHOT_BATCH_SIZE - ) - return collections.Counter( - {i: f.item() for i, f in enumerate(frequencies) if f > 0} - ) - def calculate_frequencies(self, samples): - res, counts = self.torch.unique(samples, return_counts=True) + res, counts = self.np.unique(samples, return_counts=True) res, counts = res.tolist(), counts.tolist() return collections.Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): frequencies = self.cast(frequencies, dtype="int") samples = self.sample_shots(probabilities, nsamples) - unique_samples, counts = self.torch.unique(samples, return_counts=True) + unique_samples, counts = self.np.unique(samples, return_counts=True) frequencies.index_add_( 0, self.cast(unique_samples, dtype="int"), self.cast(counts, dtype="int") ) @@ -329,144 +201,144 @@ def update_frequencies(self, frequencies, probabilities, nsamples): def calculate_norm(self, state, order=2): state = self.cast(state) - return self.torch.norm(state, p=order) + return self.np.norm(state, p=order) def calculate_norm_density_matrix(self, state, order="nuc"): state = self.cast(state) if order == "nuc": - return np.trace(state) - return self.torch.norm(state, p=order) + return self.np.trace(state) + return self.np.norm(state, p=order) def calculate_eigenvalues(self, matrix, k=6): - return self.torch.linalg.eigvalsh(matrix) # pylint: disable=not-callable + return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable def calculate_eigenvectors(self, matrix, k=6): - return self.torch.linalg.eigh(matrix) # pylint: disable=not-callable + return self.np.linalg.eigh(matrix) # pylint: disable=not-callable def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): if eigenvectors is None or self.issparse(matrix): - return self.torch.linalg.matrix_exp( # pylint: disable=not-callable + return self.np.linalg.matrix_exp( # pylint: disable=not-callable -1j * a * matrix ) return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) def calculate_expectation_state(self, hamiltonian, state, normalize): state = self.cast(state) - statec = self.torch.conj(state) + statec = self.np.conj(state) hstate = self.cast(hamiltonian @ state) - ev = self.torch.real(self.torch.sum(statec * hstate)) + ev = self.np.real(self.np.sum(statec * hstate)) if normalize: - ev = ev / self.torch.sum(self.torch.square(self.torch.abs(state))) + ev = ev / self.np.sum(self.np.square(self.np.abs(state))) return ev def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if 
self.issparse(matrix1) or self.issparse(matrix2): - return self.torch.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 - return self.torch.matmul(matrix1, matrix2) + return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 + return self.np.matmul(matrix1, matrix2) def calculate_hamiltonian_state_product(self, matrix, state): - return self.torch.matmul(matrix, state) + return self.np.matmul(matrix, state) def calculate_overlap(self, state1, state2): - return self.torch.abs( - self.torch.sum(self.torch.conj(self.cast(state1)) * self.cast(state2)) + return self.np.abs( + self.np.sum(self.np.conj(self.cast(state1)) * self.cast(state2)) ) def calculate_overlap_density_matrix(self, state1, state2): - return self.torch.trace( - self.torch.matmul(self.torch.conj(self.cast(state1)).T, self.cast(state2)) + return self.np.trace( + self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) ) def apply_gate_density_matrix(self, gate, state, nqubits): state = self.cast(state) - state = self.torch.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: - matrix = self.torch.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - matrixc = self.torch.conj(matrix) + matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) + matrixc = self.np.conj(matrix) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol n = 2**ncontrol order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) state = state.permute(*order) - state = self.torch.reshape(state, 2 * (n,) + 2 * nactive * (2,)) + state = self.np.reshape(state, 2 * (n,) + 2 * nactive * (2,)) leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( targets, nactive ) state01 = state[: n - 1, n - 1] - state01 = self.torch.einsum(rightc, state01, matrixc) + state01 = self.np.einsum(rightc, state01, matrixc) state10 = state[n - 1, : n - 1] - state10 = self.torch.einsum(leftc, state10, matrix) + state10 = self.np.einsum(leftc, state10, matrix) left, right = einsum_utils.apply_gate_density_matrix_string( targets, nactive ) state11 = state[n - 1, n - 1] - state11 = self.torch.einsum(right, state11, matrixc) - state11 = self.torch.einsum(left, state11, matrix) + state11 = self.np.einsum(right, state11, matrixc) + state11 = self.np.einsum(left, state11, matrix) state00 = state[range(n - 1)] state00 = state00[:, range(n - 1)] - state01 = self.torch.cat([state00, state01[:, None]], dim=1) - state10 = self.torch.cat([state10, state11[None]], dim=0) - state = self.torch.cat([state01, state10[None]], dim=0) - state = self.torch.reshape(state, 2 * nqubits * (2,)) + state01 = self.np.cat([state00, state01[:, None]], dim=1) + state10 = self.np.cat([state10, state11[None]], dim=0) + state = self.np.cat([state01, state10[None]], dim=0) + state = self.np.reshape(state, 2 * nqubits * (2,)) state = state.permute(*einsum_utils.reverse_order(order)) else: - matrix = self.torch.reshape(matrix, 2 * len(gate.qubits) * (2,)) - matrixc = self.torch.conj(matrix) + matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrixc = self.np.conj(matrix) left, right = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) - state = self.torch.einsum(right, state, matrixc) - state = self.torch.einsum(left, state, matrix) - return self.torch.reshape(state, 2 * (2**nqubits,)) + state = self.np.einsum(right, state, matrixc) + state = self.np.einsum(left, state, matrix) + return self.np.reshape(state, 2 * 
(2**nqubits,)) def partial_trace(self, state, qubits, nqubits): state = self.cast(state) - state = self.torch.reshape(state, nqubits * (2,)) + state = self.np.reshape(state, nqubits * (2,)) axes = 2 * [list(qubits)] - rho = self.torch.tensordot(state, self.torch.conj(state), dims=axes) + rho = self.np.tensordot(state, self.np.conj(state), dims=axes) shape = 2 * (2 ** (nqubits - len(qubits)),) - return self.torch.reshape(rho, shape) + return self.np.reshape(rho, shape) def partial_trace_density_matrix(self, state, qubits, nqubits): state = self.cast(state) - state = self.torch.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) order = list(sorted(qubits)) order += [i for i in range(nqubits) if i not in qubits] order += [i + nqubits for i in order] state = state.permute(*order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.torch.reshape(state, shape) - return self.torch.einsum("abac->bc", state) + state = self.np.reshape(state, shape) + return self.np.einsum("abac->bc", state) def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): - state = self.torch.unsqueeze(state, dim=q) + state = self.np.unsqueeze(state, dim=q) if r: - state = self.torch.cat([self.torch.zeros_like(state), state], dim=q) + state = self.np.cat([self.np.zeros_like(state), state], dim=q) else: - state = self.torch.cat([state, self.torch.zeros_like(state)], dim=q) + state = self.np.cat([state, self.np.zeros_like(state)], dim=q) return state def collapse_state(self, state, qubits, shot, nqubits, normalize=True): state = self.cast(state) shape = state.shape binshot = self.samples_to_binary(shot, len(qubits))[0] - state = self.torch.reshape(state, nqubits * (2,)) + state = self.np.reshape(state, nqubits * (2,)) order = list(qubits) + [q for q in range(nqubits) if q not in qubits] state = state.permute(*order) subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) - state = self.torch.reshape(state, subshape)[int(shot)] + state = self.np.reshape(state, subshape)[int(shot)] if normalize: - norm = self.torch.sqrt(self.torch.sum(self.torch.abs(state) ** 2)) + norm = self.np.sqrt(self.np.sum(self.np.abs(state) ** 2)) state = state / norm state = self._append_zeros(state, qubits, binshot) - return self.torch.reshape(state, shape) + return self.np.reshape(state, shape) def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): state = self.cast(state) @@ -475,17 +347,17 @@ def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): order = list(qubits) + [q + nqubits for q in qubits] order.extend(q for q in range(nqubits) if q not in qubits) order.extend(q + nqubits for q in range(nqubits) if q not in qubits) - state = self.torch.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) state = state.permute(*order) subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) - state = self.torch.reshape(state, subshape)[int(shot), int(shot)] + state = self.np.reshape(state, subshape)[int(shot), int(shot)] n = 2 ** (len(state.shape) // 2) if normalize: - norm = self.torch.trace(self.torch.reshape(state, (n, n))) + norm = self.np.trace(self.np.reshape(state, (n, n))) state = state / norm qubits = qubits + [q + nqubits for q in qubits] state = self._append_zeros(state, qubits, 2 * binshot) - return self.torch.reshape(state, shape) + return self.np.reshape(state, shape) def reset_error_density_matrix(self, gate, state, 
nqubits): from qibo.gates import X @@ -495,13 +367,13 @@ def reset_error_density_matrix(self, gate, state, nqubits): q = gate.target_qubits[0] p_0, p_1 = gate.init_kwargs["p_0"], gate.init_kwargs["p_1"] trace = self.partial_trace_density_matrix(state, (q,), nqubits) - trace = self.torch.reshape(trace, 2 * (nqubits - 1) * (2,)) + trace = self.np.reshape(trace, 2 * (nqubits - 1) * (2,)) zero = self.zero_density_matrix(1) - zero = self.torch.tensordot(trace, zero, dims=0) + zero = self.np.tensordot(trace, zero, dims=0) order = list(range(2 * nqubits - 2)) order.insert(q, 2 * nqubits - 2) order.insert(q + nqubits, 2 * nqubits - 1) - zero = self.torch.reshape(zero.permute(*order), shape) + zero = self.np.reshape(zero.permute(*order), shape) state = (1 - p_0 - p_1) * state + p_0 * zero return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) @@ -509,10 +381,10 @@ def thermal_error_density_matrix(self, gate, state, nqubits): state = self.cast(state) shape = state.shape state = self.apply_gate(gate, state.view(-1), 2 * nqubits) - return self.torch.reshape(state, shape) + return self.np.reshape(state, shape) def identity_density_matrix(self, nqubits, normalize: bool = True): - state = self.torch.eye(2**nqubits, dtype=self.torch.complex128) + state = self.np.eye(2**nqubits, dtype=self.np.complex128) if normalize is True: state /= 2**nqubits return state @@ -523,10 +395,10 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): q = gate.target_qubits lam = gate.init_kwargs["lam"] trace = self.partial_trace_density_matrix(state, q, nqubits) - trace = self.torch.reshape(trace, 2 * (nqubits - len(q)) * (2,)) + trace = self.np.reshape(trace, 2 * (nqubits - len(q)) * (2,)) identity = self.identity_density_matrix(len(q)) - identity = self.torch.reshape(identity, 2 * len(q) * (2,)) - identity = self.torch.tensordot(trace, identity, dims=0) + identity = self.np.reshape(identity, 2 * len(q) * (2,)) + identity = self.np.tensordot(trace, identity, dims=0) qubits = list(range(nqubits)) for j in q: qubits.pop(qubits.index(j)) @@ -544,7 +416,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): qj = [qj[qubits.index(i)] for i in range(len(qubits))] order += qj identity = identity.permute(*order) - identity = self.torch.reshape(identity, shape) + identity = self.np.reshape(identity, shape) state = (1 - lam) * state + lam * identity return state From 6e100266388c3a98447acf571e44554b26d10212 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 23 Feb 2024 15:50:17 +0400 Subject: [PATCH 038/127] solved tests and removed duplicate functions --- src/qibo/backends/numpy.py | 17 ++++++++--------- src/qibo/backends/pytorch.py | 22 ++++++++++++---------- src/qibo/hamiltonians/hamiltonians.py | 1 + src/qibo/solvers.py | 2 +- tests/test_backends.py | 2 +- tests/test_gates_gates.py | 16 ++++++++-------- 6 files changed, 31 insertions(+), 29 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index fec993bdb4..81711c0bbe 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -93,7 +93,7 @@ def identity_density_matrix(self, nqubits, normalize: bool = True): def plus_state(self, nqubits): state = self.np.ones(2**nqubits, dtype=self.dtype) - state /= self.np.sqrt(2**nqubits) + state /= self.np.sqrt(self.cast(2**nqubits)) return state def plus_density_matrix(self, nqubits): @@ -114,7 +114,7 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = np.eye(2**rank, 
dtype=self.dtype) + matrix = np.eye(2**rank, dtype=np.complex128) for gate in fgate.gates: # transfer gate matrix to numpy as it is more efficient for # small tensor calculations @@ -122,7 +122,7 @@ def matrix_fused(self, fgate): gmatrix = self.to_numpy(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=self.dtype) + eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=np.complex128) gmatrix = np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate @@ -137,7 +137,7 @@ def matrix_fused(self, fgate): gmatrix = np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return matrix + return self.cast(matrix) def control_matrix(self, gate): if len(gate.control_qubits) > 1: @@ -530,7 +530,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): if circuit.density_matrix: # this implies also it has_collapse assert circuit.has_collapse - final_state = self.np.mean(self.to_numpy(final_states), 0) + final_state = np.mean(self.to_numpy(final_states), 0) if circuit.measurements: qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( @@ -741,10 +741,9 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): else: from scipy.linalg import expm return expm(-1j * a * matrix) - else: - expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors)) - return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) + expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) + ud = self.np.transpose(self.np.conj(eigenvectors)) + return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): statec = self.np.conj(state) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 0f3f781046..e34f7d290c 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,4 +1,3 @@ -import collections from typing import Union import numpy as np @@ -58,6 +57,9 @@ def __init__(self): def set_device(self, device): # pragma: no cover self.device = device + def set_seed(self, seed): + self.np.manual_seed(seed) + def cast( self, x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], @@ -125,6 +127,8 @@ def issparse(self, x): return super().issparse(x) def to_numpy(self, x): + if isinstance(x, list): + return np.asarray([self.to_numpy(i) for i in x]) if isinstance(x, self.np.Tensor): return x.numpy(force=True) return x @@ -140,10 +144,6 @@ def matrix_parametrized(self, gate): npmatrix = super().matrix_parametrized(gate) return self.np.tensor(npmatrix, dtype=self.dtype) - def matrix_fused(self, gate): - npmatrix = super().matrix_fused(gate) - return self.np.tensor(npmatrix, dtype=self.dtype) - def sample_shots(self, probabilities, nshots): return self.np.multinomial( self.cast(probabilities, dtype="float"), nshots, replacement=True @@ -185,10 +185,10 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): probs = self.np.reshape(probs, len(qubits) * (2,)) return self._order_probabilities(probs, qubits, nqubits).view(-1) - def calculate_frequencies(self, samples): - res, counts = self.np.unique(samples, return_counts=True) - res, counts = res.tolist(), counts.tolist() - return 
collections.Counter({k: v for k, v in zip(res, counts)}) + # def calculate_frequencies(self, samples): + # res, counts = self.np.unique(samples, return_counts=True) + # res, counts = res.tolist(), counts.tolist() + # return collections.Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): frequencies = self.cast(frequencies, dtype="int") @@ -220,7 +220,9 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.np.linalg.matrix_exp( # pylint: disable=not-callable -1j * a * matrix ) - return super().calculate_matrix_exp(a, matrix, eigenvectors, eigenvalues) + expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) + ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) + return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): state = self.cast(state) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 71f71f49ed..179895f751 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -243,6 +243,7 @@ def __mul__(self, o): ) new_matrix = self.matrix * o r = self.__class__(self.nqubits, new_matrix, backend=self.backend) + o = self.backend.cast(o) if self._eigenvalues is not None: if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues diff --git a/src/qibo/solvers.py b/src/qibo/solvers.py index 8a24ff3c3d..69ab0d3622 100644 --- a/src/qibo/solvers.py +++ b/src/qibo/solvers.py @@ -74,7 +74,7 @@ class Exponential(BaseSolver): def __call__(self, state): propagator = self.current_hamiltonian.exp(self.dt) self.t += self.dt - return (propagator @ state[:, self.backend.np.newaxis])[:, 0] + return (propagator @ state[:, None])[:, 0] class RungeKutta4(BaseSolver): diff --git a/tests/test_backends.py b/tests/test_backends.py index a4714bd378..61ed409f49 100644 --- a/tests/test_backends.py +++ b/tests/test_backends.py @@ -108,7 +108,7 @@ def test_control_matrix_unitary(backend): u = np.random.random((2, 2)) gate = gates.Unitary(u, 0).controlled_by(1) matrix = backend.control_matrix(gate) - target_matrix = np.eye(4, dtype=backend.dtype) + target_matrix = np.eye(4, dtype=np.complex128) target_matrix[2:, 2:] = u backend.assert_allclose(matrix, target_matrix) diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index 09560f4d20..f2d5f1db67 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -753,7 +753,7 @@ def test_fswap(backend): [0, 1, 0, 0], [0, 0, 0, -1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -815,7 +815,7 @@ def test_sycamore(backend): [0, -1j, 0, 0], [0, 0, 0, np.exp(-1j * np.pi / 6)], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -955,7 +955,7 @@ def test_rzx(backend): [0, 0, cos, 1j * sin], [0, 0, 1j * sin, cos], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -996,7 +996,7 @@ def test_rxxyy(backend): [0, -1j * sin, cos, 0], [0, 0, 0, 1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -1081,7 +1081,7 @@ def test_givens(backend): [0, np.sin(theta), np.cos(theta), 0], [0, 0, 0, 
1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1121,7 +1121,7 @@ def test_rbs(backend): [0, -np.sin(theta), np.cos(theta), 0], [0, 0, 0, 1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1160,7 +1160,7 @@ def test_ecr(backend): [1, -1j, 0, 0], [-1j, 1, 0, 0], ], - dtype=backend.dtype, + dtype=np.complex128, ) / np.sqrt(2) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1224,7 +1224,7 @@ def test_deutsch(backend): [0, 0, 0, 0, 0, 0, 1j * cos, sin], [0, 0, 0, 0, 0, 0, sin, 1j * cos], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) From a52b25cdf905fecc6497574bd6935977937fdcf2 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 23 Feb 2024 16:28:49 +0400 Subject: [PATCH 039/127] solved errors --- src/qibo/backends/pytorch.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index e34f7d290c..530bd36383 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,3 +1,4 @@ +from collections import Counter from typing import Union import numpy as np @@ -134,7 +135,8 @@ def to_numpy(self, x): return x def compile(self, func): - return self.np.jit.script(func) + return func + # return self.np.jit.script(func) def matrix(self, gate): npmatrix = super().matrix(gate) @@ -149,7 +151,14 @@ def sample_shots(self, probabilities, nshots): self.cast(probabilities, dtype="float"), nshots, replacement=True ) + def samples_to_decimal(self, samples, nqubits): + samples = self.cast(samples, dtype="int32") + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) + qrange = (2**qrange).unsqueeze(1) + return self.np.matmul(samples, qrange).squeeze(1) + def samples_to_binary(self, samples, nqubits): + samples = self.cast(samples, dtype="int32") qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) samples = samples.int() samples = samples[:, None] >> qrange @@ -172,7 +181,7 @@ def calculate_probabilities(self, state, qubits, nqubits): unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) - return self._order_probabilities(probs, qubits, nqubits).view(-1) + return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order = tuple(sorted(qubits)) @@ -185,10 +194,10 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): probs = self.np.reshape(probs, len(qubits) * (2,)) return self._order_probabilities(probs, qubits, nqubits).view(-1) - # def calculate_frequencies(self, samples): - # res, counts = self.np.unique(samples, return_counts=True) - # res, counts = res.tolist(), counts.tolist() - # return collections.Counter({k: v for k, v in zip(res, counts)}) + def calculate_frequencies(self, samples): + res, counts = self.np.unique(samples, return_counts=True) + res, counts = res.tolist(), counts.tolist() + return Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): frequencies = self.cast(frequencies, dtype="int") From fad9c6281d921a40e257eecc8a22455535c22261 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 24 Feb 2024 09:27:35 +0400 Subject: [PATCH 040/127] disable pylint --- 
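The samples_to_decimal helper added in PATCH 039 above converts binary shot samples to integers through a matrix product with powers of two. A small standalone check of that conversion, with illustrative sample values:

import torch

samples = torch.tensor([[1, 0, 1], [0, 1, 1]], dtype=torch.int32)  # two 3-qubit bitstrings
qrange = torch.arange(3 - 1, -1, -1, dtype=torch.int32)            # [2, 1, 0]
weights = (2**qrange).unsqueeze(1)                                 # column vector [4, 2, 1]
print(torch.matmul(samples, weights).squeeze(1))                   # tensor([5, 3], dtype=torch.int32)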
src/qibo/backends/pytorch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 530bd36383..c9b843f9dd 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -180,7 +180,7 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) + probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) # pylint: diable=E1123 return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -230,7 +230,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): -1j * a * matrix ) expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) + ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) # pylint: diable=E1121 return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): From 46d322f06c74b6002e60823344fc49fdcba99bb4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:28:02 +0000 Subject: [PATCH 041/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/qibo/backends/pytorch.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index c9b843f9dd..ba950ccae3 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -180,7 +180,9 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) # pylint: diable=E1123 + probs = self.np.sum( + state.type(rtype), dim=unmeasured_qubits + ) # pylint: diable=E1123 return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): From 4b2da009a1ec612a7f826c0455e3777b93543907 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 24 Feb 2024 09:33:51 +0400 Subject: [PATCH 042/127] fix spelling error --- src/qibo/backends/pytorch.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index ba950ccae3..f3d5ef7b88 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -180,9 +180,7 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum( - state.type(rtype), dim=unmeasured_qubits - ) # pylint: diable=E1123 + probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) # pylint: disable=E1123 return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -232,7 +230,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): -1j * a * matrix ) expd = 
self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) # pylint: diable=E1121 + ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) # pylint: disable=E1121 return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): From b9c573df9f12737749ae27af3e5af8e604430fb5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:34:18 +0000 Subject: [PATCH 043/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/qibo/backends/pytorch.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index f3d5ef7b88..138754a970 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -180,7 +180,9 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) # pylint: disable=E1123 + probs = self.np.sum( + state.type(rtype), dim=unmeasured_qubits + ) # pylint: disable=E1123 return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -230,7 +232,9 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): -1j * a * matrix ) expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors), 0, 1) # pylint: disable=E1121 + ud = self.np.transpose( + self.np.conj(eigenvectors), 0, 1 + ) # pylint: disable=E1121 return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): From 4e2835dee7f078972c3fb8d850167022e464524c Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 24 Feb 2024 09:53:00 +0400 Subject: [PATCH 044/127] `pylint` suggestions --- src/qibo/backends/pytorch.py | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 138754a970..50e5f8be4b 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,3 +1,5 @@ +"""PyTorch backend.""" + from collections import Counter from typing import Union @@ -8,7 +10,6 @@ from qibo.backends import einsum_utils from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend -from qibo.result import CircuitResult, MeasurementOutcomes, QuantumState torch_dtype_dict = { "int": torch.int32, @@ -24,6 +25,7 @@ class TorchMatrices(NumpyMatrices): + """Matrix representation of every gate as a torch Tensor.""" def __init__(self, dtype): super().__init__(dtype) @@ -68,7 +70,9 @@ def cast( copy: bool = False, ): """Casts input as a Torch tensor of the specified dtype. - This method supports casting of single tensors or lists of tensors as for the Tensoflow backend. + + This method supports casting of single tensors or lists of tensors + as for the :class:`qibo.backends.PyTorchBackend`. 
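The calculate_matrix_exp path reworked in these patches rebuilds exp(-1j * a * matrix) from a precomputed eigendecomposition as eigenvectors @ diag(exp(-1j * a * eigenvalues)) @ eigenvectors.conj().T. A standalone cross-check of that identity against torch.linalg.matrix_exp, on an illustrative 2x2 Hermitian matrix:

import torch

a = 0.7
ham = torch.tensor([[1.0, 0.5j], [-0.5j, -1.0]], dtype=torch.complex128)  # Hermitian
evals, evecs = torch.linalg.eigh(ham)
expd = torch.diag(torch.exp(-1j * a * evals))   # diag(exp(-1j * a * eigenvalues))
ud = torch.conj(evecs).transpose(0, 1)          # conjugate transpose of the eigenvector matrix
via_eigh = evecs @ expd @ ud
direct = torch.linalg.matrix_exp(-1j * a * ham)
print(torch.allclose(via_eigh, direct))         # True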
Args: x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): @@ -180,9 +184,7 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum( - state.type(rtype), dim=unmeasured_qubits - ) # pylint: disable=E1123 + probs = self.np.sum(state.type(rtype), unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -199,7 +201,7 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): def calculate_frequencies(self, samples): res, counts = self.np.unique(samples, return_counts=True) res, counts = res.tolist(), counts.tolist() - return Counter({k: v for k, v in zip(res, counts)}) + return Counter(zip(res, counts)) def update_frequencies(self, frequencies, probabilities, nsamples): frequencies = self.cast(frequencies, dtype="int") @@ -232,9 +234,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): -1j * a * matrix ) expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose( - self.np.conj(eigenvectors), 0, 1 - ) # pylint: disable=E1121 + ud = self.np.transpose(self.np.conj(eigenvectors), dim0=0, dim1=1) return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): @@ -375,7 +375,7 @@ def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): return self.np.reshape(state, shape) def reset_error_density_matrix(self, gate, state, nqubits): - from qibo.gates import X + from qibo.gates import X # pylint: disable=C0415 state = self.cast(state) shape = state.shape @@ -443,17 +443,18 @@ def test_regressions(self, name): [4, 0, 0, 1, 0, 0, 0, 4, 4, 0], [4, 0, 0, 0, 0, 0, 0, 4, 4, 0], ] - elif name == "test_probabilistic_measurement": + + if name == "test_probabilistic_measurement": if "cuda" in self.device: # pragma: no cover return {0: 273, 1: 233, 2: 242, 3: 252} - else: - return {0: 271, 1: 239, 2: 242, 3: 248} - elif name == "test_unbalanced_probabilistic_measurement": + return {0: 271, 1: 239, 2: 242, 3: 248} + + if name == "test_unbalanced_probabilistic_measurement": if "cuda" in self.device: # pragma: no cover return {0: 196, 1: 153, 2: 156, 3: 495} - else: - return {0: 168, 1: 188, 2: 154, 3: 490} - elif name == "test_post_measurement_bitflips_on_circuit": + return {0: 168, 1: 188, 2: 154, 3: 490} + + if name == "test_post_measurement_bitflips_on_circuit": return [ {5: 30}, {5: 16, 7: 10, 6: 2, 3: 1, 4: 1}, From c9c7bf404d8147a13e860c1f8663e07188e2d466 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:53:26 +0000 Subject: [PATCH 045/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/qibo/backends/pytorch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 50e5f8be4b..3df847c6b4 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -70,8 +70,8 @@ def cast( copy: bool = False, ): """Casts input as a Torch tensor of the specified dtype. 
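calculate_frequencies above builds its histogram from torch.unique with return_counts=True. A standalone sketch of turning decimal shot outcomes into a Counter in that style, with illustrative values and keyed by outcome as in the dict-comprehension form used earlier in the series:

import torch
from collections import Counter

samples = torch.tensor([3, 0, 3, 1, 3, 0])      # decimal shot outcomes
res, counts = torch.unique(samples, return_counts=True)
freqs = Counter({k: v for k, v in zip(res.tolist(), counts.tolist())})
print(freqs)                                    # Counter({3: 3, 0: 2, 1: 1})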
- - This method supports casting of single tensors or lists of tensors + + This method supports casting of single tensors or lists of tensors as for the :class:`qibo.backends.PyTorchBackend`. Args: From 8fe4bf2f2ffc886831ee9329c6d30c11c7c0bcd9 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Mon, 26 Feb 2024 17:49:38 +0400 Subject: [PATCH 046/127] solved errors on gates --- src/qibo/backends/pytorch.py | 4 +--- tests/test_gates_gates.py | 13 +++++++++---- tests/test_measurements.py | 5 ++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 3df847c6b4..9bc1909a85 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -85,11 +85,9 @@ def cast( """ if dtype is None: dtype = self.dtype - elif isinstance(dtype, self.np.dtype): - dtype = dtype elif isinstance(dtype, type): dtype = torch_dtype_dict[dtype.__name__] - else: + elif not isinstance(dtype, torch.dtype): dtype = torch_dtype_dict[str(dtype)] if isinstance(x, self.np.Tensor): diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index f2d5f1db67..77eda97cc0 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -436,9 +436,10 @@ def test_u3(backend, seed_state, seed_observable): backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ backend.cast(target_state), ) - assert gates.U3(0, theta, phi, lam).qasm_label == "u3" assert not gates.U3(0, theta, phi, lam).clifford assert gates.U3(0, theta, phi, lam).unitary @@ -527,7 +528,9 @@ def test_cy(backend, controlled_by, seed_state, seed_observable): backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ backend.cast(target_state), ) assert gates.CY(0, 1).qasm_label == "cy" @@ -571,7 +574,9 @@ def test_cz(backend, controlled_by, seed_state, seed_observable): backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ backend.cast(target_state), ) assert gates.CZ(0, 1).qasm_label == "cz" diff --git a/tests/test_measurements.py b/tests/test_measurements.py index 07ba9d9631..51e5bae07b 100644 --- a/tests/test_measurements.py +++ b/tests/test_measurements.py @@ -76,8 +76,11 @@ def test_measurement_gate(backend, n, nshots): def test_multiple_qubit_measurement_gate(backend): c = models.Circuit(2) c.add(gates.X(0)) - c.add(gates.M(0, 1)) + measure = c.add(gates.M(0, 1)) result = backend.execute_circuit(c, nshots=100) + print(result.frequencies()) + print(result.probabilities()) + # print(measure.samples()) target_binary_samples = np.zeros((100, 2)) target_binary_samples[:, 0] = 1 assert_result( From 225903e28bc5601df3e460b50ee897565ff2fcec Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Mon, 26 Feb 2024 19:09:21 +0400 Subject: [PATCH 047/127] rewrite fix --- src/qibo/quantum_info/entropies.py | 73 +++++------------------ src/qibo/quantum_info/quantum_networks.py | 4 +- src/qibo/quantum_info/random_ensembles.py | 6 +- tests/test_quantum_info_random.py | 33 ++++------ 4 files changed, 32 
insertions(+), 84 deletions(-) diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py index 6eab69848a..3d4e7ac027 100644 --- a/src/qibo/quantum_info/entropies.py +++ b/src/qibo/quantum_info/entropies.py @@ -54,20 +54,14 @@ def shannon_entropy(prob_dist, base: float = 2, backend=None): "All elements of the probability array must be between 0. and 1..", ) - total_sum = ( - backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) - ) + total_sum = backend.np.sum(prob_dist) if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") log_prob = np.where(prob_dist != 0, np.log2(prob_dist) / np.log2(base), 0.0) - shan_entropy = ( - -backend.torch.sum(prob_dist * log_prob) - if backend.name == "pytorch" - else -np.sum(prob_dist * log_prob) - ) + shan_entropy = -backend.np.sum(prob_dist * log_prob) # absolute value if entropy == 0.0 to avoid returning -0.0 shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy @@ -127,16 +121,10 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen ValueError, "All elements of the probability array must be between 0. and 1..", ) - total_sum_p = ( - backend.torch.sum(prob_dist_p) - if backend.name == "pytorch" - else np.sum(prob_dist_p) - ) - total_sum_q = ( - backend.torch.sum(prob_dist_q) - if backend.name == "pytorch" - else np.sum(prob_dist_q) - ) + total_sum_p = backend.np.sum(prob_dist_p) + + total_sum_q = backend.np.sum(prob_dist_q) + if np.abs(total_sum_p - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") @@ -151,11 +139,7 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen log_prob = np.where(prob_dist_p != 0.0, log_prob_q, 0.0) - relative = ( - backend.torch.sum(prob_dist_p * log_prob) - if backend.name == "pytorch" - else np.sum(prob_dist_p * log_prob) - ) + relative = backend.np.sum(prob_dist_p * log_prob) return entropy_p - relative @@ -228,9 +212,7 @@ def classical_renyi_entropy( "All elements of the probability array must be between 0. and 1..", ) - total_sum = ( - backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) - ) + total_sum = backend.np.sum(prob_dist) if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") @@ -244,11 +226,7 @@ def classical_renyi_entropy( if alpha == np.inf: return -1 * np.log2(max(prob_dist)) / np.log2(base) - total_sum = ( - backend.torch.sum(prob_dist**alpha) - if backend.name == "pytorch" - else np.sum(prob_dist**alpha) - ) + total_sum = backend.np.sum(prob_dist**alpha) renyi_ent = (1 / (1 - alpha)) * np.log2(total_sum) / np.log2(base) @@ -332,16 +310,8 @@ def classical_relative_renyi_entropy( "All elements of the probability array must be between 0. 
and 1..", ) - total_sum_p = ( - backend.torch.sum(prob_dist_p) - if backend.name == "pytorch" - else np.sum(prob_dist_p) - ) - total_sum_q = ( - backend.torch.sum(prob_dist_q) - if backend.name == "pytorch" - else np.sum(prob_dist_q) - ) + total_sum_p = backend.np.sum(prob_dist_p) + total_sum_q = backend.np.sum(prob_dist_q) if np.abs(total_sum_p - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") @@ -351,11 +321,8 @@ def classical_relative_renyi_entropy( if alpha == 0.5: total_sum = np.sqrt(prob_dist_p * prob_dist_q) - total_sum = ( - backend.torch.sum(total_sum) - if backend.name == "pytorch" - else np.sum(total_sum) - ) + total_sum = backend.np.sum(total_sum) + return -2 * np.log2(total_sum) / np.log2(base) if alpha == 1.0: @@ -369,11 +336,7 @@ def classical_relative_renyi_entropy( prob_p = prob_dist_p**alpha prob_q = prob_dist_q ** (1 - alpha) - total_sum = ( - backend.torch.sum(prob_p * prob_q) - if backend.name == "pytorch" - else np.sum(prob_p * prob_q) - ) + total_sum = backend.np.sum(prob_p * prob_q) return (1 / (alpha - 1)) * np.log2(total_sum) / np.log2(base) @@ -431,9 +394,7 @@ def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend= "All elements of the probability array must be between 0. and 1..", ) - total_sum = ( - backend.torch.sum(prob_dist) if backend.name == "pytorch" else np.sum(prob_dist) - ) + total_sum = backend.np.sum(prob_dist) if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") @@ -442,9 +403,7 @@ def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend= return shannon_entropy(prob_dist, base=base, backend=backend) total_sum = prob_dist**alpha - total_sum = ( - backend.torch.sum(total_sum) if backend.name == "pytorch" else np.sum(total_sum) - ) + total_sum = backend.np.sum(total_sum) return (1 / (1 - alpha)) * (total_sum - 1) diff --git a/src/qibo/quantum_info/quantum_networks.py b/src/qibo/quantum_info/quantum_networks.py index d1dd92f1be..e7bf52e0ad 100644 --- a/src/qibo/quantum_info/quantum_networks.py +++ b/src/qibo/quantum_info/quantum_networks.py @@ -633,9 +633,7 @@ def _set_tensor_and_parameters(self): """Sets tensor based on inputs.""" self._backend = _check_backend(self._backend) - self._einsum = ( - self._backend.torch.einsum if self._backend.name == "pytorch" else np.einsum - ) + self._einsum = self._backend.np.einsum if isinstance(self.partition, list): self.partition = tuple(self.partition) diff --git a/src/qibo/quantum_info/random_ensembles.py b/src/qibo/quantum_info/random_ensembles.py index 915c929994..b246a07137 100644 --- a/src/qibo/quantum_info/random_ensembles.py +++ b/src/qibo/quantum_info/random_ensembles.py @@ -1178,14 +1178,12 @@ def _super_op_from_bcsz_measure(dims: int, rank: int, order: str, seed, backend) for eigenvalue, eigenvector in zip(eigenvalues, np.transpose(eigenvectors)): operator += eigenvalue * np.outer(eigenvector, np.conj(eigenvector)) - kron = backend.torch.kron if backend.name == "pytorch" else np.kron - if order == "row": - operator = kron( + operator = backend.np.kron( backend.identity_density_matrix(nqubits, normalize=False), operator ) if order == "column": - operator = kron( + operator = backend.np.kron( operator, backend.identity_density_matrix(nqubits, normalize=False) ) diff --git a/tests/test_quantum_info_random.py b/tests/test_quantum_info_random.py index 645df2eb43..27c250765e 100644 --- a/tests/test_quantum_info_random.py +++ b/tests/test_quantum_info_random.py @@ -57,9 
+57,7 @@ def test_uniform_sampling_U3(backend, seed): ) expectation_values = backend.cast(expectation_values) - mean_function = backend.torch.mean if backend.name == "pytorch" else np.mean - - expectation_values = mean_function(expectation_values, axis=0) + expectation_values = backend.np.mean(expectation_values, axis=0) backend.assert_allclose(expectation_values[0], expectation_values[1], atol=1e-1) backend.assert_allclose(expectation_values[0], expectation_values[2], atol=1e-1) @@ -176,7 +174,7 @@ def test_random_unitary(backend, measure): matrix = random_unitary(dims, measure=measure, backend=backend) matrix_dagger = np.transpose(np.conj(matrix)) matrix_inv = ( - backend.torch.inverse(matrix) + backend.np.inverse(matrix) if backend.name == "pytorch" else np.linalg.inv(matrix) ) @@ -464,9 +462,8 @@ def test_random_pauli( ) else: matrix = np.transpose(matrix, (1, 0, 2, 3)) - kron = backend.torch.kron if backend.name == "pytorch" else np.kron - matrix = [reduce(kron, row) for row in matrix] - dot = backend.torch.matmul if backend.name == "pytorch" else np.dot + matrix = [reduce(backend.np.kron, row) for row in matrix] + dot = backend.np.matmul if backend.name == "pytorch" else np.dot matrix = reduce(dot, matrix) if subset is None: @@ -558,13 +555,10 @@ def test_random_stochastic_matrix(backend): dims = 4 random_stochastic_matrix(dims, seed=0.1, backend=backend) - sum_function = backend.torch.sum if backend.name == "pytorch" else np.sum - diag = backend.torch.diag if backend.name == "pytorch" else np.diag - # tests if matrix is row-stochastic dims = 4 matrix = random_stochastic_matrix(dims, backend=backend) - sum_rows = sum_function(matrix, axis=1) + sum_rows = backend.np.sum(matrix, axis=1) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -575,19 +569,18 @@ def test_random_stochastic_matrix(backend): dims, diagonally_dominant=True, max_iterations=1000, backend=backend ) - sum_function = backend.torch.sum if backend.name == "pytorch" else np.sum - sum_rows = sum_function(matrix, axis=1) + sum_rows = backend.np.sum(matrix, axis=1) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) - backend.assert_allclose(all(2 * diag(matrix) - sum_rows > 0), True) + backend.assert_allclose(all(2 * backend.np.diag(matrix) - sum_rows > 0), True) # tests if matrix is bistochastic dims = 4 matrix = random_stochastic_matrix(dims, bistochastic=True, backend=backend) - sum_rows = sum_function(matrix, axis=1) - column_rows = sum_function(matrix, axis=0) + sum_rows = backend.np.sum(matrix, axis=1) + column_rows = backend.np.sum(matrix, axis=0) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -604,8 +597,8 @@ def test_random_stochastic_matrix(backend): max_iterations=1000, backend=backend, ) - sum_rows = sum_function(matrix, axis=1) - column_rows = sum_function(matrix, axis=0) + sum_rows = backend.np.sum(matrix, axis=1) + column_rows = backend.np.sum(matrix, axis=0) backend.assert_allclose(all(sum_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(sum_rows > 1 - PRECISION_TOL), True) @@ -613,8 +606,8 @@ def test_random_stochastic_matrix(backend): backend.assert_allclose(all(column_rows < 1 + PRECISION_TOL), True) backend.assert_allclose(all(column_rows > 1 - PRECISION_TOL), True) - backend.assert_allclose(all(2 * diag(matrix) - sum_rows > 0), True) - 
backend.assert_allclose(all(2 * diag(matrix) - column_rows > 0), True) + backend.assert_allclose(all(2 * backend.np.diag(matrix) - sum_rows > 0), True) + backend.assert_allclose(all(2 * backend.np.diag(matrix) - column_rows > 0), True) # tests warning for max_iterations dims = 4 From e13c38243027b2c87b63936d0a81bcce6293679f Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 27 Feb 2024 17:19:11 +0400 Subject: [PATCH 048/127] solved errors on measurements --- src/qibo/backends/numpy.py | 17 +++++----- src/qibo/backends/pytorch.py | 40 +++++++++--------------- src/qibo/result.py | 4 +-- tests/test_measurements.py | 3 -- tests/test_measurements_probabilistic.py | 6 ++-- 5 files changed, 28 insertions(+), 42 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 81711c0bbe..6e5a793fbd 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -524,7 +524,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): sample = result.samples()[0] results.append(sample) if not circuit.density_matrix: - samples.append("".join([str(s) for s in sample])) + samples.append("".join([str(s) for s in self.to_numpy(sample)])) for gate in circuit.measurements: gate.result.reset() @@ -631,7 +631,7 @@ def sample_shots(self, probabilities, nshots): ) def aggregate_shots(self, shots): - return self.np.array(shots, dtype=shots[0].dtype) + return self.cast(shots, dtype=shots[0].dtype) def samples_to_binary(self, samples, nqubits): qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") @@ -643,8 +643,7 @@ def samples_to_decimal(self, samples, nqubits): return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] def calculate_frequencies(self, samples): - res, counts = self.np.unique(samples, return_counts=True) - res, counts = self.np.array(res), self.np.array(counts) + res, counts = np.unique(samples, return_counts=True) return collections.Counter({k: v for k, v in zip(res, counts)}) def update_frequencies(self, frequencies, probabilities, nsamples): @@ -668,10 +667,10 @@ def sample_frequencies(self, probabilities, nshots): ) def apply_bitflips(self, noiseless_samples, bitflip_probabilities): - fprobs = self.np.array(bitflip_probabilities, dtype="float64") - sprobs = self.np.random.random(noiseless_samples.shape) - flip_0 = self.np.array(sprobs < fprobs[0], dtype=noiseless_samples.dtype) - flip_1 = self.np.array(sprobs < fprobs[1], dtype=noiseless_samples.dtype) + fprobs = self.cast(bitflip_probabilities, dtype="float64") + sprobs = self.cast(np.random.random(noiseless_samples.shape), dtype="float64") + flip_0 = self.cast(sprobs < fprobs[0], dtype=noiseless_samples.dtype) + flip_1 = self.cast(sprobs < fprobs[1], dtype=noiseless_samples.dtype) noisy_samples = noiseless_samples + (1 - noiseless_samples) * flip_0 noisy_samples = noisy_samples - noiseless_samples * flip_1 return noisy_samples @@ -755,7 +754,7 @@ def calculate_expectation_state(self, hamiltonian, state, normalize): return ev def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): - ev = self.np.real(self.np.trace(hamiltonian @ state)) + ev = self.np.real(self.np.trace(self.cast(hamiltonian @ state))) if normalize: norm = self.np.real(self.np.trace(state)) ev = ev / norm diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 9bc1909a85..740cf424c7 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -1,6 +1,5 @@ """PyTorch backend.""" -from collections import Counter from typing import Union import numpy 
as np @@ -182,7 +181,10 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = state.real.dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.type(rtype), unmeasured_qubits) + if len(unmeasured_qubits) == 0: + probs = state.type(rtype) + else: + probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).reshape(-1) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -196,20 +198,6 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): probs = self.np.reshape(probs, len(qubits) * (2,)) return self._order_probabilities(probs, qubits, nqubits).view(-1) - def calculate_frequencies(self, samples): - res, counts = self.np.unique(samples, return_counts=True) - res, counts = res.tolist(), counts.tolist() - return Counter(zip(res, counts)) - - def update_frequencies(self, frequencies, probabilities, nsamples): - frequencies = self.cast(frequencies, dtype="int") - samples = self.sample_shots(probabilities, nsamples) - unique_samples, counts = self.np.unique(samples, return_counts=True) - frequencies.index_add_( - 0, self.cast(unique_samples, dtype="int"), self.cast(counts, dtype="int") - ) - return frequencies - def calculate_norm(self, state, order=2): state = self.cast(state) return self.np.norm(state, p=order) @@ -436,25 +424,25 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 0, 0, 4, 4, 0], - [4, 0, 0, 0, 0, 0, 0, 4, 4, 0], + [4, 0, 0, 1, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 2, 1, 1, 4, 0, 0, 4], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 4, 0, 0, 0, 4], ] if name == "test_probabilistic_measurement": - if "cuda" in self.device: # pragma: no cover + if self.device == "cuda": # pragma: no cover return {0: 273, 1: 233, 2: 242, 3: 252} - return {0: 271, 1: 239, 2: 242, 3: 248} + return {1: 270, 2: 248, 3: 244, 0: 238} if name == "test_unbalanced_probabilistic_measurement": - if "cuda" in self.device: # pragma: no cover + if self.device == "cuda": # pragma: no cover return {0: 196, 1: 153, 2: 156, 3: 495} - return {0: 168, 1: 188, 2: 154, 3: 490} + return {3: 492, 2: 176, 0: 168, 1: 164} if name == "test_post_measurement_bitflips_on_circuit": return [ {5: 30}, - {5: 16, 7: 10, 6: 2, 3: 1, 4: 1}, - {3: 6, 5: 6, 7: 5, 2: 4, 4: 3, 0: 2, 1: 2, 6: 2}, + {5: 12, 4: 6, 7: 6, 1: 5, 6: 1}, + {3: 7, 0: 4, 2: 4, 6: 4, 7: 4, 5: 3, 1: 2, 4: 2}, ] diff --git a/src/qibo/result.py b/src/qibo/result.py index 5d74157007..934f156e39 100644 --- a/src/qibo/result.py +++ b/src/qibo/result.py @@ -325,7 +325,7 @@ def samples(self, binary: bool = True, registers: bool = False): qubits = self.measurement_gate.target_qubits if self._samples is None: if self.measurements[0].result.has_samples(): - self._samples = np.concatenate( + self._samples = self.backend.np.concatenate( [gate.result.samples() for gate in self.measurements], axis=1 ) else: @@ -353,7 +353,7 @@ def samples(self, binary: bool = True, registers: bool = False): qubit_map = { q: i for i, q in enumerate(self.measurement_gate.target_qubits) } - self._samples = np.array(samples, dtype="int32") + self._samples = self.backend.cast(samples, "int32") for gate in self.measurements: rqubits = tuple(qubit_map.get(q) for q in 
gate.target_qubits) gate.result.register_samples( diff --git a/tests/test_measurements.py b/tests/test_measurements.py index 51e5bae07b..4d6a33a998 100644 --- a/tests/test_measurements.py +++ b/tests/test_measurements.py @@ -78,9 +78,6 @@ def test_multiple_qubit_measurement_gate(backend): c.add(gates.X(0)) measure = c.add(gates.M(0, 1)) result = backend.execute_circuit(c, nshots=100) - print(result.frequencies()) - print(result.probabilities()) - # print(measure.samples()) target_binary_samples = np.zeros((100, 2)) target_binary_samples[:, 0] = 1 assert_result( diff --git a/tests/test_measurements_probabilistic.py b/tests/test_measurements_probabilistic.py index ebe8ec84f3..2969531fd3 100644 --- a/tests/test_measurements_probabilistic.py +++ b/tests/test_measurements_probabilistic.py @@ -65,6 +65,7 @@ def test_unbalanced_probabilistic_measurement(backend, use_samples): decimal_frequencies = backend.test_regressions( "test_unbalanced_probabilistic_measurement" ) + assert sum(result.frequencies().values()) == 1000 assert_result(backend, result, decimal_frequencies=decimal_frequencies) @@ -112,6 +113,7 @@ def test_post_measurement_bitflips_on_circuit(backend, accelerators, i, probs): c.add(gates.M(3, p0=probs[2])) result = backend.execute_circuit(c, nshots=30) freqs = result.frequencies(binary=False) + print(freqs) targets = backend.test_regressions("test_post_measurement_bitflips_on_circuit") assert freqs == targets[i] @@ -145,10 +147,10 @@ def test_measurementresult_apply_bitflips(backend, i, p0, p1): c = models.Circuit(3) c.add(gates.M(*range(3))) - state = np.zeros(8) + state = backend.np.zeros(8) state[0] = 1.0 result = CircuitResult(state, c.measurements, backend) - result._samples = np.zeros((10, 3), dtype="int32") + result._samples = backend.cast(np.zeros((10, 3)), dtype="int32") backend.set_seed(123) noisy_samples = result.apply_bitflips(p0, p1) targets = backend.test_regressions("test_measurementresult_apply_bitflips") From 87cd0a8d2697c028fa9c9783eef12431d7e62472 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Wed, 28 Feb 2024 16:06:51 +0400 Subject: [PATCH 049/127] redefine permute to work both for pytorch and numpy --- src/qibo/backends/numpy.py | 80 +++++++------ src/qibo/backends/pytorch.py | 223 ++--------------------------------- 2 files changed, 55 insertions(+), 248 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 6e5a793fbd..2b33030a93 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -1,4 +1,5 @@ import collections +import math import numpy as np @@ -93,7 +94,7 @@ def identity_density_matrix(self, nqubits, normalize: bool = True): def plus_state(self, nqubits): state = self.np.ones(2**nqubits, dtype=self.dtype) - state /= self.np.sqrt(self.cast(2**nqubits)) + state /= math.sqrt(2**nqubits) return state def plus_density_matrix(self, nqubits): @@ -114,30 +115,33 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = np.eye(2**rank, dtype=np.complex128) + matrix = self.np.eye(2**rank, dtype=self.dtype) for gate in fgate.gates: # transfer gate matrix to numpy as it is more efficient for # small tensor calculations # explicit to_numpy see https://github.com/qiboteam/qibo/issues/928 - gmatrix = self.to_numpy(gate.matrix(self)) + gmatrix = self.cast(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=np.complex128) - gmatrix = 
np.kron(gmatrix, eye) + eye = self.np.eye(2 ** (rank - len(gate.qubits)), dtype=self.dtype) + gmatrix = self.np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate original_shape = gmatrix.shape - gmatrix = np.reshape(gmatrix, 2 * rank * (2,)) + gmatrix = self.np.reshape(gmatrix, 2 * rank * (2,)) qubits = list(gate.qubits) indices = qubits + [q for q in fgate.target_qubits if q not in qubits] indices = np.argsort(indices) transpose_indices = list(indices) transpose_indices.extend(indices + rank) - gmatrix = np.transpose(gmatrix, transpose_indices) - gmatrix = np.reshape(gmatrix, original_shape) + gmatrix = self.transpose(gmatrix, transpose_indices) + gmatrix = self.np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return self.cast(matrix) + return matrix + + def transpose(self, matrix, transpose_indices): + return self.np.transpose(matrix, transpose_indices) def control_matrix(self, gate): if len(gate.control_qubits) > 1: @@ -169,7 +173,7 @@ def apply_gate(self, gate, state, nqubits): ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol order, targets = einsum_utils.control_order(gate, nqubits) - state = self.np.transpose(state, order) + state = self.transpose(state, order) # Apply `einsum` only to the part of the state where all controls # are active. This should be `state[-1]` state = self.np.reshape(state, (2**ncontrol,) + nactive * (2,)) @@ -177,10 +181,10 @@ def apply_gate(self, gate, state, nqubits): updates = self.np.einsum(opstring, state[-1], matrix) # Concatenate the updated part of the state `updates` with the # part of of the state that remained unaffected `state[:-1]`. - state = self.np.concatenate([state[:-1], updates[self.np.newaxis]], axis=0) + state = self.np.concatenate([state[:-1], updates[None]], axis=0) state = self.np.reshape(state, nqubits * (2,)) # Put qubit indices back to their proper places - state = self.np.transpose(state, einsum_utils.reverse_order(order)) + state = self.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) @@ -199,7 +203,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): n = 2**ncontrol order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) - state = self.np.transpose(state, order) + state = self.transpose(state, order) state = self.np.reshape(state, 2 * (n,) + 2 * nactive * (2,)) leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( @@ -219,13 +223,11 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state00 = state[range(n - 1)] state00 = state00[:, range(n - 1)] - state01 = self.np.concatenate( - [state00, state01[:, self.np.newaxis]], axis=1 - ) - state10 = self.np.concatenate([state10, state11[self.np.newaxis]], axis=0) - state = self.np.concatenate([state01, state10[self.np.newaxis]], axis=0) + state01 = self.np.concatenate([state00, state01[:, None]], axis=1) + state10 = self.np.concatenate([state10, state11[None]], axis=0) + state = self.np.concatenate([state01, state10[None]], axis=0) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.np.transpose(state, einsum_utils.reverse_order(order)) + state = self.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) matrixc = self.np.conj(matrix) @@ -286,7 +288,7 @@ def 
collapse_state(self, state, qubits, shot, nqubits, normalize=True): binshot = self.samples_to_binary(shot, len(qubits))[0] state = self.np.reshape(state, nqubits * (2,)) order = list(qubits) + [q for q in range(nqubits) if q not in qubits] - state = self.np.transpose(state, order) + state = self.transpose(state, order) subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) state = self.np.reshape(state, subshape)[int(shot)] if normalize: @@ -303,7 +305,7 @@ def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): order.extend(q for q in range(nqubits) if q not in qubits) order.extend(q + nqubits for q in range(nqubits) if q not in qubits) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.np.transpose(state, order) + state = self.transpose(state, order) subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) state = self.np.reshape(state, subshape)[int(shot), int(shot)] n = 2 ** (len(state.shape) // 2) @@ -324,18 +326,18 @@ def reset_error_density_matrix(self, gate, state, nqubits): trace = self.partial_trace_density_matrix(state, (q,), nqubits) trace = self.np.reshape(trace, 2 * (nqubits - 1) * (2,)) zero = self.zero_density_matrix(1) - zero = self.np.tensordot(trace, zero, axes=0) + zero = self.np.tensordot(trace, zero, 0) order = list(range(2 * nqubits - 2)) order.insert(q, 2 * nqubits - 2) order.insert(q + nqubits, 2 * nqubits - 1) - zero = self.np.reshape(self.np.transpose(zero, order), shape) + zero = self.np.reshape(self.transpose(zero, order), shape) state = (1 - p_0 - p_1) * state + p_0 * zero return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) def thermal_error_density_matrix(self, gate, state, nqubits): state = self.cast(state) shape = state.shape - state = self.apply_gate(gate, state.ravel(), 2 * nqubits) + state = self.apply_gate(gate, self.ravel(state), 2 * nqubits) return self.np.reshape(state, shape) def depolarizing_error_density_matrix(self, gate, state, nqubits): @@ -347,7 +349,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): trace = self.np.reshape(trace, 2 * (nqubits - len(q)) * (2,)) identity = self.identity_density_matrix(len(q)) identity = self.np.reshape(identity, 2 * len(q) * (2,)) - identity = self.np.tensordot(trace, identity, axes=0) + identity = self.np.tensordot(trace, identity, 0) qubits = list(range(nqubits)) for j in q: qubits.pop(qubits.index(j)) @@ -364,7 +366,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): for qj in qs: qj = [qj[qubits.index(i)] for i in range(len(qubits))] order += qj - identity = self.np.reshape(self.np.transpose(identity, order), shape) + identity = self.np.reshape(self.transpose(identity, order), shape) state = (1 - lam) * state + lam * identity return state @@ -594,22 +596,34 @@ def calculate_symbolic_density_matrix( return terms return terms + def dimensions(self, x): + return x.ndim + def _order_probabilities(self, probs, qubits, nqubits): """Arrange probabilities according to the given ``qubits`` ordering.""" + if self.dimensions(probs) == 0: + return probs unmeasured, reduced = [], {} for i in range(nqubits): if i in qubits: reduced[i] = i - len(unmeasured) else: unmeasured.append(i) - return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + return self.transpose(probs, [reduced.get(i) for i in qubits]) + + def ravel(self, x): + return x.ravel() def calculate_probabilities(self, state, qubits, nqubits): rtype = self.np.real(state).dtype unmeasured_qubits = tuple(i for i in range(nqubits) 
if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.astype(rtype), axis=unmeasured_qubits) - return self._order_probabilities(probs, qubits, nqubits).ravel() + # This is necessary to use the same function on pytorch backend + if len(unmeasured_qubits) == 0: + probs = self.cast(state, dtype=rtype) + else: + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + return self.ravel(self._order_probabilities(probs, qubits, nqubits)) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order = tuple(sorted(qubits)) @@ -617,10 +631,10 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order = order + tuple(i + nqubits for i in order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.np.reshape(self.np.transpose(state, order), shape) + state = self.np.reshape(self.transpose(state, order), shape) probs = self.np.abs(self.np.einsum("abab->a", state)) probs = self.np.reshape(probs, len(qubits) * (2,)) - return self._order_probabilities(probs, qubits, nqubits).ravel() + return self.ravel(self._order_probabilities(probs, qubits, nqubits)) def set_seed(self, seed): self.np.random.seed(seed) @@ -679,7 +693,7 @@ def partial_trace(self, state, qubits, nqubits): state = self.cast(state) state = self.np.reshape(state, nqubits * (2,)) axes = 2 * [list(qubits)] - rho = self.np.tensordot(state, self.np.conj(state), axes=axes) + rho = self.np.tensordot(state, self.np.conj(state), axes) shape = 2 * (2 ** (nqubits - len(qubits)),) return self.np.reshape(rho, shape) @@ -692,7 +706,7 @@ def partial_trace_density_matrix(self, state, qubits, nqubits): order += tuple(i + nqubits for i in order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.np.transpose(state, order) + state = self.transpose(state, order) state = self.np.reshape(state, shape) return self.np.einsum("abac->bc", state) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 740cf424c7..37acd3e729 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -101,28 +101,6 @@ def cast( return x - def apply_gate(self, gate, state, nqubits): - state = self.cast(state) - state = self.np.reshape(state, nqubits * (2,)) - matrix = gate.matrix(self) - if gate.is_controlled_by: - matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - ncontrol = len(gate.control_qubits) - nactive = nqubits - ncontrol - order, targets = einsum_utils.control_order(gate, nqubits) - state = state.permute(*order) - state = self.np.reshape(state, (2**ncontrol,) + nactive * (2,)) - opstring = einsum_utils.apply_gate_string(targets, nactive) - updates = self.np.einsum(opstring, state[-1], matrix) - state = self.np.cat([state[:-1], updates[None]], axis=0) - state = self.np.reshape(state, nqubits * (2,)) - state = state.permute(*einsum_utils.reverse_order(order)) - else: - matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) - opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) - state = self.np.einsum(opstring, state, matrix) - return self.np.reshape(state, (2**nqubits,)) - def issparse(self, x): if isinstance(x, self.np.Tensor): return x.is_sparse @@ -139,6 +117,9 @@ def compile(self, func): return func # return self.np.jit.script(func) + def transpose(self, matrix, transpose_indices): + return matrix.permute(*transpose_indices) + def matrix(self, gate): npmatrix = 
super().matrix(gate) return self.np.tensor(npmatrix, dtype=self.dtype) @@ -165,38 +146,11 @@ def samples_to_binary(self, samples, nqubits): samples = samples[:, None] >> qrange return samples % 2 - def _order_probabilities(self, probs, qubits, nqubits): - """Arrange probabilities according to the given ``qubits`` ordering.""" - if probs.dim() == 0: - return probs - unmeasured, reduced = [], {} - for i in range(nqubits): - if i in qubits: - reduced[i] = i - len(unmeasured) - else: - unmeasured.append(i) - return probs.permute(*[reduced.get(i) for i in qubits]) - - def calculate_probabilities(self, state, qubits, nqubits): - rtype = state.real.dtype - unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) - state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - if len(unmeasured_qubits) == 0: - probs = state.type(rtype) - else: - probs = self.np.sum(state.type(rtype), dim=unmeasured_qubits) - return self._order_probabilities(probs, qubits, nqubits).reshape(-1) - - def calculate_probabilities_density_matrix(self, state, qubits, nqubits): - order = tuple(sorted(qubits)) - order += tuple(i for i in range(nqubits) if i not in qubits) - order = order + tuple(i + nqubits for i in order) - shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.np.reshape(state.permute(*order), shape) - probs = self.np.abs(self.np.einsum("abab->a", state)) - probs = self.np.reshape(probs, len(qubits) * (2,)) - return self._order_probabilities(probs, qubits, nqubits).view(-1) + def dimensions(self, x): + return x.dim() + + def ravel(self, x): + return x.reshape(-1) def calculate_norm(self, state, order=2): state = self.cast(state) @@ -250,72 +204,6 @@ def calculate_overlap_density_matrix(self, state1, state2): self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) ) - def apply_gate_density_matrix(self, gate, state, nqubits): - state = self.cast(state) - state = self.np.reshape(state, 2 * nqubits * (2,)) - matrix = gate.matrix(self) - if gate.is_controlled_by: - matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - matrixc = self.np.conj(matrix) - ncontrol = len(gate.control_qubits) - nactive = nqubits - ncontrol - n = 2**ncontrol - - order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) - state = state.permute(*order) - state = self.np.reshape(state, 2 * (n,) + 2 * nactive * (2,)) - - leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( - targets, nactive - ) - state01 = state[: n - 1, n - 1] - state01 = self.np.einsum(rightc, state01, matrixc) - state10 = state[n - 1, : n - 1] - state10 = self.np.einsum(leftc, state10, matrix) - - left, right = einsum_utils.apply_gate_density_matrix_string( - targets, nactive - ) - state11 = state[n - 1, n - 1] - state11 = self.np.einsum(right, state11, matrixc) - state11 = self.np.einsum(left, state11, matrix) - - state00 = state[range(n - 1)] - state00 = state00[:, range(n - 1)] - state01 = self.np.cat([state00, state01[:, None]], dim=1) - state10 = self.np.cat([state10, state11[None]], dim=0) - state = self.np.cat([state01, state10[None]], dim=0) - state = self.np.reshape(state, 2 * nqubits * (2,)) - state = state.permute(*einsum_utils.reverse_order(order)) - else: - matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) - matrixc = self.np.conj(matrix) - left, right = einsum_utils.apply_gate_density_matrix_string( - gate.qubits, nqubits - ) - state = self.np.einsum(right, state, matrixc) - state = 
self.np.einsum(left, state, matrix) - return self.np.reshape(state, 2 * (2**nqubits,)) - - def partial_trace(self, state, qubits, nqubits): - state = self.cast(state) - state = self.np.reshape(state, nqubits * (2,)) - axes = 2 * [list(qubits)] - rho = self.np.tensordot(state, self.np.conj(state), dims=axes) - shape = 2 * (2 ** (nqubits - len(qubits)),) - return self.np.reshape(rho, shape) - - def partial_trace_density_matrix(self, state, qubits, nqubits): - state = self.cast(state) - state = self.np.reshape(state, 2 * nqubits * (2,)) - order = list(sorted(qubits)) - order += [i for i in range(nqubits) if i not in qubits] - order += [i + nqubits for i in order] - state = state.permute(*order) - shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.np.reshape(state, shape) - return self.np.einsum("abac->bc", state) - def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): @@ -326,101 +214,6 @@ def _append_zeros(self, state, qubits, results): state = self.np.cat([state, self.np.zeros_like(state)], dim=q) return state - def collapse_state(self, state, qubits, shot, nqubits, normalize=True): - state = self.cast(state) - shape = state.shape - binshot = self.samples_to_binary(shot, len(qubits))[0] - state = self.np.reshape(state, nqubits * (2,)) - order = list(qubits) + [q for q in range(nqubits) if q not in qubits] - state = state.permute(*order) - subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) - state = self.np.reshape(state, subshape)[int(shot)] - if normalize: - norm = self.np.sqrt(self.np.sum(self.np.abs(state) ** 2)) - state = state / norm - state = self._append_zeros(state, qubits, binshot) - return self.np.reshape(state, shape) - - def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): - state = self.cast(state) - shape = state.shape - binshot = list(self.samples_to_binary(shot, len(qubits))[0]) - order = list(qubits) + [q + nqubits for q in qubits] - order.extend(q for q in range(nqubits) if q not in qubits) - order.extend(q + nqubits for q in range(nqubits) if q not in qubits) - state = self.np.reshape(state, 2 * nqubits * (2,)) - state = state.permute(*order) - subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) - state = self.np.reshape(state, subshape)[int(shot), int(shot)] - n = 2 ** (len(state.shape) // 2) - if normalize: - norm = self.np.trace(self.np.reshape(state, (n, n))) - state = state / norm - qubits = qubits + [q + nqubits for q in qubits] - state = self._append_zeros(state, qubits, 2 * binshot) - return self.np.reshape(state, shape) - - def reset_error_density_matrix(self, gate, state, nqubits): - from qibo.gates import X # pylint: disable=C0415 - - state = self.cast(state) - shape = state.shape - q = gate.target_qubits[0] - p_0, p_1 = gate.init_kwargs["p_0"], gate.init_kwargs["p_1"] - trace = self.partial_trace_density_matrix(state, (q,), nqubits) - trace = self.np.reshape(trace, 2 * (nqubits - 1) * (2,)) - zero = self.zero_density_matrix(1) - zero = self.np.tensordot(trace, zero, dims=0) - order = list(range(2 * nqubits - 2)) - order.insert(q, 2 * nqubits - 2) - order.insert(q + nqubits, 2 * nqubits - 1) - zero = self.np.reshape(zero.permute(*order), shape) - state = (1 - p_0 - p_1) * state + p_0 * zero - return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) - - def thermal_error_density_matrix(self, gate, state, nqubits): - state = self.cast(state) - shape = state.shape - state = self.apply_gate(gate, 
state.view(-1), 2 * nqubits) - return self.np.reshape(state, shape) - - def identity_density_matrix(self, nqubits, normalize: bool = True): - state = self.np.eye(2**nqubits, dtype=self.np.complex128) - if normalize is True: - state /= 2**nqubits - return state - - def depolarizing_error_density_matrix(self, gate, state, nqubits): - state = self.cast(state) - shape = state.shape - q = gate.target_qubits - lam = gate.init_kwargs["lam"] - trace = self.partial_trace_density_matrix(state, q, nqubits) - trace = self.np.reshape(trace, 2 * (nqubits - len(q)) * (2,)) - identity = self.identity_density_matrix(len(q)) - identity = self.np.reshape(identity, 2 * len(q) * (2,)) - identity = self.np.tensordot(trace, identity, dims=0) - qubits = list(range(nqubits)) - for j in q: - qubits.pop(qubits.index(j)) - qubits.sort() - qubits += list(q) - qubit_1 = list(range(nqubits - len(q))) + list( - range(2 * (nqubits - len(q)), 2 * nqubits - len(q)) - ) - qubit_2 = list(range(nqubits - len(q), 2 * (nqubits - len(q)))) + list( - range(2 * nqubits - len(q), 2 * nqubits) - ) - qs = [qubit_1, qubit_2] - order = [] - for qj in qs: - qj = [qj[qubits.index(i)] for i in range(len(qubits))] - order += qj - identity = identity.permute(*order) - identity = self.np.reshape(identity, shape) - state = (1 - lam) * state + lam * identity - return state - def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ From d63fef09c525368bdc60928e68ab9e0b48e614f7 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Wed, 28 Feb 2024 17:11:14 +0400 Subject: [PATCH 050/127] solved errors --- src/qibo/backends/numpy.py | 18 ++++++++++-------- src/qibo/backends/pytorch.py | 14 -------------- tests/test_models_circuit_features.py | 6 ++++++ tests/test_models_dbi_utils.py | 2 +- tests/test_models_encodings.py | 4 +++- 5 files changed, 20 insertions(+), 24 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 2b33030a93..c94b34abb4 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -371,8 +371,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): return state def execute_circuit(self, circuit, initial_state=None, nshots=1000): - if initial_state is not None: - initial_state = self.cast(initial_state) + if isinstance(initial_state, type(circuit)): if not initial_state.density_matrix == circuit.density_matrix: raise_error( @@ -390,6 +389,8 @@ def execute_circuit(self, circuit, initial_state=None, nshots=1000): ) else: return self.execute_circuit(initial_state + circuit, None, nshots) + elif initial_state is not None: + initial_state = self.cast(initial_state) if circuit.repeated_execution: if circuit.measurements or circuit.has_collapse: @@ -626,6 +627,7 @@ def calculate_probabilities(self, state, qubits, nqubits): return self.ravel(self._order_probabilities(probs, qubits, nqubits)) def calculate_probabilities_density_matrix(self, state, qubits, nqubits): + state = self.cast(state) order = tuple(sorted(qubits)) order += tuple(i for i in range(nqubits) if i not in qubits) order = order + tuple(i + nqubits for i in order) @@ -719,9 +721,9 @@ def calculate_norm_density_matrix(self, state, order="nuc"): return self.np.linalg.norm(state, ord=order) def calculate_overlap(self, state1, state2): - state1 = self.cast(state1) - state2 = self.cast(state2) - return self.np.abs(self.np.sum(self.np.conj(state1) * state2)) + return self.np.abs( + self.np.sum(self.np.conj(self.cast(state1)) * self.cast(state2)) + ) def 
calculate_overlap_density_matrix(self, state1, state2): state1 = self.cast(state1) @@ -759,12 +761,12 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): + state = self.cast(state) statec = self.np.conj(state) - hstate = hamiltonian @ state + hstate = self.cast(hamiltonian @ state) ev = self.np.real(self.np.sum(statec * hstate)) if normalize: - norm = self.np.sum(self.np.square(self.np.abs(state))) - ev = ev / norm + ev = ev / self.np.sum(self.np.square(self.np.abs(state))) return ev def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 37acd3e729..ba2e185e9f 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -177,15 +177,6 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): ud = self.np.transpose(self.np.conj(eigenvectors), dim0=0, dim1=1) return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) - def calculate_expectation_state(self, hamiltonian, state, normalize): - state = self.cast(state) - statec = self.np.conj(state) - hstate = self.cast(hamiltonian @ state) - ev = self.np.real(self.np.sum(statec * hstate)) - if normalize: - ev = ev / self.np.sum(self.np.square(self.np.abs(state))) - return ev - def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 @@ -194,11 +185,6 @@ def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): def calculate_hamiltonian_state_product(self, matrix, state): return self.np.matmul(matrix, state) - def calculate_overlap(self, state1, state2): - return self.np.abs( - self.np.sum(self.np.conj(self.cast(state1)) * self.cast(state2)) - ) - def calculate_overlap_density_matrix(self, state1, state2): return self.np.trace( self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index d0e6849145..cc50e7905e 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -327,11 +327,17 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): test_frequencies = Counter({"1": 844, "0": 180}) else: test_frequencies = Counter({"11": 674, "10": 155, "01": 154, "00": 41}) + elif backend.__class__.__name__ == "PyTorchBackend": + if nqubits == 1: + test_frequencies = Counter({"1": 810, "0": 214}) + else: + test_frequencies = Counter({"11": 685, "01": 160, "10": 144, "00": 35}) else: if nqubits == 1: test_frequencies = Counter({"1": 790, "0": 234}) else: test_frequencies = Counter({"11": 618, "10": 169, "01": 185, "00": 52}) + print(result.frequencies()) for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) diff --git a/tests/test_models_dbi_utils.py b/tests/test_models_dbi_utils.py index a05266e1de..1baea09a4a 100644 --- a/tests/test_models_dbi_utils.py +++ b/tests/test_models_dbi_utils.py @@ -38,7 +38,7 @@ def test_select_best_dbr_generator(backend, nqubits, step): mode=DoubleBracketGeneratorType.single_commutator, ) generate_Z = generate_Z_operators(nqubits) - Z_ops = list(generate_Z.values()) + Z_ops = list(backend.cast(i) for i in generate_Z.values()) initial_off_diagonal_norm = dbi.off_diagonal_norm 
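The `backend.cast` applied to `Z_ops` above is the same pattern used throughout these patches: objects produced as plain numpy arrays are converted to the active backend's native tensor type before any `backend.np` operation touches them, so the identical test body runs on both the numpy and the pytorch backends. A minimal sketch of the idea (illustrative only, not part of the patch; the `NumpyBackend` import path is taken from the module modified in these diffs):

    import numpy as np
    from qibo.backends.numpy import NumpyBackend

    backend = NumpyBackend()
    z = np.diag([1.0 + 0j, -1.0 + 0j])   # a Pauli-Z matrix as a plain numpy array
    z_native = backend.cast(z)           # numpy array here, a torch tensor on the pytorch backend
    # once cast, the same backend-agnostic call works on every backend
    print(backend.np.trace(z_native @ z_native))  # -> (2+0j) on the numpy backend
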
for _ in range(NSTEPS): diff --git a/tests/test_models_encodings.py b/tests/test_models_encodings.py index 63cae6836b..ecafa73145 100644 --- a/tests/test_models_encodings.py +++ b/tests/test_models_encodings.py @@ -135,7 +135,9 @@ def test_unary_encoder(backend, nqubits, architecture, kind): indexes = np.flatnonzero(state) state = np.real(state[indexes]) - backend.assert_allclose(state, data / backend.calculate_norm(data, order=2)) + backend.assert_allclose( + state, data / backend.to_numpy(backend.calculate_norm(data, order=2)) + ) @pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) From 23ccda942045d9b4b95ecbc88a0cafff8684d473 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Wed, 28 Feb 2024 17:26:49 +0400 Subject: [PATCH 051/127] solved errors in evolution --- src/qibo/hamiltonians/hamiltonians.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 179895f751..578dd2648d 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -248,7 +248,12 @@ def __mul__(self, o): if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - r._eigenvalues = o * self._eigenvalues[::-1] + if self.backend.__class__.__name__ == "PyTorchBackend": + import torch + + r._eigenvalues = o * torch.flip(self._eigenvalues, [0]) + else: + r._eigenvalues = o * self._eigenvalues[::-1] if self._eigenvectors is not None: if self.backend.np.real(o) > 0: # TODO: see above r._eigenvectors = self._eigenvectors From b3b9bff5170eb7ee6f76297ae419393b123b1d46 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 29 Feb 2024 14:35:42 +0400 Subject: [PATCH 052/127] corrections by renato and other errors solved --- src/qibo/backends/numpy.py | 36 ++++++++++++----------------- src/qibo/backends/pytorch.py | 11 +++------ src/qibo/models/error_mitigation.py | 16 ++++++++----- tests/test_models_variational.py | 6 ++++- 4 files changed, 33 insertions(+), 36 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index c94b34abb4..ca8ab26a1c 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -134,15 +134,12 @@ def matrix_fused(self, fgate): indices = np.argsort(indices) transpose_indices = list(indices) transpose_indices.extend(indices + rank) - gmatrix = self.transpose(gmatrix, transpose_indices) + gmatrix = self.np.transpose(gmatrix, transpose_indices) gmatrix = self.np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix return matrix - def transpose(self, matrix, transpose_indices): - return self.np.transpose(matrix, transpose_indices) - def control_matrix(self, gate): if len(gate.control_qubits) > 1: raise_error( @@ -173,7 +170,7 @@ def apply_gate(self, gate, state, nqubits): ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol order, targets = einsum_utils.control_order(gate, nqubits) - state = self.transpose(state, order) + state = self.np.transpose(state, order) # Apply `einsum` only to the part of the state where all controls # are active. 
This should be `state[-1]` state = self.np.reshape(state, (2**ncontrol,) + nactive * (2,)) @@ -184,7 +181,7 @@ def apply_gate(self, gate, state, nqubits): state = self.np.concatenate([state[:-1], updates[None]], axis=0) state = self.np.reshape(state, nqubits * (2,)) # Put qubit indices back to their proper places - state = self.transpose(state, einsum_utils.reverse_order(order)) + state = self.np.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) opstring = einsum_utils.apply_gate_string(gate.qubits, nqubits) @@ -203,7 +200,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): n = 2**ncontrol order, targets = einsum_utils.control_order_density_matrix(gate, nqubits) - state = self.transpose(state, order) + state = self.np.transpose(state, order) state = self.np.reshape(state, 2 * (n,) + 2 * nactive * (2,)) leftc, rightc = einsum_utils.apply_gate_density_matrix_controlled_string( @@ -227,7 +224,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state10 = self.np.concatenate([state10, state11[None]], axis=0) state = self.np.concatenate([state01, state10[None]], axis=0) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.transpose(state, einsum_utils.reverse_order(order)) + state = self.np.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) matrixc = self.np.conj(matrix) @@ -288,7 +285,7 @@ def collapse_state(self, state, qubits, shot, nqubits, normalize=True): binshot = self.samples_to_binary(shot, len(qubits))[0] state = self.np.reshape(state, nqubits * (2,)) order = list(qubits) + [q for q in range(nqubits) if q not in qubits] - state = self.transpose(state, order) + state = self.np.transpose(state, order) subshape = (2 ** len(qubits),) + (nqubits - len(qubits)) * (2,) state = self.np.reshape(state, subshape)[int(shot)] if normalize: @@ -305,7 +302,7 @@ def collapse_density_matrix(self, state, qubits, shot, nqubits, normalize=True): order.extend(q for q in range(nqubits) if q not in qubits) order.extend(q + nqubits for q in range(nqubits) if q not in qubits) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.transpose(state, order) + state = self.np.transpose(state, order) subshape = 2 * (2 ** len(qubits),) + 2 * (nqubits - len(qubits)) * (2,) state = self.np.reshape(state, subshape)[int(shot), int(shot)] n = 2 ** (len(state.shape) // 2) @@ -330,14 +327,14 @@ def reset_error_density_matrix(self, gate, state, nqubits): order = list(range(2 * nqubits - 2)) order.insert(q, 2 * nqubits - 2) order.insert(q + nqubits, 2 * nqubits - 1) - zero = self.np.reshape(self.transpose(zero, order), shape) + zero = self.np.reshape(self.np.transpose(zero, order), shape) state = (1 - p_0 - p_1) * state + p_0 * zero return state + p_1 * self.apply_gate_density_matrix(X(q), zero, nqubits) def thermal_error_density_matrix(self, gate, state, nqubits): state = self.cast(state) shape = state.shape - state = self.apply_gate(gate, self.ravel(state), 2 * nqubits) + state = self.apply_gate(gate, state.ravel(), 2 * nqubits) return self.np.reshape(state, shape) def depolarizing_error_density_matrix(self, gate, state, nqubits): @@ -366,7 +363,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): for qj in qs: qj = [qj[qubits.index(i)] for i in range(len(qubits))] order += qj - identity = self.np.reshape(self.transpose(identity, order), shape) + identity = self.np.reshape(self.np.transpose(identity, order), shape) state 
= (1 - lam) * state + lam * identity return state @@ -610,10 +607,7 @@ def _order_probabilities(self, probs, qubits, nqubits): reduced[i] = i - len(unmeasured) else: unmeasured.append(i) - return self.transpose(probs, [reduced.get(i) for i in qubits]) - - def ravel(self, x): - return x.ravel() + return self.np.transpose(probs, [reduced.get(i) for i in qubits]) def calculate_probabilities(self, state, qubits, nqubits): rtype = self.np.real(state).dtype @@ -624,7 +618,7 @@ def calculate_probabilities(self, state, qubits, nqubits): probs = self.cast(state, dtype=rtype) else: probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) - return self.ravel(self._order_probabilities(probs, qubits, nqubits)) + return self._order_probabilities(probs, qubits, nqubits).ravel() def calculate_probabilities_density_matrix(self, state, qubits, nqubits): state = self.cast(state) @@ -633,10 +627,10 @@ def calculate_probabilities_density_matrix(self, state, qubits, nqubits): order = order + tuple(i + nqubits for i in order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) state = self.np.reshape(state, 2 * nqubits * (2,)) - state = self.np.reshape(self.transpose(state, order), shape) + state = self.np.reshape(self.np.transpose(state, order), shape) probs = self.np.abs(self.np.einsum("abab->a", state)) probs = self.np.reshape(probs, len(qubits) * (2,)) - return self.ravel(self._order_probabilities(probs, qubits, nqubits)) + return self._order_probabilities(probs, qubits, nqubits).ravel() def set_seed(self, seed): self.np.random.seed(seed) @@ -708,7 +702,7 @@ def partial_trace_density_matrix(self, state, qubits, nqubits): order += tuple(i + nqubits for i in order) shape = 2 * (2 ** len(qubits), 2 ** (nqubits - len(qubits))) - state = self.transpose(state, order) + state = self.np.transpose(state, order) state = self.np.reshape(state, shape) return self.np.einsum("abac->bc", state) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index ba2e185e9f..f7ced8a63e 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -6,7 +6,6 @@ import torch from qibo import __version__ -from qibo.backends import einsum_utils from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend @@ -55,6 +54,8 @@ def __init__(self): self.np = torch self.dtype = torch_dtype_dict[self.dtype] self.tensor_types = (self.np.Tensor, np.ndarray) + # Transpose function in Torch works in a different way than numpy + self.np.transpose = torch.permute def set_device(self, device): # pragma: no cover self.device = device @@ -117,9 +118,6 @@ def compile(self, func): return func # return self.np.jit.script(func) - def transpose(self, matrix, transpose_indices): - return matrix.permute(*transpose_indices) - def matrix(self, gate): npmatrix = super().matrix(gate) return self.np.tensor(npmatrix, dtype=self.dtype) @@ -149,9 +147,6 @@ def samples_to_binary(self, samples, nqubits): def dimensions(self, x): return x.dim() - def ravel(self, x): - return x.reshape(-1) - def calculate_norm(self, state, order=2): state = self.cast(state) return self.np.norm(state, p=order) @@ -174,7 +169,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): -1j * a * matrix ) expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors), dim0=0, dim1=1) + ud = self.np.conj(eigenvectors).T return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_hamiltonian_matrix_product(self, 
matrix1, matrix2): diff --git a/src/qibo/models/error_mitigation.py b/src/qibo/models/error_mitigation.py index 9af448c564..617dd6a65f 100644 --- a/src/qibo/models/error_mitigation.py +++ b/src/qibo/models/error_mitigation.py @@ -788,13 +788,17 @@ def error_sensitive_circuit(circuit, observable, backend=None): comp_to_pauli = comp_basis_to_pauli(num_qubits, backend=backend) observable.nqubits = num_qubits observable_liouville = vectorization( - np.transpose(np.conjugate(unitary_matrix)) @ observable.matrix @ unitary_matrix, + backend.np.transpose(backend.np.conj(unitary_matrix), (1, 0)) + @ observable.matrix + @ unitary_matrix, order="row", backend=backend, ) observable_pauli_liouville = comp_to_pauli @ observable_liouville - index = int(np.where(abs(observable_pauli_liouville) >= 1e-5)[0][0]) + index = int( + backend.np.where(backend.np.abs(observable_pauli_liouville) >= 1e-5)[0][0] + ) observable_pauli = list(product(["I", "X", "Y", "Z"], repeat=num_qubits))[index] @@ -809,12 +813,12 @@ def error_sensitive_circuit(circuit, observable, backend=None): for i in range(num_qubits): observable_i = pauli_gates[observable_pauli[i]] random_init = pauli_gates["I"] - while np.any(abs(observable_i - pauli_gates["Z"]) > 1e-5) and np.any( - abs(observable_i - pauli_gates["I"]) > 1e-5 - ): + while backend.np.any( + backend.np.abs(observable_i - pauli_gates["Z"]) > 1e-5 + ) and backend.np.any(abs(observable_i - pauli_gates["I"]) > 1e-5): random_init = random_clifford(1, backend=backend, return_circuit=False) observable_i = ( - np.conjugate(np.transpose(random_init)) + backend.np.conj(backend.np.transpose(random_init, (1, 0))) @ pauli_gates[observable_pauli[i]] @ random_init ) diff --git a/tests/test_models_variational.py b/tests/test_models_variational.py index ee269e58df..54c731aabd 100644 --- a/tests/test_models_variational.py +++ b/tests/test_models_variational.py @@ -101,6 +101,8 @@ def myloss(parameters, circuit, target): @pytest.mark.parametrize(test_names, test_values) def test_vqe(backend, method, options, compile, filename): """Performs a VQE circuit minimization test.""" + if backend.name == "pytorch": + pytest.skip("Skipping VQE test for pytorch backend.") if (method == "sgd" or compile) and backend.name != "tensorflow": pytest.skip("Skipping SGD test for unsupported backend.") if method != "sgd" and backend.name == "tensorflow": @@ -138,7 +140,7 @@ def test_vqe(backend, method, options, compile, filename): assert_regression_fixture(backend, params, filename) # test energy fluctuation - state = np.ones(2**nqubits) / np.sqrt(2**nqubits) + state = backend.np.ones(2**nqubits) / np.sqrt(2**nqubits) energy_fluctuation = v.energy_fluctuation(state) assert energy_fluctuation >= 0 backend.set_threads(n_threads) @@ -305,6 +307,8 @@ def __call__(self, x): @pytest.mark.parametrize(test_names, test_values) def test_aavqe(backend, method, options, compile, filename): """Performs a AAVQE circuit minimization test.""" + if backend.name == "pytorch": + pytest.skip("Skipping VQE test for pytorch backend.") nqubits = 4 layers = 1 circuit = models.Circuit(nqubits) From faef0b3eedcf8222dc2d8b20b7a9cd8cfce2b85c Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 1 Mar 2024 08:55:10 +0400 Subject: [PATCH 053/127] minor fix for tensorflow --- src/qibo/backends/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index ca8ab26a1c..4ddb6b71b5 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -673,7 +673,7 @@ def 
sample_frequencies(self, probabilities, nshots): frequencies, nprobs, nshots % SHOT_BATCH_SIZE ) return collections.Counter( - {i: f.item() for i, f in enumerate(frequencies) if f > 0} + {i: int(f) for i, f in enumerate(frequencies) if f > 0} ) def apply_bitflips(self, noiseless_samples, bitflip_probabilities): From c21cfde3a4186796d5cad6537671a63a573439ed Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 1 Mar 2024 00:17:29 +0400 Subject: [PATCH 054/127] solved errors --- src/qibo/backends/numpy.py | 1 + src/qibo/backends/pytorch.py | 14 ++- src/qibo/quantum_info/utils.py | 2 +- src/qibo/transpiler/unitary_decompositions.py | 104 +++++++++++------- tests/test_hamiltonians.py | 47 +++----- tests/test_measurements_probabilistic.py | 1 - .../test_transpiler_unitary_decompositions.py | 14 ++- 7 files changed, 99 insertions(+), 84 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 4ddb6b71b5..80c69dd17f 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -764,6 +764,7 @@ def calculate_expectation_state(self, hamiltonian, state, normalize): return ev def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): + state = self.cast(state) ev = self.np.real(self.np.trace(self.cast(hamiltonian @ state))) if normalize: norm = self.np.real(self.np.trace(state)) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index f7ced8a63e..53044a344a 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -3,6 +3,7 @@ from typing import Union import numpy as np +import scipy import torch from qibo import __version__ @@ -62,6 +63,7 @@ def set_device(self, device): # pragma: no cover def set_seed(self, seed): self.np.manual_seed(seed) + np.random.seed(seed) def cast( self, @@ -198,10 +200,10 @@ def _append_zeros(self, state, qubits, results): def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ - [4, 0, 0, 1, 0, 0, 1, 0, 0, 0], - [0, 1, 1, 2, 1, 1, 4, 0, 0, 4], - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 4, 0, 0, 0, 4], + [0, 0, 0, 0, 2, 3, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 3, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 0, 0, 0, 0, 0], ] if name == "test_probabilistic_measurement": @@ -217,6 +219,6 @@ def test_regressions(self, name): if name == "test_post_measurement_bitflips_on_circuit": return [ {5: 30}, - {5: 12, 4: 6, 7: 6, 1: 5, 6: 1}, - {3: 7, 0: 4, 2: 4, 6: 4, 7: 4, 5: 3, 1: 2, 4: 2}, + {5: 17, 4: 5, 7: 4, 1: 2, 6: 2}, + {4: 9, 2: 5, 5: 5, 3: 4, 6: 4, 0: 1, 1: 1, 7: 1}, ] diff --git a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index 1af8928d4b..e1750a725b 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -321,7 +321,7 @@ def hellinger_shot_error( hellinger_error = hellinger_fidelity( prob_dist_p, prob_dist_q, validate=validate, backend=backend ) - hellinger_error = np.sqrt(hellinger_error / nshots) * np.sum( + hellinger_error = np.sqrt(hellinger_error / nshots) * backend.np.sum( np.sqrt(prob_dist_q * (1 - prob_dist_p)) + np.sqrt(prob_dist_p * (1 - prob_dist_q)) ) diff --git a/src/qibo/transpiler/unitary_decompositions.py b/src/qibo/transpiler/unitary_decompositions.py index 472347b0d4..655af14fcc 100644 --- a/src/qibo/transpiler/unitary_decompositions.py +++ b/src/qibo/transpiler/unitary_decompositions.py @@ -61,22 +61,27 @@ def calculate_psi(unitary, magic_basis=magic_basis, backend=None): f"{backend.__class__.__name__} does not support `linalg.eig.`", ) 
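The rewrite of `calculate_psi` below follows the same backend-agnostic recipe already used in `error_mitigation.py` above: module-level constants such as `magic_basis` are passed through `backend.cast`, and `backend.np.transpose(backend.np.conj(...), (1, 0))` serves as the portable conjugate transpose. A quick sanity check of that expression on the numpy backend (illustrative sketch, not part of the patch):

    import numpy as np
    from qibo.backends.numpy import NumpyBackend

    backend = NumpyBackend()
    u = backend.cast(np.random.rand(2, 2) + 1j * np.random.rand(2, 2))
    u_dagger = backend.np.transpose(backend.np.conj(u), (1, 0))
    # agrees with the plain numpy conjugate transpose
    assert np.allclose(backend.to_numpy(u_dagger), backend.to_numpy(u).conj().T)
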
- magic_basis = backend.cast(magic_basis, dtype=magic_basis.dtype) + magic_basis = backend.cast(magic_basis) + unitary = backend.cast(unitary) # write unitary in magic basis - u_magic = np.transpose(np.conj(magic_basis)) @ unitary @ magic_basis + u_magic = ( + backend.np.transpose(backend.np.conj(magic_basis), (1, 0)) + @ unitary + @ magic_basis + ) # construct and diagonalize UT_U - ut_u = np.transpose(u_magic) @ u_magic + ut_u = backend.np.transpose(u_magic, (1, 0)) @ u_magic # When the matrix given to np.linalg.eig is a diagonal matrix up to machine precision the decomposition # is not accurate anymore. decimals = 20 works for random 2q Clifford unitaries. - eigvals, psi_magic = np.linalg.eig(np.round(ut_u, decimals=20)) + eigvals, psi_magic = backend.np.linalg.eig(np.round(ut_u, decimals=20)) # orthogonalize eigenvectors in the case of degeneracy (Gram-Schmidt) - psi_magic, _ = np.linalg.qr(psi_magic) + psi_magic, _ = backend.np.linalg.qr(psi_magic) # write psi in computational basis - psi = np.dot(magic_basis, psi_magic) + psi = backend.np.matmul(magic_basis, psi_magic) return psi, eigvals -def schmidt_decompose(state): +def schmidt_decompose(state, backend=None): """Decomposes a two-qubit product state to its single-qubit parts. Args: @@ -86,7 +91,8 @@ def schmidt_decompose(state): (ndarray, ndarray): decomposition """ - u, d, v = np.linalg.svd(np.reshape(state, (2, 2))) + backend = _check_backend(backend) + u, d, v = backend.np.linalg.svd(backend.np.reshape(state, (2, 2))) if not np.allclose(d, [1, 0]): # pragma: no cover raise_error( ValueError, @@ -95,7 +101,7 @@ def schmidt_decompose(state): return u[:, 0], v[0] -def calculate_single_qubit_unitaries(psi): +def calculate_single_qubit_unitaries(psi, backend=None): """Calculates local unitaries that maps a maximally entangled basis to the magic basis. See Lemma 1 of Appendix A in arXiv:quant-ph/0011050. @@ -106,48 +112,58 @@ def calculate_single_qubit_unitaries(psi): Returns: (ndarray, ndarray): Local unitaries UA and UB that map the given basis to the magic basis. 
""" - - # TODO: Handle the case where psi is not real in the magic basis - psi_magic = np.dot(np.conj(magic_basis).T, psi) + backend = _check_backend(backend) + psi_magic = backend.np.matmul(backend.np.conj(backend.cast(magic_basis)).T, psi) if not np.allclose(psi_magic.imag, np.zeros_like(psi_magic)): # pragma: no cover raise_error(NotImplementedError, "Given state is not real in the magic basis.") - psi_bar = np.copy(psi).T + psi_bar = backend.cast(psi.T, copy=True) # find e and f by inverting (A3), (A4) ef = (psi_bar[0] + 1j * psi_bar[1]) / np.sqrt(2) e_f_ = (psi_bar[0] - 1j * psi_bar[1]) / np.sqrt(2) - e, f = schmidt_decompose(ef) - e_, f_ = schmidt_decompose(e_f_) + e, f = schmidt_decompose(ef, backend=backend) + e_, f_ = schmidt_decompose(e_f_, backend=backend) # find exp(1j * delta) using (A5a) - ef_ = np.kron(e, f_) - phase = 1j * np.sqrt(2) * np.dot(np.conj(ef_), psi_bar[2]) - + ef_ = backend.np.kron(e, f_) + phase = 1j * np.sqrt(2) * backend.np.dot(backend.np.conj(ef_), psi_bar[2]) + v0 = backend.cast(np.asarray([1, 0])) + v1 = backend.cast(np.asarray([0, 1])) # construct unitaries UA, UB using (A6a), (A6b) - ua = np.tensordot([1, 0], np.conj(e), axes=0) + phase * np.tensordot( - [0, 1], np.conj(e_), axes=0 - ) - ub = np.tensordot([1, 0], np.conj(f), axes=0) + np.conj(phase) * np.tensordot( - [0, 1], np.conj(f_), axes=0 + ua = backend.np.tensordot(v0, backend.np.conj(e), 0) + phase * backend.np.tensordot( + v1, backend.np.conj(e_), 0 ) + ub = backend.np.tensordot(v0, backend.np.conj(f), 0) + backend.np.conj( + phase + ) * backend.np.tensordot(v1, backend.np.conj(f_), 0) return ua, ub -def calculate_diagonal(unitary, ua, ub, va, vb): +def calculate_diagonal(unitary, ua, ub, va, vb, backend=None): """Calculates Ud matrix that can be written as exp(-iH). See Eq. (A1) in arXiv:quant-ph/0011050. Ud is diagonal in the magic and Bell basis. 
""" + backend = _check_backend(backend) # normalize U_A, U_B, V_A, V_B so that detU_d = 1 # this is required so that sum(lambdas) = 0 # and Ud can be written as exp(-iH) - det = np.linalg.det(unitary) ** (1 / 16) + det = backend.np.linalg.det(unitary) ** (1 / 16) ua *= det ub *= det va *= det vb *= det - u_dagger = np.transpose(np.conj(np.kron(ua, ub))) - v_dagger = np.transpose(np.conj(np.kron(va, vb))) + # Check behaviour of backend.np.kron + u_dagger = backend.np.transpose( + backend.np.conj( + backend.np.kron( + ua, + ub, + ) + ), + (1, 0), + ) + v_dagger = backend.np.transpose(backend.np.conj(backend.np.kron(va, vb)), (1, 0)) ud = u_dagger @ unitary @ v_dagger return ua, ub, ud, va, vb @@ -155,12 +171,17 @@ def calculate_diagonal(unitary, ua, ub, va, vb): def magic_decomposition(unitary, backend=None): """Decomposes an arbitrary unitary to (A1) from arXiv:quant-ph/0011050.""" backend = _check_backend(backend) + unitary = backend.cast(unitary) psi, eigvals = calculate_psi(unitary, backend=backend) - psi_tilde = np.conj(np.sqrt(eigvals)) * np.dot(unitary, psi) - va, vb = calculate_single_qubit_unitaries(psi) - ua_dagger, ub_dagger = calculate_single_qubit_unitaries(psi_tilde) - ua, ub = np.transpose(np.conj(ua_dagger)), np.transpose(np.conj(ub_dagger)) - return calculate_diagonal(unitary, ua, ub, va, vb) + psi_tilde = backend.np.conj(backend.np.sqrt(eigvals)) * backend.np.matmul( + unitary, psi + ) + va, vb = calculate_single_qubit_unitaries(psi, backend=backend) + ua_dagger, ub_dagger = calculate_single_qubit_unitaries(psi_tilde, backend=backend) + ua, ub = backend.np.transpose( + backend.np.conj(ua_dagger), (1, 0) + ), backend.np.transpose(backend.np.conj(ub_dagger), (1, 0)) + return calculate_diagonal(unitary, ua, ub, va, vb, backend=backend) def to_bell_diagonal(ud, bell_basis=bell_basis, backend=None): @@ -168,24 +189,27 @@ def to_bell_diagonal(ud, bell_basis=bell_basis, backend=None): backend = _check_backend(backend) ud = backend.cast(ud) - bell_basis = backend.cast(bell_basis, dtype=bell_basis.dtype) + bell_basis = backend.cast(bell_basis) - ud_bell = np.transpose(np.conj(bell_basis)) @ ud @ bell_basis - ud_diag = np.diag(ud_bell) - if not np.allclose(np.diag(ud_diag), ud_bell): # pragma: no cover + ud_bell = ( + backend.np.transpose(backend.np.conj(bell_basis), (1, 0)) @ ud @ bell_basis + ) + ud_diag = backend.np.diag(ud_bell) + if not np.allclose(backend.np.diag(ud_diag), ud_bell): # pragma: no cover return None - uprod = np.prod(ud_diag) + uprod = backend.np.prod(ud_diag) if not np.allclose(uprod, 1): # pragma: no cover return None return ud_diag -def calculate_h_vector(ud_diag): +def calculate_h_vector(ud_diag, backend=None): """Finds h parameters corresponding to exp(-iH). See Eq. (4)-(5) in arXiv:quant-ph/0307177. 
""" - lambdas = -np.angle(ud_diag) + backend = _check_backend(backend) + lambdas = -backend.np.angle(ud_diag) hx = (lambdas[0] + lambdas[2]) / 2.0 hy = (lambdas[1] + lambdas[2]) / 2.0 hz = (lambdas[0] + lambdas[1]) / 2.0 @@ -253,7 +277,7 @@ def two_qubit_decomposition(q0, q1, unitary, backend=None): u4, v4, ud, u1, v1 = magic_decomposition(unitary, backend=backend) ud_diag = to_bell_diagonal(ud, backend=backend) - hx, hy, hz = calculate_h_vector(ud_diag) + hx, hy, hz = calculate_h_vector(ud_diag, backend=backend) hx, hy, hz = float(hx), float(hy), float(hz) if np.allclose([hx, hy, hz], [0, 0, 0]): u4, v4, ud, u1, v1 = magic_decomposition(unitary, backend=backend) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index e19220d183..7e6c7723bb 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -37,8 +37,6 @@ def test_hamiltonian_init(backend): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_algebraic_operations(backend, dtype, sparse_type): """Test basic hamiltonian overloading.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") def transformation_a(a, b): c1 = dtype(0.1) @@ -52,7 +50,7 @@ def transformation_b(a, b): def transformation_c(a, b, use_eye=False): c1 = dtype(4.5) if use_eye: - return a + c1 * backend.matrices.I(a.shape[0]) - b + return a + c1 * backend.to_numpy(backend.matrices.I(a.shape[0])) - b else: return a + c1 - b @@ -60,7 +58,7 @@ def transformation_d(a, b, use_eye=False): c1 = dtype(10.5) c2 = dtype(2) if use_eye: - return c1 * backend.matrices.I(a.shape[0]) - a + c2 * b + return c1 * backend.to_numpy(backend.matrices.I(a.shape[0])) - a + c2 * b else: return c1 - a + c2 * b @@ -71,6 +69,8 @@ def transformation_d(a, b, use_eye=False): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") mH1 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) mH2 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) @@ -95,14 +95,14 @@ def transformation_d(a, b, use_eye=False): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_addition(backend, sparse_type): - if backend.name == "pytorch": - pytest.skip("Not implemented.") if sparse_type is None: H1 = hamiltonians.Y(nqubits=3, backend=backend) H2 = hamiltonians.TFIM(nqubits=3, h=1.0, backend=backend) else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") H1 = hamiltonians.Hamiltonian( 6, random_sparse_matrix(backend, 64, sparse_type=sparse_type), @@ -131,8 +131,6 @@ def test_hamiltonian_addition(backend, sparse_type): def test_hamiltonian_operation_errors(backend): """Testing hamiltonian not implemented errors.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, backend=backend) H2 = hamiltonians.XXZ(nqubits=2, delta=0.1, backend=backend) @@ -150,7 +148,7 @@ def test_hamiltonian_operation_errors(backend): def test_hamiltonian_matmul(backend, sparse_type): """Test matrix multiplication between Hamiltonians.""" if backend.name == "pytorch": - pytest.skip("Not implemented.") + pytest.skip("Pytorch does not support operations with sparse matrices.") if sparse_type is None: nqubits 
= 3 H1 = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) @@ -187,14 +185,14 @@ def test_hamiltonian_matmul(backend, sparse_type): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_matmul_states(backend, sparse_type): """Test matrix multiplication between Hamiltonian and states.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") if sparse_type is None: nqubits = 3 H = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") nqubits = 3 nstates = 2**nqubits matrix = random_sparse_matrix(backend, nstates, sparse_type) @@ -226,13 +224,13 @@ def test_hamiltonian_matmul_states(backend, sparse_type): ) def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): """Test Hamiltonian expectation value calculation.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") if sparse_type is None: h = hamiltonians.XXZ(nqubits=3, delta=0.5, dense=dense, backend=backend) else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") h = hamiltonians.Hamiltonian( 6, random_sparse_matrix(backend, 64, sparse_type), backend=backend ) @@ -253,8 +251,6 @@ def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): def test_hamiltonian_expectation_errors(backend): - if backend.name == "pytorch": - pytest.skip("Not implemented.") h = hamiltonians.XXZ(nqubits=3, delta=0.5, backend=backend) state = random_complex((4, 4, 4)) with pytest.raises(ValueError): @@ -265,8 +261,6 @@ def test_hamiltonian_expectation_errors(backend): def test_hamiltonian_expectation_from_samples(backend): """Test Hamiltonian expectation value calculation.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") backend.set_seed(12) obs0 = 2 * Z(0) * Z(1) + Z(0) * Z(2) obs1 = 2 * Z(0) * Z(1) + Z(0) * Z(2) * I(3) @@ -296,8 +290,6 @@ def test_hamiltonian_expectation_from_samples(backend): def test_hamiltonian_expectation_from_samples_errors(backend): - if backend.name == "pytorch": - pytest.skip("Not implemented.") obs = random_complex((4, 4)) h = hamiltonians.Hamiltonian(2, obs, backend=backend) with pytest.raises(NotImplementedError): @@ -318,13 +310,13 @@ def test_hamiltonian_expectation_from_samples_errors(backend): ) def test_hamiltonian_eigenvalues(backend, dtype, sparse_type, dense): """Testing hamiltonian eigenvalues scaling.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") if sparse_type is None: H1 = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse H1 = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -357,8 +349,6 @@ def test_hamiltonian_eigenvalues(backend, dtype, sparse_type, dense): @pytest.mark.parametrize("dense", [True, False]) def test_hamiltonian_eigenvectors(backend, dtype, dense): """Testing hamiltonian eigenvectors scaling.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") H1 = hamiltonians.XXZ(nqubits=2, 
delta=0.5, dense=dense, backend=backend) V1 = backend.to_numpy(H1.eigenvectors()) @@ -397,13 +387,13 @@ def test_hamiltonian_eigenvectors(backend, dtype, dense): ) def test_hamiltonian_ground_state(backend, sparse_type, dense): """Test Hamiltonian ground state.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") if sparse_type is None: H = hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse H = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -428,9 +418,6 @@ def test_hamiltonian_exponentiation(backend, sparse_type, dense): """Test matrix exponentiation of Hamiltonians ``exp(1j * t * H)``.""" from scipy.linalg import expm - if backend.name == "pytorch": - pytest.skip("Not implemented.") - def construct_hamiltonian(): if sparse_type is None: return hamiltonians.XXZ(nqubits=2, delta=0.5, dense=dense, backend=backend) @@ -439,6 +426,8 @@ def construct_hamiltonian(): pytest.skip( "Tensorflow does not support operations with sparse matrices." ) + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse ham = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -456,8 +445,6 @@ def construct_hamiltonian(): def test_hamiltonian_energy_fluctuation(backend): """Test energy fluctuation.""" - if backend.name == "pytorch": - pytest.skip("Not implemented.") # define hamiltonian ham = hamiltonians.XXZ(nqubits=2, backend=backend) # take ground state and zero state diff --git a/tests/test_measurements_probabilistic.py b/tests/test_measurements_probabilistic.py index 2969531fd3..8f56384c0e 100644 --- a/tests/test_measurements_probabilistic.py +++ b/tests/test_measurements_probabilistic.py @@ -113,7 +113,6 @@ def test_post_measurement_bitflips_on_circuit(backend, accelerators, i, probs): c.add(gates.M(3, p0=probs[2])) result = backend.execute_circuit(c, nshots=30) freqs = result.frequencies(binary=False) - print(freqs) targets = backend.test_regressions("test_post_measurement_bitflips_on_circuit") assert freqs == targets[i] diff --git a/tests/test_transpiler_unitary_decompositions.py b/tests/test_transpiler_unitary_decompositions.py index 9d1bde2f3e..1efcc023d8 100644 --- a/tests/test_transpiler_unitary_decompositions.py +++ b/tests/test_transpiler_unitary_decompositions.py @@ -107,14 +107,16 @@ def test_ud_eigenvalues(backend, seed): magic_decomposition(unitary, backend=backend) else: ua, ub, ud, va, vb = magic_decomposition(unitary, backend=backend) - - unitary_recon = np.kron(ua, ub) @ ud @ np.kron(va, vb) + # Check kron + unitary_recon = backend.np.kron(ua, ub) @ ud @ backend.np.kron(va, vb) backend.assert_allclose(unitary_recon, unitary) - ud_bell = np.transpose(np.conj(bell_basis)) @ ud @ bell_basis - ud_diag = np.diag(ud_bell) - backend.assert_allclose(np.diag(ud_diag), ud_bell, atol=PRECISION_TOL) - backend.assert_allclose(np.prod(ud_diag), 1) + ud_bell = ( + backend.np.transpose(backend.np.conj(bell_basis), (1, 0)) @ ud @ bell_basis + ) + ud_diag = backend.np.diag(ud_bell) + backend.assert_allclose(backend.np.diag(ud_diag), ud_bell, atol=PRECISION_TOL) + backend.assert_allclose(backend.np.prod(ud_diag), 1) @pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) From ae55a4f1df012d75aa6b9d9fd04b8223f0966024 Mon Sep 
17 00:00:00 2001 From: simone bordoni Date: Fri, 1 Mar 2024 16:46:24 +0400 Subject: [PATCH 055/127] solved problem with torch.dot --- src/qibo/backends/pytorch.py | 1 - src/qibo/gates/gates.py | 6 +- src/qibo/transpiler/unitary_decompositions.py | 55 +++++++++++-------- .../test_transpiler_unitary_decompositions.py | 8 ++- 4 files changed, 41 insertions(+), 29 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 53044a344a..7ae620b47b 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -3,7 +3,6 @@ from typing import Union import numpy as np -import scipy import torch from qibo import __version__ diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index f68fb58fec..5cf180c59d 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -2334,11 +2334,15 @@ def __init__( diag_function = torch.diag all_function = torch.all + conj_function = torch.conj + transpose_function = torch.transpose else: diag_function = np.diag all_function = np.all + conj_function = np.conj + transpose_function = np.transpose - product = np.transpose(np.conj(unitary)) @ unitary + product = transpose_function(conj_function(unitary), (1, 0)) @ unitary diagonals = all(np.abs(1 - diag_function(product)) < PRECISION_TOL) off_diagonals = bool( all_function( diff --git a/src/qibo/transpiler/unitary_decompositions.py b/src/qibo/transpiler/unitary_decompositions.py index 655af14fcc..538b7f034d 100644 --- a/src/qibo/transpiler/unitary_decompositions.py +++ b/src/qibo/transpiler/unitary_decompositions.py @@ -125,7 +125,11 @@ def calculate_single_qubit_unitaries(psi, backend=None): e_, f_ = schmidt_decompose(e_f_, backend=backend) # find exp(1j * delta) using (A5a) ef_ = backend.np.kron(e, f_) - phase = 1j * np.sqrt(2) * backend.np.dot(backend.np.conj(ef_), psi_bar[2]) + phase = ( + 1j + * np.sqrt(2) + * backend.np.sum(backend.np.multiply(backend.np.conj(ef_), psi_bar[2])) + ) v0 = backend.cast(np.asarray([1, 0])) v1 = backend.cast(np.asarray([0, 1])) # construct unitaries UA, UB using (A6a), (A6b) @@ -153,7 +157,6 @@ def calculate_diagonal(unitary, ua, ub, va, vb, backend=None): ub *= det va *= det vb *= det - # Check behaviour of backend.np.kron u_dagger = backend.np.transpose( backend.np.conj( backend.np.kron( @@ -216,45 +219,49 @@ def calculate_h_vector(ud_diag, backend=None): return hx, hy, hz -def cnot_decomposition(q0, q1, hx, hy, hz): +def cnot_decomposition(q0, q1, hx, hy, hz, backend=None): """Performs decomposition (6) from arXiv:quant-ph/0307177.""" - u3 = -1j * matrices.H + backend = _check_backend(backend) + h = backend.cast(H) + u3 = backend.cast(-1j * matrices.H) # use corrected version from PRA paper (not arXiv) - u2 = -u3 @ gates.RX(0, 2 * hx - np.pi / 2).matrix(NumpyBackend()) + u2 = -u3 @ gates.RX(0, 2 * hx - np.pi / 2).matrix(backend) # add an extra exp(-i pi / 4) global phase to get exact match - v2 = np.exp(-1j * np.pi / 4) * gates.RZ(0, 2 * hz).matrix(NumpyBackend()) - v3 = gates.RZ(0, -2 * hy).matrix(NumpyBackend()) - w = (matrices.I - 1j * matrices.X) / np.sqrt(2) + v2 = np.exp(-1j * np.pi / 4) * gates.RZ(0, 2 * hz).matrix(backend) + v3 = gates.RZ(0, -2 * hy).matrix(backend) + w = backend.cast((matrices.I - 1j * matrices.X) / np.sqrt(2)) # change CNOT to CZ using Hadamard gates return [ gates.H(q1), gates.CZ(q0, q1), gates.Unitary(u2, q0), - gates.Unitary(H @ v2 @ H, q1), + gates.Unitary(h @ v2 @ h, q1), gates.CZ(q0, q1), gates.Unitary(u3, q0), - gates.Unitary(H @ v3 @ H, q1), + gates.Unitary(h @ v3 @ h, q1), 
gates.CZ(q0, q1), gates.Unitary(w, q0), - gates.Unitary(np.conj(w).T @ H, q1), + gates.Unitary(backend.np.conj(w).T @ h, q1), ] -def cnot_decomposition_light(q0, q1, hx, hy): +def cnot_decomposition_light(q0, q1, hx, hy, backend=None): """Performs decomposition (24) from arXiv:quant-ph/0307177.""" - w = (matrices.I - 1j * matrices.X) / np.sqrt(2) - u2 = gates.RX(0, 2 * hx).matrix(NumpyBackend()) - v2 = gates.RZ(0, -2 * hy).matrix(NumpyBackend()) + backend = _check_backend(backend) + h = backend.cast(H) + w = backend.cast((matrices.I - 1j * matrices.X) / np.sqrt(2)) + u2 = gates.RX(0, 2 * hx).matrix(backend) + v2 = gates.RZ(0, -2 * hy).matrix(backend) # change CNOT to CZ using Hadamard gates return [ - gates.Unitary(np.conj(w).T, q0), - gates.Unitary(H @ w, q1), + gates.Unitary(backend.np.conj(w).T, q0), + gates.Unitary(h @ w, q1), gates.CZ(q0, q1), gates.Unitary(u2, q0), - gates.Unitary(H @ v2 @ H, q1), + gates.Unitary(h @ v2 @ h, q1), gates.CZ(q0, q1), gates.Unitary(w, q0), - gates.Unitary(np.conj(w).T @ H, q1), + gates.Unitary(backend.np.conj(w).T @ h, q1), ] @@ -283,25 +290,25 @@ def two_qubit_decomposition(q0, q1, unitary, backend=None): u4, v4, ud, u1, v1 = magic_decomposition(unitary, backend=backend) gatelist = [gates.Unitary(u4 @ u1, q0), gates.Unitary(v4 @ v1, q1)] elif np.allclose(hz, 0): - gatelist = cnot_decomposition_light(q0, q1, hx, hy) + gatelist = cnot_decomposition_light(q0, q1, hx, hy, backend=backend) if ud is None: return gatelist g0, g1 = gatelist[:2] - gatelist[0] = gates.Unitary(g0.parameters[0] @ u1, q0) - gatelist[1] = gates.Unitary(g1.parameters[0] @ v1, q1) + gatelist[0] = gates.Unitary(backend.cast(g0.parameters[0]) @ u1, q0) + gatelist[1] = gates.Unitary(backend.cast(g1.parameters[0]) @ v1, q1) g0, g1 = gatelist[-2:] gatelist[-2] = gates.Unitary(u4 @ g0.parameters[0], q0) gatelist[-1] = gates.Unitary(v4 @ g1.parameters[0], q1) else: - cnot_dec = cnot_decomposition(q0, q1, hx, hy, hz) + cnot_dec = cnot_decomposition(q0, q1, hx, hy, hz, backend=backend) if ud is None: return cnot_dec gatelist = [ gates.Unitary(u1, q0), - gates.Unitary(H @ v1, q1), + gates.Unitary(backend.cast(H) @ v1, q1), ] gatelist.extend(cnot_dec[1:]) g0, g1 = gatelist[-2:] diff --git a/tests/test_transpiler_unitary_decompositions.py b/tests/test_transpiler_unitary_decompositions.py index 1efcc023d8..c72591c72c 100644 --- a/tests/test_transpiler_unitary_decompositions.py +++ b/tests/test_transpiler_unitary_decompositions.py @@ -112,7 +112,9 @@ def test_ud_eigenvalues(backend, seed): backend.assert_allclose(unitary_recon, unitary) ud_bell = ( - backend.np.transpose(backend.np.conj(bell_basis), (1, 0)) @ ud @ bell_basis + backend.np.transpose(backend.np.conj(backend.cast(bell_basis)), (1, 0)) + @ ud + @ backend.cast(bell_basis) ) ud_diag = backend.np.diag(ud_bell) backend.assert_allclose(backend.np.diag(ud_diag), ud_bell, atol=PRECISION_TOL) @@ -191,9 +193,9 @@ def test_two_qubit_decomposition_bell_unitary(backend, hz_zero): hx, hy, hz = (2 * np.random.random(3) - 1) * np.pi if hz_zero: hz = 0 - unitary = bell_unitary(hx, hy, hz) + unitary = backend.cast(bell_unitary(hx, hy, hz)) c = Circuit(2) - c.add(two_qubit_decomposition(0, 1, unitary)) + c.add(two_qubit_decomposition(0, 1, unitary, backend=backend)) final_matrix = c.unitary(backend) backend.assert_allclose(final_matrix, unitary, atol=PRECISION_TOL) From 428d6575ae80cef94da23d6fe2a6d522fd55da03 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Fri, 1 Mar 2024 17:35:50 +0400 Subject: [PATCH 056/127] solved all errors --- 
src/qibo/backends/numpy.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 80c69dd17f..f7bea95c85 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -115,30 +115,30 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = self.np.eye(2**rank, dtype=self.dtype) + matrix = np.eye(2**rank, dtype=np.complex128) for gate in fgate.gates: # transfer gate matrix to numpy as it is more efficient for # small tensor calculations # explicit to_numpy see https://github.com/qiboteam/qibo/issues/928 - gmatrix = self.cast(gate.matrix(self)) + gmatrix = self.to_numpy(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = self.np.eye(2 ** (rank - len(gate.qubits)), dtype=self.dtype) - gmatrix = self.np.kron(gmatrix, eye) + eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=np.complex128) + gmatrix = np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate original_shape = gmatrix.shape - gmatrix = self.np.reshape(gmatrix, 2 * rank * (2,)) + gmatrix = np.reshape(gmatrix, 2 * rank * (2,)) qubits = list(gate.qubits) indices = qubits + [q for q in fgate.target_qubits if q not in qubits] indices = np.argsort(indices) transpose_indices = list(indices) transpose_indices.extend(indices + rank) - gmatrix = self.np.transpose(gmatrix, transpose_indices) - gmatrix = self.np.reshape(gmatrix, original_shape) + gmatrix = np.transpose(gmatrix, transpose_indices) + gmatrix = np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return matrix + return self.cast(matrix) def control_matrix(self, gate): if len(gate.control_qubits) > 1: From 4f9fc2a261c8d9946b6ef7b64bc8eab6d1518528 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 09:52:57 +0400 Subject: [PATCH 057/127] merge --- tests/test_backends_clifford.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 81ab69d9fd..baaf716420 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -21,14 +21,16 @@ def construct_clifford_backend(backend): - if isinstance(backend, (TensorflowBackend, PyTorchBackend)) or backend.__class__.__name__ == "CuQuantumBackend": + if ( + isinstance(backend, (TensorflowBackend, PyTorchBackend)) + or backend.__class__.__name__ == "CuQuantumBackend" + ): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend.name) pytest.skip("Clifford backend not defined for the this engine.") return CliffordBackend(_get_engine_name(backend)) - THETAS_1Q = [ th + 2 * i * np.pi for i in range(2) for th in [0, np.pi / 2, np.pi, 3 * np.pi / 2] ] From e9370a9e4860bb2a9db2d02b4bc5224c1c489877 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 05:53:30 +0000 Subject: [PATCH 058/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/qibo/backends/clifford.py | 4 +++- tests/test_backends_clifford.py | 3 ++- tests/test_models_circuit_features.py | 4 ++-- tests/test_quantum_info_clifford.py | 5 +++-- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/qibo/backends/clifford.py 
b/src/qibo/backends/clifford.py index 2a7c562a03..85db21617f 100644 --- a/src/qibo/backends/clifford.py +++ b/src/qibo/backends/clifford.py @@ -48,7 +48,9 @@ def __init__(self, engine=None): for method in dir(clifford_operations_cpu): setattr(self.engine, method, getattr(clifford_operations_cpu, method)) elif engine == "cupy": # pragma: no cover - from qibojit.backends import clifford_operations_gpu # pylint: disable=C0415 + from qibojit.backends import ( # pylint: disable=C0415 + clifford_operations_gpu, + ) for method in dir(clifford_operations_gpu): setattr(self.engine, method, getattr(clifford_operations_gpu, method)) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index baaf716420..c9d96ac2c7 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -22,7 +22,7 @@ def construct_clifford_backend(backend): if ( - isinstance(backend, (TensorflowBackend, PyTorchBackend)) + isinstance(backend, (TensorflowBackend, PyTorchBackend)) or backend.__class__.__name__ == "CuQuantumBackend" ): with pytest.raises(NotImplementedError): @@ -31,6 +31,7 @@ def construct_clifford_backend(backend): return CliffordBackend(_get_engine_name(backend)) + THETAS_1Q = [ th + 2 * i * np.pi for i in range(2) for th in [0, np.pi / 2, np.pi, 3 * np.pi / 2] ] diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index 446787c549..e947aa4113 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -324,8 +324,8 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): # Tensorflow seems to yield different results with same seed if backend.__class__.__name__ == "TensorflowBackend": test_frequencies = ( - Counter({"1": 844, "0": 180}) - if nqubits == 1 + Counter({"1": 844, "0": 180}) + if nqubits == 1 else Counter({"11": 674, "10": 155, "01": 154, "00": 41}) ) elif backend.__class__.__name__ == "PyTorchBackend": diff --git a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index 697f3b9d59..93d4e4d5be 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -18,7 +18,7 @@ def construct_clifford_backend(backend): if ( - isinstance(backend, (TensorflowBackend, PyTorchBackend)) + isinstance(backend, (TensorflowBackend, PyTorchBackend)) or backend.__class__.__name__ == "CuQuantumBackend" ): with pytest.raises(NotImplementedError): @@ -27,6 +27,7 @@ def construct_clifford_backend(backend): return CliffordBackend(_get_engine_name(backend)) + @pytest.mark.parametrize("nqubits", [2, 10, 50, 100]) def test_clifford_from_symplectic_matrix(backend, nqubits): clifford_backend = construct_clifford_backend(backend) @@ -313,7 +314,7 @@ def test_clifford_samples_frequencies(backend, binary): def test_clifford_samples_error(backend): clifford_backend = construct_clifford_backend(backend) - + c = random_clifford(1, backend=backend) obj = Clifford.from_circuit(c, engine=backend) with pytest.raises(RuntimeError) as excinfo: From 3578c2c192365ed77cd14bcfb96dec3d0eba3794 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 15:27:55 +0400 Subject: [PATCH 059/127] fix merge issue --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 224fb924c0..71263d5f2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -67,8 +67,9 @@ dill = "^0.3.6" pytest-cov = "^4.0.0" pylint = "^3.0.3" matplotlib = "^3.7.0" -qibojit = { git = "https://github.com/qiboteam/qibojit.git" } 
tensorflow = { version = "^2.14.1,<2.16", markers = "sys_platform == 'linux'" } +torch = "^2.1.1" +qibojit = { git = "https://github.com/qiboteam/qibojit.git" } qibotn = { git = "https://github.com/qiboteam/qibotn.git" } [tool.poe.tasks] From 25331571feb3cdd060701a180fef9d8c5bda9bad Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 15:37:30 +0400 Subject: [PATCH 060/127] update poetry --- poetry.lock | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index f547909799..96d6ade061 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "absl-py" @@ -902,6 +902,7 @@ python-versions = "*" files = [ {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:c5598670f4f31906d725f5ea852f0df675522e3ff5a7bf886057eab36497062d"}, {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:67b6c7427d9ab50cb82e01360948bd1b23d73775b5767ab92071c7afcfec4b8b"}, + {file = "cutensor_cu11-1.7.0-py3-none-win_amd64.whl", hash = "sha256:d173b3d0fd51cf761b371a4d4be9a3afd3ef230a55ae4336ae31e905336480e1"}, ] [[package]] @@ -913,6 +914,7 @@ python-versions = "*" files = [ {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:515caa2406e09ffe9c6524328b7da2106169811665f7684836052753a30dda27"}, {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:29bdde551788fd3a611992026a5bb422831069e38fd44ab920af5aa00cffa12c"}, + {file = "cutensor_cu12-1.7.0-py3-none-win_amd64.whl", hash = "sha256:e1a9a759a615a64d1b8c6d2b8ffd925deb805750c28481b1a8310d05f35ce229"}, ] [[package]] @@ -1568,11 +1570,11 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" @@ -2301,6 +2303,8 @@ description = "Clang Python Bindings, mirrored from the official LLVM repo: http optional = false python-versions = "*" files = [ + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = 
"sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -2542,9 +2546,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">1.20", markers = "python_version <= \"3.9\""}, {version = ">=1.23.3", markers = "python_version > \"3.10\""}, - {version = ">=1.21.2", markers = "python_version > \"3.9\""}, + {version = ">=1.21.2", markers = "python_version > \"3.9\" and python_version <= \"3.10\""}, + {version = ">1.20", markers = "python_version <= \"3.9\""}, ] [package.extras] @@ -2630,6 +2634,7 @@ files = [ {file = "msgpack-1.0.8-cp39-cp39-win32.whl", hash = "sha256:f9af38a89b6a5c04b7d18c492c8ccf2aee7048aff1ce8437c4683bb5a1df893d"}, {file = "msgpack-1.0.8-cp39-cp39-win_amd64.whl", hash = "sha256:ed59dd52075f8fc91da6053b12e8c89e37aa043f8986efd89e61fae69dc1b011"}, {file = "msgpack-1.0.8-py3-none-any.whl", hash = "sha256:24f727df1e20b9876fa6e95f840a2a2651e34c0ad147676356f4bf5fbb0206ca"}, + {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, ] [[package]] @@ -3921,6 +3926,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3928,8 +3934,16 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = 
"PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3946,6 +3960,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3953,6 +3968,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -5366,6 +5382,16 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = 
"sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, + {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, + {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, + {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, + {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, + {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -5431,4 +5457,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "0b472a931fa1b4631a6ba4549b02bda9ec9a6f0d5a7fff28c52d97263a1499ef" +content-hash = "601a255f88f2e86e9c4e056077e826c72c4de20c2618d6981b39c6842eafcb5b" From a555f6ca6ca88652f048680d4f5fef9e1cbe7d40 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 16:00:40 +0400 Subject: [PATCH 061/127] fix test --- tests/test_models_circuit_features.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index e947aa4113..b440736fe8 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -330,9 +330,9 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): ) elif backend.__class__.__name__ == "PyTorchBackend": test_frequencies = ( - Counter({"1": 810, "0": 214}) + Counter({"1": 817, "0": 207}) if nqubits == 1 - else Counter({"11": 685, "01": 160, "10": 144, "00": 35}) + else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) ) else: test_frequencies = ( @@ -341,6 +341,5 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): else Counter({"11": 618, "10": 169, "01": 185, "00": 52}) ) - 
print(result.frequencies()) for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) From 0e973536a2959f4a8c8f7b6677d19c0b08505e62 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 16:57:45 +0400 Subject: [PATCH 062/127] fix clifford test --- tests/test_quantum_info_clifford.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index 93d4e4d5be..9166d3ad0c 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -33,8 +33,8 @@ def test_clifford_from_symplectic_matrix(backend, nqubits): clifford_backend = construct_clifford_backend(backend) symplectic_matrix = clifford_backend.zero_state(nqubits) - clifford_1 = Clifford(symplectic_matrix, engine=backend) - clifford_2 = Clifford(symplectic_matrix[:-1], engine=backend) + clifford_1 = Clifford(symplectic_matrix, engine=_get_engine_name(backend)) + clifford_2 = Clifford(symplectic_matrix[:-1], engine=_get_engine_name(backend)) for clifford in [clifford_1, clifford_2]: backend.assert_allclose( @@ -316,7 +316,7 @@ def test_clifford_samples_error(backend): clifford_backend = construct_clifford_backend(backend) c = random_clifford(1, backend=backend) - obj = Clifford.from_circuit(c, engine=backend) + obj = Clifford.from_circuit(c, engine=_get_engine_name(backend)) with pytest.raises(RuntimeError) as excinfo: obj.samples() assert str(excinfo.value) == "No measurement provided." From f8a1faa43e5a4f4b1afc28c55e21570435c99578 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 2 Mar 2024 17:42:51 +0400 Subject: [PATCH 063/127] flip test order --- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index c6fae6298c..8ac656cd13 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,8 +13,8 @@ # backends to be tested BACKENDS = [ "numpy", - "pytorch", "tensorflow", + "pytorch", "qibojit-numba", "qibojit-cupy", "qibojit-cuquantum", From 5676ebbaa5351bd1078c6a7dfd95a1505554de9a Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sun, 3 Mar 2024 09:05:40 +0400 Subject: [PATCH 064/127] fix test seed --- tests/test_backends_clifford.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index c9d96ac2c7..53b4f0900a 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -235,19 +235,22 @@ def test_set_backend(backend): def test_noise_channels(backend): + backend.set_seed(2024) clifford_bkd = construct_clifford_backend(backend) - nqubits = 3 - c = random_clifford(nqubits, backend=backend) - c.density_matrix = True - c_copy = c.copy() - c.add(gates.M(*range(nqubits))) - c_copy.add(gates.M(*range(nqubits))) noise = NoiseModel() noise.add(PauliError([("X", 0.5)]), gates.X) noise.add(DepolarizingError(0.1), gates.CZ) + + nqubits = 3 + + c = random_clifford(nqubits, density_matrix=True, backend=backend) + c.add(gates.M(*range(nqubits))) + c_copy = c.copy() + c = noise.apply(c) c_copy = noise.apply(c_copy) + numpy_result = numpy_bkd.execute_circuit(c) clifford_result = clifford_bkd.execute_circuit(c_copy) From ba8a67a13d72b51cdf6ed17398cd3a1492423e6f Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 5 Mar 2024 12:15:48 +0000 Subject: [PATCH 065/127] Update src/qibo/quantum_info/utils.py --- src/qibo/quantum_info/utils.py | 2 +- 1 file changed, 1 insertion(+), 
1 deletion(-) diff --git a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index e1750a725b..f66f0facde 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -237,7 +237,7 @@ def hellinger_distance(prob_dist_p, prob_dist_q, validate: bool = False, backend ValueError, "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(backend.to_numpy(prob_dist_p)) - 1.0) > PRECISION_TOL: + if backend.np.abs(backend.np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") if np.abs(np.sum(backend.to_numpy(prob_dist_q)) - 1.0) > PRECISION_TOL: From 2a7c7ca79597c83200ac3f180f03ecbc5643adf7 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 5 Mar 2024 12:16:26 +0000 Subject: [PATCH 066/127] Update src/qibo/quantum_info/utils.py --- src/qibo/quantum_info/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index f66f0facde..13df6781d8 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -240,7 +240,7 @@ def hellinger_distance(prob_dist_p, prob_dist_q, validate: bool = False, backend if backend.np.abs(backend.np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(backend.to_numpy(prob_dist_q)) - 1.0) > PRECISION_TOL: + if backend.np.abs(backend.np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") distance = float( From 18fd85493502c258d9051654c39aa8df5a9e16b3 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 5 Mar 2024 18:10:40 +0400 Subject: [PATCH 067/127] fix test --- tests/test_quantum_info_utils.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/test_quantum_info_utils.py b/tests/test_quantum_info_utils.py index 0d845cf2f3..7a2d9c1a3d 100644 --- a/tests/test_quantum_info_utils.py +++ b/tests/test_quantum_info_utils.py @@ -167,8 +167,16 @@ def test_hellinger(backend, validate, kind): backend.calculate_norm(np.sqrt(prob_p) - np.sqrt(prob_q)) / np.sqrt(2) ) - if kind is not None: - prob_p, prob_q = list(prob_p), list(prob_q) + prob_p = ( + kind(prob_p) + if kind is not None + else backend.cast(prob_p, dtype=prob_p.dtype) + ) + prob_q = ( + kind(prob_q) + if kind is not None + else backend.cast(prob_q, dtype=prob_q.dtype) + ) distance = hellinger_distance(prob_p, prob_q, validate=validate, backend=backend) fidelity = hellinger_fidelity(prob_p, prob_q, validate=validate, backend=backend) From 25ba22dda969f6dd199efb694deb95927e391b37 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:11:05 +0000 Subject: [PATCH 068/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/test_quantum_info_utils.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/test_quantum_info_utils.py b/tests/test_quantum_info_utils.py index 7a2d9c1a3d..bc93c230cf 100644 --- a/tests/test_quantum_info_utils.py +++ b/tests/test_quantum_info_utils.py @@ -168,14 +168,10 @@ def test_hellinger(backend, validate, kind): ) prob_p = ( - kind(prob_p) - if kind is not None - else backend.cast(prob_p, dtype=prob_p.dtype) + kind(prob_p) if kind is not None else backend.cast(prob_p, dtype=prob_p.dtype) ) prob_q = ( - kind(prob_q) - if kind is not None - else 
backend.cast(prob_q, dtype=prob_q.dtype) + kind(prob_q) if kind is not None else backend.cast(prob_q, dtype=prob_q.dtype) ) distance = hellinger_distance(prob_p, prob_q, validate=validate, backend=backend) From 3157dbbd5f4713d6d2bf2159b1ffb857244d6708 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 5 Mar 2024 18:20:20 +0400 Subject: [PATCH 069/127] corrections --- src/qibo/backends/numpy.py | 44 +++++++++------------------ src/qibo/backends/pytorch.py | 25 +++++++++++++-- src/qibo/hamiltonians/hamiltonians.py | 11 +++---- 3 files changed, 42 insertions(+), 38 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index f7bea95c85..873d131d1c 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -115,30 +115,29 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = np.eye(2**rank, dtype=np.complex128) + matrix = self.identity_density_matrix(2**rank, normalize=False) for gate in fgate.gates: - # transfer gate matrix to numpy as it is more efficient for - # small tensor calculations - # explicit to_numpy see https://github.com/qiboteam/qibo/issues/928 - gmatrix = self.to_numpy(gate.matrix(self)) + gmatrix = self.cast(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = np.eye(2 ** (rank - len(gate.qubits)), dtype=np.complex128) - gmatrix = np.kron(gmatrix, eye) + eye = self.identity_density_matrix( + 2 ** (rank - len(gate.qubits)), normalize=False + ) + gmatrix = self.np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate original_shape = gmatrix.shape - gmatrix = np.reshape(gmatrix, 2 * rank * (2,)) + gmatrix = self.np.reshape(gmatrix, 2 * rank * (2,)) qubits = list(gate.qubits) indices = qubits + [q for q in fgate.target_qubits if q not in qubits] indices = np.argsort(indices) transpose_indices = list(indices) transpose_indices.extend(indices + rank) - gmatrix = np.transpose(gmatrix, transpose_indices) - gmatrix = np.reshape(gmatrix, original_shape) + gmatrix = self.np.transpose(gmatrix, transpose_indices) + gmatrix = self.np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return self.cast(matrix) + return matrix def control_matrix(self, gate): if len(gate.control_qubits) > 1: @@ -524,7 +523,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): sample = result.samples()[0] results.append(sample) if not circuit.density_matrix: - samples.append("".join([str(s) for s in self.to_numpy(sample)])) + samples.append("".join([str(int(s)) for s in sample])) for gate in circuit.measurements: gate.result.reset() @@ -594,13 +593,8 @@ def calculate_symbolic_density_matrix( return terms return terms - def dimensions(self, x): - return x.ndim - def _order_probabilities(self, probs, qubits, nqubits): """Arrange probabilities according to the given ``qubits`` ordering.""" - if self.dimensions(probs) == 0: - return probs unmeasured, reduced = [], {} for i in range(nqubits): if i in qubits: @@ -613,15 +607,10 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = self.np.real(state).dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - # This is necessary to use the same function on pytorch backend - if len(unmeasured_qubits) == 0: - probs = 
self.cast(state, dtype=rtype) - else: - probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).ravel() def calculate_probabilities_density_matrix(self, state, qubits, nqubits): - state = self.cast(state) order = tuple(sorted(qubits)) order += tuple(i for i in range(nqubits) if i not in qubits) order = order + tuple(i + nqubits for i in order) @@ -654,7 +643,7 @@ def samples_to_decimal(self, samples, nqubits): def calculate_frequencies(self, samples): res, counts = np.unique(samples, return_counts=True) - return collections.Counter({k: v for k, v in zip(res, counts)}) + return collections.Counter(dict(zip(res, counts))) def update_frequencies(self, frequencies, probabilities, nsamples): samples = self.sample_shots(probabilities, nsamples) @@ -755,16 +744,14 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): - state = self.cast(state) statec = self.np.conj(state) - hstate = self.cast(hamiltonian @ state) + hstate = hamiltonian @ state ev = self.np.real(self.np.sum(statec * hstate)) if normalize: - ev = ev / self.np.sum(self.np.square(self.np.abs(state))) + ev /= self.np.sum(self.np.square(self.np.abs(state))) return ev def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): - state = self.cast(state) ev = self.np.real(self.np.trace(self.cast(hamiltonian @ state))) if normalize: norm = self.np.real(self.np.trace(state)) @@ -776,7 +763,6 @@ def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): def calculate_hamiltonian_state_product(self, matrix, state): rank = len(tuple(state.shape)) - state = self.cast(state) if rank == 1: # vector return matrix.dot(state[:, np.newaxis])[:, 0] elif rank == 2: # matrix diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 7ae620b47b..a712866b82 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -145,9 +145,6 @@ def samples_to_binary(self, samples, nqubits): samples = samples[:, None] >> qrange return samples % 2 - def dimensions(self, x): - return x.dim() - def calculate_norm(self, state, order=2): state = self.cast(state) return self.np.norm(state, p=order) @@ -196,6 +193,28 @@ def _append_zeros(self, state, qubits, results): state = self.np.cat([state, self.np.zeros_like(state)], dim=q) return state + def calculate_probabilities(self, state, qubits, nqubits): + rtype = self.np.real(state).dtype + unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) + state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) + if len(unmeasured_qubits) == 0: + probs = self.cast(state, dtype=rtype) + else: + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + return self._order_probabilities(probs, qubits, nqubits).ravel() + + def _order_probabilities(self, probs, qubits, nqubits): + """Arrange probabilities according to the given ``qubits`` ordering.""" + if probs.dim() == 0: + return probs + unmeasured, reduced = [], {} + for i in range(nqubits): + if i in qubits: + reduced[i] = i - len(unmeasured) + else: + unmeasured.append(i) + return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ diff --git
a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 578dd2648d..aa5671fb95 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -248,12 +248,11 @@ def __mul__(self, o): if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - if self.backend.__class__.__name__ == "PyTorchBackend": - import torch - - r._eigenvalues = o * torch.flip(self._eigenvalues, [0]) - else: - r._eigenvalues = o * self._eigenvalues[::-1] + r._eigenvalues = ( + o * self.backend.np.flip(self._eigenvalues, [0]) + if isinstance(self.backend, PyTorchBackend) + else o * self._eigenvalues[::-1] + ) if self._eigenvectors is not None: if self.backend.np.real(o) > 0: # TODO: see above r._eigenvectors = self._eigenvectors From f0b0fcf8ed241f2784efa5a8108b47cddb5d1364 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 5 Mar 2024 18:32:40 +0400 Subject: [PATCH 070/127] solved error --- src/qibo/backends/numpy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 873d131d1c..c503298249 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -115,13 +115,13 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = self.identity_density_matrix(2**rank, normalize=False) + matrix = self.identity_density_matrix(rank, normalize=False) for gate in fgate.gates: gmatrix = self.cast(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) eye = self.identity_density_matrix( - 2 ** (rank - len(gate.qubits)), normalize=False + (rank - len(gate.qubits)), normalize=False ) gmatrix = self.np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the From ee1a76de697ac42b528b0669caedc2fbe3cf537a Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 5 Mar 2024 18:43:11 +0400 Subject: [PATCH 071/127] fix test seed --- tests/test_backends_clifford.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 53b4f0900a..42c3904695 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -235,16 +235,18 @@ def test_set_backend(backend): def test_noise_channels(backend): - backend.set_seed(2024) clifford_bkd = construct_clifford_backend(backend) + backend.set_seed(seed) + clifford_bkd.set_seed(seed) + noise = NoiseModel() noise.add(PauliError([("X", 0.5)]), gates.X) noise.add(DepolarizingError(0.1), gates.CZ) nqubits = 3 - c = random_clifford(nqubits, density_matrix=True, backend=backend) + c = random_clifford(nqubits, density_matrix=True, seed=seed, backend=backend) c.add(gates.M(*range(nqubits))) c_copy = c.copy() c = noise.apply(c) c_copy = noise.apply(c_copy) numpy_result = numpy_bkd.execute_circuit(c) clifford_result = clifford_bkd.execute_circuit(c_copy) From 4c10f60da5e8918d19b5c386f4195cf5fd154750 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 5 Mar 2024 18:43:47 +0400 Subject: [PATCH 072/127] fix test --- tests/test_backends_clifford.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 42c3904695..d93e860ec0 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -234,7 +234,8 @@ def test_set_backend(backend): assert global_platform == platform -def test_noise_channels(backend): +@pytest.mark.parametrize("seed", [2024]) +def
test_noise_channels(backend, seed): clifford_bkd = construct_clifford_backend(backend) backend.set_seed(seed) From 8d117cecf586aba0627015a184e3a0bca06d0b00 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 13:15:52 +0400 Subject: [PATCH 073/127] revert conj --- src/qibo/backends/numpy.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index c503298249..5b5f0d7910 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -193,7 +193,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): matrix = gate.matrix(self) if gate.is_controlled_by: matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - matrixc = self.np.conj(matrix) + matrixc = np.conj(matrix) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol n = 2**ncontrol @@ -226,7 +226,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state = self.np.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) - matrixc = self.np.conj(matrix) + matrixc = np.conj(matrix) left, right = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) @@ -678,7 +678,7 @@ def partial_trace(self, state, qubits, nqubits): state = self.cast(state) state = self.np.reshape(state, nqubits * (2,)) axes = 2 * [list(qubits)] - rho = self.np.tensordot(state, self.np.conj(state), axes) + rho = self.np.tensordot(state, np.conj(state), axes) shape = 2 * (2 ** (nqubits - len(qubits)),) return self.np.reshape(rho, shape) @@ -704,14 +704,12 @@ def calculate_norm_density_matrix(self, state, order="nuc"): return self.np.linalg.norm(state, ord=order) def calculate_overlap(self, state1, state2): - return self.np.abs( - self.np.sum(self.np.conj(self.cast(state1)) * self.cast(state2)) - ) + return self.np.abs(self.np.sum(np.conj(self.cast(state1)) * self.cast(state2))) def calculate_overlap_density_matrix(self, state1, state2): state1 = self.cast(state1) state2 = self.cast(state2) - return self.np.trace(self.np.transpose(self.np.conj(state1)) @ state2) + return self.np.trace(self.np.transpose(np.conj(state1)) @ state2) def calculate_eigenvalues(self, matrix, k=6): if self.issparse(matrix): @@ -740,11 +738,11 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): from scipy.linalg import expm return expm(-1j * a * matrix) expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors)) + ud = self.np.transpose(np.conj(eigenvectors)) return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): - statec = self.np.conj(state) + statec = np.conj(state) hstate = hamiltonian @ state ev = self.np.real(self.np.sum(statec * hstate)) if normalize: From c028892a3a5b091de7113612804b6911b8690648 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 16:35:43 +0400 Subject: [PATCH 074/127] update lock file --- poetry.lock | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5926de2468..3cc9d264d3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
[[package]] name = "absl-py" @@ -902,7 +902,6 @@ python-versions = "*" files = [ {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:c5598670f4f31906d725f5ea852f0df675522e3ff5a7bf886057eab36497062d"}, {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:67b6c7427d9ab50cb82e01360948bd1b23d73775b5767ab92071c7afcfec4b8b"}, - {file = "cutensor_cu11-1.7.0-py3-none-win_amd64.whl", hash = "sha256:d173b3d0fd51cf761b371a4d4be9a3afd3ef230a55ae4336ae31e905336480e1"}, ] [[package]] @@ -914,7 +913,6 @@ python-versions = "*" files = [ {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:515caa2406e09ffe9c6524328b7da2106169811665f7684836052753a30dda27"}, {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:29bdde551788fd3a611992026a5bb422831069e38fd44ab920af5aa00cffa12c"}, - {file = "cutensor_cu12-1.7.0-py3-none-win_amd64.whl", hash = "sha256:e1a9a759a615a64d1b8c6d2b8ffd925deb805750c28481b1a8310d05f35ce229"}, ] [[package]] @@ -2303,8 +2301,6 @@ description = "Clang Python Bindings, mirrored from the official LLVM repo: http optional = false python-versions = "*" files = [ - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -3926,7 +3922,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3934,16 +3929,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = 
"PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3960,7 +3947,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3968,7 +3954,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = 
"PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -5425,16 +5410,6 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, - {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, - {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -5500,4 +5475,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "9b1a14610acdd1973b166e3ded5200acbf457c194bca19dd6afa49aff7775e22" +content-hash = "93091c425b55837763512dd337d12a2c5fef7415b5ec1d49a4b254f0a5985770" From 256f890c927a00316ca3e5081a6b88fd883e441c Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 20:17:39 +0400 Subject: [PATCH 075/127] rename test file --- tests/{test_backend_qibotn.py => test_backends_qibotn.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 
tests/{test_backend_qibotn.py => test_backends_qibotn.py} (100%) diff --git a/tests/test_backend_qibotn.py b/tests/test_backends_qibotn.py similarity index 100% rename from tests/test_backend_qibotn.py rename to tests/test_backends_qibotn.py From de7c00d8ddebf588321b530c3635374961dea9a3 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 20:34:39 +0400 Subject: [PATCH 076/127] fix bug --- src/qibo/backends/numpy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 5b5f0d7910..723309e9d7 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -638,12 +638,12 @@ def samples_to_binary(self, samples, nqubits): def samples_to_decimal(self, samples, nqubits): qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") - qrange = (2**qrange)[:, self.np.newaxis] + qrange = (2**qrange)[:, None] return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] def calculate_frequencies(self, samples): res, counts = np.unique(samples, return_counts=True) - return collections.Counter(zip(res, counts)) + return collections.Counter(dict(zip(res, counts))) def update_frequencies(self, frequencies, probabilities, nsamples): samples = self.sample_shots(probabilities, nsamples) From d290b3085e0daf328d0ade18da5ad3a6aa497a0f Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 20:58:48 +0400 Subject: [PATCH 077/127] fix test --- tests/test_transpiler_router.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/test_transpiler_router.py b/tests/test_transpiler_router.py index 7a7ef17583..b8f16431d3 100644 --- a/tests/test_transpiler_router.py +++ b/tests/test_transpiler_router.py @@ -297,8 +297,6 @@ def test_sabre_random_circuits(n_gates, look, decay, placer, connectivity): final_map=final_qubit_map, initial_map=initial_layout, ) - circuit_result = transpiled_circuit.execute(nshots=100) - assert circuit_result.frequencies() == measurement.result.frequencies() assert transpiled_circuit.queue[-1].result is measurement.result From 12b3606043709d49a6a2e49a0b50fa20f4093627 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 20:59:37 +0400 Subject: [PATCH 078/127] revert last commit --- tests/test_transpiler_router.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_transpiler_router.py b/tests/test_transpiler_router.py index b8f16431d3..7a7ef17583 100644 --- a/tests/test_transpiler_router.py +++ b/tests/test_transpiler_router.py @@ -297,6 +297,8 @@ def test_sabre_random_circuits(n_gates, look, decay, placer, connectivity): final_map=final_qubit_map, initial_map=initial_layout, ) + circuit_result = transpiled_circuit.execute(nshots=100) + assert circuit_result.frequencies() == measurement.result.frequencies() assert transpiled_circuit.queue[-1].result is measurement.result From db2414ec287518fe29234312072997f4e2fee6c0 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Wed, 6 Mar 2024 21:25:07 +0400 Subject: [PATCH 079/127] fix dbi test --- tests/test_models_dbi.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_models_dbi.py b/tests/test_models_dbi.py index 71193bd6d9..c52d607456 100644 --- a/tests/test_models_dbi.py +++ b/tests/test_models_dbi.py @@ -103,7 +103,12 @@ def test_hyperopt_step(backend, nqubits): def test_energy_fluctuations(backend): h0 = np.array([[1, 0], [0, -1]]) + h0 = backend.cast(h0, dtype=backend.dtype) + state = np.array([1, 0]) + state = backend.cast(state, dtype=backend.dtype) + dbi = DoubleBracketIteration(Hamiltonian(1, 
matrix=h0, backend=backend)) energy_fluctuation = dbi.energy_fluctuation(state=state) + assert energy_fluctuation == 0 From 381bfbeb1de42d7890bb7cb6b50befc9825782d1 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 7 Mar 2024 13:28:15 +0400 Subject: [PATCH 080/127] fix tests --- src/qibo/hamiltonians/hamiltonians.py | 2 ++ tests/test_hamiltonians.py | 2 +- tests/test_hamiltonians_symbolic.py | 1 + tests/test_measurements_probabilistic.py | 3 ++- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index aa5671fb95..df98112653 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -116,6 +116,7 @@ def exp(self, a): def expectation(self, state, normalize=False): if isinstance(state, self.backend.tensor_types): + state = self.backend.cast(state) shape = tuple(state.shape) if len(shape) == 1: # state vector return self.backend.calculate_expectation_state(self, state, normalize) @@ -175,6 +176,7 @@ def energy_fluctuation(self, state): Return: Energy fluctuation value (float). """ + state = self.backend.cast(state) energy = self.expectation(state) h = self.matrix h2 = Hamiltonian(nqubits=self.nqubits, matrix=h @ h, backend=self.backend) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index 7e6c7723bb..ecca83aafa 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -449,7 +449,7 @@ def test_hamiltonian_energy_fluctuation(backend): ham = hamiltonians.XXZ(nqubits=2, backend=backend) # take ground state and zero state ground_state = ham.ground_state() - zero_state = np.ones(2**2) / np.sqrt(2**2) + zero_state = backend.np.ones(2**2) / np.sqrt(2**2) # collect energy fluctuations gs_energy_fluctuation = ham.energy_fluctuation(ground_state) zs_energy_fluctuation = ham.energy_fluctuation(zero_state) diff --git a/tests/test_hamiltonians_symbolic.py b/tests/test_hamiltonians_symbolic.py index 1be1bc2744..b6026b9af8 100644 --- a/tests/test_hamiltonians_symbolic.py +++ b/tests/test_hamiltonians_symbolic.py @@ -317,6 +317,7 @@ def test_symbolic_hamiltonian_state_expectation_different_nqubits( def test_hamiltonian_expectation_from_samples(backend): """Test Hamiltonian expectation value calculation.""" + backend.set_seed(0) obs0 = 2 * Z(0) * Z(1) + Z(0) * Z(2) obs1 = 2 * Z(0) * Z(1) + Z(0) * Z(2) * I(3) h0 = hamiltonians.SymbolicHamiltonian(obs0, backend=backend) diff --git a/tests/test_measurements_probabilistic.py b/tests/test_measurements_probabilistic.py index 8f56384c0e..4f5c14a4cc 100644 --- a/tests/test_measurements_probabilistic.py +++ b/tests/test_measurements_probabilistic.py @@ -146,8 +146,9 @@ def test_measurementresult_apply_bitflips(backend, i, p0, p1): c = models.Circuit(3) c.add(gates.M(*range(3))) - state = backend.np.zeros(8) + state = np.zeros(8) state[0] = 1.0 + state = backend.cast(state) result = CircuitResult(state, c.measurements, backend) result._samples = backend.cast(np.zeros((10, 3)), dtype="int32") backend.set_seed(123) From 37f1be1816afbb5fbe4683e6dadd2c20dd3e831d Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 7 Mar 2024 14:30:45 +0400 Subject: [PATCH 081/127] fix probabilistic tests and correction --- src/qibo/backends/tensorflow.py | 12 ++++++------ tests/test_measurements_probabilistic.py | 4 +--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/qibo/backends/tensorflow.py b/src/qibo/backends/tensorflow.py index 6490b37d9e..88b9d505ac 100644 --- 
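The casts added to `expectation` and `energy_fluctuation` above convert a plain NumPy state to the backend's tensor type before `calculate_expectation_state` runs; the quantity computed there is just ⟨ψ|H|ψ⟩, optionally divided by ⟨ψ|ψ⟩. A backend-independent NumPy sketch with hypothetical single-qubit values (illustration only, not the patch's code):

    import numpy as np

    # Illustrative choices: Pauli-Z Hamiltonian and the |+> state.
    hamiltonian = np.array([[1.0, 0.0], [0.0, -1.0]])
    state = np.array([1.0, 1.0]) / np.sqrt(2)

    hstate = hamiltonian @ state                       # H |psi>
    ev = np.real(np.sum(np.conj(state) * hstate))      # <psi|H|psi>, equals 0 here

    # with normalize=True the backend divides by the squared norm <psi|psi>
    ev_normalized = ev / np.sum(np.abs(state) ** 2)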
a/src/qibo/backends/tensorflow.py +++ b/src/qibo/backends/tensorflow.py @@ -205,10 +205,10 @@ def calculate_hamiltonian_state_product(self, matrix, state): def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 0, 0, 4, 4, 0], - [4, 0, 0, 0, 0, 0, 0, 4, 4, 0], + [4, 0, 0, 1, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 2, 1, 1, 4, 0, 0, 4], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 4, 0, 0, 0, 4], ] elif name == "test_probabilistic_measurement": if "GPU" in self.device: # pragma: no cover @@ -223,6 +223,6 @@ def test_regressions(self, name): elif name == "test_post_measurement_bitflips_on_circuit": return [ {5: 30}, - {5: 16, 7: 10, 6: 2, 3: 1, 4: 1}, - {3: 6, 5: 6, 7: 5, 2: 4, 4: 3, 0: 2, 1: 2, 6: 2}, + {5: 12, 7: 6, 4: 6, 1: 5, 6: 1}, + {3: 7, 6: 4, 2: 4, 7: 4, 0: 4, 5: 3, 4: 2, 1: 2}, ] diff --git a/tests/test_measurements_probabilistic.py b/tests/test_measurements_probabilistic.py index 4f5c14a4cc..6b018c3f08 100644 --- a/tests/test_measurements_probabilistic.py +++ b/tests/test_measurements_probabilistic.py @@ -146,9 +146,7 @@ def test_measurementresult_apply_bitflips(backend, i, p0, p1): c = models.Circuit(3) c.add(gates.M(*range(3))) - state = np.zeros(8) - state[0] = 1.0 - state = backend.cast(state) + state = backend.zero_state(8) result = CircuitResult(state, c.measurements, backend) result._samples = backend.cast(np.zeros((10, 3)), dtype="int32") backend.set_seed(123) From 4e12663ddff9e3a53c537d46ac81c3cda85a5b6d Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 7 Mar 2024 17:44:48 +0400 Subject: [PATCH 082/127] other tests solved --- src/qibo/backends/numpy.py | 37 ++++++++++--------- src/qibo/hamiltonians/hamiltonians.py | 2 +- src/qibo/transpiler/unitary_decompositions.py | 24 +++++++++--- tests/test_hamiltonians.py | 1 + tests/test_models_circuit_features.py | 5 +-- tests/test_result.py | 2 +- 6 files changed, 42 insertions(+), 29 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 723309e9d7..3e52c5fcd5 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -115,29 +115,30 @@ def matrix_parametrized(self, gate): def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = self.identity_density_matrix(rank, normalize=False) + matrix = np.eye(2**rank) for gate in fgate.gates: - gmatrix = self.cast(gate.matrix(self)) + # transfer gate matrix to numpy as it is more efficient for + # small tensor calculations + # explicit to_numpy see https://github.com/qiboteam/qibo/issues/928 + gmatrix = self.to_numpy(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = self.identity_density_matrix( - (rank - len(gate.qubits)), normalize=False - ) - gmatrix = self.np.kron(gmatrix, eye) + eye = np.eye(2 ** (rank - len(gate.qubits))) + gmatrix = np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate original_shape = gmatrix.shape - gmatrix = self.np.reshape(gmatrix, 2 * rank * (2,)) + gmatrix = np.reshape(gmatrix, 2 * rank * (2,)) qubits = list(gate.qubits) indices = qubits + [q for q in fgate.target_qubits if q not in qubits] indices = np.argsort(indices) transpose_indices = list(indices) transpose_indices.extend(indices + rank) - gmatrix = self.np.transpose(gmatrix, transpose_indices) - gmatrix = self.np.reshape(gmatrix, original_shape) + gmatrix = 
np.transpose(gmatrix, transpose_indices) + gmatrix = np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return matrix + return self.cast(matrix) def control_matrix(self, gate): if len(gate.control_qubits) > 1: @@ -193,7 +194,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): matrix = gate.matrix(self) if gate.is_controlled_by: matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,)) - matrixc = np.conj(matrix) + matrixc = self.np.conj(matrix) ncontrol = len(gate.control_qubits) nactive = nqubits - ncontrol n = 2**ncontrol @@ -226,7 +227,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state = self.np.transpose(state, einsum_utils.reverse_order(order)) else: matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) - matrixc = np.conj(matrix) + matrixc = self.np.conj(matrix) left, right = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) @@ -236,7 +237,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): def apply_gate_half_density_matrix(self, gate, state, nqubits): state = self.cast(state) - state = np.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: # pragma: no cover raise_error( @@ -246,12 +247,12 @@ def apply_gate_half_density_matrix(self, gate, state, nqubits): "gates.", ) else: - matrix = np.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) left, _ = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) - state = np.einsum(left, state, matrix) - return np.reshape(state, 2 * (2**nqubits,)) + state = self.np.einsum(left, state, matrix) + return self.np.reshape(state, 2 * (2**nqubits,)) def apply_channel(self, channel, state, nqubits): probabilities = channel.coefficients + (1 - np.sum(channel.coefficients),) @@ -529,7 +530,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): if circuit.density_matrix: # this implies also it has_collapse assert circuit.has_collapse - final_state = np.mean(self.to_numpy(final_states), 0) + final_state = self.cast(np.mean(self.to_numpy(final_states), 0)) if circuit.measurements: qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( @@ -678,7 +679,7 @@ def partial_trace(self, state, qubits, nqubits): state = self.cast(state) state = self.np.reshape(state, nqubits * (2,)) axes = 2 * [list(qubits)] - rho = self.np.tensordot(state, np.conj(state), axes) + rho = self.np.tensordot(state, self.np.conj(state), axes) shape = 2 * (2 ** (nqubits - len(qubits)),) return self.np.reshape(rho, shape) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index df98112653..b24a9a2c23 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -250,7 +250,7 @@ def __mul__(self, o): if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - r_eigenvalues = ( + r._eigenvalues = ( o * self.backend.np.flip(self._eigenvalues, [0]) if isinstance(self.backend, PyTorchBackend) else o * self._eigenvalues[::-1] diff --git a/src/qibo/transpiler/unitary_decompositions.py b/src/qibo/transpiler/unitary_decompositions.py index 538b7f034d..cf8509b623 100644 --- a/src/qibo/transpiler/unitary_decompositions.py +++ 
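On the `matrix_fused` rewrite above: as the comments in that hunk describe, each gate is padded with an identity via a Kronecker product so it reaches shape (2**rank, 2**rank), and the qubit axes are then permuted so the padded matrix targets the right qubits of the fused block. A self-contained NumPy sketch of that index gymnastics, using a hypothetical two-qubit block and a single-qubit X gate:

    import numpy as np

    x = np.array([[0.0, 1.0], [1.0, 0.0]])       # single-qubit X, illustrative choice
    rank = 2                                     # qubits in the fused block

    # Pad with identity so the matrix has shape (2**rank, 2**rank); this targets qubit 0.
    gmatrix = np.kron(x, np.eye(2 ** (rank - 1)))

    # To target qubit 1 instead, give each qubit its own axis and swap the qubit axes
    # for rows and columns separately, like the transpose step in matrix_fused.
    tensor = np.reshape(gmatrix, (2, 2, 2, 2))
    tensor = np.transpose(tensor, (1, 0, 3, 2))
    x_on_qubit_1 = np.reshape(tensor, (4, 4))

    assert np.allclose(x_on_qubit_1, np.kron(np.eye(2), x))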
b/src/qibo/transpiler/unitary_decompositions.py @@ -73,9 +73,13 @@ def calculate_psi(unitary, magic_basis=magic_basis, backend=None): ut_u = backend.np.transpose(u_magic, (1, 0)) @ u_magic # When the matrix given to np.linalg.eig is a diagonal matrix up to machine precision the decomposition # is not accurate anymore. decimals = 20 works for random 2q Clifford unitaries. - eigvals, psi_magic = backend.np.linalg.eig(np.round(ut_u, decimals=20)) - # orthogonalize eigenvectors in the case of degeneracy (Gram-Schmidt) - psi_magic, _ = backend.np.linalg.qr(psi_magic) + if backend.__class__.__name__ == "TensorflowBackend": + eigvals, psi_magic = np.linalg.eig(np.round(ut_u, decimals=20)) + psi_magic, _ = np.linalg.qr(psi_magic) + else: + eigvals, psi_magic = backend.np.linalg.eig(np.round(ut_u, decimals=20)) + # orthogonalize eigenvectors in the case of degeneracy (Gram-Schmidt) + psi_magic, _ = backend.np.linalg.qr(psi_magic) # write psi in computational basis psi = backend.np.matmul(magic_basis, psi_magic) return psi, eigvals @@ -92,7 +96,10 @@ def schmidt_decompose(state, backend=None): """ backend = _check_backend(backend) - u, d, v = backend.np.linalg.svd(backend.np.reshape(state, (2, 2))) + if backend.__class__.__name__ == "TensorflowBackend": + u, d, v = np.linalg.svd(backend.np.reshape(state, (2, 2))) + else: + u, d, v = backend.np.linalg.svd(backend.np.reshape(state, (2, 2))) if not np.allclose(d, [1, 0]): # pragma: no cover raise_error( ValueError, @@ -114,7 +121,9 @@ def calculate_single_qubit_unitaries(psi, backend=None): """ backend = _check_backend(backend) psi_magic = backend.np.matmul(backend.np.conj(backend.cast(magic_basis)).T, psi) - if not np.allclose(psi_magic.imag, np.zeros_like(psi_magic)): # pragma: no cover + if not np.allclose( + backend.to_numpy(psi_magic).imag, np.zeros_like(psi_magic) + ): # pragma: no cover raise_error(NotImplementedError, "Given state is not real in the magic basis.") psi_bar = backend.cast(psi.T, copy=True) @@ -152,7 +161,10 @@ def calculate_diagonal(unitary, ua, ub, va, vb, backend=None): # normalize U_A, U_B, V_A, V_B so that detU_d = 1 # this is required so that sum(lambdas) = 0 # and Ud can be written as exp(-iH) - det = backend.np.linalg.det(unitary) ** (1 / 16) + if backend.__class__.__name__ == "TensorflowBackend": + det = np.linalg.det(unitary) ** (1 / 16) + else: + det = backend.np.linalg.det(unitary) ** (1 / 16) ua *= det ub *= det va *= det diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index ecca83aafa..e0a2339577 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -365,6 +365,7 @@ def test_hamiltonian_eigenvectors(backend, dtype, dense): H3 = H1 * c2 V3 = backend.to_numpy(H3.eigenvectors()) U3 = backend.to_numpy(H3._eigenvalues) + print(U3) backend.assert_allclose(H3.matrix, V3 @ np.diag(U3) @ V3.T) c3 = dtype(0) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index b440736fe8..69c9f682eb 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -330,9 +330,9 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): ) elif backend.__class__.__name__ == "PyTorchBackend": test_frequencies = ( - Counter({"1": 817, "0": 207}) + Counter({"1": 810, "0": 214}) if nqubits == 1 - else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) + else Counter({"11": 685, "01": 160, "10": 144, "00": 35}) ) else: test_frequencies = ( @@ -340,6 +340,5 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): if 
nqubits == 1 else Counter({"11": 618, "10": 169, "01": 185, "00": 52}) ) - for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) diff --git a/tests/test_result.py b/tests/test_result.py index f85a0c1a64..53d274d896 100644 --- a/tests/test_result.py +++ b/tests/test_result.py @@ -98,5 +98,5 @@ def test_circuitresult_dump_load(backend, agnostic_load): loaded_freq = loaded_res.frequencies() for state, f in freq.items(): assert loaded_freq[state] == f - assert np.sum(result.state() - loaded_res.state()) == 0 + assert backend.np.sum(result.state() - loaded_res.state()) == 0 remove("tmp.npy") From e648fca77f7cbcf2b37a7b57330f1306c5555130 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 09:29:57 +0400 Subject: [PATCH 083/127] pytorch test --- tests/test_models_circuit_features.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index 69c9f682eb..52b1da5c25 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -330,9 +330,9 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): ) elif backend.__class__.__name__ == "PyTorchBackend": test_frequencies = ( - Counter({"1": 810, "0": 214}) + Counter({"1": 817, "0": 207}) if nqubits == 1 - else Counter({"11": 685, "01": 160, "10": 144, "00": 35}) + else Counter({"11": 664, "01": 160, "10": 144, "00": 35}) ) else: test_frequencies = ( From 2c30eb54d1a6d16fe15244aa35742f6eb5c895fb Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 09:57:37 +0400 Subject: [PATCH 084/127] pytorch test --- tests/test_models_circuit_features.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index 52b1da5c25..fee6d92992 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -332,7 +332,7 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): test_frequencies = ( Counter({"1": 817, "0": 207}) if nqubits == 1 - else Counter({"11": 664, "01": 160, "10": 144, "00": 35}) + else Counter({"11": 664, "01": 162, "10": 144, "00": 35}) ) else: test_frequencies = ( From ff698e3426673611bbfb4c9e19162223944d611f Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 10:12:16 +0400 Subject: [PATCH 085/127] pytorch test --- tests/test_models_circuit_features.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index fee6d92992..2fb310bfea 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -332,7 +332,7 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): test_frequencies = ( Counter({"1": 817, "0": 207}) if nqubits == 1 - else Counter({"11": 664, "01": 162, "10": 144, "00": 35}) + else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) ) else: test_frequencies = ( From 3235558d3bcbff9f4df4721f1f8b49af490dbd7b Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 11:16:58 +0400 Subject: [PATCH 086/127] remove unnecessary methods --- src/qibo/backends/numpy.py | 13 ++++++++++--- src/qibo/backends/pytorch.py | 12 +++--------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 3e52c5fcd5..a2419e6d7e 100644 --- a/src/qibo/backends/numpy.py +++ 
b/src/qibo/backends/numpy.py @@ -106,12 +106,16 @@ def matrix(self, gate): """Convert a gate to its matrix representation in the computational basis.""" name = gate.__class__.__name__ _matrix = getattr(self.matrices, name) - return _matrix(2 ** len(gate.target_qubits)) if callable(_matrix) else _matrix + if callable(_matrix): + _matrix = _matrix(2 ** len(gate.target_qubits)) + + return self.cast(_matrix, dtype=_matrix.dtype) def matrix_parametrized(self, gate): """Convert a parametrized gate to its matrix representation in the computational basis.""" name = gate.__class__.__name__ - return getattr(self.matrices, name)(*gate.parameters) + matrix = getattr(self.matrices, name)(*gate.parameters) + return self.cast(matrix, dtype=matrix.dtype) def matrix_fused(self, fgate): rank = len(fgate.target_qubits) @@ -157,7 +161,10 @@ def control_matrix(self, gate): "gate matrix of shape {}.".format(shape), ) zeros = self.np.zeros((2, 2), dtype=self.dtype) - part1 = self.np.concatenate([self.np.eye(2, dtype=self.dtype), zeros], axis=0) + zeros = self.cast(zeros, dtype=zeros.dtype) + identity = self.np.eye(2, dtype=self.dtype) + identity = self.cast(identity, dtype=identity.dtype) + part1 = self.np.concatenate([identity, zeros], axis=0) part2 = self.np.concatenate([zeros, matrix], axis=0) return self.np.concatenate([part1, part2], axis=1) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index a712866b82..411fe7b343 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -106,26 +106,20 @@ def cast( def issparse(self, x): if isinstance(x, self.np.Tensor): return x.is_sparse + return super().issparse(x) def to_numpy(self, x): if isinstance(x, list): return np.asarray([self.to_numpy(i) for i in x]) + if isinstance(x, self.np.Tensor): return x.numpy(force=True) + return x def compile(self, func): return func - # return self.np.jit.script(func) - - def matrix(self, gate): - npmatrix = super().matrix(gate) - return self.np.tensor(npmatrix, dtype=self.dtype) - - def matrix_parametrized(self, gate): - npmatrix = super().matrix_parametrized(gate) - return self.np.tensor(npmatrix, dtype=self.dtype) def sample_shots(self, probabilities, nshots): return self.np.multinomial( From 74959b54ccef8fe383d0fb89b8117eb0a4d2d9be Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 11:29:39 +0400 Subject: [PATCH 087/127] remove unnecessary methods --- src/qibo/backends/numpy.py | 2 +- src/qibo/backends/pytorch.py | 23 +++++------------------ 2 files changed, 6 insertions(+), 19 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index a2419e6d7e..cc90b75862 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -705,7 +705,7 @@ def partial_trace_density_matrix(self, state, qubits, nqubits): def calculate_norm(self, state, order=2): state = self.cast(state) - return self.np.linalg.norm(state, ord=order) + return self.np.linalg.norm(state, order) def calculate_norm_density_matrix(self, state, order="nuc"): state = self.cast(state) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 411fe7b343..0162007e99 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -118,20 +118,11 @@ def to_numpy(self, x): return x - def compile(self, func): - return func - def sample_shots(self, probabilities, nshots): return self.np.multinomial( self.cast(probabilities, dtype="float"), nshots, replacement=True ) - def samples_to_decimal(self, samples, nqubits): - samples = 
self.cast(samples, dtype="int32") - qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) - qrange = (2**qrange).unsqueeze(1) - return self.np.matmul(samples, qrange).squeeze(1) - def samples_to_binary(self, samples, nqubits): samples = self.cast(samples, dtype="int32") qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) @@ -139,15 +130,11 @@ def samples_to_binary(self, samples, nqubits): samples = samples[:, None] >> qrange return samples % 2 - def calculate_norm(self, state, order=2): - state = self.cast(state) - return self.np.norm(state, p=order) - - def calculate_norm_density_matrix(self, state, order="nuc"): - state = self.cast(state) - if order == "nuc": - return self.np.trace(state) - return self.np.norm(state, p=order) + def samples_to_decimal(self, samples, nqubits): + samples = self.cast(samples, dtype="int32") + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) + qrange = (2**qrange).unsqueeze(1) + return self.np.matmul(samples, qrange).squeeze(1) def calculate_eigenvalues(self, matrix, k=6): return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable From 099a4c91e4c3321dd37f41994b232c3b0e84e931 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 12:49:47 +0400 Subject: [PATCH 088/127] fix dill test --- tests/test_backends_qibotn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_backends_qibotn.py b/tests/test_backends_qibotn.py index a18eff8184..5bceca8dc1 100644 --- a/tests/test_backends_qibotn.py +++ b/tests/test_backends_qibotn.py @@ -10,5 +10,5 @@ def test_backend_qibotn(): - qibo.set_backend(backend="qibotn", platform="qutensornet", runcard=None) + qibo.set_backend(backend="qibotn", platform="qutensornet") assert isinstance(GlobalBackend(), QuimbBackend) From 884ff4acb761668bdec8f93ed9eea7d48a698be6 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 13:15:29 +0400 Subject: [PATCH 089/127] fix clifford tests --- src/qibo/backends/clifford.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/qibo/backends/clifford.py b/src/qibo/backends/clifford.py index 2830fd9ef7..6140556d4c 100644 --- a/src/qibo/backends/clifford.py +++ b/src/qibo/backends/clifford.py @@ -329,7 +329,9 @@ def symplectic_matrix_to_generators( for x, z in zip(X, Z): paulis = [bits_to_gate[f"{zz}{xx}"] for xx, zz in zip(x, z)] if return_array: - paulis = [self.cast(getattr(gates, p)(0).matrix()) for p in paulis] + from qibo import matrices # pylint: disable=C0415 + + paulis = [self.cast(getattr(matrices, p)) for p in paulis] matrix = reduce(self.np.kron, paulis) generators.append(matrix) else: From ac9a0b24c37f7dc168faab99522e804af31b684d Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Fri, 8 Mar 2024 13:16:29 +0400 Subject: [PATCH 090/127] fix dill test --- tests/test_backends_qibotn.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_backends_qibotn.py b/tests/test_backends_qibotn.py index 5bceca8dc1..ae850afefa 100644 --- a/tests/test_backends_qibotn.py +++ b/tests/test_backends_qibotn.py @@ -10,5 +10,7 @@ def test_backend_qibotn(): - qibo.set_backend(backend="qibotn", platform="qutensornet") + qibo.set_backend(backend="qibotn", platform="qutensornet", runcard=None) assert isinstance(GlobalBackend(), QuimbBackend) + + qibo.set_backend("numpy") From c885bfe192f4a16d3d88db9b6a6834166e33d830 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 08:45:17 +0400 Subject: [PATCH 091/127] trying to fix test --- src/qibo/backends/numpy.py | 
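The `samples_to_binary`/`samples_to_decimal` pair kept in the torch backend above converts between decimal basis-state indices and bitstrings using right shifts and powers of two; the torch version in the patch follows the same arithmetic with torch tensors. A plain-NumPy sketch with made-up shots:

    import numpy as np

    nqubits = 3
    samples = np.array([5, 0, 6], dtype=np.int32)             # hypothetical shots: |101>, |000>, |110>

    # samples_to_binary: shift each sample by (nqubits-1, ..., 0) and keep the parity bit.
    qrange = np.arange(nqubits - 1, -1, -1, dtype=np.int32)   # [2, 1, 0]
    binary = (samples[:, None] >> qrange) % 2                 # [[1, 0, 1], [0, 0, 0], [1, 1, 0]]

    # samples_to_decimal: weight the bit columns by 2**position and sum them back up.
    powers = (2 ** qrange)[:, None]                           # [[4], [2], [1]]
    decimal = (binary @ powers)[:, 0]                         # [5, 0, 6]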
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index cc90b75862..15c5d7cd2f 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -750,7 +750,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): - statec = np.conj(state) + statec = self.np.conj(state) hstate = hamiltonian @ state ev = self.np.real(self.np.sum(statec * hstate)) if normalize: From 5736ef49ab8debed897a6d5a73b5bcea6e155047 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 09:49:12 +0400 Subject: [PATCH 092/127] methods in same order as `NumpyBackend` --- src/qibo/backends/pytorch.py | 88 ++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 0162007e99..46b3f7c848 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -60,10 +60,6 @@ def __init__(self): def set_device(self, device): # pragma: no cover self.device = device - def set_seed(self, seed): - self.np.manual_seed(seed) - np.random.seed(seed) - def cast( self, x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], @@ -118,6 +114,42 @@ def to_numpy(self, x): return x + def _append_zeros(self, state, qubits, results): + """Helper method for collapse.""" + for q, r in zip(qubits, results): + state = self.np.unsqueeze(state, dim=q) + if r: + state = self.np.cat([self.np.zeros_like(state), state], dim=q) + else: + state = self.np.cat([state, self.np.zeros_like(state)], dim=q) + return state + + def _order_probabilities(self, probs, qubits, nqubits): + """Arrange probabilities according to the given ``qubits`` ordering.""" + if probs.dim() == 0: + return probs + unmeasured, reduced = [], {} + for i in range(nqubits): + if i in qubits: + reduced[i] = i - len(unmeasured) + else: + unmeasured.append(i) + return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + + def calculate_probabilities(self, state, qubits, nqubits): + rtype = self.np.real(state).dtype + unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) + state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) + if len(unmeasured_qubits) == 0: + probs = self.cast(state, dtype=rtype) + else: + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + return self._order_probabilities(probs, qubits, nqubits).ravel() + + def set_seed(self, seed): + self.np.manual_seed(seed) + np.random.seed(seed) + def sample_shots(self, probabilities, nshots): return self.np.multinomial( self.cast(probabilities, dtype="float"), nshots, replacement=True @@ -136,6 +168,11 @@ def samples_to_decimal(self, samples, nqubits): qrange = (2**qrange).unsqueeze(1) return self.np.matmul(samples, qrange).squeeze(1) + def calculate_overlap_density_matrix(self, state1, state2): + return self.np.trace( + self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) + ) + def calculate_eigenvalues(self, matrix, k=6): return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable @@ -151,51 +188,14 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): ud = self.np.conj(eigenvectors).T return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) + def calculate_hamiltonian_state_product(self, matrix, state): + return self.np.matmul(matrix, 
state) + def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 return self.np.matmul(matrix1, matrix2) - def calculate_hamiltonian_state_product(self, matrix, state): - return self.np.matmul(matrix, state) - - def calculate_overlap_density_matrix(self, state1, state2): - return self.np.trace( - self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) - ) - - def _append_zeros(self, state, qubits, results): - """Helper method for collapse.""" - for q, r in zip(qubits, results): - state = self.np.unsqueeze(state, dim=q) - if r: - state = self.np.cat([self.np.zeros_like(state), state], dim=q) - else: - state = self.np.cat([state, self.np.zeros_like(state)], dim=q) - return state - - def calculate_probabilities(self, state, qubits, nqubits): - rtype = self.np.real(state).dtype - unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) - state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - if len(unmeasured_qubits) == 0: - probs = self.cast(state, dtype=rtype) - else: - probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) - return self._order_probabilities(probs, qubits, nqubits).ravel() - - def _order_probabilities(self, probs, qubits, nqubits): - """Arrange probabilities according to the given ``qubits`` ordering.""" - if probs.dim() == 0: - return probs - unmeasured, reduced = [], {} - for i in range(nqubits): - if i in qubits: - reduced[i] = i - len(unmeasured) - else: - unmeasured.append(i) - return self.np.transpose(probs, [reduced.get(i) for i in qubits]) - def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ From 19065414204e2249bc48be786bb863be7d174cb6 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 10:13:40 +0400 Subject: [PATCH 093/127] remove unnecessary functions --- src/qibo/backends/numpy.py | 2 +- src/qibo/backends/pytorch.py | 36 +++--------------------------------- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 15c5d7cd2f..d3d797b193 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -776,7 +776,7 @@ def calculate_hamiltonian_state_product(self, matrix, state): else: raise_error( ValueError, - "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), + f"Cannot multiply Hamiltonian with rank-{rank} tensor.", ) def assert_allclose(self, value, target, rtol=1e-7, atol=0.0): diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 46b3f7c848..a34f9c919b 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -124,28 +124,6 @@ def _append_zeros(self, state, qubits, results): state = self.np.cat([state, self.np.zeros_like(state)], dim=q) return state - def _order_probabilities(self, probs, qubits, nqubits): - """Arrange probabilities according to the given ``qubits`` ordering.""" - if probs.dim() == 0: - return probs - unmeasured, reduced = [], {} - for i in range(nqubits): - if i in qubits: - reduced[i] = i - len(unmeasured) - else: - unmeasured.append(i) - return self.np.transpose(probs, [reduced.get(i) for i in qubits]) - - def calculate_probabilities(self, state, qubits, nqubits): - rtype = self.np.real(state).dtype - unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) - state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - if 
len(unmeasured_qubits) == 0: - probs = self.cast(state, dtype=rtype) - else: - probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) - return self._order_probabilities(probs, qubits, nqubits).ravel() - def set_seed(self, seed): self.np.manual_seed(seed) np.random.seed(seed) @@ -168,14 +146,6 @@ def samples_to_decimal(self, samples, nqubits): qrange = (2**qrange).unsqueeze(1) return self.np.matmul(samples, qrange).squeeze(1) - def calculate_overlap_density_matrix(self, state1, state2): - return self.np.trace( - self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) - ) - - def calculate_eigenvalues(self, matrix, k=6): - return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable - def calculate_eigenvectors(self, matrix, k=6): return self.np.linalg.eigh(matrix) # pylint: disable=not-callable @@ -188,14 +158,14 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): ud = self.np.conj(eigenvectors).T return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) - def calculate_hamiltonian_state_product(self, matrix, state): - return self.np.matmul(matrix, state) - def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): if self.issparse(matrix1) or self.issparse(matrix2): return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 return self.np.matmul(matrix1, matrix2) + def calculate_hamiltonian_state_product(self, matrix, state): + return self.np.matmul(matrix, state) + def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ From 8d91ae29ddfcb14c80a3e34bd33fb775efb059f8 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 10:27:20 +0400 Subject: [PATCH 094/127] f-string formatting --- src/qibo/backends/numpy.py | 2 +- src/qibo/backends/tensorflow.py | 2 +- src/qibo/gates/gates.py | 2 +- src/qibo/gates/measurements.py | 2 +- src/qibo/hamiltonians/abstract.py | 4 +-- src/qibo/hamiltonians/adiabatic.py | 10 +++----- src/qibo/hamiltonians/terms.py | 17 +++++-------- src/qibo/measurements.py | 2 +- src/qibo/models/evolution.py | 6 ++--- src/qibo/models/qft.py | 5 ++-- src/qibo/models/variational.py | 41 ++++++++++++++---------------- src/qibo/symbols.py | 4 +-- tests/conftest.py | 2 +- tests/test_cirq.py | 2 +- 14 files changed, 46 insertions(+), 55 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index d3d797b193..406138ff47 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -158,7 +158,7 @@ def control_matrix(self, gate): raise_error( ValueError, "Cannot use ``control_unitary`` method on " - "gate matrix of shape {}.".format(shape), + + f"gate matrix of shape {shape}.", ) zeros = self.np.zeros((2, 2), dtype=self.dtype) zeros = self.cast(zeros, dtype=zeros.dtype) diff --git a/src/qibo/backends/tensorflow.py b/src/qibo/backends/tensorflow.py index 88b9d505ac..f762b0e66c 100644 --- a/src/qibo/backends/tensorflow.py +++ b/src/qibo/backends/tensorflow.py @@ -199,7 +199,7 @@ def calculate_hamiltonian_state_product(self, matrix, state): else: raise_error( ValueError, - "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), + f"Cannot multiply Hamiltonian with rank-{rank} tensor.", ) def test_regressions(self, name): diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index 5cf180c59d..5b34f7faac 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -1754,7 +1754,7 @@ def parameters(self, x): if shape != (2, 2): raise_error( ValueError, - "Invalid rotation shape {} 
for generalized " "fSim gate".format(shape), + f"Invalid rotation shape {shape} for generalized fSim gate", ) ParametrizedGate.parameters.fset(self, x) # pylint: disable=no-member diff --git a/src/qibo/gates/measurements.py b/src/qibo/gates/measurements.py index 1888810da2..50b4c29e78 100644 --- a/src/qibo/gates/measurements.py +++ b/src/qibo/gates/measurements.py @@ -127,7 +127,7 @@ def _get_bitflip_tuple(qubits: Tuple[int], probs: "ProbsType") -> Tuple[float]: ) return tuple(probs[q] if q in probs else 0.0 for q in qubits) - raise_error(TypeError, "Invalid type {} of bitflip map.".format(probs)) + raise_error(TypeError, f"Invalid type {probs} of bitflip map.") def _get_bitflip_map(self, p: Optional["ProbsType"] = None) -> Dict[int, float]: """Creates dictionary with bitflip probabilities.""" diff --git a/src/qibo/hamiltonians/abstract.py b/src/qibo/hamiltonians/abstract.py index 966f943659..a393ab78da 100644 --- a/src/qibo/hamiltonians/abstract.py +++ b/src/qibo/hamiltonians/abstract.py @@ -17,11 +17,11 @@ def nqubits(self): def nqubits(self, n): if not isinstance(n, int): raise_error( - RuntimeError, "nqubits must be an integer but is " "{}.".format(type(n)) + RuntimeError, f"nqubits must be an integer but is {type(n)}." ) if n < 1: raise_error( - ValueError, "nqubits must be a positive integer but is " "{}".format(n) + ValueError, f"nqubits must be a positive integer but is {n}" ) self._nqubits = n diff --git a/src/qibo/hamiltonians/adiabatic.py b/src/qibo/hamiltonians/adiabatic.py index d7627d449e..951c024128 100644 --- a/src/qibo/hamiltonians/adiabatic.py +++ b/src/qibo/hamiltonians/adiabatic.py @@ -19,8 +19,7 @@ def __new__(cls, h0, h1): if type(h1) != type(h0): raise_error( TypeError, - "h1 should be of the same type {} of h0 but " - "is {}.".format(type(h0), type(h1)), + f"h1 should be of the same type {type(h0)} of h0 but is {type(h1)}.", ) if isinstance(h0, hamiltonians.Hamiltonian): return BaseAdiabaticHamiltonian(h0, h1) @@ -29,8 +28,7 @@ def __new__(cls, h0, h1): else: raise_error( TypeError, - "h0 should be a hamiltonians.Hamiltonian " - "object but is {}.".format(type(h0)), + f"h0 should be a hamiltonians.Hamiltonian object but is {type(h0)}." ) def __init__(self, h0, h1): # pragma: no cover @@ -63,11 +61,11 @@ def __init__(self, h0, h1): if h0.nqubits != h1.nqubits: raise_error( ValueError, - "H0 has {} qubits while H1 has {}." "".format(h0.nqubits, h1.nqubits), + f"H0 has {h0.nqubits} qubits while H1 has {h1.nqubits}.", ) self.nqubits = h0.nqubits if h0.backend != h1.backend: # pragma: no cover - raise_error(ValueError, "H0 and H1 have different backend.") + raise_error(ValueError, "H0 and H1 have different backends.") self.backend = h0.backend self.h0, self.h1 = h0, h1 self.schedule = None diff --git a/src/qibo/hamiltonians/terms.py b/src/qibo/hamiltonians/terms.py index e6e3ea1362..d2fd014b92 100644 --- a/src/qibo/hamiltonians/terms.py +++ b/src/qibo/hamiltonians/terms.py @@ -25,21 +25,19 @@ def __init__(self, matrix, *q): if qi < 0: raise_error( ValueError, - "Invalid qubit id {} < 0 was given " - "in Hamiltonian term".format(qi), + f"Invalid qubit id {qi} < 0 was given in Hamiltonian term.", ) if not isinstance(matrix, np.ndarray): raise_error( - TypeError, "Invalid type {} of symbol matrix." "".format(type(matrix)) + TypeError, f"Invalid type {type(matrix)} of symbol matrix." ) dim = int(matrix.shape[0]) if 2 ** len(q) != dim: raise_error( ValueError, - "Matrix dimension {} given in Hamiltonian " - "term is not compatible with the number " - "of target qubits {}." 
- "".format(dim, len(q)), + f"Matrix dimension {dim} given in Hamiltonian " + + "term is not compatible with the number " + + f"of target qubits {len(q)}.", ) self.target_qubits = tuple(q) self._gate = None @@ -79,8 +77,7 @@ def merge(self, term): raise_error( ValueError, "Cannot merge HamiltonianTerm acting on " - "qubits {} to term on qubits {}." - "".format(term.target_qubits, self.target_qubits), + + f"qubits {term.target_qubits} to term on qubits {self.target_qubits}.", ) matrix = np.kron(term.matrix, np.eye(2 ** (len(self) - len(term)))) matrix = np.reshape(matrix, 2 * len(self) * (2,)) @@ -190,7 +187,7 @@ def __init__(self, coefficient, factors=1, symbol_map={}): elif factor.is_number: self.coefficient *= complex(factor) else: # pragma: no cover - raise_error(TypeError, "Cannot parse factor {}.".format(factor)) + raise_error(TypeError, f"Cannot parse factor {factor}.") self.target_qubits = tuple(sorted(self.matrix_map.keys())) diff --git a/src/qibo/measurements.py b/src/qibo/measurements.py index a5bc61253b..e46ffe6a00 100644 --- a/src/qibo/measurements.py +++ b/src/qibo/measurements.py @@ -36,7 +36,7 @@ class MeasurementSymbol(sympy.Symbol): _counter = 0 def __new__(cls, *args, **kwargs): - name = "m{}".format(cls._counter) + name = f"m{cls._counter}" cls._counter += 1 return super().__new__(cls=cls, name=name) diff --git a/src/qibo/models/evolution.py b/src/qibo/models/evolution.py index b40e3577a1..ee4fc7df78 100644 --- a/src/qibo/models/evolution.py +++ b/src/qibo/models/evolution.py @@ -56,7 +56,7 @@ def __init__(self, hamiltonian, dt, solver="exp", callbacks=[], accelerators=Non ham = hamiltonian(0) if not isinstance(ham, AbstractHamiltonian): raise TypeError( - "Hamiltonian type {} not understood." "".format(type(ham)) + f"Hamiltonian type {type(ham)} not understood." ) self.nqubits = ham.nqubits self.backend = ham.backend @@ -70,8 +70,8 @@ def __init__(self, hamiltonian, dt, solver="exp", callbacks=[], accelerators=Non raise_error( NotImplementedError, "Distributed evolution is only " - "implemented using the Trotter " - "exponential solver.", + + "implemented using the Trotter " + + "exponential solver.", ) ham.circuit(dt, accelerators) self.solver = solvers.get_solver(solver, self.dt, hamiltonian) diff --git a/src/qibo/models/qft.py b/src/qibo/models/qft.py index 092a573706..f54cd1a094 100644 --- a/src/qibo/models/qft.py +++ b/src/qibo/models/qft.py @@ -63,9 +63,8 @@ def _DistributedQFT(nqubits, accelerators=None): if icrit < circuit.nglobal: # pylint: disable=E1101 raise_error( NotImplementedError, - "Cannot implement QFT for {} qubits " - "using {} global qubits." - "".format(nqubits, circuit.nglobal), + f"Cannot implement QFT for {nqubits} qubits " + + f"using {circuit.nglobal} global qubits.", ) # pylint: disable=E1101 for i1 in range(nqubits): diff --git a/src/qibo/models/variational.py b/src/qibo/models/variational.py index 950b8f3a5c..304d8a7d44 100644 --- a/src/qibo/models/variational.py +++ b/src/qibo/models/variational.py @@ -188,19 +188,18 @@ def __init__( if nsteps <= 0: # pragma: no cover raise_error( ValueError, - "Number of steps nsteps should be positive but is {}." - "".format(nsteps), + f"Number of steps nsteps should be positive but is {nsteps}.", ) if t_max <= 0: # pragma: no cover raise_error( ValueError, - "Maximum time t_max should be positive but is {}." 
"".format(t_max), + f"Maximum time t_max should be positive but is {t_max}.", ) if easy_hamiltonian.nqubits != problem_hamiltonian.nqubits: # pragma: no cover raise_error( ValueError, - "The easy Hamiltonian has {} qubits while problem Hamiltonian has {}." - "".format(easy_hamiltonian.nqubits, problem_hamiltonian.nqubits), + f"The easy Hamiltonian has {easy_hamiltonian.nqubits} qubits " + + f"while problem Hamiltonian has {problem_hamiltonian.nqubits}.", ) self.ATOL = bounds_tolerance @@ -219,7 +218,7 @@ def __init__( raise_error( ValueError, "Scheduling function must take only one argument," - "but the function proposed takes {}.".format(nparams), + + f"but the function proposed takes {nparams}.", ) self.set_schedule(s) @@ -228,10 +227,10 @@ def set_schedule(self, func): # check boundary conditions s0 = func(0) if abs(s0) > self.ATOL: # pragma: no cover - raise_error(ValueError, "s(0) should be 0 but it is {}.".format(s0)) + raise_error(ValueError, f"s(0) should be 0 but it is {s0}.") s1 = func(1) if abs(s1 - 1) > self.ATOL: # pragma: no cover - raise_error(ValueError, "s(1) should be 1 but it is {}.".format(s1)) + raise_error(ValueError, f"s(1) should be 1 but it is {s1}.") self._schedule = func def schedule(self, t): @@ -241,13 +240,13 @@ def schedule(self, t): if (t - self._t_max) > self.ATOL_TIME: # pragma: no cover raise_error( ValueError, - "t cannot be greater than {}, but it is {}.".format(self._t_max, t), + f"t cannot be greater than {self._t_max}, but it is {t}.", ) s = self._schedule(t / self._t_max) if (abs(s) - 1) > self.ATOL: # pragma: no cover raise_error( - ValueError, "s cannot be greater than 1 but it is {}.".format(s) + ValueError, f"s cannot be greater than 1 but it is {s}." ) return s @@ -256,7 +255,7 @@ def hamiltonian(self, t): if (t - self._t_max) > self.ATOL: # pragma: no cover raise_error( ValueError, - "t cannot be greater than {}, but it is {}.".format(self._t_max, t), + f"t cannot be greater than {self._t_max}, but it is {t}.", ) # boundary conditions s(0)=0, s(total_time)=1 st = self.schedule(t) @@ -361,7 +360,7 @@ def __init__( # problem hamiltonian if not isinstance(hamiltonian, AbstractHamiltonian): raise_error( - TypeError, "Invalid Hamiltonian type {}." "".format(type(hamiltonian)) + TypeError, f"Invalid Hamiltonian type {type(hamiltonian)}." ) self.hamiltonian = hamiltonian self.nqubits = hamiltonian.nqubits @@ -377,16 +376,14 @@ def __init__( if type(mixer) != type(hamiltonian): raise_error( TypeError, - "Given Hamiltonian is of type {} " - "while mixer is of type {}." - "".format(type(hamiltonian), type(mixer)), + f"Given Hamiltonian is of type {type(hamiltonian)} " + + f"while mixer is of type {type(mixer)}.", ) if mixer.nqubits != hamiltonian.nqubits: raise_error( ValueError, - "Given Hamiltonian acts on {} qubits " - "while mixer acts on {}." 
- "".format(hamiltonian.nqubits, mixer.nqubits), + f"Given Hamiltonian acts on {hamiltonian.nqubits} qubits " + + f"while mixer acts on {mixer.nqubits}.", ) self.mixer = mixer @@ -398,8 +395,8 @@ def __init__( raise_error( NotImplementedError, "Distributed QAOA is implemented " - "only with SymbolicHamiltonian and " - "exponential solver.", + + "only with SymbolicHamiltonian and " + + "exponential solver.", ) if isinstance(self.hamiltonian, self.hamiltonians.SymbolicHamiltonian): self.hamiltonian.circuit(1e-2, accelerators) @@ -534,8 +531,8 @@ def minimize( raise_error( ValueError, "Initial guess for the parameters must " - "contain an even number of values but " - "contains {}.".format(len(initial_p)), + + "contain an even number of values but " + + "contains {len(initial_p)}.", ) def _loss(params, qaoa, hamiltonian, state): diff --git a/src/qibo/symbols.py b/src/qibo/symbols.py index 8f2042d626..9d27910618 100644 --- a/src/qibo/symbols.py +++ b/src/qibo/symbols.py @@ -38,7 +38,7 @@ class Symbol(sympy.Symbol): """ def __new__(cls, q, matrix=None, name="Symbol", commutative=False, **assumptions): - name = "{}{}".format(name, q) + name = f"{name}{q}" assumptions["commutative"] = commutative return super().__new__(cls=cls, name=name, **assumptions) @@ -64,7 +64,7 @@ def __init__(self, q, matrix=None, name="Symbol", commutative=False): ) ): raise_error( - TypeError, "Invalid type {} of symbol matrix." "".format(type(matrix)) + TypeError, f"Invalid type {type(matrix)} of symbol matrix." ) self.matrix = matrix diff --git a/tests/conftest.py b/tests/conftest.py index 8ac656cd13..f320321576 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -54,7 +54,7 @@ def pytest_runtest_setup(item): plat = sys.platform if supported_platforms and plat not in supported_platforms: # pragma: no cover # case not covered by workflows - pytest.skip("Cannot run test on platform {}.".format(plat)) + pytest.skip(f"Cannot run test on platform {plat}.") def pytest_configure(config): diff --git a/tests/test_cirq.py b/tests/test_cirq.py index 0069bdec9d..636581656d 100644 --- a/tests/test_cirq.py +++ b/tests/test_cirq.py @@ -97,7 +97,7 @@ def assert_cirq_gates_equivalent(qibo_gate, cirq_gate): gatename, theta, targets = pieces else: # pragma: no cover # case not tested because it fails - raise RuntimeError("Cirq gate parsing failed with {}.".format(pieces)) + raise RuntimeError(f"Cirq gate parsing failed with {pieces}.") qubits = list(int(x) for x in targets.replace(" ", "").split(",")) targets = (qubits.pop(),) From 2cea316e34c4d91fb6854aaa4c5bf362744c49d0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 9 Mar 2024 06:28:35 +0000 Subject: [PATCH 095/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/qibo/hamiltonians/abstract.py | 8 ++------ src/qibo/hamiltonians/adiabatic.py | 2 +- src/qibo/hamiltonians/terms.py | 4 +--- src/qibo/models/evolution.py | 4 +--- src/qibo/models/variational.py | 8 ++------ src/qibo/symbols.py | 4 +--- 6 files changed, 8 insertions(+), 22 deletions(-) diff --git a/src/qibo/hamiltonians/abstract.py b/src/qibo/hamiltonians/abstract.py index a393ab78da..749ad0b21e 100644 --- a/src/qibo/hamiltonians/abstract.py +++ b/src/qibo/hamiltonians/abstract.py @@ -16,13 +16,9 @@ def nqubits(self): @nqubits.setter def nqubits(self, n): if not isinstance(n, int): - raise_error( - RuntimeError, f"nqubits must be an integer but is {type(n)}." 
- ) + raise_error(RuntimeError, f"nqubits must be an integer but is {type(n)}.") if n < 1: - raise_error( - ValueError, f"nqubits must be a positive integer but is {n}" - ) + raise_error(ValueError, f"nqubits must be a positive integer but is {n}") self._nqubits = n @abstractmethod diff --git a/src/qibo/hamiltonians/adiabatic.py b/src/qibo/hamiltonians/adiabatic.py index 951c024128..4f69be8bf1 100644 --- a/src/qibo/hamiltonians/adiabatic.py +++ b/src/qibo/hamiltonians/adiabatic.py @@ -28,7 +28,7 @@ def __new__(cls, h0, h1): else: raise_error( TypeError, - f"h0 should be a hamiltonians.Hamiltonian object but is {type(h0)}." + f"h0 should be a hamiltonians.Hamiltonian object but is {type(h0)}.", ) def __init__(self, h0, h1): # pragma: no cover diff --git a/src/qibo/hamiltonians/terms.py b/src/qibo/hamiltonians/terms.py index d2fd014b92..f1d2f7cda0 100644 --- a/src/qibo/hamiltonians/terms.py +++ b/src/qibo/hamiltonians/terms.py @@ -28,9 +28,7 @@ def __init__(self, matrix, *q): f"Invalid qubit id {qi} < 0 was given in Hamiltonian term.", ) if not isinstance(matrix, np.ndarray): - raise_error( - TypeError, f"Invalid type {type(matrix)} of symbol matrix." - ) + raise_error(TypeError, f"Invalid type {type(matrix)} of symbol matrix.") dim = int(matrix.shape[0]) if 2 ** len(q) != dim: raise_error( diff --git a/src/qibo/models/evolution.py b/src/qibo/models/evolution.py index ee4fc7df78..0a53e4917e 100644 --- a/src/qibo/models/evolution.py +++ b/src/qibo/models/evolution.py @@ -55,9 +55,7 @@ def __init__(self, hamiltonian, dt, solver="exp", callbacks=[], accelerators=Non else: ham = hamiltonian(0) if not isinstance(ham, AbstractHamiltonian): - raise TypeError( - f"Hamiltonian type {type(ham)} not understood." - ) + raise TypeError(f"Hamiltonian type {type(ham)} not understood.") self.nqubits = ham.nqubits self.backend = ham.backend if dt <= 0: diff --git a/src/qibo/models/variational.py b/src/qibo/models/variational.py index 304d8a7d44..d379cf386c 100644 --- a/src/qibo/models/variational.py +++ b/src/qibo/models/variational.py @@ -245,9 +245,7 @@ def schedule(self, t): s = self._schedule(t / self._t_max) if (abs(s) - 1) > self.ATOL: # pragma: no cover - raise_error( - ValueError, f"s cannot be greater than 1 but it is {s}." - ) + raise_error(ValueError, f"s cannot be greater than 1 but it is {s}.") return s def hamiltonian(self, t): @@ -359,9 +357,7 @@ def __init__( self.params = None # problem hamiltonian if not isinstance(hamiltonian, AbstractHamiltonian): - raise_error( - TypeError, f"Invalid Hamiltonian type {type(hamiltonian)}." - ) + raise_error(TypeError, f"Invalid Hamiltonian type {type(hamiltonian)}.") self.hamiltonian = hamiltonian self.nqubits = hamiltonian.nqubits # mixer hamiltonian (default = -sum(sigma_x)) diff --git a/src/qibo/symbols.py b/src/qibo/symbols.py index 9d27910618..9aa12a1dcc 100644 --- a/src/qibo/symbols.py +++ b/src/qibo/symbols.py @@ -63,9 +63,7 @@ def __init__(self, q, matrix=None, name="Symbol", commutative=False): ), ) ): - raise_error( - TypeError, f"Invalid type {type(matrix)} of symbol matrix." 
- ) + raise_error(TypeError, f"Invalid type {type(matrix)} of symbol matrix.") self.matrix = matrix def __getstate__(self): From ab535bc72bb7debd9235d228a6bd08126449525c Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 10:45:08 +0400 Subject: [PATCH 096/127] f-string formatting --- examples/adiabatic/trotter_error.py | 6 +++--- examples/adiabatic3sat/functions.py | 6 +++--- examples/adiabatic3sat/main.py | 9 ++++----- examples/benchmarks/circuits.py | 2 +- examples/benchmarks/main.py | 4 ++-- examples/benchmarks/utils.py | 6 +++--- examples/grover3sat/functions.py | 2 +- examples/grover3sat/main.py | 8 ++++---- examples/hash-grover/main.py | 14 ++++++-------- examples/unary/functions.py | 2 +- examples/unary/main.py | 10 +++++----- examples/variational_classifier/main.py | 18 ++++++++---------- src/qibo/callbacks.py | 15 ++++++--------- 13 files changed, 47 insertions(+), 55 deletions(-) diff --git a/examples/adiabatic/trotter_error.py b/examples/adiabatic/trotter_error.py index 80892db310..7caadec8ec 100644 --- a/examples/adiabatic/trotter_error.py +++ b/examples/adiabatic/trotter_error.py @@ -61,9 +61,9 @@ def main(nqubits, hfield, T, save): ] alphas = [1.0, 0.7, 0.4] labels = [ - r"$\delta t ^{}$".format(exponent - 1), - r"$\delta t ^{}$".format(exponent), - r"$\delta t ^{}$".format(exponent + 1), + f"$\\delta t ^{exponent - 1}$", + f"$\\delta t ^{exponent}$", + f"$\\delta t ^{exponent - 1}$", ] plt.figure(figsize=(7, 4)) diff --git a/examples/adiabatic3sat/functions.py b/examples/adiabatic3sat/functions.py index 2a8e5bf079..c6fa865823 100644 --- a/examples/adiabatic3sat/functions.py +++ b/examples/adiabatic3sat/functions.py @@ -17,7 +17,7 @@ def read_file(file_name, instance): solution (list): list of the correct outputs of the instance for testing. clauses (list): list of all clauses, with the qubits each clause acts upon. """ - file = open("../data3sat/{q}bit/n{q}i{i}.txt".format(q=file_name, i=instance)) + file = open(f"../data3sat/{file_name}bit/n{file_name}i{instance}.txt") control = list(map(int, file.readline().split())) solution = list(map(str, file.readline().split())) clauses = [list(map(int, file.readline().split())) for _ in range(control[1])] @@ -100,7 +100,7 @@ def plot(qubits, ground, first, gap, dt, T): plt.title("Energy during adiabatic evolution") ax.legend() fig.tight_layout() - fig.savefig("{}_qubits_energy.png".format(qubits), dpi=300, bbox_inches="tight") + fig.savefig(f"{qubits}_qubits_energy.png", dpi=300, bbox_inches="tight") fig, ax = plt.subplots() ax.plot(times, gap, label="gap energy", color="C0") plt.ylabel("energy") @@ -108,4 +108,4 @@ def plot(qubits, ground, first, gap, dt, T): plt.title("Energy during adiabatic evolution") ax.legend() fig.tight_layout() - fig.savefig("{}_qubits_gap.png".format(qubits), dpi=300, bbox_inches="tight") + fig.savefig(f"{qubits}_qubits_gap.png", dpi=300, bbox_inches="tight") diff --git a/examples/adiabatic3sat/main.py b/examples/adiabatic3sat/main.py index 901df08c96..06b0d5cce0 100644 --- a/examples/adiabatic3sat/main.py +++ b/examples/adiabatic3sat/main.py @@ -48,9 +48,8 @@ def main(nqubits, instance, T, dt, solver, plot, dense, params, method, maxiter) print("-" * 20 + "\n") if plot and nqubits >= 14: print( - "Currently not possible to calculate gap energy for {} qubits." - "\n Proceeding to adiabatic evolution without plotting data.\n" - "".format(nqubits) + f"Currently not possible to calculate gap energy for {nqubits} qubits." 
+ + "\n Proceeding to adiabatic evolution without plotting data.\n" ) plot = False if plot and method is not None: @@ -97,9 +96,9 @@ def main(nqubits, instance, T, dt, solver, plot, dense, params, method, maxiter) output_dec = (np.abs(final_state) ** 2).argmax() max_output = "{0:0{bits}b}".format(output_dec, bits=nqubits) max_prob = (np.abs(final_state) ** 2).max() - print("Exact cover instance with {} qubits.\n".format(nqubits)) + print(f"Exact cover instance with {nqubits} qubits.\n") if solution: - print("Known solution: {}\n".format("".join(solution))) + print(f"Known solution: {''.join(solution)}\n") print("-" * 20 + "\n") print( f"Adiabatic evolution with total time {T}, evolution step {dt} and " diff --git a/examples/benchmarks/circuits.py b/examples/benchmarks/circuits.py index 9aab3ac933..cdcd1444a6 100644 --- a/examples/benchmarks/circuits.py +++ b/examples/benchmarks/circuits.py @@ -67,7 +67,7 @@ def CircuitFactory(nqubits, circuit_name, accelerators=None, **kwargs): circuit = models.QFT(nqubits, accelerators=accelerators) else: if circuit_name not in _CIRCUITS: - raise KeyError("Unknown benchmark circuit type {}." "".format(circuit_name)) + raise KeyError(f"Unknown benchmark circuit type {circuit_name}.") circuit = models.Circuit(nqubits, accelerators=accelerators) circuit.add(_CIRCUITS.get(circuit_name)(nqubits, **kwargs)) return circuit diff --git a/examples/benchmarks/main.py b/examples/benchmarks/main.py index 4809edf044..31b7eaec84 100644 --- a/examples/benchmarks/main.py +++ b/examples/benchmarks/main.py @@ -53,14 +53,14 @@ def limit_gpu_memory(memory_limit=None): print("\nNo GPU memory limiter used.\n") return - print("\nAttempting to limit GPU memory to {}.\n".format(memory_limit)) + print(f"\nAttempting to limit GPU memory to {memory_limit}.\n") gpus = tf.config.list_physical_devices("GPU") for gpu in tf.config.list_physical_devices("GPU"): config = tf.config.experimental.VirtualDeviceConfiguration( memory_limit=memory_limit ) tf.config.experimental.set_virtual_device_configuration(gpu, [config]) - print("Limiting memory of {} to {}.".format(gpu.name, memory_limit)) + print(f"Limiting memory of {gpu.name} to {memory_limit}.") print() diff --git a/examples/benchmarks/utils.py b/examples/benchmarks/utils.py index 2f0bcb6111..4bd3deeda1 100644 --- a/examples/benchmarks/utils.py +++ b/examples/benchmarks/utils.py @@ -6,12 +6,12 @@ class BenchmarkLogger(list): def __init__(self, filename=None): self.filename = filename if filename is not None and os.path.isfile(filename): - print("Extending existing logs from {}.".format(filename)) + print(f"Extending existing logs from {filename}.") with open(filename) as file: super().__init__(json.load(file)) else: if filename is not None: - print("Creating new logs in {}.".format(filename)) + print(f"Creating new logs in {filename}.") super().__init__() def dump(self): @@ -20,7 +20,7 @@ def dump(self): json.dump(list(self), file) def __str__(self): - return "\n".join("{}: {}".format(k, v) for k, v in self[-1].items()) + return "\n".join(f"{k}: {v}" for k, v in self[-1].items()) def parse_accelerators(accelerators): diff --git a/examples/grover3sat/functions.py b/examples/grover3sat/functions.py index 1c34366546..74adb146a4 100644 --- a/examples/grover3sat/functions.py +++ b/examples/grover3sat/functions.py @@ -15,7 +15,7 @@ def read_file(file_name, instance): solution (list): list of the correct outputs of the instance for testing. clauses (list): list of all clauses, with the qubits each clause acts upon. 
""" - file = open("../data3sat/{q}bit/n{q}i{i}.txt".format(q=file_name, i=instance)) + file = open(f"../data3sat/{file_name}bit/n{file_name}i{instance}.txt") control = list(map(int, file.readline().split())) solution = list(map(str, file.readline().split())) clauses = [list(map(int, file.readline().split())) for _ in range(control[1])] diff --git a/examples/grover3sat/main.py b/examples/grover3sat/main.py index a40d30c116..c1c3386906 100644 --- a/examples/grover3sat/main.py +++ b/examples/grover3sat/main.py @@ -18,16 +18,16 @@ def main(nqubits, instance): qubits = control[0] clauses_num = control[1] steps = int((np.pi / 4) * np.sqrt(2**qubits)) - print("Qubits encoding the solution: {}\n".format(qubits)) - print("Total number of qubits used: {}\n".format(qubits + clauses_num + 1)) + print(f"Qubits encoding the solution: {qubits}\n") + print(f"Total number of qubits used: {qubits + clauses_num + 1}\n") q, c, ancilla, circuit = functions.create_qc(qubits, clauses_num) circuit = functions.grover(circuit, q, c, ancilla, clauses, steps) result = circuit(nshots=100) frequencies = result.frequencies(binary=True, registers=False) most_common_bitstring = frequencies.most_common(1)[0][0] - print("Most common bitstring: {}\n".format(most_common_bitstring)) + print(f"Most common bitstring: {most_common_bitstring}\n") if solution: - print("Exact cover solution: {}\n".format("".join(solution))) + print(f"Exact cover solution: {''.join(solution)}\n") if __name__ == "__main__": diff --git a/examples/hash-grover/main.py b/examples/hash-grover/main.py index 3d981d0d02..237f9b5855 100644 --- a/examples/hash-grover/main.py +++ b/examples/hash-grover/main.py @@ -23,11 +23,9 @@ def main(h_value, collisions, b): h = "{0:0{bits}b}".format(h_value, bits=b) if len(h) > 8: raise ValueError( - "Hash should be at maximum an 8-bit number but given value contains {} bits.".format( - len(h) - ) + f"Hash should be at maximum an 8-bit number but given value contains {len(h)} bits." 
) - print("Target hash: {}\n".format(h)) + print(f"Target hash: {h}\n") if collisions: grover_it = int(np.pi * np.sqrt((2**8) / collisions) / 4) result = functions.grover(q, constant_1, constant_2, rot, h, grover_it) @@ -36,19 +34,19 @@ def main(h_value, collisions, b): print("Preimages:") for i in most_common: if functions.check_hash(q, i[0], h, constant_1, constant_2, rot): - print(" - {}\n".format(i[0])) + print(f" - {i[0]}\n") else: print( " Incorrect preimage found, number of given collisions might not match.\n" ) - print("Total iterations taken: {}\n".format(grover_it)) + print(f"Total iterations taken: {grover_it}\n") else: measured, total_iterations = functions.grover_unknown_M( q, constant_1, constant_2, rot, h ) print("Solution found in an iterative process.\n") - print("Preimage: {}\n".format(measured)) - print("Total iterations taken: {}\n".format(total_iterations)) + print(f"Preimage: {measured}\n") + print(f"Total iterations taken: {total_iterations}\n") if __name__ == "__main__": diff --git a/examples/unary/functions.py b/examples/unary/functions.py index b56c531491..81ce0289ad 100644 --- a/examples/unary/functions.py +++ b/examples/unary/functions.py @@ -482,7 +482,7 @@ def paint_prob_distribution(bins, prob_sim, S0, sig, r, T): ax.plot(x, y, label="PDF", color="black") plt.ylabel("Probability") plt.xlabel("Option price") - plt.title("Option price distribution for {} qubits ".format(bins)) + plt.title(f"Option price distribution for {bins} qubits ") ax.legend() fig.tight_layout() fig.savefig("Probability_distribution.png") diff --git a/examples/unary/main.py b/examples/unary/main.py index e68d25f6d9..cdbf7a9018 100644 --- a/examples/unary/main.py +++ b/examples/unary/main.py @@ -15,7 +15,7 @@ def main(data, bins, M, shots): # Generate the probability distribution plots fun.paint_prob_distribution(bins, prob_sim, S0, sig, r, T) - print("Histogram printed for unary simulation with {} qubits.\n".format(bins)) + print(f"Histogram printed for unary simulation with {bins} qubits.\n") # Create circuit to compute the expected payoff circuit, S = fun.load_payoff_quantum_sim(bins, S0, sig, r, T, K) @@ -28,14 +28,14 @@ def main(data, bins, M, shots): # Finding differences between exact value and quantum approximation error = fun.diff_qu_cl(qu_payoff_sim, cl_payoff) - print("Exact value of the expected payoff: {}\n".format(cl_payoff)) - print("Expected payoff from quantum simulation: {}\n".format(qu_payoff_sim)) - print("Percentage error: {} %\n".format(error)) + print(f"Exact value of the expected payoff: {cl_payoff}\n") + print(f"Expected payoff from quantum simulation: {qu_payoff_sim}\n") + print(f"Percentage error: {error} %\n") print("-" * 60 + "\n") # Applying amplitude estimation a_s, error_s = fun.amplitude_estimation(bins, M, data) - print("Amplitude estimation with a total of {} runs.\n".format(M)) + print(f"Amplitude estimation with a total of {M} runs.\n") fun.paint_AE(a_s, error_s, bins, M, data) print("Amplitude estimation result plots generated.") diff --git a/examples/variational_classifier/main.py b/examples/variational_classifier/main.py index 57c2b7b23a..a8d7ef5d4e 100644 --- a/examples/variational_classifier/main.py +++ b/examples/variational_classifier/main.py @@ -72,7 +72,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): path_angles = ( LOCAL_FOLDER / "data" - / "optimal_angles_ry_{}q_{}l.npy".format(nqubits, nlayers) + / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy" ) optimal_angles = np.load(path_angles) except: @@ -84,7 +84,7 @@ def 
main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): path_angles = ( LOCAL_FOLDER / "data" - / "optimal_angles_rxrzrx_{}q_{}l.npy".format(nqubits, nlayers) + / f"optimal_angles_rxrzrx_{nqubits}q_{nlayers}l.npy" ) optimal_angles = np.load(path_angles) except: @@ -113,7 +113,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): path_angles = ( LOCAL_FOLDER / "data" - / "optimal_angles_ry_{}q_{}l.npy".format(nqubits, nlayers) + / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy" ) np.save( path_angles, @@ -138,7 +138,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): path_angles = ( LOCAL_FOLDER / "data" - / "optimal_angles_rxrzrx_{}q_{}l.npy".format(nqubits, nlayers) + / f"optimal_angles_rxrzrx_{nqubits}q_{nlayers}l.npy" ) np.save( path_angles, @@ -170,14 +170,12 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): ] print( - "Train set | # Clases: {} | # Qubits: {} | # Layers: {} | Accuracy: {}".format( - nclasses, nqubits, nlayers, qc.Accuracy(labels_train, predictions_train) - ) + f"Train set | # Clases: {nclasses} | # Qubits: {nqubits} | # Layers: {nlayers} | " + + f"Accuracy: {qc.Accuracy(labels_train, predictions_train)}" ) print( - "Test set | # Clases: {} | # Qubits: {} | # Layers: {} | Accuracy: {}".format( - nclasses, nqubits, nlayers, qc.Accuracy(labels_test, predictions_test) - ) + f"Test set | # Clases: {nclasses} | # Qubits: {nqubits} | # Layers: {nlayers} | " + + f"Accuracy: {qc.Accuracy(labels_test, predictions_test)}" ) diff --git a/src/qibo/callbacks.py b/src/qibo/callbacks.py index 77114ed42a..65bc124144 100644 --- a/src/qibo/callbacks.py +++ b/src/qibo/callbacks.py @@ -36,14 +36,13 @@ def extend(self, x): def __getitem__(self, k): if not isinstance(k, (int, slice, list, tuple)): - raise_error(IndexError, "Unrecognized type for index {}.".format(k)) + raise_error(IndexError, f"Unrecognized type for index {k}.") if isinstance(k, int) and k >= len(self._results): raise_error( IndexError, - "Attempting to access callbacks {} run but " - "the callback has been used in {} executions." - "".format(k, len(self._results)), + f"Attempting to access callbacks {k} run but " + + f"the callback has been used in {len(self._results)} executions.", ) return self._results[k] @@ -308,12 +307,11 @@ def __init__(self, mode: Union[str, int] = "gap", check_degenerate: bool = True) if not isinstance(mode, (int, str)): raise_error( TypeError, - "Gap callback mode should be integer or " - "string but is {}.".format(type(mode)), + f"Gap callback mode should be integer or string but is {type(mode)}.", ) elif isinstance(mode, str) and mode != "gap": raise_error( - ValueError, "Unsupported mode {} for gap callback." "".format(mode) + ValueError, f"Unsupported mode {mode} for gap callback." ) self.mode = mode self.check_degenerate = check_degenerate @@ -350,8 +348,7 @@ def apply(self, backend, state): excited += 1 if excited > 1: log.warning( - "The Hamiltonian is degenerate. Using eigenvalue {} " - "to calculate gap.".format(excited) + f"The Hamiltonian is degenerate. Using eigenvalue {excited} to calculate gap." 
) self.append(gap) return gap From 791dc6078124ca8682bfeca1022f7d29569d0c2f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 9 Mar 2024 06:46:03 +0000 Subject: [PATCH 097/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- examples/variational_classifier/main.py | 4 +--- src/qibo/callbacks.py | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/examples/variational_classifier/main.py b/examples/variational_classifier/main.py index a8d7ef5d4e..20bcb5f432 100644 --- a/examples/variational_classifier/main.py +++ b/examples/variational_classifier/main.py @@ -111,9 +111,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method): method=method, ) path_angles = ( - LOCAL_FOLDER - / "data" - / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy" + LOCAL_FOLDER / "data" / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy" ) np.save( path_angles, diff --git a/src/qibo/callbacks.py b/src/qibo/callbacks.py index 65bc124144..2313e42e95 100644 --- a/src/qibo/callbacks.py +++ b/src/qibo/callbacks.py @@ -310,9 +310,7 @@ def __init__(self, mode: Union[str, int] = "gap", check_degenerate: bool = True) f"Gap callback mode should be integer or string but is {type(mode)}.", ) elif isinstance(mode, str) and mode != "gap": - raise_error( - ValueError, f"Unsupported mode {mode} for gap callback." - ) + raise_error(ValueError, f"Unsupported mode {mode} for gap callback.") self.mode = mode self.check_degenerate = check_degenerate self.evolution = None From e6c6c0f659fe05265ae4621c7b22b8197f027f64 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 11:07:24 +0400 Subject: [PATCH 098/127] bring methods back --- src/qibo/backends/pytorch.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index a34f9c919b..a630007638 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -124,6 +124,18 @@ def _append_zeros(self, state, qubits, results): state = self.np.cat([state, self.np.zeros_like(state)], dim=q) return state + def _order_probabilities(self, probs, qubits, nqubits): + """Arrange probabilities according to the given ``qubits`` ordering.""" + if probs.dim() == 0: + return probs + unmeasured, reduced = [], {} + for i in range(nqubits): + if i in qubits: + reduced[i] = i - len(unmeasured) + else: + unmeasured.append(i) + return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + def set_seed(self, seed): self.np.manual_seed(seed) np.random.seed(seed) @@ -146,6 +158,11 @@ def samples_to_decimal(self, samples, nqubits): qrange = (2**qrange).unsqueeze(1) return self.np.matmul(samples, qrange).squeeze(1) + def calculate_overlap_density_matrix(self, state1, state2): + return self.np.trace( + self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) + ) + def calculate_eigenvectors(self, matrix, k=6): return self.np.linalg.eigh(matrix) # pylint: disable=not-callable From 0dd0cfb04fd0bd0859822d4c8f3c0dd624f772e0 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 11:22:54 +0400 Subject: [PATCH 099/127] bring back method --- src/qibo/backends/pytorch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index a630007638..711ef09ed5 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -163,6 +163,9 @@ def 
calculate_overlap_density_matrix(self, state1, state2): self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) ) + def calculate_eigenvalues(self, matrix, k=6): + return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable + def calculate_eigenvectors(self, matrix, k=6): return self.np.linalg.eigh(matrix) # pylint: disable=not-callable From 15d79500954bc8a0fcae3c4a963efd7a8566ccf1 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 11:40:17 +0400 Subject: [PATCH 100/127] revert back changes --- src/qibo/backends/pytorch.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 711ef09ed5..74c5a58b38 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -136,6 +136,16 @@ def _order_probabilities(self, probs, qubits, nqubits): unmeasured.append(i) return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + def calculate_probabilities(self, state, qubits, nqubits): + rtype = self.np.real(state).dtype + unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) + state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) + if len(unmeasured_qubits) == 0: + probs = self.cast(state, dtype=rtype) + else: + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + return self._order_probabilities(probs, qubits, nqubits).ravel() + def set_seed(self, seed): self.np.manual_seed(seed) np.random.seed(seed) From a1ceee389fdd9e1e10a44cf6b6c647f9dc38e687 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 13:03:00 +0400 Subject: [PATCH 101/127] coverage --- src/qibo/backends/pytorch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 74c5a58b38..e05b9d61b6 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -126,7 +126,7 @@ def _append_zeros(self, state, qubits, results): def _order_probabilities(self, probs, qubits, nqubits): """Arrange probabilities according to the given ``qubits`` ordering.""" - if probs.dim() == 0: + if probs.dim() == 0: # pragma: no cover return probs unmeasured, reduced = [], {} for i in range(nqubits): @@ -189,7 +189,7 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): - if self.issparse(matrix1) or self.issparse(matrix2): + if self.issparse(matrix1) or self.issparse(matrix2): # pragma: no cover return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 return self.np.matmul(matrix1, matrix2) From fd79d31f9affd9587f14f8ea987162ca30ec3ab4 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 15:07:50 +0400 Subject: [PATCH 102/127] reviewing tests --- tests/test_hamiltonians.py | 28 +++++++++++---------- tests/test_hamiltonians_symbolic.py | 39 +++++++++++++---------------- tests/test_hamiltonians_trotter.py | 10 +++----- tests/test_measurements.py | 2 +- tests/test_models_encodings.py | 9 +++---- tests/utils.py | 7 ------ 6 files changed, 40 insertions(+), 55 deletions(-) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index e0a2339577..fa62777e0b 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -4,9 +4,10 @@ import pytest from qibo import Circuit, gates, hamiltonians +from qibo.quantum_info.random_ensembles import random_statevector, 
random_density_matrix from qibo.symbols import I, Z -from .utils import random_complex, random_sparse_matrix +from .utils import random_sparse_matrix def test_hamiltonian_init(backend): @@ -198,16 +199,15 @@ def test_hamiltonian_matmul_states(backend, sparse_type): matrix = random_sparse_matrix(backend, nstates, sparse_type) H = hamiltonians.Hamiltonian(nqubits, matrix, backend=backend) - hm = backend.to_numpy(H.matrix) - v = random_complex(2**nqubits, dtype=hm.dtype) - m = random_complex((2**nqubits, 2**nqubits), dtype=hm.dtype) + hm = H.matrix + v = random_statevector(2**nqubits, backend=backend) + v = backend.cast(v, dtype=hm.dtype) + m = random_density_matrix(2**nqubits, backend=backend) + m = backend.cast(m, dtype=hm.dtype) Hv = H @ backend.cast(v) Hm = H @ backend.cast(m) - backend.assert_allclose(Hv, hm.dot(v), atol=1e-7) # needs atol for cuquantum - backend.assert_allclose(Hm, (hm @ m)) - - Hstate = H @ backend.cast(v) - backend.assert_allclose(Hstate, hm.dot(v)) + backend.assert_allclose(Hv, hm @ v) # needs atol for cuquantum + backend.assert_allclose(Hm, hm @ m) @pytest.mark.parametrize("density_matrix", [True, False]) @@ -237,12 +237,14 @@ def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): matrix = backend.to_numpy(h.matrix) if density_matrix: - state = random_complex((2**h.nqubits, 2**h.nqubits)) + state = random_density_matrix(2**h.nqubits, backend=backend) + state = backend.to_numpy(state) state = state + state.T.conj() norm = np.trace(state) target_ev = np.trace(matrix.dot(state)).real else: - state = random_complex(2**h.nqubits) + state = random_statevector(2**h.nqubits, backend=backend) + state = backend.to_numpy(state) norm = np.sum(np.abs(state) ** 2) target_ev = np.sum(state.conj() * matrix.dot(state)).real @@ -252,7 +254,7 @@ def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): def test_hamiltonian_expectation_errors(backend): h = hamiltonians.XXZ(nqubits=3, delta=0.5, backend=backend) - state = random_complex((4, 4, 4)) + state = np.random.rand(4, 4, 4) + 1j * np.random.rand(4, 4, 4) with pytest.raises(ValueError): h.expectation(state) with pytest.raises(TypeError): @@ -290,7 +292,7 @@ def test_hamiltonian_expectation_from_samples(backend): def test_hamiltonian_expectation_from_samples_errors(backend): - obs = random_complex((4, 4)) + obs = random_density_matrix(4, backend=backend) h = hamiltonians.Hamiltonian(2, obs, backend=backend) with pytest.raises(NotImplementedError): h.expectation_from_samples(None, qubit_map=None) diff --git a/tests/test_hamiltonians_symbolic.py b/tests/test_hamiltonians_symbolic.py index b6026b9af8..b0d778a6cf 100644 --- a/tests/test_hamiltonians_symbolic.py +++ b/tests/test_hamiltonians_symbolic.py @@ -5,10 +5,9 @@ import sympy from qibo import Circuit, gates, hamiltonians +from qibo.quantum_info.random_ensembles import random_statevector, random_density_matrix from qibo.symbols import I, Y, Z -from .utils import random_complex - def symbolic_tfim(nqubits, h=1.0): """Constructs symbolic Hamiltonian for TFIM.""" @@ -229,15 +228,11 @@ def test_symbolic_hamiltonian_hamiltonianmatmul(backend, nqubits, calcterms, cal @pytest.mark.parametrize("density_matrix", [False, True]) @pytest.mark.parametrize("calcterms", [False, True]) def test_symbolic_hamiltonian_matmul(backend, nqubits, density_matrix, calcterms): - if density_matrix: - # from qibo.core.states import MatrixState - shape = (2**nqubits, 2**nqubits) - # state = MatrixState.from_tensor(random_complex(shape)) - else: - # from 
qibo.core.states import VectorState - shape = (2**nqubits,) - # state = VectorState.from_tensor(random_complex(shape)) - state = random_complex(shape) + state = ( + random_density_matrix(2**nqubits, backend=backend) + if density_matrix + else random_statevector(2**nqubits, backend=backend) + ) local_ham = hamiltonians.SymbolicHamiltonian( symbolic_tfim(nqubits, h=1.0), backend=backend ) @@ -245,7 +240,7 @@ def test_symbolic_hamiltonian_matmul(backend, nqubits, density_matrix, calcterms if calcterms: _ = local_ham.terms local_matmul = local_ham @ state - target_matmul = dense_ham @ backend.cast(state) + target_matmul = dense_ham @ state backend.assert_allclose(local_matmul, target_matmul) @@ -265,12 +260,12 @@ def test_symbolic_hamiltonian_state_expectation( _ = local_ham.dense dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) + 2 - state = backend.cast(random_complex((2**nqubits,))) + state = random_statevector(2**nqubits, backend=backend) + local_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(local_ev, target_ev) - state = random_complex((2**nqubits,)) local_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(local_ev, target_ev) @@ -296,21 +291,19 @@ def test_symbolic_hamiltonian_state_expectation_different_nqubits( dense_matrix = np.kron(backend.to_numpy(dense_ham.matrix), np.eye(4)) dense_ham = hamiltonians.Hamiltonian(5, dense_matrix, backend=backend) + state = random_statevector(2**5, backend=backend) + if give_nqubits: - state = backend.cast(random_complex((2**5,))) local_ev = local_ham.expectation(state) target_ev = dense_ham.expectation(state) backend.assert_allclose(local_ev, target_ev) - state = random_complex((2**5,)) local_ev = local_ham.expectation(state) target_ev = dense_ham.expectation(state) backend.assert_allclose(local_ev, target_ev) else: - state = backend.cast(random_complex((2**5,))) with pytest.raises(ValueError): local_ev = local_ham.expectation(state) - state = random_complex((2**5,)) with pytest.raises(ValueError): local_ev = local_ham.expectation(state) @@ -356,10 +349,12 @@ def test_symbolic_hamiltonian_abstract_symbol_ev(backend, density_matrix, calcte local_ham = hamiltonians.SymbolicHamiltonian(form, backend=backend) if calcterms: _ = local_ham.terms - if density_matrix: - state = backend.cast(random_complex((4, 4))) - else: - state = backend.cast(random_complex((4,))) + + state = ( + random_density_matrix(4, backend=backend) + if density_matrix + else random_statevector(4, backend=backend) + ) local_ev = local_ham.expectation(state) target_ev = local_ham.dense.expectation(state) backend.assert_allclose(local_ev, target_ev) diff --git a/tests/test_hamiltonians_trotter.py b/tests/test_hamiltonians_trotter.py index 04a77ff4f0..a438a61bd0 100644 --- a/tests/test_hamiltonians_trotter.py +++ b/tests/test_hamiltonians_trotter.py @@ -7,8 +7,6 @@ from qibo.backends import NumpyBackend from qibo.quantum_info import random_hermitian, random_statevector -from .utils import random_complex - @pytest.mark.parametrize("nqubits", [3, 4]) @pytest.mark.parametrize("model", ["TFIM", "XXZ", "Y", "MaxCut"]) @@ -80,21 +78,21 @@ def test_trotter_hamiltonian_operator_add_and_sub(backend, nqubits=3): @pytest.mark.parametrize("nqubits,normalize", [(3, False), (4, False)]) def test_trotter_hamiltonian_matmul(backend, nqubits, normalize): """Test Trotter Hamiltonian expectation value.""" + state = random_statevector(2**nqubits, 
backend=backend) + local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - state = backend.cast(random_complex((2**nqubits,))) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) - state = random_complex((2**nqubits,)) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) - trotter_matmul = local_ham @ backend.cast(state) - target_matmul = dense_ham @ backend.cast(state) + trotter_matmul = local_ham @ state + target_matmul = dense_ham @ state backend.assert_allclose(trotter_matmul, target_matmul) diff --git a/tests/test_measurements.py b/tests/test_measurements.py index 26a3178ed7..fc425b0db1 100644 --- a/tests/test_measurements.py +++ b/tests/test_measurements.py @@ -76,7 +76,7 @@ def test_measurement_gate(backend, n, nshots): def test_multiple_qubit_measurement_gate(backend): c = models.Circuit(2) c.add(gates.X(0)) - measure = c.add(gates.M(0, 1)) + c.add(gates.M(0, 1)) result = backend.execute_circuit(c, nshots=100) target_binary_samples = np.zeros((100, 2)) target_binary_samples[:, 0] = 1 diff --git a/tests/test_models_encodings.py b/tests/test_models_encodings.py index ecafa73145..0122ef9365 100644 --- a/tests/test_models_encodings.py +++ b/tests/test_models_encodings.py @@ -125,18 +125,15 @@ def test_unary_encoder(backend, nqubits, architecture, kind): # sampling random data in interval [-1, 1] sampler = np.random.default_rng(1) data = 2 * sampler.random(nqubits) - 1 - data = backend.cast(data, dtype=data.dtype) - - if kind is not None: - data = kind(data) + data = kind(data) if kind is not None else backend.cast(data, dtype=data.dtype) circuit = unary_encoder(data, architecture=architecture) state = backend.execute_circuit(circuit).state() indexes = np.flatnonzero(state) - state = np.real(state[indexes]) + state = backend.np.real(state[indexes]) backend.assert_allclose( - state, data / backend.to_numpy(backend.calculate_norm(data, order=2)) + state, backend.cast(data) / backend.calculate_norm(data, 2) ) diff --git a/tests/utils.py b/tests/utils.py index 080eb35a63..5d77b812dd 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -2,13 +2,6 @@ from scipy import sparse -def random_complex(shape, dtype=None): - x = np.random.random(shape) + 1j * np.random.random(shape) - if dtype is None: - return x - return x.astype(dtype) - - def random_sparse_matrix(backend, n, sparse_type=None): if backend.name == "tensorflow": nonzero = int(0.1 * n * n) From 14bb2103d5ac6a81a7e2a535095be081dc318497 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 9 Mar 2024 11:08:14 +0000 Subject: [PATCH 103/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- tests/test_hamiltonians.py | 2 +- tests/test_hamiltonians_symbolic.py | 8 ++++---- tests/test_models_encodings.py | 4 +--- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index fa62777e0b..1d1742e328 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -4,7 +4,7 @@ import pytest from qibo import Circuit, gates, hamiltonians -from qibo.quantum_info.random_ensembles import random_statevector, random_density_matrix +from qibo.quantum_info.random_ensembles import 
random_density_matrix, random_statevector from qibo.symbols import I, Z from .utils import random_sparse_matrix diff --git a/tests/test_hamiltonians_symbolic.py b/tests/test_hamiltonians_symbolic.py index b0d778a6cf..d9a3b597e4 100644 --- a/tests/test_hamiltonians_symbolic.py +++ b/tests/test_hamiltonians_symbolic.py @@ -5,7 +5,7 @@ import sympy from qibo import Circuit, gates, hamiltonians -from qibo.quantum_info.random_ensembles import random_statevector, random_density_matrix +from qibo.quantum_info.random_ensembles import random_density_matrix, random_statevector from qibo.symbols import I, Y, Z @@ -349,10 +349,10 @@ def test_symbolic_hamiltonian_abstract_symbol_ev(backend, density_matrix, calcte local_ham = hamiltonians.SymbolicHamiltonian(form, backend=backend) if calcterms: _ = local_ham.terms - + state = ( - random_density_matrix(4, backend=backend) - if density_matrix + random_density_matrix(4, backend=backend) + if density_matrix else random_statevector(4, backend=backend) ) local_ev = local_ham.expectation(state) diff --git a/tests/test_models_encodings.py b/tests/test_models_encodings.py index 0122ef9365..6736968e4d 100644 --- a/tests/test_models_encodings.py +++ b/tests/test_models_encodings.py @@ -132,9 +132,7 @@ def test_unary_encoder(backend, nqubits, architecture, kind): indexes = np.flatnonzero(state) state = backend.np.real(state[indexes]) - backend.assert_allclose( - state, backend.cast(data) / backend.calculate_norm(data, 2) - ) + backend.assert_allclose(state, backend.cast(data) / backend.calculate_norm(data, 2)) @pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) From 3ba35e8484748f91276c4a5719aaf77f3dc0e6a7 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Sat, 9 Mar 2024 16:02:42 +0400 Subject: [PATCH 104/127] documentation --- doc/source/api-reference/qibo.rst | 8 ++++++-- doc/source/getting-started/backends.rst | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst index 5e376e58ca..99e219dcae 100644 --- a/doc/source/api-reference/qibo.rst +++ b/doc/source/api-reference/qibo.rst @@ -2367,7 +2367,7 @@ The user can switch backends using qibo.set_backend("numpy") before creating any circuits or gates. The default backend is the first available -from ``qibojit``, ``tensorflow``, ``numpy``. +from ``qibojit``, ``pytorch``, ``tensorflow``, ``numpy``. Some backends support different platforms. For example, the qibojit backend provides two platforms (``cupy`` and ``cuquantum``) when used on GPU. @@ -2448,4 +2448,8 @@ Alternatively, a Clifford circuit can also be executed starting from the :class: Cloud Backends ^^^^^^^^^^^^^^ -Additional backends, that support the remote execution of quantum circuits through cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin `qibo-cloud-backends `_. For more information please refer to the `official documentation `_. +Additional backends that support the remote execution of quantum circuits through +cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin +`qibo-cloud-backends `_. +For more information please refer to the +`official documentation `_. 
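The backend-selection workflow documented in the hunk above reduces to a single call issued before any circuit is built. The snippet below is a minimal sketch written for illustration, not an excerpt from the diff; it only uses names that appear elsewhere in this series (``qibo.set_backend``, ``Circuit``, ``gates``, ``result.frequencies``), and the non-default backend strings are assumed to match the optional packages listed in the documentation.

    import qibo
    from qibo import Circuit, gates

    # select the simulation backend before creating circuits or gates;
    # "numpy" ships with the base package, while "qibojit", "pytorch" and
    # "tensorflow" are provided by optional packages
    qibo.set_backend("numpy")

    circuit = Circuit(1)
    circuit.add(gates.X(0))
    circuit.add(gates.M(0))
    result = circuit(nshots=100)  # executed on the backend selected above
    print(result.frequencies())
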
diff --git a/doc/source/getting-started/backends.rst b/doc/source/getting-started/backends.rst index 5fd4770694..df6a5c8cef 100644 --- a/doc/source/getting-started/backends.rst +++ b/doc/source/getting-started/backends.rst @@ -36,10 +36,10 @@ if the corresponding packages are installed, following the hierarchy below: * :ref:`installing-numpy`: a lightweight quantum simulator shipped with the :ref:`installing-qibo` base package. Use this simulator if your CPU architecture is not supported by the other backends. Please note that the simulation performance is quite poor in comparison to other backends. * :ref:`installing-qibojit`: an efficient simulation backend for CPU, GPU and multi-GPU based on just-in-time (JIT) compiled custom operators. Install this package if you need to simulate quantum circuits with large number of qubits or complex quantum algorithms which may benefit from computing parallelism. -* `qibotn `_: an interface to Tensor Networks simulation algorithms designed for GPUs and multi-node CPUs. This backend makes possible scaling quantum circuit simulation to a larger number of qubits. * :ref:`installing-tensorflow`: a pure TensorFlow implementation for quantum simulation which provides access to gradient descent optimization and the possibility to implement classical and quantum architectures together. This backend is not optimized for memory and speed, use :ref:`installing-qibojit` instead. * :ref:`installing-pytorch`: a pure PyTorch implementation for quantum simulation which provides access to gradient descent optimization and the possibility to implement classical and quantum architectures together. This backend is not optimized for memory and speed, use :ref:`installing-qibojit` instead. * :ref:`clifford `: a specialized backend for the simulation of quantum circuits with Clifford gates. This backend uses :ref:`installing-qibojit` and/or :ref:`installing-numpy`. +* `qibotn `_: an interface to Tensor Networks simulation algorithms designed for GPUs and multi-node CPUs. This backend makes possible scaling quantum circuit simulation to a larger number of qubits. The default backend that is used is the first available from the above list. The user can switch to a different using the ``qibo.set_backend`` method From d7174942798c24dd595f994d69816cad2ea8a1ea Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 9 Mar 2024 12:03:05 +0000 Subject: [PATCH 105/127] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- doc/source/api-reference/qibo.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst index 99e219dcae..a3f0c916ae 100644 --- a/doc/source/api-reference/qibo.rst +++ b/doc/source/api-reference/qibo.rst @@ -2448,8 +2448,8 @@ Alternatively, a Clifford circuit can also be executed starting from the :class: Cloud Backends ^^^^^^^^^^^^^^ -Additional backends that support the remote execution of quantum circuits through -cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin -`qibo-cloud-backends `_. -For more information please refer to the +Additional backends that support the remote execution of quantum circuits through +cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin +`qibo-cloud-backends `_. +For more information please refer to the `official documentation `_. 
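Most of the preceding patches mechanically replace ``str.format`` calls with f-strings. As a self-contained sketch of the pattern (written for illustration, not taken from the diffs): both forms below build the same string, and the leading ``f`` prefix is what makes the ``{...}`` placeholders interpolate.

    nqubits = 3

    # old style, removed throughout these patches
    old = "nqubits must be a positive integer but is {}.".format(nqubits)

    # f-string style introduced by these patches; without the f prefix the
    # braces would be kept as literal text instead of being interpolated
    new = f"nqubits must be a positive integer but is {nqubits}."

    assert old == new == "nqubits must be a positive integer but is 3."
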
From edeff3ada35e83e7d48fc784ae58b6621dce1f75 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 09:10:44 +0400 Subject: [PATCH 106/127] fix test seed --- tests/test_backends_clifford.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 694853254b..3f3a2d4287 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -165,7 +165,8 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): backend.assert_allclose(np_count / nshots, clif_count / nshots, atol=1e-1) -def test_collapsing_measurements(backend): +@pytest.mark.parametrize("seed", [2024]) +def test_collapsing_measurements(backend, seed): clifford_bkd = construct_clifford_backend(backend) gate_queue = random_clifford(3, density_matrix=True, backend=backend).queue measured_qubits = np.random.choice(range(3), size=2, replace=False) @@ -180,8 +181,13 @@ def test_collapsing_measurements(backend): c2.add(g) c1.add(gates.M(*range(3))) c2.add(gates.M(*range(3))) - clifford_res = clifford_bkd.execute_circuit(c1, nshots=1000) - numpy_res = numpy_bkd.execute_circuit(c2, nshots=1000) + + clifford_bkd.set_seed(seed) + clifford_res = clifford_bkd.execute_circuit(c1, nshots=100) + + numpy_bkd.set_seed(seed) + numpy_res = numpy_bkd.execute_circuit(c2, nshots=100) + backend.assert_allclose( clifford_res.probabilities(), backend.cast(numpy_res.probabilities()), atol=1e-1 ) From 7c47114ac5309a1a1dac1b6d61824044f551692b Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 05:27:58 +0000 Subject: [PATCH 107/127] Update src/qibo/backends/numpy.py Co-authored-by: BrunoLiegiBastonLiegi <45011234+BrunoLiegiBastonLiegi@users.noreply.github.com> --- src/qibo/backends/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 406138ff47..a62c9d6995 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -650,7 +650,7 @@ def samples_to_decimal(self, samples, nqubits): return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] def calculate_frequencies(self, samples): - res, counts = np.unique(samples, return_counts=True) + res, counts = self.np.unique(samples, return_counts=True) return collections.Counter(dict(zip(res, counts))) def update_frequencies(self, frequencies, probabilities, nsamples): From 0ab337285f7ad8aea0639bc4ca2499ccd15b39e9 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 05:28:24 +0000 Subject: [PATCH 108/127] Update src/qibo/backends/pytorch.py Co-authored-by: BrunoLiegiBastonLiegi <45011234+BrunoLiegiBastonLiegi@users.noreply.github.com> --- src/qibo/backends/pytorch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index e05b9d61b6..00a2e0e45e 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -31,7 +31,7 @@ def __init__(self, dtype): self.dtype = torch_dtype_dict[dtype] def _cast(self, x, dtype): - return self.torch.tensor(x, dtype=dtype) + return self.torch.as_tensor(x, dtype=dtype) def Unitary(self, u): return self._cast(u, dtype=self.dtype) From 714fda275cda187d85610b0e597df1d216d3b1d3 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 05:35:34 +0000 Subject: [PATCH 109/127] Update src/qibo/hamiltonians/hamiltonians.py Co-authored-by: BrunoLiegiBastonLiegi <45011234+BrunoLiegiBastonLiegi@users.noreply.github.com> --- 
src/qibo/hamiltonians/hamiltonians.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index b24a9a2c23..2c527df0ba 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -250,11 +250,7 @@ def __mul__(self, o): if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - r._eigenvalues = ( - o * self.backend.np.flip(self._eigenvalues, [0]) - if isinstance(self.backend, PyTorchBackend) - else o * self._eigenvalues[::-1] - ) + r._eigenvalues = o * self.backend.np.flip(self._eigenvalues, (0,)) if self._eigenvectors is not None: if self.backend.np.real(o) > 0: # TODO: see above r._eigenvectors = self._eigenvectors From 055bfec99b08d7e4f0aae74ceaba5f52222c9254 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 10:49:19 +0400 Subject: [PATCH 110/127] update `pyproject.toml` --- poetry.lock | 37 ++++++------------------------------- pyproject.toml | 1 + 2 files changed, 7 insertions(+), 31 deletions(-) diff --git a/poetry.lock b/poetry.lock index 741699d53c..cf89e825c8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "absl-py" @@ -902,7 +902,6 @@ python-versions = "*" files = [ {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:c5598670f4f31906d725f5ea852f0df675522e3ff5a7bf886057eab36497062d"}, {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:67b6c7427d9ab50cb82e01360948bd1b23d73775b5767ab92071c7afcfec4b8b"}, - {file = "cutensor_cu11-1.7.0-py3-none-win_amd64.whl", hash = "sha256:d173b3d0fd51cf761b371a4d4be9a3afd3ef230a55ae4336ae31e905336480e1"}, ] [[package]] @@ -914,7 +913,6 @@ python-versions = "*" files = [ {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:515caa2406e09ffe9c6524328b7da2106169811665f7684836052753a30dda27"}, {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:29bdde551788fd3a611992026a5bb422831069e38fd44ab920af5aa00cffa12c"}, - {file = "cutensor_cu12-1.7.0-py3-none-win_amd64.whl", hash = "sha256:e1a9a759a615a64d1b8c6d2b8ffd925deb805750c28481b1a8310d05f35ce229"}, ] [[package]] @@ -1570,11 +1568,11 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" @@ -2303,8 +2301,6 @@ description = "Clang Python Bindings, mirrored from the official LLVM 
repo: http optional = false python-versions = "*" files = [ - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -2546,9 +2542,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.23.3", markers = "python_version > \"3.10\""}, - {version = ">=1.21.2", markers = "python_version > \"3.9\" and python_version <= \"3.10\""}, {version = ">1.20", markers = "python_version <= \"3.9\""}, + {version = ">=1.23.3", markers = "python_version > \"3.10\""}, + {version = ">=1.21.2", markers = "python_version > \"3.9\""}, ] [package.extras] @@ -3926,7 +3922,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3934,16 +3929,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = 
"PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3960,7 +3947,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3968,7 +3954,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -5425,16 +5410,6 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = 
"sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, - {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, - {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -5504,4 +5479,4 @@ tensorflow = ["tensorflow"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "33e0ecec9181679cbd8222e3034bcbdd95b5fc3371ebbf0a552d320d725b342a" +content-hash = "f2af75884607f9e26cb077dadd7e49f4037056b0fc096cb37f389b01b0732f74" diff --git a/pyproject.toml b/pyproject.toml index 88a86bc953..749106c3ff 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ numpy = "^1.26.4" networkx = "^3.2.1" cvxpy = { version = "^1.4.2", optional = true } tensorflow = { version = "^2.14.1,<2.16", markers = "sys_platform == 'linux' or sys_platform == 'darwin'", optional = true } +torch = { version = "^2.1.1", optional = true } [tool.poetry.group.dev] optional = true From 64991ebe355e3da21675b911c82ae1062e6d56dc Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 15:22:39 +0400 Subject: [PATCH 111/127] fix test seed --- tests/test_backends_clifford.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 3f3a2d4287..b04fa01d93 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -128,12 +128,17 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): backend.set_seed(2024) nqubits, nshots = 3, 200 clifford_bkd = construct_clifford_backend(backend) + c = 
random_clifford(nqubits, seed=1, backend=backend) c.density_matrix = True c_copy = c.copy() c.add(gates.M(*MEASURED_QUBITS)) c_copy.add(gates.M(*MEASURED_QUBITS)) + + numpy_bkd.set_seed(2024) numpy_result = numpy_bkd.execute_circuit(c, nshots=nshots) + + clifford_bkd.set_seed(2024) clifford_result = clifford_bkd.execute_circuit(c_copy, nshots=nshots) backend.assert_allclose(backend.cast(numpy_result.state()), clifford_result.state()) From 3c6258f0fb11e03258c07faf6d7a5e0f873a0497 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 15:48:03 +0400 Subject: [PATCH 112/127] fix test seed --- tests/test_backends_clifford.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index b04fa01d93..f99915c15a 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -173,8 +173,11 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): @pytest.mark.parametrize("seed", [2024]) def test_collapsing_measurements(backend, seed): clifford_bkd = construct_clifford_backend(backend) - gate_queue = random_clifford(3, density_matrix=True, backend=backend).queue - measured_qubits = np.random.choice(range(3), size=2, replace=False) + gate_queue = random_clifford( + 3, density_matrix=True, seed=seed, backend=backend + ).queue + local_state = np.random.default_rng(seed) + measured_qubits = local_state.choice(range(3), size=2, replace=False) c1 = Circuit(3) c2 = Circuit(3, density_matrix=True) for i, g in enumerate(gate_queue): From 1ba37f9f137fe01ef4847497deca6e24025b6bec Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 16:01:22 +0400 Subject: [PATCH 113/127] fix test --- src/qibo/hamiltonians/hamiltonians.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 2c527df0ba..8e17e88fbb 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -250,7 +250,8 @@ def __mul__(self, o): if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - r._eigenvalues = o * self.backend.np.flip(self._eigenvalues, (0,)) + axis = (0,) if isinstance(self.backend, PyTorchBackend) else 0 + r._eigenvalues = o * self.backend.np.flip(self._eigenvalues, axis) if self._eigenvectors is not None: if self.backend.np.real(o) > 0: # TODO: see above r._eigenvectors = self._eigenvectors From b60d1f4c4ed79919f1f2b6c2228f10cdad5d3932 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 16:11:33 +0400 Subject: [PATCH 114/127] revert change --- src/qibo/backends/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index a62c9d6995..406138ff47 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -650,7 +650,7 @@ def samples_to_decimal(self, samples, nqubits): return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] def calculate_frequencies(self, samples): - res, counts = self.np.unique(samples, return_counts=True) + res, counts = np.unique(samples, return_counts=True) return collections.Counter(dict(zip(res, counts))) def update_frequencies(self, frequencies, probabilities, nsamples): From 9c981692e4d833ce6bfce3c95e3542dc5f7db717 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 15:50:33 +0400 Subject: [PATCH 115/127] corrections 
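Regarding the branch reintroduced in PATCH 113 above: it passes a dims tuple to the PyTorch backend and a plain integer axis otherwise. As a standalone illustration of the underlying API difference (assuming recent numpy and torch releases, not qibo code):

import numpy as np
import torch

eigenvalues_np = np.array([-1.0, 0.5, 2.0])
eigenvalues_pt = torch.tensor([-1.0, 0.5, 2.0])

print(np.flip(eigenvalues_np, 0))        # numpy.flip accepts a plain int axis
print(torch.flip(eigenvalues_pt, (0,)))  # torch.flip requires a tuple/list of dims
# torch.flip(eigenvalues_pt, 0) raises a TypeError because dims must be a sequence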
by andrea --- src/qibo/backends/numpy.py | 16 ++++++----- src/qibo/backends/pytorch.py | 51 +++++++++++++++++++----------------- 2 files changed, 36 insertions(+), 31 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 406138ff47..4a43418a3a 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -279,11 +279,11 @@ def apply_channel_density_matrix(self, channel, state, nqubits): def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): - state = self.np.expand_dims(state, axis=q) + state = self.np.expand_dims(state, q) if r: - state = self.np.concatenate([self.np.zeros_like(state), state], axis=q) + state = self.np.concatenate([self.np.zeros_like(state), state], q) else: - state = self.np.concatenate([state, self.np.zeros_like(state)], axis=q) + state = self.np.concatenate([state, self.np.zeros_like(state)], q) return state def collapse_state(self, state, qubits, shot, nqubits, normalize=True): @@ -641,13 +641,15 @@ def aggregate_shots(self, shots): return self.cast(shots, dtype=shots[0].dtype) def samples_to_binary(self, samples, nqubits): - qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") - return self.np.mod(self.np.right_shift(samples[:, self.np.newaxis], qrange), 2) + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) + return self.np.mod( + self.np.right_shift(self.cast(samples[:, None], dtype="int32"), qrange), 2 + ) def samples_to_decimal(self, samples, nqubits): - qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) qrange = (2**qrange)[:, None] - return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] + return self.np.matmul(samples, qrange)[:, 0] def calculate_frequencies(self, samples): res, counts = np.unique(samples, return_counts=True) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 00a2e0e45e..04a4fa26d9 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -27,11 +27,10 @@ class TorchMatrices(NumpyMatrices): def __init__(self, dtype): super().__init__(dtype) - self.torch = torch self.dtype = torch_dtype_dict[dtype] def _cast(self, x, dtype): - return self.torch.as_tensor(x, dtype=dtype) + return torch.as_tensor(x, dtype=dtype) def Unitary(self, u): return self._cast(u, dtype=self.dtype) @@ -54,8 +53,12 @@ def __init__(self): self.np = torch self.dtype = torch_dtype_dict[self.dtype] self.tensor_types = (self.np.Tensor, np.ndarray) - # Transpose function in Torch works in a different way than numpy + + # These functions in Torch works in a different way than numpy or have different names self.np.transpose = torch.permute + self.np.expand_dims = self.np.unsqueeze + self.np.mod = torch.remainder + self.np.right_shift = torch.bitwise_right_shift def set_device(self, device): # pragma: no cover self.device = device @@ -114,15 +117,15 @@ def to_numpy(self, x): return x - def _append_zeros(self, state, qubits, results): - """Helper method for collapse.""" - for q, r in zip(qubits, results): - state = self.np.unsqueeze(state, dim=q) - if r: - state = self.np.cat([self.np.zeros_like(state), state], dim=q) - else: - state = self.np.cat([state, self.np.zeros_like(state)], dim=q) - return state + # def _append_zeros(self, state, qubits, results): + # """Helper method for collapse.""" + # for q, r in zip(qubits, results): + # state = self.np.unsqueeze(state, dim=q) + # if r: + # state = 
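The `samples_to_binary` rewrite in PATCH 115 above relies on numpy-style names that the same patch aliases onto torch (`mod` to `remainder`, `right_shift` to `bitwise_right_shift`), so one expression serves both engines. A minimal numpy sketch of the decimal-to-bitstring conversion (standalone, illustrative):

import numpy as np

nqubits = 3
samples = np.array([0, 5, 7, 2])                        # decimal shot outcomes
qrange = np.arange(nqubits - 1, -1, -1, dtype=np.int32)

# right-shift each sample by (nqubits-1, ..., 0) and keep the parity:
# one row of bits per shot, most-significant qubit first.
bits = np.mod(np.right_shift(samples[:, None], qrange), 2)
print(bits)
# [[0 0 0]
#  [1 0 1]
#  [1 1 1]
#  [0 1 0]]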
self.np.cat([self.np.zeros_like(state), state], dim=q) + # else: + # state = self.np.cat([state, self.np.zeros_like(state)], dim=q) + # return state def _order_probabilities(self, probs, qubits, nqubits): """Arrange probabilities according to the given ``qubits`` ordering.""" @@ -155,18 +158,18 @@ def sample_shots(self, probabilities, nshots): self.cast(probabilities, dtype="float"), nshots, replacement=True ) - def samples_to_binary(self, samples, nqubits): - samples = self.cast(samples, dtype="int32") - qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) - samples = samples.int() - samples = samples[:, None] >> qrange - return samples % 2 - - def samples_to_decimal(self, samples, nqubits): - samples = self.cast(samples, dtype="int32") - qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) - qrange = (2**qrange).unsqueeze(1) - return self.np.matmul(samples, qrange).squeeze(1) + # def samples_to_binary(self, samples, nqubits): + # samples = self.cast(samples, dtype="int32") + # qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) + # samples = samples.int() + # samples = samples[:, None] >> qrange + # return samples % 2 + + # def samples_to_decimal(self, samples, nqubits): + # samples = self.cast(samples, dtype="int32") + # qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) + # qrange = (2**qrange).unsqueeze(1) + # return self.np.matmul(samples, qrange).squeeze(1) def calculate_overlap_density_matrix(self, state1, state2): return self.np.trace( From cdc4e7881d8c8d6f550ae265e7e9988dbda01c21 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 16:03:20 +0400 Subject: [PATCH 116/127] fixes --- src/qibo/backends/numpy.py | 2 +- src/qibo/backends/pytorch.py | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 4a43418a3a..dd68143777 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -539,7 +539,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): assert circuit.has_collapse final_state = self.cast(np.mean(self.to_numpy(final_states), 0)) if circuit.measurements: - qubits = [q for m in circuit.measurements for q in m.target_qubits] + # qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( final_state, circuit.measurements, diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 04a4fa26d9..bfd45222a9 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -117,16 +117,6 @@ def to_numpy(self, x): return x - # def _append_zeros(self, state, qubits, results): - # """Helper method for collapse.""" - # for q, r in zip(qubits, results): - # state = self.np.unsqueeze(state, dim=q) - # if r: - # state = self.np.cat([self.np.zeros_like(state), state], dim=q) - # else: - # state = self.np.cat([state, self.np.zeros_like(state)], dim=q) - # return state - def _order_probabilities(self, probs, qubits, nqubits): """Arrange probabilities according to the given ``qubits`` ordering.""" if probs.dim() == 0: # pragma: no cover @@ -158,19 +148,6 @@ def sample_shots(self, probabilities, nshots): self.cast(probabilities, dtype="float"), nshots, replacement=True ) - # def samples_to_binary(self, samples, nqubits): - # samples = self.cast(samples, dtype="int32") - # qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) - # samples = samples.int() - # samples = samples[:, None] >> qrange - # return samples 
% 2 - - # def samples_to_decimal(self, samples, nqubits): - # samples = self.cast(samples, dtype="int32") - # qrange = self.np.arange(nqubits - 1, -1, -1, dtype=torch.int32) - # qrange = (2**qrange).unsqueeze(1) - # return self.np.matmul(samples, qrange).squeeze(1) - def calculate_overlap_density_matrix(self, state1, state2): return self.np.trace( self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) From be31ee82cef55f807f21f9f2a42c3555b197b363 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 16:48:07 +0400 Subject: [PATCH 117/127] solved errors --- src/qibo/backends/numpy.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index dd68143777..6e0b4ad3ef 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -474,7 +474,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): Execute the circuit `nshots` times to retrieve probabilities, frequencies and samples. Note that this method is called only if a unitary channel is present in the circuit (i.e. noisy simulation) and `density_matrix=False`, or - if some collapsing measuremnt is performed. + if some collapsing measurement is performed. """ if ( @@ -652,6 +652,7 @@ def samples_to_decimal(self, samples, nqubits): return self.np.matmul(samples, qrange)[:, 0] def calculate_frequencies(self, samples): + # Samples are a list of strings so there is no advantage in using Torch or other backends res, counts = np.unique(samples, return_counts=True) return collections.Counter(dict(zip(res, counts))) From 573008cf89e3682cfd1ad42e0e9fc85bc742cfa3 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 16:56:26 +0400 Subject: [PATCH 118/127] fix seed test --- tests/test_models_circuit_features.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index 2fb310bfea..08bceedaa7 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -330,9 +330,9 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): ) elif backend.__class__.__name__ == "PyTorchBackend": test_frequencies = ( - Counter({"1": 817, "0": 207}) + Counter({"1": 810, "0": 214}) if nqubits == 1 - else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) + else Counter({"11": 685, "01": 160, "10": 144, "00": 35}) ) else: test_frequencies = ( @@ -340,5 +340,6 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): if nqubits == 1 else Counter({"11": 618, "10": 169, "01": 185, "00": 52}) ) + print(result.frequencies()) for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) From a7a496d8fcad9041138a57e2f106b0b586718c03 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 17:31:47 +0400 Subject: [PATCH 119/127] improve calculate_hamiltonian_state_product method --- src/qibo/backends/numpy.py | 20 +++++++++----------- src/qibo/backends/pytorch.py | 16 ++++------------ tests/test_hamiltonians.py | 5 +++++ 3 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 6e0b4ad3ef..ae96a6eef5 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -718,9 +718,9 @@ def calculate_overlap(self, state1, state2): return self.np.abs(self.np.sum(np.conj(self.cast(state1)) * self.cast(state2))) def calculate_overlap_density_matrix(self, 
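As a small illustration of the `calculate_frequencies` comment added in PATCH 117 above: samples arrive as a list of bitstrings, so plain `np.unique` plus `collections.Counter` is sufficient regardless of the execution engine. A standalone sketch:

import collections

import numpy as np

samples = ["11", "01", "11", "00", "11", "01"]  # bitstrings, one per shot
res, counts = np.unique(samples, return_counts=True)
frequencies = collections.Counter(dict(zip(res, counts)))
print(frequencies)  # counts 3, 2 and 1 for '11', '01' and '00'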
state1, state2): - state1 = self.cast(state1) - state2 = self.cast(state2) - return self.np.trace(self.np.transpose(np.conj(state1)) @ state2) + return self.np.trace( + self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) + ) def calculate_eigenvalues(self, matrix, k=6): if self.issparse(matrix): @@ -767,20 +767,18 @@ def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): ev = ev / norm return ev + # TODO: remove this method def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): - return self.np.dot(matrix1, matrix2) + return matrix1 @ matrix2 + # TODO: remove this method def calculate_hamiltonian_state_product(self, matrix, state): - rank = len(tuple(state.shape)) - if rank == 1: # vector - return matrix.dot(state[:, np.newaxis])[:, 0] - elif rank == 2: # matrix - return matrix.dot(state) - else: + if len(tuple(state.shape)) > 2: raise_error( ValueError, - f"Cannot multiply Hamiltonian with rank-{rank} tensor.", + f"Cannot multiply Hamiltonian with rank-{len(tuple(state.shape))} tensor.", ) + return matrix @ state def assert_allclose(self, value, target, rtol=1e-7, atol=0.0): if isinstance(value, CircuitResult) or isinstance(value, QuantumState): diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index bfd45222a9..9a321364ee 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -148,10 +148,10 @@ def sample_shots(self, probabilities, nshots): self.cast(probabilities, dtype="float"), nshots, replacement=True ) - def calculate_overlap_density_matrix(self, state1, state2): - return self.np.trace( - self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) - ) + # def calculate_overlap_density_matrix(self, state1, state2): + # return self.np.trace( + # self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) + # ) def calculate_eigenvalues(self, matrix, k=6): return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable @@ -168,14 +168,6 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): ud = self.np.conj(eigenvectors).T return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) - def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): - if self.issparse(matrix1) or self.issparse(matrix2): # pragma: no cover - return self.np.sparse.mm(matrix1, matrix2) # pylint: disable=E1102 - return self.np.matmul(matrix1, matrix2) - - def calculate_hamiltonian_state_product(self, matrix, state): - return self.np.matmul(matrix, state) - def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index 1d1742e328..d388bffe39 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -177,6 +177,11 @@ def test_hamiltonian_matmul(backend, sparse_type): backend.assert_allclose((H1 @ H2).matrix, (m1 @ m2)) backend.assert_allclose((H2 @ H1).matrix, (m2 @ m1)) + try: + H1 @ np.zeros(3 * (2**nqubits,), dtype=m1.dtype) + except Exception as error: + print(error) + with pytest.raises(ValueError): H1 @ np.zeros(3 * (2**nqubits,), dtype=m1.dtype) with pytest.raises(NotImplementedError): From 0e520dbd9df3b4aeefe74c8b7c6c6b227573e7b7 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Tue, 12 Mar 2024 17:46:58 +0400 Subject: [PATCH 120/127] remove unuseful method and invert changes on test seed --- src/qibo/backends/pytorch.py | 5 ----- tests/test_models_circuit_features.py | 5 ++--- 2 files changed, 2 insertions(+), 8 
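The `calculate_hamiltonian_state_product` simplification in PATCH 119 above leans on the fact that `@` already handles both state vectors and density matrices, so only ranks above 2 need the explicit `ValueError`. A standalone numpy sketch (illustrative, not qibo code):

import numpy as np

rng = np.random.default_rng(0)
matrix = rng.standard_normal((4, 4))      # 2-qubit Hamiltonian matrix
vector = rng.standard_normal(4)           # state vector (rank 1)
density = rng.standard_normal((4, 4))     # density matrix (rank 2)

assert (matrix @ vector).shape == (4,)    # matmul treats a 1-D operand as a column
assert (matrix @ density).shape == (4, 4)
# numpy would happily broadcast a rank-3 operand, hence the explicit rank check
# kept in the backend method.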
deletions(-) diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py index 9a321364ee..403cea10da 100644 --- a/src/qibo/backends/pytorch.py +++ b/src/qibo/backends/pytorch.py @@ -148,11 +148,6 @@ def sample_shots(self, probabilities, nshots): self.cast(probabilities, dtype="float"), nshots, replacement=True ) - # def calculate_overlap_density_matrix(self, state1, state2): - # return self.np.trace( - # self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) - # ) - def calculate_eigenvalues(self, matrix, k=6): return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index 08bceedaa7..2fb310bfea 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -330,9 +330,9 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): ) elif backend.__class__.__name__ == "PyTorchBackend": test_frequencies = ( - Counter({"1": 810, "0": 214}) + Counter({"1": 817, "0": 207}) if nqubits == 1 - else Counter({"11": 685, "01": 160, "10": 144, "00": 35}) + else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) ) else: test_frequencies = ( @@ -340,6 +340,5 @@ def test_repeated_execute_probs_and_freqs(backend, nqubits): if nqubits == 1 else Counter({"11": 618, "10": 169, "01": 185, "00": 52}) ) - print(result.frequencies()) for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) From 671a791af19e1a3256524a591707cbb10433c5a7 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 19:30:23 +0400 Subject: [PATCH 121/127] fix seed in test --- src/qibo/backends/numpy.py | 2 +- tests/test_backends_clifford.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index ae96a6eef5..7eb2907aa1 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -652,7 +652,7 @@ def samples_to_decimal(self, samples, nqubits): return self.np.matmul(samples, qrange)[:, 0] def calculate_frequencies(self, samples): - # Samples are a list of strings so there is no advantage in using Torch or other backends + # Samples are a list of strings so there is no advantage in using other backends res, counts = np.unique(samples, return_counts=True) return collections.Counter(dict(zip(res, counts))) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index f99915c15a..3e05847857 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -172,6 +172,7 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): @pytest.mark.parametrize("seed", [2024]) def test_collapsing_measurements(backend, seed): + backend.set_seed(2024) clifford_bkd = construct_clifford_backend(backend) gate_queue = random_clifford( 3, density_matrix=True, seed=seed, backend=backend From 64ffb7b9cedc953bf11c6914019a319b4dfb5a14 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Tue, 12 Mar 2024 15:38:43 +0000 Subject: [PATCH 122/127] Update src/qibo/backends/numpy.py --- src/qibo/backends/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 7eb2907aa1..ce7eb49e15 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -764,7 +764,7 @@ def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): ev = self.np.real(self.np.trace(self.cast(hamiltonian @ state))) if 
normalize: norm = self.np.real(self.np.trace(state)) - ev = ev / norm + ev /= norm return ev # TODO: remove this method From a4b7f75b0c3cc30e9db0eb8364229d2afa39fb53 Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Thu, 14 Mar 2024 09:01:57 +0400 Subject: [PATCH 123/127] fix coverage --- src/qibo/backends/clifford.py | 4 +++- tests/test_backends_clifford.py | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/qibo/backends/clifford.py b/src/qibo/backends/clifford.py index 6140556d4c..ea56e242b9 100644 --- a/src/qibo/backends/clifford.py +++ b/src/qibo/backends/clifford.py @@ -144,7 +144,9 @@ def apply_gate_clifford(self, gate, symplectic_matrix, nqubits): def apply_channel(self, channel, state, nqubits): probabilities = channel.coefficients + (1 - np.sum(channel.coefficients),) - index = np.random.choice(range(len(probabilities)), size=1, p=probabilities)[0] + index = self.np.random.choice( + range(len(probabilities)), size=1, p=probabilities + )[0] if index != len(channel.gates): gate = channel.gates[index] state = gate.apply_clifford(self, state, nqubits) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 3e05847857..385660dafb 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -251,9 +251,9 @@ def test_set_backend(backend): @pytest.mark.parametrize("seed", [2024]) def test_noise_channels(backend, seed): - clifford_bkd = construct_clifford_backend(backend) - backend.set_seed(seed) + + clifford_bkd = construct_clifford_backend(backend) clifford_bkd.set_seed(seed) noise = NoiseModel() @@ -269,6 +269,7 @@ def test_noise_channels(backend, seed): c = noise.apply(c) c_copy = noise.apply(c_copy) + numpy_bkd.set_seed(2024) numpy_result = numpy_bkd.execute_circuit(c) clifford_result = clifford_bkd.execute_circuit(c_copy) From 86b42222d74757bc5cc9da60a3ab62513328070c Mon Sep 17 00:00:00 2001 From: Renato Mello Date: Thu, 14 Mar 2024 09:53:16 +0400 Subject: [PATCH 124/127] test global backend clifford --- tests/test_backends_clifford.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 385660dafb..6a8209702b 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from qibo import Circuit, gates, set_backend +from qibo import Circuit, gates, get_backend, set_backend from qibo.backends import ( CliffordBackend, GlobalBackend, @@ -32,6 +32,26 @@ def construct_clifford_backend(backend): return CliffordBackend(_get_engine_name(backend)) +def test_set_backend(backend): + clifford_bkd = construct_clifford_backend(backend) + platform = _get_engine_name(backend) + set_backend("clifford", platform=platform) + assert isinstance(GlobalBackend(), CliffordBackend) + global_platform = GlobalBackend().platform + assert global_platform == platform + + +def test_global_backend(backend): + construct_clifford_backend(backend) + set_backend(backend.name, platform=backend.platform) + clifford_bkd = CliffordBackend() + target = ( + GlobalBackend().name if backend.name == "numpy" else GlobalBackend().platform + ) + assert clifford_bkd.platform == target + set_backend("numpy") + + THETAS_1Q = [ th + 2 * i * np.pi for i in range(2) for th in [0, np.pi / 2, np.pi, 3 * np.pi / 2] ] @@ -240,15 +260,6 @@ def test_bitflip_noise(backend): ) -def test_set_backend(backend): - clifford_bkd = construct_clifford_backend(backend) - platform = 
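For the `apply_channel` hunk in PATCH 123 above: an index past the last channel gate means the state is left untouched, and routing the draw through the backend engine's `random` module is presumably what lets `set_seed` take effect. A standalone sketch of the sampling pattern (plain numpy, illustrative only):

import numpy as np

coefficients = (0.1, 0.2)                                   # channel gate probabilities
probabilities = coefficients + (1 - np.sum(coefficients),)  # (0.1, 0.2, 0.7)

np.random.seed(2024)
index = np.random.choice(range(len(probabilities)), size=1, p=probabilities)[0]
# index 0 or 1 selects a channel gate; index 2 (the appended remainder) applies nothing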
_get_engine_name(backend) - set_backend("clifford", platform=platform) - assert isinstance(GlobalBackend(), CliffordBackend) - global_platform = GlobalBackend().platform - assert global_platform == platform - - @pytest.mark.parametrize("seed", [2024]) def test_noise_channels(backend, seed): backend.set_seed(seed) From 8ede3f2cb593b96ae702da6f1ecc18205869eb11 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 14 Mar 2024 12:18:06 +0400 Subject: [PATCH 125/127] corrections by Andrea --- src/qibo/backends/numpy.py | 3 +-- tests/test_backends_clifford.py | 2 +- tests/test_hamiltonians.py | 6 ------ 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index ce7eb49e15..ac0e75a5cb 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -280,7 +280,7 @@ def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): state = self.np.expand_dims(state, q) - if r: + if len(r): state = self.np.concatenate([self.np.zeros_like(state), state], q) else: state = self.np.concatenate([state, self.np.zeros_like(state)], q) @@ -539,7 +539,6 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): assert circuit.has_collapse final_state = self.cast(np.mean(self.to_numpy(final_states), 0)) if circuit.measurements: - # qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( final_state, circuit.measurements, diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 6a8209702b..cca02a9a8c 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -27,7 +27,7 @@ def construct_clifford_backend(backend): ): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend.name) - pytest.skip("Clifford backend not defined for the this engine.") + pytest.skip("Clifford backend not defined for this engine.") return CliffordBackend(_get_engine_name(backend)) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index d388bffe39..c0089cc59b 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -177,11 +177,6 @@ def test_hamiltonian_matmul(backend, sparse_type): backend.assert_allclose((H1 @ H2).matrix, (m1 @ m2)) backend.assert_allclose((H2 @ H1).matrix, (m2 @ m1)) - try: - H1 @ np.zeros(3 * (2**nqubits,), dtype=m1.dtype) - except Exception as error: - print(error) - with pytest.raises(ValueError): H1 @ np.zeros(3 * (2**nqubits,), dtype=m1.dtype) with pytest.raises(NotImplementedError): @@ -372,7 +367,6 @@ def test_hamiltonian_eigenvectors(backend, dtype, dense): H3 = H1 * c2 V3 = backend.to_numpy(H3.eigenvectors()) U3 = backend.to_numpy(H3._eigenvalues) - print(U3) backend.assert_allclose(H3.matrix, V3 @ np.diag(U3) @ V3.T) c3 = dtype(0) From 05c1966c85edd075651c1f7b0ac607f709759a09 Mon Sep 17 00:00:00 2001 From: simone bordoni Date: Thu, 14 Mar 2024 12:35:38 +0400 Subject: [PATCH 126/127] revert change --- src/qibo/backends/numpy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index ac0e75a5cb..4d8c39e924 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -280,7 +280,7 @@ def _append_zeros(self, state, qubits, results): """Helper method for collapse.""" for q, r in zip(qubits, results): state = self.np.expand_dims(state, q) - if len(r): + if r: state = self.np.concatenate([self.np.zeros_like(state), state], q) 
             else:
                 state = self.np.concatenate([state, self.np.zeros_like(state)], q)

From 07d28fac16a84c279420bab2ef2ca3e31aad1ece Mon Sep 17 00:00:00 2001
From: Renato Mello
Date: Thu, 14 Mar 2024 13:57:21 +0400
Subject: [PATCH 127/127] finally fix this issue

---
 src/qibo/backends/numpy.py      | 9 +++++----
 tests/test_backends_clifford.py | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py
index 4d8c39e924..c29bf91ea8 100644
--- a/src/qibo/backends/numpy.py
+++ b/src/qibo/backends/numpy.py
@@ -280,10 +280,11 @@ def _append_zeros(self, state, qubits, results):
         """Helper method for collapse."""
         for q, r in zip(qubits, results):
             state = self.np.expand_dims(state, q)
-            if r:
-                state = self.np.concatenate([self.np.zeros_like(state), state], q)
-            else:
-                state = self.np.concatenate([state, self.np.zeros_like(state)], q)
+            state = (
+                self.np.concatenate([self.np.zeros_like(state), state], q)
+                if r == 1
+                else self.np.concatenate([state, self.np.zeros_like(state)], q)
+            )
         return state

     def collapse_state(self, state, qubits, shot, nqubits, normalize=True):
diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py
index cca02a9a8c..9af8192633 100644
--- a/tests/test_backends_clifford.py
+++ b/tests/test_backends_clifford.py
@@ -5,7 +5,7 @@
 import numpy as np
 import pytest

-from qibo import Circuit, gates, get_backend, set_backend
+from qibo import Circuit, gates, set_backend
 from qibo.backends import (
     CliffordBackend,
     GlobalBackend,
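To make the collapse bookkeeping of PATCH 127 concrete, here is a standalone numpy sketch (not qibo code) of what `_append_zeros` does: each measured qubit's axis is re-inserted and a zero block is stacked on the side selected by the recorded result.

import numpy as np

def append_zeros(state, qubits, results):
    # re-embed a collapsed substate into the full space, one measured qubit at a time
    for q, r in zip(qubits, results):
        state = np.expand_dims(state, q)
        state = (
            np.concatenate([np.zeros_like(state), state], q)
            if r == 1
            else np.concatenate([state, np.zeros_like(state)], q)
        )
    return state

substate = np.array([1.0, 0.0])           # remaining qubit is in |0> after the collapse
full = append_zeros(substate, [0], [1])   # measured qubit 0 gave result 1
print(full)                               # [[0. 0.], [1. 0.]], amplitude sits on |10>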