diff --git a/doc/source/api-reference/qibo.rst b/doc/source/api-reference/qibo.rst
index df449e8e78..c574c90145 100644
--- a/doc/source/api-reference/qibo.rst
+++ b/doc/source/api-reference/qibo.rst
@@ -2375,7 +2375,7 @@ The user can switch backends using
     qibo.set_backend("numpy")
 
 before creating any circuits or gates. The default backend is the first available
-from ``qibojit``, ``tensorflow``, ``numpy``.
+from ``qibojit``, ``pytorch``, ``tensorflow``, ``numpy``.
 
 Some backends support different platforms. For example, the qibojit backend
 provides two platforms (``cupy`` and ``cuquantum``) when used on GPU.
@@ -2456,4 +2456,8 @@ Alternatively, a Clifford circuit can also be executed starting from the :class:
 Cloud Backends
 ^^^^^^^^^^^^^^
 
-Additional backends, that support the remote execution of quantum circuits through cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin `qibo-cloud-backends `_. For more information please refer to the `official documentation `_.
+Additional backends that support the remote execution of quantum circuits through
+cloud service providers, such as IBM and QRC-TII, are provided by the optional qibo plugin
+`qibo-cloud-backends `_.
+For more information, please refer to the
+`official documentation `_.
diff --git a/doc/source/getting-started/backends.rst b/doc/source/getting-started/backends.rst
index 5fd4770694..df6a5c8cef 100644
--- a/doc/source/getting-started/backends.rst
+++ b/doc/source/getting-started/backends.rst
@@ -36,10 +36,10 @@ if the corresponding packages are installed, following the hierarchy below:
 * :ref:`installing-numpy`: a lightweight quantum simulator shipped with the :ref:`installing-qibo` base package. Use this simulator if your CPU architecture is not supported by the other backends. Please note that the simulation performance is quite poor in comparison to other backends.
 * :ref:`installing-qibojit`: an efficient simulation backend for CPU, GPU and multi-GPU based on just-in-time (JIT) compiled custom operators. Install this package if you need to simulate quantum circuits with large number of qubits or complex quantum algorithms which may benefit from computing parallelism.
-* `qibotn `_: an interface to Tensor Networks simulation algorithms designed for GPUs and multi-node CPUs. This backend makes possible scaling quantum circuit simulation to a larger number of qubits.
 * :ref:`installing-tensorflow`: a pure TensorFlow implementation for quantum simulation which provides access to gradient descent optimization and the possibility to implement classical and quantum architectures together. This backend is not optimized for memory and speed, use :ref:`installing-qibojit` instead.
 * :ref:`installing-pytorch`: a pure PyTorch implementation for quantum simulation which provides access to gradient descent optimization and the possibility to implement classical and quantum architectures together. This backend is not optimized for memory and speed, use :ref:`installing-qibojit` instead.
 * :ref:`clifford `: a specialized backend for the simulation of quantum circuits with Clifford gates. This backend uses :ref:`installing-qibojit` and/or :ref:`installing-numpy`.
+* `qibotn `_: an interface to Tensor Networks simulation algorithms designed for GPUs and multi-node CPUs. This backend makes it possible to scale quantum circuit simulation to a larger number of qubits.
 
 The default backend that is used is the first available from the above list.
 The user can switch to a different backend using the ``qibo.set_backend`` method
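To make the hierarchy documented above concrete, here is a minimal sketch of selecting the new backend (assuming qibo and the corresponding optional packages are installed; the fallback order is the one documented above):

    import qibo

    # Explicitly select the new pytorch simulation backend.
    qibo.set_backend("pytorch")

    # Platform selection works the same way for backends that support it,
    # e.g. qibo.set_backend("qibojit", platform="cupy") on GPU.

    # Without an explicit call, qibo falls back to the first available
    # backend, in the order: qibojit, pytorch, tensorflow, numpy.
    print(qibo.get_backend())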
diff --git a/examples/adiabatic/trotter_error.py b/examples/adiabatic/trotter_error.py
index 80892db310..7caadec8ec 100644
--- a/examples/adiabatic/trotter_error.py
+++ b/examples/adiabatic/trotter_error.py
@@ -61,9 +61,9 @@ def main(nqubits, hfield, T, save):
     ]
     alphas = [1.0, 0.7, 0.4]
     labels = [
-        r"$\delta t ^{}$".format(exponent - 1),
-        r"$\delta t ^{}$".format(exponent),
-        r"$\delta t ^{}$".format(exponent + 1),
+        f"$\\delta t ^{exponent - 1}$",
+        f"$\\delta t ^{exponent}$",
+        f"$\\delta t ^{exponent + 1}$",
     ]
 
     plt.figure(figsize=(7, 4))
diff --git a/examples/adiabatic3sat/functions.py b/examples/adiabatic3sat/functions.py
index 2a8e5bf079..c6fa865823 100644
--- a/examples/adiabatic3sat/functions.py
+++ b/examples/adiabatic3sat/functions.py
@@ -17,7 +17,7 @@ def read_file(file_name, instance):
         solution (list): list of the correct outputs of the instance for testing.
         clauses (list): list of all clauses, with the qubits each clause acts upon.
     """
-    file = open("../data3sat/{q}bit/n{q}i{i}.txt".format(q=file_name, i=instance))
+    file = open(f"../data3sat/{file_name}bit/n{file_name}i{instance}.txt")
     control = list(map(int, file.readline().split()))
     solution = list(map(str, file.readline().split()))
     clauses = [list(map(int, file.readline().split())) for _ in range(control[1])]
@@ -100,7 +100,7 @@ def plot(qubits, ground, first, gap, dt, T):
     plt.title("Energy during adiabatic evolution")
     ax.legend()
     fig.tight_layout()
-    fig.savefig("{}_qubits_energy.png".format(qubits), dpi=300, bbox_inches="tight")
+    fig.savefig(f"{qubits}_qubits_energy.png", dpi=300, bbox_inches="tight")
     fig, ax = plt.subplots()
     ax.plot(times, gap, label="gap energy", color="C0")
     plt.ylabel("energy")
@@ -108,4 +108,4 @@ def plot(qubits, ground, first, gap, dt, T):
     plt.title("Energy during adiabatic evolution")
     ax.legend()
     fig.tight_layout()
-    fig.savefig("{}_qubits_gap.png".format(qubits), dpi=300, bbox_inches="tight")
+    fig.savefig(f"{qubits}_qubits_gap.png", dpi=300, bbox_inches="tight")
diff --git a/examples/adiabatic3sat/main.py b/examples/adiabatic3sat/main.py
index 901df08c96..06b0d5cce0 100644
--- a/examples/adiabatic3sat/main.py
+++ b/examples/adiabatic3sat/main.py
@@ -48,9 +48,8 @@ def main(nqubits, instance, T, dt, solver, plot, dense, params, method, maxiter)
     print("-" * 20 + "\n")
     if plot and nqubits >= 14:
         print(
-            "Currently not possible to calculate gap energy for {} qubits."
-            "\n Proceeding to adiabatic evolution without plotting data.\n"
-            "".format(nqubits)
+            f"Currently not possible to calculate gap energy for {nqubits} qubits."
+            + "\n Proceeding to adiabatic evolution without plotting data.\n"
         )
         plot = False
     if plot and method is not None:
@@ -97,9 +96,9 @@ def main(nqubits, instance, T, dt, solver, plot, dense, params, method, maxiter)
     output_dec = (np.abs(final_state) ** 2).argmax()
     max_output = "{0:0{bits}b}".format(output_dec, bits=nqubits)
     max_prob = (np.abs(final_state) ** 2).max()
-    print("Exact cover instance with {} qubits.\n".format(nqubits))
+    print(f"Exact cover instance with {nqubits} qubits.\n")
     if solution:
-        print("Known solution: {}\n".format("".join(solution)))
+        print(f"Known solution: {''.join(solution)}\n")
     print("-" * 20 + "\n")
     print(
         f"Adiabatic evolution with total time {T}, evolution step {dt} and "
diff --git a/examples/benchmarks/circuits.py b/examples/benchmarks/circuits.py
index 9aab3ac933..cdcd1444a6 100644
--- a/examples/benchmarks/circuits.py
+++ b/examples/benchmarks/circuits.py
@@ -67,7 +67,7 @@ def CircuitFactory(nqubits, circuit_name, accelerators=None, **kwargs):
         circuit = models.QFT(nqubits, accelerators=accelerators)
     else:
         if circuit_name not in _CIRCUITS:
-            raise KeyError("Unknown benchmark circuit type {}." "".format(circuit_name))
+            raise KeyError(f"Unknown benchmark circuit type {circuit_name}.")
         circuit = models.Circuit(nqubits, accelerators=accelerators)
         circuit.add(_CIRCUITS.get(circuit_name)(nqubits, **kwargs))
     return circuit
diff --git a/examples/benchmarks/main.py b/examples/benchmarks/main.py
index 4809edf044..31b7eaec84 100644
--- a/examples/benchmarks/main.py
+++ b/examples/benchmarks/main.py
@@ -53,14 +53,14 @@ def limit_gpu_memory(memory_limit=None):
         print("\nNo GPU memory limiter used.\n")
         return
 
-    print("\nAttempting to limit GPU memory to {}.\n".format(memory_limit))
+    print(f"\nAttempting to limit GPU memory to {memory_limit}.\n")
     gpus = tf.config.list_physical_devices("GPU")
     for gpu in tf.config.list_physical_devices("GPU"):
         config = tf.config.experimental.VirtualDeviceConfiguration(
             memory_limit=memory_limit
         )
         tf.config.experimental.set_virtual_device_configuration(gpu, [config])
-        print("Limiting memory of {} to {}.".format(gpu.name, memory_limit))
+        print(f"Limiting memory of {gpu.name} to {memory_limit}.")
     print()
diff --git a/examples/benchmarks/utils.py b/examples/benchmarks/utils.py
index 2f0bcb6111..4bd3deeda1 100644
--- a/examples/benchmarks/utils.py
+++ b/examples/benchmarks/utils.py
@@ -6,12 +6,12 @@ class BenchmarkLogger(list):
     def __init__(self, filename=None):
         self.filename = filename
         if filename is not None and os.path.isfile(filename):
-            print("Extending existing logs from {}.".format(filename))
+            print(f"Extending existing logs from {filename}.")
             with open(filename) as file:
                 super().__init__(json.load(file))
         else:
             if filename is not None:
-                print("Creating new logs in {}.".format(filename))
+                print(f"Creating new logs in {filename}.")
             super().__init__()
 
     def dump(self):
@@ -20,7 +20,7 @@ def dump(self):
             json.dump(list(self), file)
 
     def __str__(self):
-        return "\n".join("{}: {}".format(k, v) for k, v in self[-1].items())
+        return "\n".join(f"{k}: {v}" for k, v in self[-1].items())
 
 
 def parse_accelerators(accelerators):
diff --git a/examples/grover3sat/functions.py b/examples/grover3sat/functions.py
index 1c34366546..74adb146a4 100644
--- a/examples/grover3sat/functions.py
+++ b/examples/grover3sat/functions.py
@@ -15,7 +15,7 @@ def read_file(file_name, instance):
         solution (list): list of the correct outputs of the instance for testing.
         clauses (list): list of all clauses, with the qubits each clause acts upon.
     """
-    file = open("../data3sat/{q}bit/n{q}i{i}.txt".format(q=file_name, i=instance))
+    file = open(f"../data3sat/{file_name}bit/n{file_name}i{instance}.txt")
     control = list(map(int, file.readline().split()))
     solution = list(map(str, file.readline().split()))
     clauses = [list(map(int, file.readline().split())) for _ in range(control[1])]
diff --git a/examples/grover3sat/main.py b/examples/grover3sat/main.py
index a40d30c116..c1c3386906 100644
--- a/examples/grover3sat/main.py
+++ b/examples/grover3sat/main.py
@@ -18,16 +18,16 @@ def main(nqubits, instance):
     qubits = control[0]
     clauses_num = control[1]
     steps = int((np.pi / 4) * np.sqrt(2**qubits))
-    print("Qubits encoding the solution: {}\n".format(qubits))
-    print("Total number of qubits used: {}\n".format(qubits + clauses_num + 1))
+    print(f"Qubits encoding the solution: {qubits}\n")
+    print(f"Total number of qubits used: {qubits + clauses_num + 1}\n")
     q, c, ancilla, circuit = functions.create_qc(qubits, clauses_num)
     circuit = functions.grover(circuit, q, c, ancilla, clauses, steps)
     result = circuit(nshots=100)
     frequencies = result.frequencies(binary=True, registers=False)
     most_common_bitstring = frequencies.most_common(1)[0][0]
-    print("Most common bitstring: {}\n".format(most_common_bitstring))
+    print(f"Most common bitstring: {most_common_bitstring}\n")
     if solution:
-        print("Exact cover solution: {}\n".format("".join(solution)))
+        print(f"Exact cover solution: {''.join(solution)}\n")
 
 
 if __name__ == "__main__":
diff --git a/examples/hash-grover/main.py b/examples/hash-grover/main.py
index 3d981d0d02..237f9b5855 100644
--- a/examples/hash-grover/main.py
+++ b/examples/hash-grover/main.py
@@ -23,11 +23,9 @@ def main(h_value, collisions, b):
     h = "{0:0{bits}b}".format(h_value, bits=b)
     if len(h) > 8:
         raise ValueError(
-            "Hash should be at maximum an 8-bit number but given value contains {} bits.".format(
-                len(h)
-            )
+            f"Hash should be at maximum an 8-bit number but given value contains {len(h)} bits."
         )
-    print("Target hash: {}\n".format(h))
+    print(f"Target hash: {h}\n")
     if collisions:
         grover_it = int(np.pi * np.sqrt((2**8) / collisions) / 4)
         result = functions.grover(q, constant_1, constant_2, rot, h, grover_it)
@@ -36,19 +34,19 @@ def main(h_value, collisions, b):
         print("Preimages:")
         for i in most_common:
             if functions.check_hash(q, i[0], h, constant_1, constant_2, rot):
-                print(" - {}\n".format(i[0]))
+                print(f" - {i[0]}\n")
             else:
                 print(
                     " Incorrect preimage found, number of given collisions might not match.\n"
                 )
-        print("Total iterations taken: {}\n".format(grover_it))
+        print(f"Total iterations taken: {grover_it}\n")
     else:
         measured, total_iterations = functions.grover_unknown_M(
             q, constant_1, constant_2, rot, h
         )
         print("Solution found in an iterative process.\n")
-        print("Preimage: {}\n".format(measured))
-        print("Total iterations taken: {}\n".format(total_iterations))
+        print(f"Preimage: {measured}\n")
+        print(f"Total iterations taken: {total_iterations}\n")
 
 
 if __name__ == "__main__":
diff --git a/examples/unary/functions.py b/examples/unary/functions.py
index b56c531491..81ce0289ad 100644
--- a/examples/unary/functions.py
+++ b/examples/unary/functions.py
@@ -482,7 +482,7 @@ def paint_prob_distribution(bins, prob_sim, S0, sig, r, T):
     ax.plot(x, y, label="PDF", color="black")
     plt.ylabel("Probability")
     plt.xlabel("Option price")
-    plt.title("Option price distribution for {} qubits ".format(bins))
+    plt.title(f"Option price distribution for {bins} qubits")
     ax.legend()
     fig.tight_layout()
     fig.savefig("Probability_distribution.png")
diff --git a/examples/unary/main.py b/examples/unary/main.py
index e68d25f6d9..cdbf7a9018 100644
--- a/examples/unary/main.py
+++ b/examples/unary/main.py
@@ -15,7 +15,7 @@ def main(data, bins, M, shots):
 
     # Generate the probability distribution plots
     fun.paint_prob_distribution(bins, prob_sim, S0, sig, r, T)
-    print("Histogram printed for unary simulation with {} qubits.\n".format(bins))
+    print(f"Histogram printed for unary simulation with {bins} qubits.\n")
 
     # Create circuit to compute the expected payoff
     circuit, S = fun.load_payoff_quantum_sim(bins, S0, sig, r, T, K)
@@ -28,14 +28,14 @@ def main(data, bins, M, shots):
 
     # Finding differences between exact value and quantum approximation
     error = fun.diff_qu_cl(qu_payoff_sim, cl_payoff)
-    print("Exact value of the expected payoff: {}\n".format(cl_payoff))
-    print("Expected payoff from quantum simulation: {}\n".format(qu_payoff_sim))
-    print("Percentage error: {} %\n".format(error))
+    print(f"Exact value of the expected payoff: {cl_payoff}\n")
+    print(f"Expected payoff from quantum simulation: {qu_payoff_sim}\n")
+    print(f"Percentage error: {error} %\n")
     print("-" * 60 + "\n")
 
     # Applying amplitude estimation
     a_s, error_s = fun.amplitude_estimation(bins, M, data)
-    print("Amplitude estimation with a total of {} runs.\n".format(M))
+    print(f"Amplitude estimation with a total of {M} runs.\n")
     fun.paint_AE(a_s, error_s, bins, M, data)
     print("Amplitude estimation result plots generated.")
diff --git a/examples/variational_classifier/main.py b/examples/variational_classifier/main.py
index 57c2b7b23a..20bcb5f432 100644
--- a/examples/variational_classifier/main.py
+++ b/examples/variational_classifier/main.py
@@ -72,7 +72,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method):
             path_angles = (
                 LOCAL_FOLDER
                 / "data"
-                / "optimal_angles_ry_{}q_{}l.npy".format(nqubits, nlayers)
+                / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy"
             )
             optimal_angles = np.load(path_angles)
         except:
@@ -84,7 +84,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method):
             path_angles = (
                 LOCAL_FOLDER
                 / "data"
-                / "optimal_angles_rxrzrx_{}q_{}l.npy".format(nqubits, nlayers)
+                / f"optimal_angles_rxrzrx_{nqubits}q_{nlayers}l.npy"
            )
            optimal_angles = np.load(path_angles)
        except:
@@ -111,9 +111,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method):
            method=method,
        )
        path_angles = (
-            LOCAL_FOLDER
-            / "data"
-            / "optimal_angles_ry_{}q_{}l.npy".format(nqubits, nlayers)
+            LOCAL_FOLDER / "data" / f"optimal_angles_ry_{nqubits}q_{nlayers}l.npy"
        )
        np.save(
            path_angles,
@@ -138,7 +136,7 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method):
        path_angles = (
            LOCAL_FOLDER
            / "data"
-            / "optimal_angles_rxrzrx_{}q_{}l.npy".format(nqubits, nlayers)
+            / f"optimal_angles_rxrzrx_{nqubits}q_{nlayers}l.npy"
        )
        np.save(
            path_angles,
@@ -170,14 +168,12 @@ def main(nclasses, nqubits, nlayers, nshots, training, RxRzRx, method):
    ]
 
    print(
-        "Train set | # Clases: {} | # Qubits: {} | # Layers: {} | Accuracy: {}".format(
-            nclasses, nqubits, nlayers, qc.Accuracy(labels_train, predictions_train)
-        )
+        f"Train set | # Classes: {nclasses} | # Qubits: {nqubits} | # Layers: {nlayers} | "
+        + f"Accuracy: {qc.Accuracy(labels_train, predictions_train)}"
    )
    print(
-        "Test set | # Clases: {} | # Qubits: {} | # Layers: {} | Accuracy: {}".format(
-            nclasses, nqubits, nlayers, qc.Accuracy(labels_test, predictions_test)
-        )
+        f"Test set | # Classes: {nclasses} | # Qubits: {nqubits} | # Layers: {nlayers} | "
+        + f"Accuracy: {qc.Accuracy(labels_test, predictions_test)}"
    )
diff --git a/poetry.lock b/poetry.lock
index 7fa758e28f..cf89e825c8 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
 
 [[package]]
 name = "absl-py"
@@ -902,7 +902,6 @@ python-versions = "*"
 files = [
     {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:c5598670f4f31906d725f5ea852f0df675522e3ff5a7bf886057eab36497062d"},
     {file = "cutensor_cu11-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:67b6c7427d9ab50cb82e01360948bd1b23d73775b5767ab92071c7afcfec4b8b"},
-    {file = "cutensor_cu11-1.7.0-py3-none-win_amd64.whl", hash = "sha256:d173b3d0fd51cf761b371a4d4be9a3afd3ef230a55ae4336ae31e905336480e1"},
 ]
 
 [[package]]
@@ -914,7 +913,6 @@ python-versions = "*"
 files = [
     {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:515caa2406e09ffe9c6524328b7da2106169811665f7684836052753a30dda27"},
     {file = "cutensor_cu12-1.7.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:29bdde551788fd3a611992026a5bb422831069e38fd44ab920af5aa00cffa12c"},
-    {file = "cutensor_cu12-1.7.0-py3-none-win_amd64.whl", hash = "sha256:e1a9a759a615a64d1b8c6d2b8ffd925deb805750c28481b1a8310d05f35ce229"},
 ]
 
 [[package]]
@@ -1389,6 +1387,22 @@ files = [
     {file = "fastrlock-0.8.2.tar.gz", hash = "sha256:644ec9215cf9c4df8028d8511379a15d9c1af3e16d80e47f1b6fdc6ba118356a"},
 ]
 
+[[package]]
+name = "filelock"
+version = "3.13.1"
+description = "A platform independent file lock."
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + [[package]] name = "flatbuffers" version = "23.5.26" @@ -1465,6 +1479,41 @@ ufo = ["fs (>=2.2.0,<3)"] unicode = ["unicodedata2 (>=15.1.0)"] woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] +[[package]] +name = "fsspec" +version = "2024.2.0" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fsspec-2024.2.0-py3-none-any.whl", hash = "sha256:817f969556fa5916bc682e02ca2045f96ff7f586d45110fcb76022063ad2c7d8"}, + {file = "fsspec-2024.2.0.tar.gz", hash = "sha256:b6ad1a679f760dda52b1168c859d01b7b80648ea6f7f7c7f5a8a91dc3f3ecb84"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + [[package]] name = "furo" version = "2022.12.7" @@ -1519,11 +1568,11 @@ files = [ google-auth = ">=2.14.1,<3.0.dev0" googleapis-common-protos = ">=1.56.2,<2.0.dev0" grpcio = [ - {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0dev", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] grpcio-status = [ - {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""}, + {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "extra == \"grpc\""}, {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""}, ] protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" @@ -2252,8 +2301,6 @@ description = "Clang Python Bindings, mirrored from the official LLVM repo: http optional = false python-versions = "*" files = [ - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, - {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = 
"sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -2495,9 +2542,9 @@ files = [ [package.dependencies] numpy = [ - {version = ">=1.23.3", markers = "python_version > \"3.10\""}, - {version = ">=1.21.2", markers = "python_version > \"3.9\" and python_version <= \"3.10\""}, {version = ">1.20", markers = "python_version <= \"3.9\""}, + {version = ">=1.23.3", markers = "python_version > \"3.10\""}, + {version = ">=1.21.2", markers = "python_version > \"3.9\""}, ] [package.extras] @@ -2783,6 +2830,147 @@ files = [ {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, ] +[[package]] +name = "nvidia-cublas-cu12" +version = "12.1.3.1" +description = "CUBLAS native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, + {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906"}, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.1.105" +description = "CUDA profiling tools runtime libs." +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, + {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4"}, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.1.105" +description = "NVRTC native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, + {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed"}, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.1.105" +description = "CUDA Runtime native Libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, + {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344"}, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "8.9.2.26" +description = "cuDNN runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.0.2.54" +description = "CUFFT native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = 
"sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, + {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253"}, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.2.106" +description = "CURAND native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, + {file = "nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a"}, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.4.5.107" +description = "CUDA solver native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, + {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5"}, +] + +[package.dependencies] +nvidia-cublas-cu12 = "*" +nvidia-cusparse-cu12 = "*" +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.1.0.106" +description = "CUSPARSE native runtime libraries" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, + {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a"}, +] + +[package.dependencies] +nvidia-nvjitlink-cu12 = "*" + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.19.3" +description = "NVIDIA Collective Communication Library (NCCL) Runtime" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.3.101" +description = "Nvidia JIT LTO Library" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, + {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-win_amd64.whl", hash = "sha256:1b2e317e437433753530792f13eece58f0aec21a2b05903be7bffe58a606cbd1"}, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.1.105" +description = "NVIDIA Tools Extension" +optional = false +python-versions = ">=3" +files = [ + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, + {file = "nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82"}, +] + [[package]] name = "oauthlib" version = "3.2.2" @@ -2839,46 +3027,46 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] [[package]] name = "osqp" -version = "0.6.5" +version = "0.6.4" description = "OSQP: The Operator Splitting QP Solver" optional = false python-versions = "*" files = [ - {file = "osqp-0.6.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:e8024dba07281111af39e71bff6449fb22a37bf3358aa0c7fd1daa6bca692c99"}, - {file = "osqp-0.6.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e247f2bbb53e87f1c1ca80ff3fc86b781f771d6da2a2ecd2f6e7492c802f3"}, - {file = "osqp-0.6.5-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81e299637eb2342e30eb2df0ec45dc243683af0a71676c9b45b9337bb05da97"}, - {file = "osqp-0.6.5-cp310-cp310-win_amd64.whl", hash = "sha256:42425632927d983cbe935067783b944ebd4959e9eb6611da8401007b66a0c841"}, - {file = "osqp-0.6.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a7b180db09be1c3e3cb4109396b894f481ca9c6e160a530acd71f1769610f96c"}, - {file = "osqp-0.6.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:648f4beff10c16620f3b95e86dee702052d587b847ddbd5d8f71ad39ac36db3a"}, - {file = "osqp-0.6.5-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7649d56d775662e0a5d1665ed220d585f904d14a49cc6931bf27725bb9c4b2e0"}, - {file = "osqp-0.6.5-cp311-cp311-win_amd64.whl", hash = "sha256:b033b7aec973a655cfec4558e0c4fc92ee9f914bcb0a669e0156398d8ddbef8f"}, - {file = "osqp-0.6.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5c344619465e625aac6d13812d442dd31d4a9ab243e39abb5938c3f6116409b0"}, - {file = "osqp-0.6.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000ad48aa071ecc4c75ebc39d1291752fe3a9937a30d00fff5dc61663ec67eeb"}, - {file = "osqp-0.6.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a36a40df69db5195fba613341663db2c7dcf977eb75b9578a8fd7682bbe02324"}, - {file = "osqp-0.6.5-cp312-cp312-win_amd64.whl", hash = "sha256:3d8212db7c55af1961ccce4a32fd382bfe34e2198664ea3f81cc47eef8d0f288"}, - {file = "osqp-0.6.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ca7d80c0767b1350cd74e4f1446ec51661152690d38b1382ceccdfccd757afce"}, - {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b15e2b96d4d9b2eff37a05405372c69cf17ada3d1e42c5e28cbdbd053189ab5"}, - {file = "osqp-0.6.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a41600e34ece7156606fd3620987fdf224b0a35c857540cb5bf45072f5c022b"}, - {file = "osqp-0.6.5-cp36-cp36m-win_amd64.whl", hash = "sha256:8c38574b35a3ddfb794aafee9bc5a74635160b9fc52bbc89ae6164fe207556de"}, - {file = "osqp-0.6.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d06f614e3be1b1f3cd68569b2dc3628c2fdef1e7c4b992672fe05efb1add9801"}, - {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a6b995e0a022bd1c33d20d8846d9a068df89cec288b905b5cdfdb98a2ffae8"}, - {file = "osqp-0.6.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09de9b53e7513ee4ade3024ce9f36ef993d916118d0927cce740d086882ea92c"}, - {file = "osqp-0.6.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1f80f85d515ef29b90fb34f137857e75d4fcf21a715d644f54d2cf9494567fab"}, - {file = "osqp-0.6.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de9b9e96001e8f0b2e474106ac75e220fd9279e1635b107b836a6035795e8d07"}, - {file = "osqp-0.6.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fe545d7a87a46cfc57dfb9f0aa2788d2f29e0c71dc1ac57e92f9c9d93064753"}, - {file = 
"osqp-0.6.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49ab020b5fd7abb5da99e01e47bf81f817ba1df6895e3d3ba4893722cc24d9b6"}, - {file = "osqp-0.6.5-cp38-cp38-win_amd64.whl", hash = "sha256:5d1b5ed6fc4faea94117a0abe140fefe980449b29d3907bd2e6ec1c18eca3d43"}, - {file = "osqp-0.6.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dca127b7a333ce53fb430fc441b2e0aee2df619693d967277a8f8fd095e95007"}, - {file = "osqp-0.6.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ec902844defedf7c5a5ed482b93286d1735a65b71bb27c93e18c929f313c93d"}, - {file = "osqp-0.6.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f25a9e1e8f1db38094dc7ee544e603e31fe7bf1b2a3fc75c78c1d39a727e2540"}, - {file = "osqp-0.6.5-cp39-cp39-win_amd64.whl", hash = "sha256:6dce90d8c4ad551489a452573ea819e089e1e1c3b23bbd8f155bb6059ce8ef36"}, - {file = "osqp-0.6.5.tar.gz", hash = "sha256:b2810aee7be2373add8b6c0be5ad99b810288774abca421751cb032d6a5aedef"}, + {file = "osqp-0.6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c34dc340b4dc46ed86f811b1015bb2ece444d310b4bb638e509a02df88594c1"}, + {file = "osqp-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7fb1ae278d14b7080acfe4d252c4f6df563dd8622847e73f8e5d1f2e027db41"}, + {file = "osqp-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2488dc19d48fbb46118312cf1a1292942ab41cd5588cf6c75ff1b521afb99ce3"}, + {file = "osqp-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:adaf59b134745aec21409e698dcd72d8997be2652e35ed1f5302aaba69654831"}, + {file = "osqp-0.6.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:20aa182b23ca5d433d1b8144d46296304a493d1cc1712cf45c591e5dd7a19436"}, + {file = "osqp-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21c79624c831e6070b3b1ca1df34032c222cc87e467def5e038713d20c9ffb5c"}, + {file = "osqp-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eeb4a3982929f5ea89fc2cc0cef238c489020b02671012f0b60a7a7c1df5093"}, + {file = "osqp-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:b62631f7388cdc49619e256110595fe741afab4d779fcc2b2ab55922cc93367f"}, + {file = "osqp-0.6.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a7d8dc0a5459558d3f2f975110e21e2292558c943047f09fb51ebc62d07a164c"}, + {file = "osqp-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89f1b270ed46a92384daa022ed336d58b5f06bdc49abe9684d41aaec02717895"}, + {file = "osqp-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78f7d8b91b0248beb95abda710bbf28ee98d5675dc9f77df7b5412da222e4f5c"}, + {file = "osqp-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:ff72fc0cec63965979e86bc99bec1658b85c3e6d8e9f95c37cc5c531fa48aabe"}, + {file = "osqp-0.6.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b7dbc83605a68703f8e509f590ab71f0f6d6992443ae534a8d99d8878bfabd73"}, + {file = "osqp-0.6.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1603ff6d699adcbf7628dadfa54b566023412b60f04f6dda36fc81cf59a678c"}, + {file = "osqp-0.6.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:866b16ea55a7ec831ed4fce3c5c812a6fcb84d8b0016a858f1ecc9bf63dcbb00"}, + {file = 
"osqp-0.6.4-cp36-cp36m-win_amd64.whl", hash = "sha256:5764886a48fc670370283cb7b004cbd5b570967bde3ecf2905e7662d6223c5bc"}, + {file = "osqp-0.6.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f606cce8f8b5bd9a6a80e3c25e2ffc0180a9da9b550731c0440b1de10565b89e"}, + {file = "osqp-0.6.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0faf25c10b84cd4005b24b290e0b6d885c3e30d01fc065f930a46c8da5401f49"}, + {file = "osqp-0.6.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac61b42c1944522bc2db6d38a55cc4b569c98c4e1e512a73d202af578d678f0f"}, + {file = "osqp-0.6.4-cp37-cp37m-win_amd64.whl", hash = "sha256:4f2f7fd96582a69c030d883b9f701028a6df690637d4a122e9043d3062e5e776"}, + {file = "osqp-0.6.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c80a308d12c4f065ae069060d6ff1b64624d03f832221f073ddaef0ce387cfa"}, + {file = "osqp-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa763c67c3ba5ce1191e4ce4dfc54c6b5fc96e794ea5bae6b03793897af93cf"}, + {file = "osqp-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b57785b2ed7928b2535978fc862b5d6826a1db69d8d21151630f654d42d7c829"}, + {file = "osqp-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:681e8881f71a997a1506ddb8631daa3207d03f59ac929987103f4289287c8065"}, + {file = "osqp-0.6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdbf25b567b53192a82a6495979d7714198a1500ca5339c55d851c8d5c7cb8e7"}, + {file = "osqp-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72efd10d855c3ed5773ff7f72c76dcddff6bb2454149b27e262d611ba6fb2f28"}, + {file = "osqp-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11bc1c5877610afae71ebff5b69325a5a4fc68b155613e454c793a66c5a11bd"}, + {file = "osqp-0.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:702a33c736603e8457acb7512d706bf1d6903f6a75ad140f6c8d14a234cd3f35"}, + {file = "osqp-0.6.4.tar.gz", hash = "sha256:cfa33e0be422ee5d3e792e7c081bcbf6fa222fc2175b6fdde4c4a219354c5e42"}, ] [package.dependencies] numpy = ">=1.7" qdldl = "*" -scipy = ">=0.13.2,<1.12.0" +scipy = ">=0.13.2" [[package]] name = "packaging" @@ -3734,7 +3922,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -3742,16 +3929,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -3768,7 +3947,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -3776,7 +3954,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3946,7 +4123,7 @@ name = "qibojit" version = "0.1.3" description = "Simulation tools based on numba and cupy." optional = false -python-versions = ">=3.9.0,<3.12" +python-versions = "^3.9,<3.12" files = [] develop = false @@ -4261,45 +4438,45 @@ tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc ( [[package]] name = "scipy" -version = "1.11.4" +version = "1.12.0" description = "Fundamental algorithms for scientific computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, - {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, - {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, - {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, - {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, - {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, - {file = 
"scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, - {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, - {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, - {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, - {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, - {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"}, + {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"}, + {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"}, + {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"}, + {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"}, + {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"}, + {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"}, + {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"}, + {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"}, + {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"}, ] [package.dependencies] -numpy = ">=1.21.6,<1.28.0" +numpy = ">=1.22.4,<1.29.0" [package.extras] dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] [[package]] name = "scs" @@ -4918,6 +5095,64 @@ files = [ {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, ] +[[package]] +name = "torch" +version = "2.2.0" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.2.0-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:d366158d6503a3447e67f8c0ad1328d54e6c181d88572d688a625fac61b13a97"}, + {file = "torch-2.2.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:707f2f80402981e9f90d0038d7d481678586251e6642a7a6ef67fc93511cb446"}, + {file = "torch-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:15c8f0a105c66b28496092fca1520346082e734095f8eaf47b5786bac24b8a31"}, + {file = "torch-2.2.0-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:0ca4df4b728515ad009b79f5107b00bcb2c63dc202d991412b9eb3b6a4f24349"}, + {file = "torch-2.2.0-cp310-none-macosx_11_0_arm64.whl", hash = 
"sha256:3d3eea2d5969b9a1c9401429ca79efc668120314d443d3463edc3289d7f003c7"}, + {file = "torch-2.2.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:0d1c580e379c0d48f0f0a08ea28d8e373295aa254de4f9ad0631f9ed8bc04c24"}, + {file = "torch-2.2.0-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9328e3c1ce628a281d2707526b4d1080eae7c4afab4f81cea75bde1f9441dc78"}, + {file = "torch-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:03c8e660907ac1b8ee07f6d929c4e15cd95be2fb764368799cca02c725a212b8"}, + {file = "torch-2.2.0-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:da0cefe7f84ece3e3b56c11c773b59d1cb2c0fd83ddf6b5f7f1fd1a987b15c3e"}, + {file = "torch-2.2.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:f81d23227034221a4a4ff8ef24cc6cec7901edd98d9e64e32822778ff01be85e"}, + {file = "torch-2.2.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:dcbfb2192ac41ca93c756ebe9e2af29df0a4c14ee0e7a0dd78f82c67a63d91d4"}, + {file = "torch-2.2.0-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:9eeb42971619e24392c9088b5b6d387d896e267889d41d267b1fec334f5227c5"}, + {file = "torch-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:c718b2ca69a6cac28baa36d86d8c0ec708b102cebd1ceb1b6488e404cd9be1d1"}, + {file = "torch-2.2.0-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:f11d18fceb4f9ecb1ac680dde7c463c120ed29056225d75469c19637e9f98d12"}, + {file = "torch-2.2.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:ee1da852bfd4a7e674135a446d6074c2da7194c1b08549e31eae0b3138c6b4d2"}, + {file = "torch-2.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0d819399819d0862268ac531cf12a501c253007df4f9e6709ede8a0148f1a7b8"}, + {file = "torch-2.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:08f53ccc38c49d839bc703ea1b20769cc8a429e0c4b20b56921a9f64949bf325"}, + {file = "torch-2.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:93bffe3779965a71dab25fc29787538c37c5d54298fd2f2369e372b6fb137d41"}, + {file = "torch-2.2.0-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c17ec323da778efe8dad49d8fb534381479ca37af1bfc58efdbb8607a9d263a3"}, + {file = "torch-2.2.0-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c02685118008834e878f676f81eab3a952b7936fa31f474ef8a5ff4b5c78b36d"}, + {file = "torch-2.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:d9f39d6f53cec240a0e3baa82cb697593340f9d4554cee6d3d6ca07925c2fac0"}, + {file = "torch-2.2.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:51770c065206250dc1222ea7c0eff3f88ab317d3e931cca2aee461b85fbc2472"}, + {file = "torch-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:008e4c6ad703de55af760c73bf937ecdd61a109f9b08f2bbb9c17e7c7017f194"}, + {file = "torch-2.2.0-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:de8680472dd14e316f42ceef2a18a301461a9058cd6e99a1f1b20f78f11412f1"}, + {file = "torch-2.2.0-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:99e1dcecb488e3fd25bcaac56e48cdb3539842904bdc8588b0b255fde03a254c"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +jinja2 = "*" +networkx = "*" +nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-nvrtc-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cuda-runtime-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cudnn-cu12 = {version = "8.9.2.26", markers = "platform_system == \"Linux\" 
and platform_machine == \"x86_64\""} +nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +sympy = "*" +triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = ">=4.8.0" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] +optree = ["optree (>=0.9.1)"] + [[package]] name = "tornado" version = "6.4" @@ -4973,6 +5208,29 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +[[package]] +name = "triton" +version = "2.2.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, + {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, + {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"}, + {file = "triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"}, + {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"}, +] + +[package.dependencies] +filelock = "*" + +[package.extras] +build = ["cmake (>=3.20)", "lit"] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)", "torch"] +tutorials = ["matplotlib", "pandas", "tabulate", "torch"] + [[package]] name = "types-deprecated" version = "1.2.9.20240106" @@ -5152,16 +5410,6 @@ files = [ {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecee4132c6cd2ce5308e21672015ddfed1ff975ad0ac8d27168ea82e71413f55"}, - {file = "wrapt-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2020f391008ef874c6d9e208b24f28e31bcb85ccff4f335f15a3251d222b92d9"}, - {file = 
"wrapt-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2feecf86e1f7a86517cab34ae6c2f081fd2d0dac860cb0c0ded96d799d20b335"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:240b1686f38ae665d1b15475966fe0472f78e71b1b4903c143a842659c8e4cb9"}, - {file = "wrapt-1.14.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9008dad07d71f68487c91e96579c8567c98ca4c3881b9b113bc7b33e9fd78b8"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6447e9f3ba72f8e2b985a1da758767698efa72723d5b59accefd716e9e8272bf"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:acae32e13a4153809db37405f5eba5bac5fbe2e2ba61ab227926a22901051c0a"}, - {file = "wrapt-1.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49ef582b7a1152ae2766557f0550a9fcbf7bbd76f43fbdc94dd3bf07cc7168be"}, - {file = "wrapt-1.14.1-cp311-cp311-win32.whl", hash = "sha256:358fe87cc899c6bb0ddc185bf3dbfa4ba646f05b1b0b9b5a27c2cb92c2cea204"}, - {file = "wrapt-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:26046cd03936ae745a502abf44dac702a5e6880b2b01c29aea8ddf3353b68224"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, @@ -5231,4 +5479,4 @@ tensorflow = ["tensorflow"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<3.12" -content-hash = "b0ab747462936ae8521862d2a5c373226712b3ab06a95888d70e9b649b7ff484" +content-hash = "f2af75884607f9e26cb077dadd7e49f4037056b0fc096cb37f389b01b0732f74" diff --git a/pyproject.toml b/pyproject.toml index bd92a8fde2..d0490b164e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ numpy = "^1.26.4" networkx = "^3.2.1" cvxpy = { version = "^1.4.2", optional = true } tensorflow = { version = "^2.14.1,<2.16", markers = "sys_platform == 'linux' or sys_platform == 'darwin'", optional = true } +torch = { version = "^2.1.1", optional = true } [tool.poetry.group.dev] optional = true @@ -67,8 +68,9 @@ dill = "^0.3.6" pytest-cov = "^4.0.0" pylint = "^3.0.3" matplotlib = "^3.7.0" -qibojit = { git = "https://github.com/qiboteam/qibojit.git" } tensorflow = { version = "^2.14.1,<2.16", markers = "sys_platform == 'linux'" } +torch = "^2.1.1" +qibojit = { git = "https://github.com/qiboteam/qibojit.git" } qibotn = { git = "https://github.com/qiboteam/qibotn.git" } stim = "^1.12.0" diff --git a/src/qibo/backends/__init__.py b/src/qibo/backends/__init__.py index c340617a24..a80c9932e1 100644 --- a/src/qibo/backends/__init__.py +++ b/src/qibo/backends/__init__.py @@ -4,6 +4,7 @@ from qibo.backends.clifford import CliffordBackend from qibo.backends.npmatrices import NumpyMatrices from qibo.backends.numpy import NumpyBackend +from qibo.backends.pytorch import PyTorchBackend from qibo.backends.tensorflow import TensorflowBackend from qibo.config import log, raise_error @@ -28,6 +29,9 @@ def construct_backend(backend, **kwargs): elif backend == "tensorflow": return TensorflowBackend() + elif backend == "pytorch": + return PyTorchBackend() + elif backend == "numpy": return NumpyBackend() @@ -76,6 +80,7 @@ class 
GlobalBackend(NumpyBackend): {"backend": "qibojit", "platform": "numba"}, {"backend": "tensorflow"}, {"backend": "numpy"}, + {"backend": "pytorch"}, ] def __new__(cls): diff --git a/src/qibo/backends/clifford.py b/src/qibo/backends/clifford.py index 2830fd9ef7..ea56e242b9 100644 --- a/src/qibo/backends/clifford.py +++ b/src/qibo/backends/clifford.py @@ -144,7 +144,9 @@ def apply_gate_clifford(self, gate, symplectic_matrix, nqubits): def apply_channel(self, channel, state, nqubits): probabilities = channel.coefficients + (1 - np.sum(channel.coefficients),) - index = np.random.choice(range(len(probabilities)), size=1, p=probabilities)[0] + index = self.np.random.choice( + range(len(probabilities)), size=1, p=probabilities + )[0] if index != len(channel.gates): gate = channel.gates[index] state = gate.apply_clifford(self, state, nqubits) @@ -329,7 +331,9 @@ def symplectic_matrix_to_generators( for x, z in zip(X, Z): paulis = [bits_to_gate[f"{zz}{xx}"] for xx, zz in zip(x, z)] if return_array: - paulis = [self.cast(getattr(gates, p)(0).matrix()) for p in paulis] + from qibo import matrices # pylint: disable=C0415 + + paulis = [self.cast(getattr(matrices, p)) for p in paulis] matrix = reduce(self.np.kron, paulis) generators.append(matrix) else: diff --git a/src/qibo/backends/npmatrices.py b/src/qibo/backends/npmatrices.py index 4f180ef4c3..c6e447e9ea 100644 --- a/src/qibo/backends/npmatrices.py +++ b/src/qibo/backends/npmatrices.py @@ -60,7 +60,9 @@ def TDG(self): ) def I(self, n=2): - return self._cast(self.np.eye(n, dtype=self.dtype), dtype=self.dtype) + # dtype=complex is necessary for pytorch backend, + # _cast will take care of casting in the right dtype for all the backends + return self._cast(self.np.eye(n, dtype=complex), dtype=self.dtype) def Align(self, n=2): return self._cast(self.I(n), dtype=self.dtype) diff --git a/src/qibo/backends/numpy.py b/src/qibo/backends/numpy.py index 6f7c6215eb..c29bf91ea8 100644 --- a/src/qibo/backends/numpy.py +++ b/src/qibo/backends/numpy.py @@ -1,4 +1,5 @@ import collections +import math import numpy as np @@ -93,7 +94,7 @@ def identity_density_matrix(self, nqubits, normalize: bool = True): def plus_state(self, nqubits): state = self.np.ones(2**nqubits, dtype=self.dtype) - state /= self.np.sqrt(2**nqubits) + state /= math.sqrt(2**nqubits) return state def plus_density_matrix(self, nqubits): @@ -105,16 +106,20 @@ def matrix(self, gate): """Convert a gate to its matrix representation in the computational basis.""" name = gate.__class__.__name__ _matrix = getattr(self.matrices, name) - return _matrix(2 ** len(gate.target_qubits)) if callable(_matrix) else _matrix + if callable(_matrix): + _matrix = _matrix(2 ** len(gate.target_qubits)) + + return self.cast(_matrix, dtype=_matrix.dtype) def matrix_parametrized(self, gate): """Convert a parametrized gate to its matrix representation in the computational basis.""" name = gate.__class__.__name__ - return getattr(self.matrices, name)(*gate.parameters) + matrix = getattr(self.matrices, name)(*gate.parameters) + return self.cast(matrix, dtype=matrix.dtype) def matrix_fused(self, fgate): rank = len(fgate.target_qubits) - matrix = np.eye(2**rank, dtype=self.dtype) + matrix = np.eye(2**rank) for gate in fgate.gates: # transfer gate matrix to numpy as it is more efficient for # small tensor calculations @@ -122,7 +127,7 @@ def matrix_fused(self, fgate): gmatrix = self.to_numpy(gate.matrix(self)) # Kronecker product with identity is needed to make the # original matrix have shape (2**rank x 2**rank) - eye = np.eye(2 
** (rank - len(gate.qubits)), dtype=self.dtype) + eye = np.eye(2 ** (rank - len(gate.qubits))) gmatrix = np.kron(gmatrix, eye) # Transpose the new matrix indices so that it targets the # target qubits of the original gate @@ -137,7 +142,7 @@ def matrix_fused(self, fgate): gmatrix = np.reshape(gmatrix, original_shape) # fuse the individual gate matrix to the total ``FusedGate`` matrix matrix = gmatrix @ matrix - return matrix + return self.cast(matrix) def control_matrix(self, gate): if len(gate.control_qubits) > 1: @@ -153,10 +158,13 @@ def control_matrix(self, gate): raise_error( ValueError, "Cannot use ``control_unitary`` method on " - "gate matrix of shape {}.".format(shape), + + f"gate matrix of shape {shape}.", ) zeros = self.np.zeros((2, 2), dtype=self.dtype) - part1 = self.np.concatenate([self.np.eye(2, dtype=self.dtype), zeros], axis=0) + zeros = self.cast(zeros, dtype=zeros.dtype) + identity = self.np.eye(2, dtype=self.dtype) + identity = self.cast(identity, dtype=identity.dtype) + part1 = self.np.concatenate([identity, zeros], axis=0) part2 = self.np.concatenate([zeros, matrix], axis=0) return self.np.concatenate([part1, part2], axis=1) @@ -177,7 +185,7 @@ def apply_gate(self, gate, state, nqubits): updates = self.np.einsum(opstring, state[-1], matrix) # Concatenate the updated part of the state `updates` with the # part of of the state that remained unaffected `state[:-1]`. - state = self.np.concatenate([state[:-1], updates[self.np.newaxis]], axis=0) + state = self.np.concatenate([state[:-1], updates[None]], axis=0) state = self.np.reshape(state, nqubits * (2,)) # Put qubit indices back to their proper places state = self.np.transpose(state, einsum_utils.reverse_order(order)) @@ -219,11 +227,9 @@ def apply_gate_density_matrix(self, gate, state, nqubits): state00 = state[range(n - 1)] state00 = state00[:, range(n - 1)] - state01 = self.np.concatenate( - [state00, state01[:, self.np.newaxis]], axis=1 - ) - state10 = self.np.concatenate([state10, state11[self.np.newaxis]], axis=0) - state = self.np.concatenate([state01, state10[self.np.newaxis]], axis=0) + state01 = self.np.concatenate([state00, state01[:, None]], axis=1) + state10 = self.np.concatenate([state10, state11[None]], axis=0) + state = self.np.concatenate([state01, state10[None]], axis=0) state = self.np.reshape(state, 2 * nqubits * (2,)) state = self.np.transpose(state, einsum_utils.reverse_order(order)) else: @@ -238,7 +244,7 @@ def apply_gate_density_matrix(self, gate, state, nqubits): def apply_gate_half_density_matrix(self, gate, state, nqubits): state = self.cast(state) - state = np.reshape(state, 2 * nqubits * (2,)) + state = self.np.reshape(state, 2 * nqubits * (2,)) matrix = gate.matrix(self) if gate.is_controlled_by: # pragma: no cover raise_error( @@ -248,12 +254,12 @@ def apply_gate_half_density_matrix(self, gate, state, nqubits): "gates.", ) else: - matrix = np.reshape(matrix, 2 * len(gate.qubits) * (2,)) + matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,)) left, _ = einsum_utils.apply_gate_density_matrix_string( gate.qubits, nqubits ) - state = np.einsum(left, state, matrix) - return np.reshape(state, 2 * (2**nqubits,)) + state = self.np.einsum(left, state, matrix) + return self.np.reshape(state, 2 * (2**nqubits,)) def apply_channel(self, channel, state, nqubits): probabilities = channel.coefficients + (1 - np.sum(channel.coefficients),) @@ -273,11 +279,12 @@ def apply_channel_density_matrix(self, channel, state, nqubits): def _append_zeros(self, state, qubits, results): """Helper method for 
collapse.""" for q, r in zip(qubits, results): - state = self.np.expand_dims(state, axis=q) - if r: - state = self.np.concatenate([self.np.zeros_like(state), state], axis=q) - else: - state = self.np.concatenate([state, self.np.zeros_like(state)], axis=q) + state = self.np.expand_dims(state, q) + state = ( + self.np.concatenate([self.np.zeros_like(state), state], q) + if r == 1 + else self.np.concatenate([state, self.np.zeros_like(state)], q) + ) return state def collapse_state(self, state, qubits, shot, nqubits, normalize=True): @@ -324,7 +331,7 @@ def reset_error_density_matrix(self, gate, state, nqubits): trace = self.partial_trace_density_matrix(state, (q,), nqubits) trace = self.np.reshape(trace, 2 * (nqubits - 1) * (2,)) zero = self.zero_density_matrix(1) - zero = self.np.tensordot(trace, zero, axes=0) + zero = self.np.tensordot(trace, zero, 0) order = list(range(2 * nqubits - 2)) order.insert(q, 2 * nqubits - 2) order.insert(q + nqubits, 2 * nqubits - 1) @@ -347,7 +354,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): trace = self.np.reshape(trace, 2 * (nqubits - len(q)) * (2,)) identity = self.identity_density_matrix(len(q)) identity = self.np.reshape(identity, 2 * len(q) * (2,)) - identity = self.np.tensordot(trace, identity, axes=0) + identity = self.np.tensordot(trace, identity, 0) qubits = list(range(nqubits)) for j in q: qubits.pop(qubits.index(j)) @@ -369,6 +376,7 @@ def depolarizing_error_density_matrix(self, gate, state, nqubits): return state def execute_circuit(self, circuit, initial_state=None, nshots=1000): + if isinstance(initial_state, type(circuit)): if not initial_state.density_matrix == circuit.density_matrix: raise_error( @@ -386,6 +394,8 @@ def execute_circuit(self, circuit, initial_state=None, nshots=1000): ) else: return self.execute_circuit(initial_state + circuit, None, nshots) + elif initial_state is not None: + initial_state = self.cast(initial_state) if circuit.repeated_execution: if circuit.measurements or circuit.has_collapse: @@ -465,7 +475,7 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): Execute the circuit `nshots` times to retrieve probabilities, frequencies and samples. Note that this method is called only if a unitary channel is present in the circuit (i.e. noisy simulation) and `density_matrix=False`, or - if some collapsing measuremnt is performed. + if some collapsing measurement is performed. 
""" if ( @@ -522,15 +532,14 @@ def execute_circuit_repeated(self, circuit, nshots, initial_state=None): sample = result.samples()[0] results.append(sample) if not circuit.density_matrix: - samples.append("".join([str(s) for s in sample])) + samples.append("".join([str(int(s)) for s in sample])) for gate in circuit.measurements: gate.result.reset() if circuit.density_matrix: # this implies also it has_collapse assert circuit.has_collapse - final_state = np.mean(self.to_numpy(final_states), 0) + final_state = self.cast(np.mean(self.to_numpy(final_states), 0)) if circuit.measurements: - qubits = [q for m in circuit.measurements for q in m.target_qubits] final_result = CircuitResult( final_state, circuit.measurements, @@ -606,7 +615,7 @@ def calculate_probabilities(self, state, qubits, nqubits): rtype = self.np.real(state).dtype unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) - probs = self.np.sum(state.astype(rtype), axis=unmeasured_qubits) + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) return self._order_probabilities(probs, qubits, nqubits).ravel() def calculate_probabilities_density_matrix(self, state, qubits, nqubits): @@ -629,21 +638,23 @@ def sample_shots(self, probabilities, nshots): ) def aggregate_shots(self, shots): - return self.np.array(shots, dtype=shots[0].dtype) + return self.cast(shots, dtype=shots[0].dtype) def samples_to_binary(self, samples, nqubits): - qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") - return self.np.mod(self.np.right_shift(samples[:, self.np.newaxis], qrange), 2) + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) + return self.np.mod( + self.np.right_shift(self.cast(samples[:, None], dtype="int32"), qrange), 2 + ) def samples_to_decimal(self, samples, nqubits): - qrange = self.np.arange(nqubits - 1, -1, -1, dtype="int32") - qrange = (2**qrange)[:, self.np.newaxis] - return self.np.matmul(self.to_numpy(samples), qrange)[:, 0] + qrange = self.np.arange(nqubits - 1, -1, -1, dtype=self.np.int32) + qrange = (2**qrange)[:, None] + return self.np.matmul(samples, qrange)[:, 0] def calculate_frequencies(self, samples): - res, counts = self.np.unique(samples, return_counts=True) - res, counts = self.np.array(res), self.np.array(counts) - return collections.Counter({k: v for k, v in zip(res, counts)}) + # Samples are a list of strings so there is no advantage in using other backends + res, counts = np.unique(samples, return_counts=True) + return collections.Counter(dict(zip(res, counts))) def update_frequencies(self, frequencies, probabilities, nsamples): samples = self.sample_shots(probabilities, nsamples) @@ -655,19 +666,21 @@ def sample_frequencies(self, probabilities, nshots): from qibo.config import SHOT_BATCH_SIZE nprobs = probabilities / self.np.sum(probabilities) - frequencies = self.np.zeros(len(nprobs), dtype="int64") + frequencies = self.np.zeros(len(nprobs), dtype=self.np.int64) for _ in range(nshots // SHOT_BATCH_SIZE): frequencies = self.update_frequencies(frequencies, nprobs, SHOT_BATCH_SIZE) frequencies = self.update_frequencies( frequencies, nprobs, nshots % SHOT_BATCH_SIZE ) - return collections.Counter({i: f for i, f in enumerate(frequencies) if f > 0}) + return collections.Counter( + {i: int(f) for i, f in enumerate(frequencies) if f > 0} + ) def apply_bitflips(self, noiseless_samples, bitflip_probabilities): - fprobs = self.np.array(bitflip_probabilities, dtype="float64") - sprobs = 
self.np.random.random(noiseless_samples.shape) - flip_0 = self.np.array(sprobs < fprobs[0], dtype=noiseless_samples.dtype) - flip_1 = self.np.array(sprobs < fprobs[1], dtype=noiseless_samples.dtype) + fprobs = self.cast(bitflip_probabilities, dtype="float64") + sprobs = self.cast(np.random.random(noiseless_samples.shape), dtype="float64") + flip_0 = self.cast(sprobs < fprobs[0], dtype=noiseless_samples.dtype) + flip_1 = self.cast(sprobs < fprobs[1], dtype=noiseless_samples.dtype) noisy_samples = noiseless_samples + (1 - noiseless_samples) * flip_0 noisy_samples = noisy_samples - noiseless_samples * flip_1 return noisy_samples @@ -676,7 +689,7 @@ def partial_trace(self, state, qubits, nqubits): state = self.cast(state) state = self.np.reshape(state, nqubits * (2,)) axes = 2 * [list(qubits)] - rho = self.np.tensordot(state, self.np.conj(state), axes=axes) + rho = self.np.tensordot(state, self.np.conj(state), axes) shape = 2 * (2 ** (nqubits - len(qubits)),) return self.np.reshape(rho, shape) @@ -695,21 +708,19 @@ def partial_trace_density_matrix(self, state, qubits, nqubits): def calculate_norm(self, state, order=2): state = self.cast(state) - return self.np.linalg.norm(state, ord=order) + return self.np.linalg.norm(state, order) def calculate_norm_density_matrix(self, state, order="nuc"): state = self.cast(state) return self.np.linalg.norm(state, ord=order) def calculate_overlap(self, state1, state2): - state1 = self.cast(state1) - state2 = self.cast(state2) - return self.np.abs(self.np.sum(self.np.conj(state1) * state2)) + return self.np.abs(self.np.sum(np.conj(self.cast(state1)) * self.cast(state2))) def calculate_overlap_density_matrix(self, state1, state2): - state1 = self.cast(state1) - state2 = self.cast(state2) - return self.np.trace(self.np.transpose(self.np.conj(state1)) @ state2) + return self.np.trace( + self.np.matmul(self.np.conj(self.cast(state1)).T, self.cast(state2)) + ) def calculate_eigenvalues(self, matrix, k=6): if self.issparse(matrix): @@ -737,42 +748,37 @@ def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): else: from scipy.linalg import expm return expm(-1j * a * matrix) - else: - expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) - ud = self.np.transpose(self.np.conj(eigenvectors)) - return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) + expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) + ud = self.np.transpose(np.conj(eigenvectors)) + return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) def calculate_expectation_state(self, hamiltonian, state, normalize): statec = self.np.conj(state) hstate = hamiltonian @ state ev = self.np.real(self.np.sum(statec * hstate)) if normalize: - norm = self.np.sum(self.np.square(self.np.abs(state))) - ev = ev / norm + ev /= self.np.sum(self.np.square(self.np.abs(state))) return ev def calculate_expectation_density_matrix(self, hamiltonian, state, normalize): - ev = self.np.real(self.np.trace(hamiltonian @ state)) + ev = self.np.real(self.np.trace(self.cast(hamiltonian @ state))) if normalize: norm = self.np.real(self.np.trace(state)) - ev = ev / norm + ev /= norm return ev + # TODO: remove this method def calculate_hamiltonian_matrix_product(self, matrix1, matrix2): - return self.np.dot(matrix1, matrix2) + return matrix1 @ matrix2 + # TODO: remove this method def calculate_hamiltonian_state_product(self, matrix, state): - rank = len(tuple(state.shape)) - state = self.cast(state) - if rank == 1: # vector - return matrix.dot(state[:, np.newaxis])[:, 0] - elif rank == 2: # matrix - 
return matrix.dot(state) - else: + if len(tuple(state.shape)) > 2: raise_error( ValueError, - "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), + f"Cannot multiply Hamiltonian with rank-{len(tuple(state.shape))} tensor.", ) + return matrix @ state def assert_allclose(self, value, target, rtol=1e-7, atol=0.0): if isinstance(value, CircuitResult) or isinstance(value, QuantumState): diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py new file mode 100644 index 0000000000..403cea10da --- /dev/null +++ b/src/qibo/backends/pytorch.py @@ -0,0 +1,190 @@ +"""PyTorch backend.""" + +from typing import Union + +import numpy as np +import torch + +from qibo import __version__ +from qibo.backends.npmatrices import NumpyMatrices +from qibo.backends.numpy import NumpyBackend + +torch_dtype_dict = { + "int": torch.int32, + "float": torch.float32, + "complex": torch.complex64, + "int32": torch.int32, + "int64": torch.int64, + "float32": torch.float32, + "float64": torch.float64, + "complex64": torch.complex64, + "complex128": torch.complex128, +} + + +class TorchMatrices(NumpyMatrices): + """Matrix representation of every gate as a torch Tensor.""" + + def __init__(self, dtype): + super().__init__(dtype) + self.dtype = torch_dtype_dict[dtype] + + def _cast(self, x, dtype): + return torch.as_tensor(x, dtype=dtype) + + def Unitary(self, u): + return self._cast(u, dtype=self.dtype) + + +class PyTorchBackend(NumpyBackend): + def __init__(self): + super().__init__() + + self.name = "pytorch" + self.versions = { + "qibo": __version__, + "numpy": np.__version__, + "torch": torch.__version__, + } + + self.matrices = TorchMatrices(self.dtype) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.nthreads = 0 + self.np = torch + self.dtype = torch_dtype_dict[self.dtype] + self.tensor_types = (self.np.Tensor, np.ndarray) + + # These torch functions work differently from numpy or have different names + self.np.transpose = torch.permute + self.np.expand_dims = self.np.unsqueeze + self.np.mod = torch.remainder + self.np.right_shift = torch.bitwise_right_shift + + def set_device(self, device): # pragma: no cover + self.device = device + + def cast( + self, + x: Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray]], + dtype: Union[str, torch.dtype, np.dtype, type] = None, + copy: bool = False, + ): + """Casts input as a Torch tensor of the specified dtype. + + This method supports casting of single tensors or lists of tensors + for the :class:`qibo.backends.PyTorchBackend`. + + Args: + x (Union[torch.Tensor, list[torch.Tensor], np.ndarray, list[np.ndarray], int, float, complex]): + Input to be cast. + dtype (Union[str, torch.dtype, np.dtype, type]): Target data type. + If ``None``, the default dtype of the backend is used. + Defaults to ``None``. + copy (bool, optional): If ``True``, the input tensor is copied before casting. + Defaults to ``False``. + """ + if dtype is None: + dtype = self.dtype + elif isinstance(dtype, type): + dtype = torch_dtype_dict[dtype.__name__] + elif not isinstance(dtype, torch.dtype): + dtype = torch_dtype_dict[str(dtype)] + + if isinstance(x, self.np.Tensor): + x = x.to(dtype) + elif isinstance(x, list) and all(isinstance(row, self.np.Tensor) for row in x): + x = self.np.stack(x) + else: + x = self.np.tensor(x, dtype=dtype) + + if copy: + return x.clone() + + return x
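For reviewers, a minimal usage sketch of the ``cast`` method added above (not part of the patch; it assumes the backend's default dtype ``complex128``, which ``torch_dtype_dict`` maps to ``torch.complex128``):

    import numpy as np
    from qibo.backends.pytorch import PyTorchBackend

    backend = PyTorchBackend()

    # numpy input is converted to a torch tensor with the backend's default dtype
    state = backend.cast(np.array([1.0, 0.0]))  # dtype: torch.complex128

    # a list of torch tensors is stacked instead of being re-tensorized
    rows = [backend.cast([1.0, 0.0]), backend.cast([0.0, 1.0])]
    matrix = backend.cast(rows)  # shape: (2, 2)

    # copy=True returns a clone, leaving the original tensor untouched
    copied = backend.cast(state, copy=True)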
+ + def issparse(self, x): + if isinstance(x, self.np.Tensor): + return x.is_sparse + + return super().issparse(x) + + def to_numpy(self, x): + if isinstance(x, list): + return np.asarray([self.to_numpy(i) for i in x]) + + if isinstance(x, self.np.Tensor): + return x.numpy(force=True) + + return x + + def _order_probabilities(self, probs, qubits, nqubits): + """Arrange probabilities according to the given ``qubits`` ordering.""" + if probs.dim() == 0: # pragma: no cover + return probs + unmeasured, reduced = [], {} + for i in range(nqubits): + if i in qubits: + reduced[i] = i - len(unmeasured) + else: + unmeasured.append(i) + return self.np.transpose(probs, [reduced.get(i) for i in qubits]) + + def calculate_probabilities(self, state, qubits, nqubits): + rtype = self.np.real(state).dtype + unmeasured_qubits = tuple(i for i in range(nqubits) if i not in qubits) + state = self.np.reshape(self.np.abs(state) ** 2, nqubits * (2,)) + if len(unmeasured_qubits) == 0: + probs = self.cast(state, dtype=rtype) + else: + probs = self.np.sum(self.cast(state, dtype=rtype), axis=unmeasured_qubits) + return self._order_probabilities(probs, qubits, nqubits).ravel() + + def set_seed(self, seed): + self.np.manual_seed(seed) + np.random.seed(seed) + + def sample_shots(self, probabilities, nshots): + return self.np.multinomial( + self.cast(probabilities, dtype="float"), nshots, replacement=True + ) + + def calculate_eigenvalues(self, matrix, k=6): + return self.np.linalg.eigvalsh(matrix) # pylint: disable=not-callable + + def calculate_eigenvectors(self, matrix, k=6): + return self.np.linalg.eigh(matrix) # pylint: disable=not-callable + + def calculate_matrix_exp(self, a, matrix, eigenvectors=None, eigenvalues=None): + if eigenvectors is None or self.issparse(matrix): + return self.np.linalg.matrix_exp( # pylint: disable=not-callable + -1j * a * matrix + ) + expd = self.np.diag(self.np.exp(-1j * a * eigenvalues)) + ud = self.np.conj(eigenvectors).T + return self.np.matmul(eigenvectors, self.np.matmul(expd, ud)) + + def test_regressions(self, name): + if name == "test_measurementresult_apply_bitflips": + return [ + [0, 0, 0, 0, 2, 3, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 3, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 2, 0, 0, 0, 0, 0], + ] + + if name == "test_probabilistic_measurement": + if self.device == "cuda": # pragma: no cover + return {0: 273, 1: 233, 2: 242, 3: 252} + return {1: 270, 2: 248, 3: 244, 0: 238} + + if name == "test_unbalanced_probabilistic_measurement": + if self.device == "cuda": # pragma: no cover + return {0: 196, 1: 153, 2: 156, 3: 495} + return {3: 492, 2: 176, 0: 168, 1: 164} + + if name == "test_post_measurement_bitflips_on_circuit": + return [ + {5: 30}, + {5: 17, 4: 5, 7: 4, 1: 2, 6: 2}, + {4: 9, 2: 5, 5: 5, 3: 4, 6: 4, 0: 1, 1: 1, 7: 1}, + ]
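As orientation for the rest of this patch, a hedged sketch of how the new backend is selected and used (not part of the diff; only public qibo APIs touched in this PR are assumed):

    import qibo
    from qibo import gates
    from qibo.models import Circuit

    qibo.set_backend("pytorch")  # registered in backends/__init__.py above

    circuit = Circuit(2)
    circuit.add(gates.H(0))
    circuit.add(gates.CNOT(0, 1))
    result = circuit.execute()
    state = result.state()  # a torch.Tensor on the backend's device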
diff --git a/src/qibo/backends/tensorflow.py b/src/qibo/backends/tensorflow.py index 6490b37d9e..f762b0e66c 100644 --- a/src/qibo/backends/tensorflow.py +++ b/src/qibo/backends/tensorflow.py @@ -199,16 +199,16 @@ def calculate_hamiltonian_state_product(self, matrix, state): else: raise_error( ValueError, - "Cannot multiply Hamiltonian with " "rank-{} tensor.".format(rank), + f"Cannot multiply Hamiltonian with rank-{rank} tensor.", ) def test_regressions(self, name): if name == "test_measurementresult_apply_bitflips": return [ - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 2, 2, 4, 4, 0], - [4, 0, 0, 1, 0, 0, 0, 4, 4, 0], - [4, 0, 0, 0, 0, 0, 0, 4, 4, 0], + [4, 0, 0, 1, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 2, 1, 1, 4, 0, 0, 4], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 4, 0, 0, 0, 4], ] elif name == "test_probabilistic_measurement": if "GPU" in self.device: # pragma: no cover @@ -223,6 +223,6 @@ def test_regressions(self, name): elif name == "test_post_measurement_bitflips_on_circuit": return [ {5: 30}, - {5: 16, 7: 10, 6: 2, 3: 1, 4: 1}, - {3: 6, 5: 6, 7: 5, 2: 4, 4: 3, 0: 2, 1: 2, 6: 2}, + {5: 12, 7: 6, 4: 6, 1: 5, 6: 1}, + {3: 7, 6: 4, 2: 4, 7: 4, 0: 4, 5: 3, 4: 2, 1: 2}, ] diff --git a/src/qibo/callbacks.py b/src/qibo/callbacks.py index 77114ed42a..2313e42e95 100644 --- a/src/qibo/callbacks.py +++ b/src/qibo/callbacks.py @@ -36,14 +36,13 @@ def extend(self, x): def __getitem__(self, k): if not isinstance(k, (int, slice, list, tuple)): - raise_error(IndexError, "Unrecognized type for index {}.".format(k)) + raise_error(IndexError, f"Unrecognized type for index {k}.") if isinstance(k, int) and k >= len(self._results): raise_error( IndexError, - "Attempting to access callbacks {} run but " - "the callback has been used in {} executions." - "".format(k, len(self._results)), + f"Attempting to access callbacks {k} run but " + + f"the callback has been used in {len(self._results)} executions.", ) return self._results[k] @@ -308,13 +307,10 @@ def __init__(self, mode: Union[str, int] = "gap", check_degenerate: bool = True) if not isinstance(mode, (int, str)): raise_error( TypeError, - "Gap callback mode should be integer or " - "string but is {}.".format(type(mode)), + f"Gap callback mode should be integer or string but is {type(mode)}.", ) elif isinstance(mode, str) and mode != "gap": - raise_error( - ValueError, "Unsupported mode {} for gap callback." "".format(mode) - ) + raise_error(ValueError, f"Unsupported mode {mode} for gap callback.") self.mode = mode self.check_degenerate = check_degenerate self.evolution = None @@ -350,8 +346,7 @@ def apply(self, backend, state): excited += 1 if excited > 1: log.warning( - "The Hamiltonian is degenerate. Using eigenvalue {} " - "to calculate gap.".format(excited) + f"The Hamiltonian is degenerate. Using eigenvalue {excited} to calculate gap."
) self.append(gap) return gap diff --git a/src/qibo/derivative.py b/src/qibo/derivative.py index 65d4ef87b0..72772cc006 100644 --- a/src/qibo/derivative.py +++ b/src/qibo/derivative.py @@ -1,5 +1,6 @@ import numpy as np +from qibo.backends.pytorch import PyTorchBackend from qibo.config import raise_error from qibo.hamiltonians.abstract import AbstractHamiltonian @@ -102,6 +103,12 @@ def circuit(nqubits = 1): # inheriting hamiltonian's backend backend = hamiltonian.backend + # TODO: make this work with pytorch backend + if isinstance(backend, PyTorchBackend): + raise_error( + NotImplementedError, + "The parameter shift rule is not supported with the PyTorchBackend.", + ) # getting the gate's type gate = circuit.associate_gates_with_parameters()[parameter_index] diff --git a/src/qibo/gates/gates.py b/src/qibo/gates/gates.py index ae56a2723b..5b34f7faac 100644 --- a/src/qibo/gates/gates.py +++ b/src/qibo/gates/gates.py @@ -1754,7 +1754,7 @@ def parameters(self, x): if shape != (2, 2): raise_error( ValueError, - "Invalid rotation shape {} for generalized " "fSim gate".format(shape), + f"Invalid rotation shape {shape} for generalized fSim gate", ) ParametrizedGate.parameters.fset(self, x) # pylint: disable=no-member @@ -2328,14 +2328,31 @@ def __init__( "trainable": trainable, } - # checking unitarity without invoking any backend if check_unitary: + if unitary.__class__.__name__ == "Tensor": + import torch # pylint: disable=C0415 + + diag_function = torch.diag + all_function = torch.all + conj_function = torch.conj + transpose_function = torch.permute + else: + diag_function = np.diag + all_function = np.all + conj_function = np.conj + transpose_function = np.transpose - product = np.transpose(np.conj(unitary)) @ unitary - sums = all(np.abs(1 - np.sum(product, axis=1)) < PRECISION_TOL) - diagonal = all(np.abs(1 - np.diag(product)) < PRECISION_TOL) + + product = transpose_function(conj_function(unitary), (1, 0)) @ unitary + diagonals = all(np.abs(1 - diag_function(product)) < PRECISION_TOL) + off_diagonals = bool( + all_function( + np.abs(product - diag_function(diag_function(product))) + < PRECISION_TOL + ) + ) - self.unitary = True if sums and diagonal else False - del sums, diagonal, product + self.unitary = True if diagonals and off_diagonals else False + del diagonals, off_diagonals, product @Gate.parameters.setter def parameters(self, x):
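To see what the reworked unitarity check in ``Unitary`` above computes, here is a numpy-only sketch (not part of the patch; ``PRECISION_TOL`` is a stand-in for ``qibo.config.PRECISION_TOL``) that tests the diagonal and off-diagonal parts of U†U separately:

    import numpy as np

    PRECISION_TOL = 1e-8  # stand-in for qibo.config.PRECISION_TOL

    def is_unitary(matrix):
        # U†U equals the identity for a unitary matrix: diagonal entries
        # close to 1, off-diagonal entries close to 0
        product = np.transpose(np.conj(matrix), (1, 0)) @ matrix
        diagonals = all(np.abs(1 - np.diag(product)) < PRECISION_TOL)
        off_diagonals = bool(
            np.all(np.abs(product - np.diag(np.diag(product))) < PRECISION_TOL)
        )
        return diagonals and off_diagonals

    hadamard = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
    assert is_unitary(hadamard)
    assert not is_unitary(2 * hadamard)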
diff --git a/src/qibo/gates/measurements.py b/src/qibo/gates/measurements.py index 1888810da2..50b4c29e78 100644 --- a/src/qibo/gates/measurements.py +++ b/src/qibo/gates/measurements.py @@ -127,7 +127,7 @@ def _get_bitflip_tuple(qubits: Tuple[int], probs: "ProbsType") -> Tuple[float]: ) return tuple(probs[q] if q in probs else 0.0 for q in qubits) - raise_error(TypeError, "Invalid type {} of bitflip map.".format(probs)) + raise_error(TypeError, f"Invalid type {probs} of bitflip map.") def _get_bitflip_map(self, p: Optional["ProbsType"] = None) -> Dict[int, float]: """Creates dictionary with bitflip probabilities.""" diff --git a/src/qibo/hamiltonians/abstract.py b/src/qibo/hamiltonians/abstract.py index 966f943659..749ad0b21e 100644 --- a/src/qibo/hamiltonians/abstract.py +++ b/src/qibo/hamiltonians/abstract.py @@ -16,13 +16,9 @@ def nqubits(self): @nqubits.setter def nqubits(self, n): if not isinstance(n, int): - raise_error( - RuntimeError, "nqubits must be an integer but is " "{}.".format(type(n)) - ) + raise_error(RuntimeError, f"nqubits must be an integer but is {type(n)}.") if n < 1: - raise_error( - ValueError, "nqubits must be a positive integer but is " "{}".format(n) - ) + raise_error(ValueError, f"nqubits must be a positive integer but is {n}.") self._nqubits = n @abstractmethod diff --git a/src/qibo/hamiltonians/adiabatic.py b/src/qibo/hamiltonians/adiabatic.py index d7627d449e..4f69be8bf1 100644 --- a/src/qibo/hamiltonians/adiabatic.py +++ b/src/qibo/hamiltonians/adiabatic.py @@ -19,8 +19,7 @@ def __new__(cls, h0, h1): if type(h1) != type(h0): raise_error( TypeError, - "h1 should be of the same type {} of h0 but " - "is {}.".format(type(h0), type(h1)), + f"h1 should be of the same type {type(h0)} as h0, but is {type(h1)}.", ) if isinstance(h0, hamiltonians.Hamiltonian): return BaseAdiabaticHamiltonian(h0, h1) @@ -29,8 +28,7 @@ def __new__(cls, h0, h1): else: raise_error( TypeError, - "h0 should be a hamiltonians.Hamiltonian " - "object but is {}.".format(type(h0)), + f"h0 should be a hamiltonians.Hamiltonian object but is {type(h0)}.", ) def __init__(self, h0, h1): # pragma: no cover @@ -63,11 +61,11 @@ def __init__(self, h0, h1): if h0.nqubits != h1.nqubits: raise_error( ValueError, - "H0 has {} qubits while H1 has {}." "".format(h0.nqubits, h1.nqubits), + f"H0 has {h0.nqubits} qubits while H1 has {h1.nqubits}.", ) self.nqubits = h0.nqubits if h0.backend != h1.backend: # pragma: no cover - raise_error(ValueError, "H0 and H1 have different backend.") + raise_error(ValueError, "H0 and H1 have different backends.") self.backend = h0.backend self.h0, self.h1 = h0, h1 self.schedule = None diff --git a/src/qibo/hamiltonians/hamiltonians.py b/src/qibo/hamiltonians/hamiltonians.py index 10b9419580..8e17e88fbb 100644 --- a/src/qibo/hamiltonians/hamiltonians.py +++ b/src/qibo/hamiltonians/hamiltonians.py @@ -6,6 +6,7 @@ import numpy as np import sympy +from qibo.backends import PyTorchBackend from qibo.config import EINSUM_CHARS, log, raise_error from qibo.hamiltonians.abstract import AbstractHamiltonian from qibo.symbols import Z @@ -115,6 +116,7 @@ def exp(self, a): def expectation(self, state, normalize=False): if isinstance(state, self.backend.tensor_types): + state = self.backend.cast(state) shape = tuple(state.shape) if len(shape) == 1: # state vector return self.backend.calculate_expectation_state(self, state, normalize) @@ -174,6 +176,7 @@ def energy_fluctuation(self, state): Return: Energy fluctuation value (float). 
""" + state = self.backend.cast(state) energy = self.expectation(state) h = self.matrix h2 = Hamiltonian(nqubits=self.nqubits, matrix=h @ h, backend=self.backend) @@ -242,11 +245,13 @@ def __mul__(self, o): ) new_matrix = self.matrix * o r = self.__class__(self.nqubits, new_matrix, backend=self.backend) + o = self.backend.cast(o) if self._eigenvalues is not None: if self.backend.np.real(o) >= 0: # TODO: check for side effects K.qnp r._eigenvalues = o * self._eigenvalues elif not self.backend.issparse(self.matrix): - r._eigenvalues = o * self._eigenvalues[::-1] + axis = (0,) if isinstance(self.backend, PyTorchBackend) else 0 + r._eigenvalues = o * self.backend.np.flip(self._eigenvalues, axis) if self._eigenvectors is not None: if self.backend.np.real(o) > 0: # TODO: see above r._eigenvectors = self._eigenvectors diff --git a/src/qibo/hamiltonians/terms.py b/src/qibo/hamiltonians/terms.py index e6e3ea1362..f1d2f7cda0 100644 --- a/src/qibo/hamiltonians/terms.py +++ b/src/qibo/hamiltonians/terms.py @@ -25,21 +25,17 @@ def __init__(self, matrix, *q): if qi < 0: raise_error( ValueError, - "Invalid qubit id {} < 0 was given " - "in Hamiltonian term".format(qi), + f"Invalid qubit id {qi} < 0 was given in Hamiltonian term.", ) if not isinstance(matrix, np.ndarray): - raise_error( - TypeError, "Invalid type {} of symbol matrix." "".format(type(matrix)) - ) + raise_error(TypeError, f"Invalid type {type(matrix)} of symbol matrix.") dim = int(matrix.shape[0]) if 2 ** len(q) != dim: raise_error( ValueError, - "Matrix dimension {} given in Hamiltonian " - "term is not compatible with the number " - "of target qubits {}." - "".format(dim, len(q)), + f"Matrix dimension {dim} given in Hamiltonian " + + "term is not compatible with the number " + + f"of target qubits {len(q)}.", ) self.target_qubits = tuple(q) self._gate = None @@ -79,8 +75,7 @@ def merge(self, term): raise_error( ValueError, "Cannot merge HamiltonianTerm acting on " - "qubits {} to term on qubits {}." 
- "".format(term.target_qubits, self.target_qubits), + + f"qubits {term.target_qubits} to term on qubits {self.target_qubits}.", ) matrix = np.kron(term.matrix, np.eye(2 ** (len(self) - len(term)))) matrix = np.reshape(matrix, 2 * len(self) * (2,)) @@ -190,7 +185,7 @@ def __init__(self, coefficient, factors=1, symbol_map={}): elif factor.is_number: self.coefficient *= complex(factor) else: # pragma: no cover - raise_error(TypeError, "Cannot parse factor {}.".format(factor)) + raise_error(TypeError, f"Cannot parse factor {factor}.") self.target_qubits = tuple(sorted(self.matrix_map.keys())) diff --git a/src/qibo/measurements.py b/src/qibo/measurements.py index a5bc61253b..e46ffe6a00 100644 --- a/src/qibo/measurements.py +++ b/src/qibo/measurements.py @@ -36,7 +36,7 @@ class MeasurementSymbol(sympy.Symbol): _counter = 0 def __new__(cls, *args, **kwargs): - name = "m{}".format(cls._counter) + name = f"m{cls._counter}" cls._counter += 1 return super().__new__(cls=cls, name=name) diff --git a/src/qibo/models/error_mitigation.py b/src/qibo/models/error_mitigation.py index 9af448c564..617dd6a65f 100644 --- a/src/qibo/models/error_mitigation.py +++ b/src/qibo/models/error_mitigation.py @@ -788,13 +788,17 @@ def error_sensitive_circuit(circuit, observable, backend=None): comp_to_pauli = comp_basis_to_pauli(num_qubits, backend=backend) observable.nqubits = num_qubits observable_liouville = vectorization( - np.transpose(np.conjugate(unitary_matrix)) @ observable.matrix @ unitary_matrix, + backend.np.transpose(backend.np.conj(unitary_matrix), (1, 0)) + @ observable.matrix + @ unitary_matrix, order="row", backend=backend, ) observable_pauli_liouville = comp_to_pauli @ observable_liouville - index = int(np.where(abs(observable_pauli_liouville) >= 1e-5)[0][0]) + index = int( + backend.np.where(backend.np.abs(observable_pauli_liouville) >= 1e-5)[0][0] + ) observable_pauli = list(product(["I", "X", "Y", "Z"], repeat=num_qubits))[index] @@ -809,12 +813,12 @@ def error_sensitive_circuit(circuit, observable, backend=None): for i in range(num_qubits): observable_i = pauli_gates[observable_pauli[i]] random_init = pauli_gates["I"] - while np.any(abs(observable_i - pauli_gates["Z"]) > 1e-5) and np.any( - abs(observable_i - pauli_gates["I"]) > 1e-5 - ): + while backend.np.any( + backend.np.abs(observable_i - pauli_gates["Z"]) > 1e-5 + ) and backend.np.any(abs(observable_i - pauli_gates["I"]) > 1e-5): random_init = random_clifford(1, backend=backend, return_circuit=False) observable_i = ( - np.conjugate(np.transpose(random_init)) + backend.np.conj(backend.np.transpose(random_init, (1, 0))) @ pauli_gates[observable_pauli[i]] @ random_init ) diff --git a/src/qibo/models/evolution.py b/src/qibo/models/evolution.py index b40e3577a1..0a53e4917e 100644 --- a/src/qibo/models/evolution.py +++ b/src/qibo/models/evolution.py @@ -55,9 +55,7 @@ def __init__(self, hamiltonian, dt, solver="exp", callbacks=[], accelerators=Non else: ham = hamiltonian(0) if not isinstance(ham, AbstractHamiltonian): - raise TypeError( - "Hamiltonian type {} not understood." 
"".format(type(ham)) - ) + raise TypeError(f"Hamiltonian type {type(ham)} not understood.") self.nqubits = ham.nqubits self.backend = ham.backend if dt <= 0: @@ -70,8 +68,8 @@ def __init__(self, hamiltonian, dt, solver="exp", callbacks=[], accelerators=Non raise_error( NotImplementedError, "Distributed evolution is only " - "implemented using the Trotter " - "exponential solver.", + + "implemented using the Trotter " + + "exponential solver.", ) ham.circuit(dt, accelerators) self.solver = solvers.get_solver(solver, self.dt, hamiltonian) diff --git a/src/qibo/models/qft.py b/src/qibo/models/qft.py index 092a573706..f54cd1a094 100644 --- a/src/qibo/models/qft.py +++ b/src/qibo/models/qft.py @@ -63,9 +63,8 @@ def _DistributedQFT(nqubits, accelerators=None): if icrit < circuit.nglobal: # pylint: disable=E1101 raise_error( NotImplementedError, - "Cannot implement QFT for {} qubits " - "using {} global qubits." - "".format(nqubits, circuit.nglobal), + f"Cannot implement QFT for {nqubits} qubits " + + f"using {circuit.nglobal} global qubits.", ) # pylint: disable=E1101 for i1 in range(nqubits): diff --git a/src/qibo/models/variational.py b/src/qibo/models/variational.py index 950b8f3a5c..d379cf386c 100644 --- a/src/qibo/models/variational.py +++ b/src/qibo/models/variational.py @@ -188,19 +188,18 @@ def __init__( if nsteps <= 0: # pragma: no cover raise_error( ValueError, - "Number of steps nsteps should be positive but is {}." - "".format(nsteps), + f"Number of steps nsteps should be positive but is {nsteps}.", ) if t_max <= 0: # pragma: no cover raise_error( ValueError, - "Maximum time t_max should be positive but is {}." "".format(t_max), + f"Maximum time t_max should be positive but is {t_max}.", ) if easy_hamiltonian.nqubits != problem_hamiltonian.nqubits: # pragma: no cover raise_error( ValueError, - "The easy Hamiltonian has {} qubits while problem Hamiltonian has {}." 
- "".format(easy_hamiltonian.nqubits, problem_hamiltonian.nqubits), + f"The easy Hamiltonian has {easy_hamiltonian.nqubits} qubits " + + f"while problem Hamiltonian has {problem_hamiltonian.nqubits}.", ) self.ATOL = bounds_tolerance @@ -219,7 +218,7 @@ def __init__( raise_error( ValueError, "Scheduling function must take only one argument," - "but the function proposed takes {}.".format(nparams), + + f" but the function proposed takes {nparams}.", ) self.set_schedule(s) @@ -228,10 +227,10 @@ def set_schedule(self, func): # check boundary conditions s0 = func(0) if abs(s0) > self.ATOL: # pragma: no cover - raise_error(ValueError, "s(0) should be 0 but it is {}.".format(s0)) + raise_error(ValueError, f"s(0) should be 0 but it is {s0}.") s1 = func(1) if abs(s1 - 1) > self.ATOL: # pragma: no cover - raise_error(ValueError, "s(1) should be 1 but it is {}.".format(s1)) + raise_error(ValueError, f"s(1) should be 1 but it is {s1}.") self._schedule = func def schedule(self, t): @@ -241,14 +240,12 @@ def schedule(self, t): if (t - self._t_max) > self.ATOL_TIME: # pragma: no cover raise_error( ValueError, - "t cannot be greater than {}, but it is {}.".format(self._t_max, t), + f"t cannot be greater than {self._t_max}, but it is {t}.", ) s = self._schedule(t / self._t_max) if (abs(s) - 1) > self.ATOL: # pragma: no cover - raise_error( - ValueError, "s cannot be greater than 1 but it is {}.".format(s) - ) + raise_error(ValueError, f"s cannot be greater than 1 but it is {s}.") return s def hamiltonian(self, t): @@ -256,7 +253,7 @@ def hamiltonian(self, t): if (t - self._t_max) > self.ATOL: # pragma: no cover raise_error( ValueError, - "t cannot be greater than {}, but it is {}.".format(self._t_max, t), + f"t cannot be greater than {self._t_max}, but it is {t}.", ) # boundary conditions s(0)=0, s(total_time)=1 st = self.schedule(t) @@ -360,9 +357,7 @@ def __init__( self.params = None # problem hamiltonian if not isinstance(hamiltonian, AbstractHamiltonian): - raise_error( - TypeError, "Invalid Hamiltonian type {}." "".format(type(hamiltonian)) - ) + raise_error(TypeError, f"Invalid Hamiltonian type {type(hamiltonian)}.") self.hamiltonian = hamiltonian self.nqubits = hamiltonian.nqubits # mixer hamiltonian (default = -sum(sigma_x)) @@ -377,16 +372,14 @@ def __init__( if type(mixer) != type(hamiltonian): raise_error( TypeError, - "Given Hamiltonian is of type {} " - "while mixer is of type {}." - "".format(type(hamiltonian), type(mixer)), + f"Given Hamiltonian is of type {type(hamiltonian)} " + + f"while mixer is of type {type(mixer)}.", ) if mixer.nqubits != hamiltonian.nqubits: raise_error( ValueError, - "Given Hamiltonian acts on {} qubits " - "while mixer acts on {}." 
- "".format(hamiltonian.nqubits, mixer.nqubits), + f"Given Hamiltonian acts on {hamiltonian.nqubits} qubits " + + f"while mixer acts on {mixer.nqubits}.", ) self.mixer = mixer @@ -398,8 +391,8 @@ def __init__( raise_error( NotImplementedError, "Distributed QAOA is implemented " - "only with SymbolicHamiltonian and " - "exponential solver.", + + "only with SymbolicHamiltonian and " + + "exponential solver.", ) if isinstance(self.hamiltonian, self.hamiltonians.SymbolicHamiltonian): self.hamiltonian.circuit(1e-2, accelerators) @@ -534,8 +527,8 @@ def minimize( raise_error( ValueError, "Initial guess for the parameters must " - "contain an even number of values but " - "contains {}.".format(len(initial_p)), + + "contain an even number of values but " + + f"contains {len(initial_p)}.", ) def _loss(params, qaoa, hamiltonian, state): diff --git a/src/qibo/quantum_info/basis.py b/src/qibo/quantum_info/basis.py index dcb5d7ab84..24b2a17507 100644 --- a/src/qibo/quantum_info/basis.py +++ b/src/qibo/quantum_info/basis.py @@ -115,7 +115,7 @@ def pauli_basis( else: basis = basis_full - basis = backend.cast(basis) + basis = backend.cast(basis, dtype=backend.dtype) if normalize: basis /= np.sqrt(2**nqubits) diff --git a/src/qibo/quantum_info/entropies.py b/src/qibo/quantum_info/entropies.py index 716bdda3fd..3d4e7ac027 100644 --- a/src/qibo/quantum_info/entropies.py +++ b/src/qibo/quantum_info/entropies.py @@ -54,12 +54,14 @@ def shannon_entropy(prob_dist, base: float = 2, backend=None): "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = backend.np.sum(prob_dist) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") log_prob = np.where(prob_dist != 0, np.log2(prob_dist) / np.log2(base), 0.0) - shan_entropy = -np.sum(prob_dist * log_prob) + shan_entropy = -backend.np.sum(prob_dist * log_prob) # absolute value if entropy == 0.0 to avoid returning -0.0 shan_entropy = np.abs(shan_entropy) if shan_entropy == 0.0 else shan_entropy @@ -119,10 +121,14 @@ def classical_relative_entropy(prob_dist_p, prob_dist_q, base: float = 2, backen ValueError, "All elements of the probability array must be between 0. 
and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = backend.np.sum(prob_dist) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") if alpha == 0.0: @@ -218,7 +226,9 @@ def classical_renyi_entropy( if alpha == np.inf: return -1 * np.log2(max(prob_dist)) / np.log2(base) - renyi_ent = (1 / (1 - alpha)) * np.log2(np.sum(prob_dist**alpha)) / np.log2(base) + total_sum = backend.np.sum(prob_dist**alpha) + + renyi_ent = (1 / (1 - alpha)) * np.log2(total_sum) / np.log2(base) return renyi_ent @@ -299,14 +309,21 @@ def classical_relative_renyi_entropy( ValueError, "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + + total_sum_p = backend.np.sum(prob_dist_p) + total_sum_q = backend.np.sum(prob_dist_q) + + if np.abs(total_sum_p - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + if np.abs(total_sum_q - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") if alpha == 0.5: - return -2 * np.log2(np.sum(np.sqrt(prob_dist_p * prob_dist_q))) / np.log2(base) + total_sum = np.sqrt(prob_dist_p * prob_dist_q) + total_sum = backend.np.sum(total_sum) + + return -2 * np.log2(total_sum) / np.log2(base) if alpha == 1.0: return classical_relative_entropy( @@ -319,7 +336,9 @@ def classical_relative_renyi_entropy( prob_p = prob_dist_p**alpha prob_q = prob_dist_q ** (1 - alpha) - return (1 / (alpha - 1)) * np.log2(np.sum(prob_p * prob_q)) / np.log2(base) + total_sum = backend.np.sum(prob_p * prob_q) + + return (1 / (alpha - 1)) * np.log2(total_sum) / np.log2(base) def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend=None): @@ -375,13 +394,18 @@ def classical_tsallis_entropy(prob_dist, alpha: float, base: float = 2, backend= "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist) - 1.0) > PRECISION_TOL: + total_sum = backend.np.sum(prob_dist) + + if np.abs(total_sum - 1.0) > PRECISION_TOL: raise_error(ValueError, "Probability array must sum to 1.") if alpha == 1.0: return shannon_entropy(prob_dist, base=base, backend=backend) - return (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) + total_sum = prob_dist**alpha + total_sum = backend.np.sum(total_sum) + + return (1 / (1 - alpha)) * (total_sum - 1) def von_neumann_entropy( diff --git a/src/qibo/quantum_info/quantum_networks.py b/src/qibo/quantum_info/quantum_networks.py index 8992169ae5..e7bf52e0ad 100644 --- a/src/qibo/quantum_info/quantum_networks.py +++ b/src/qibo/quantum_info/quantum_networks.py @@ -164,7 +164,7 @@ def is_unital( self._matrix = self._full() self._pure = False - partial_trace = np.einsum("jkjl -> kl", self._matrix) + partial_trace = self._einsum("jkjl -> kl", self._matrix) identity = self._backend.cast( np.eye(partial_trace.shape[0]), dtype=partial_trace.dtype ) @@ -212,7 +212,7 @@ def is_causal( self._matrix = self._full() self._pure = False - partial_trace = np.einsum("jklk -> jl", self._matrix) + partial_trace = self._einsum("jklk -> jl", self._matrix) identity = self._backend.cast( np.eye(partial_trace.shape[0]), dtype=partial_trace.dtype ) @@ -292,12 +292,12 @@ def apply(self, state): Returns: ndarray: Resulting state :math:`\\mathcal{E}(\\varrho)`. 
""" - matrix = np.copy(self._matrix) + matrix = self._backend.cast(self._matrix, copy=True) if self.is_pure(): - return np.einsum("kj,ml,jl -> km", matrix, np.conj(matrix), state) + return self._einsum("kj,ml,jl -> km", matrix, np.conj(matrix), state) - return np.einsum("jklm,km -> jl", matrix, state) + return self._einsum("jklm,km -> jl", matrix, state) def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): """Link product between two quantum networks. @@ -353,7 +353,7 @@ def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): if super_subscripts: cexpr = "jklmnopq,klop->jmnq" return QuantumNetwork( - np.einsum(cexpr, first_matrix, second_matrix), + self._einsum(cexpr, first_matrix, second_matrix), [self.partition[0] + self.partition[-1]], ) @@ -361,12 +361,12 @@ def link_product(self, second_network, subscripts: str = "ij,jk -> ik"): if inv_subscripts: return QuantumNetwork( - np.einsum(cexpr, second_matrix, first_matrix), + self._einsum(cexpr, second_matrix, first_matrix), [second_network.partition[0], self.partition[1]], ) return QuantumNetwork( - np.einsum(cexpr, first_matrix, second_matrix), + self._einsum(cexpr, first_matrix, second_matrix), [self.partition[0], second_network.partition[1]], ) @@ -633,6 +633,8 @@ def _set_tensor_and_parameters(self): """Sets tensor based on inputs.""" self._backend = _check_backend(self._backend) + self._einsum = self._backend.np.einsum + if isinstance(self.partition, list): self.partition = tuple(self.partition) @@ -661,11 +663,10 @@ def _set_tensor_and_parameters(self): def _full(self): """Reshapes input matrix based on purity.""" - matrix = np.copy(self._matrix) - if self.is_pure(): - matrix = np.einsum("jk,lm -> kjml", matrix, np.conj(matrix)) + matrix = self._backend.cast(self._matrix, copy=True) - return matrix + if self.is_pure(): + matrix = self._einsum("jk,lm -> kjml", matrix, np.conj(matrix)) return matrix diff --git a/src/qibo/quantum_info/random_ensembles.py b/src/qibo/quantum_info/random_ensembles.py index e388baf62c..e14db95f35 100644 --- a/src/qibo/quantum_info/random_ensembles.py +++ b/src/qibo/quantum_info/random_ensembles.py @@ -239,7 +239,8 @@ def random_unitary(dims: int, measure: Optional[str] = None, seed=None, backend= H = random_hermitian(dims, seed=seed, backend=NumpyBackend()) unitary = expm(-1.0j * H / 2) - unitary = backend.cast(unitary, dtype=unitary.dtype) + + unitary = backend.cast(unitary, dtype=unitary.dtype) return unitary @@ -1191,11 +1192,11 @@ def _super_op_from_bcsz_measure(dims: int, rank: int, order: str, seed, backend) operator += eigenvalue * np.outer(eigenvector, np.conj(eigenvector)) if order == "row": - operator = np.kron( + operator = backend.np.kron( backend.identity_density_matrix(nqubits, normalize=False), operator ) if order == "column": - operator = np.kron( + operator = backend.np.kron( operator, backend.identity_density_matrix(nqubits, normalize=False) ) diff --git a/src/qibo/quantum_info/superoperator_transformations.py b/src/qibo/quantum_info/superoperator_transformations.py index c888ed5b18..05c8622428 100644 --- a/src/qibo/quantum_info/superoperator_transformations.py +++ b/src/qibo/quantum_info/superoperator_transformations.py @@ -488,6 +488,8 @@ def choi_to_kraus( kraus_right.append( coeff * unvectorization(eigenvector_right, order=order, backend=backend) ) + kraus_left = backend.cast(kraus_left) + kraus_right = backend.cast(kraus_right) kraus_ops = backend.cast([kraus_left, kraus_right]) else: # when choi_super_op is CP diff --git 
a/src/qibo/quantum_info/utils.py b/src/qibo/quantum_info/utils.py index a162561fd2..13df6781d8 100644 --- a/src/qibo/quantum_info/utils.py +++ b/src/qibo/quantum_info/utils.py @@ -237,10 +237,10 @@ def hellinger_distance(prob_dist_p, prob_dist_q, validate: bool = False, backend ValueError, "All elements of the probability array must be between 0. and 1..", ) - if np.abs(np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: + if backend.np.abs(backend.np.sum(prob_dist_p) - 1.0) > PRECISION_TOL: raise_error(ValueError, "First probability array must sum to 1.") - if np.abs(np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: + if backend.np.abs(backend.np.sum(prob_dist_q) - 1.0) > PRECISION_TOL: raise_error(ValueError, "Second probability array must sum to 1.") distance = float( @@ -321,7 +321,7 @@ def hellinger_shot_error( hellinger_error = hellinger_fidelity( prob_dist_p, prob_dist_q, validate=validate, backend=backend ) - hellinger_error = np.sqrt(hellinger_error / nshots) * np.sum( + hellinger_error = np.sqrt(hellinger_error / nshots) * backend.np.sum( np.sqrt(prob_dist_q * (1 - prob_dist_p)) + np.sqrt(prob_dist_p * (1 - prob_dist_q)) ) diff --git a/src/qibo/result.py b/src/qibo/result.py index 5d74157007..934f156e39 100644 --- a/src/qibo/result.py +++ b/src/qibo/result.py @@ -325,7 +325,7 @@ def samples(self, binary: bool = True, registers: bool = False): qubits = self.measurement_gate.target_qubits if self._samples is None: if self.measurements[0].result.has_samples(): - self._samples = np.concatenate( + self._samples = self.backend.np.concatenate( [gate.result.samples() for gate in self.measurements], axis=1 ) else: @@ -353,7 +353,7 @@ def samples(self, binary: bool = True, registers: bool = False): qubit_map = { q: i for i, q in enumerate(self.measurement_gate.target_qubits) } - self._samples = np.array(samples, dtype="int32") + self._samples = self.backend.cast(samples, "int32") for gate in self.measurements: rqubits = tuple(qubit_map.get(q) for q in gate.target_qubits) gate.result.register_samples( diff --git a/src/qibo/solvers.py b/src/qibo/solvers.py index 8a24ff3c3d..69ab0d3622 100644 --- a/src/qibo/solvers.py +++ b/src/qibo/solvers.py @@ -74,7 +74,7 @@ class Exponential(BaseSolver): def __call__(self, state): propagator = self.current_hamiltonian.exp(self.dt) self.t += self.dt - return (propagator @ state[:, self.backend.np.newaxis])[:, 0] + return (propagator @ state[:, None])[:, 0] class RungeKutta4(BaseSolver): diff --git a/src/qibo/symbols.py b/src/qibo/symbols.py index 8f2042d626..9aa12a1dcc 100644 --- a/src/qibo/symbols.py +++ b/src/qibo/symbols.py @@ -38,7 +38,7 @@ class Symbol(sympy.Symbol): """ def __new__(cls, q, matrix=None, name="Symbol", commutative=False, **assumptions): - name = "{}{}".format(name, q) + name = f"{name}{q}" assumptions["commutative"] = commutative return super().__new__(cls=cls, name=name, **assumptions) @@ -63,9 +63,7 @@ def __init__(self, q, matrix=None, name="Symbol", commutative=False): ), ) ): - raise_error( - TypeError, "Invalid type {} of symbol matrix." 
"".format(type(matrix)) - ) + raise_error(TypeError, f"Invalid type {type(matrix)} of symbol matrix.") self.matrix = matrix def __getstate__(self): diff --git a/src/qibo/transpiler/unitary_decompositions.py b/src/qibo/transpiler/unitary_decompositions.py index 472347b0d4..cf8509b623 100644 --- a/src/qibo/transpiler/unitary_decompositions.py +++ b/src/qibo/transpiler/unitary_decompositions.py @@ -61,22 +61,31 @@ def calculate_psi(unitary, magic_basis=magic_basis, backend=None): f"{backend.__class__.__name__} does not support `linalg.eig.`", ) - magic_basis = backend.cast(magic_basis, dtype=magic_basis.dtype) + magic_basis = backend.cast(magic_basis) + unitary = backend.cast(unitary) # write unitary in magic basis - u_magic = np.transpose(np.conj(magic_basis)) @ unitary @ magic_basis + u_magic = ( + backend.np.transpose(backend.np.conj(magic_basis), (1, 0)) + @ unitary + @ magic_basis + ) # construct and diagonalize UT_U - ut_u = np.transpose(u_magic) @ u_magic + ut_u = backend.np.transpose(u_magic, (1, 0)) @ u_magic # When the matrix given to np.linalg.eig is a diagonal matrix up to machine precision the decomposition # is not accurate anymore. decimals = 20 works for random 2q Clifford unitaries. - eigvals, psi_magic = np.linalg.eig(np.round(ut_u, decimals=20)) - # orthogonalize eigenvectors in the case of degeneracy (Gram-Schmidt) - psi_magic, _ = np.linalg.qr(psi_magic) + if backend.__class__.__name__ == "TensorflowBackend": + eigvals, psi_magic = np.linalg.eig(np.round(ut_u, decimals=20)) + psi_magic, _ = np.linalg.qr(psi_magic) + else: + eigvals, psi_magic = backend.np.linalg.eig(np.round(ut_u, decimals=20)) + # orthogonalize eigenvectors in the case of degeneracy (Gram-Schmidt) + psi_magic, _ = backend.np.linalg.qr(psi_magic) # write psi in computational basis - psi = np.dot(magic_basis, psi_magic) + psi = backend.np.matmul(magic_basis, psi_magic) return psi, eigvals -def schmidt_decompose(state): +def schmidt_decompose(state, backend=None): """Decomposes a two-qubit product state to its single-qubit parts. Args: @@ -86,7 +95,11 @@ def schmidt_decompose(state): (ndarray, ndarray): decomposition """ - u, d, v = np.linalg.svd(np.reshape(state, (2, 2))) + backend = _check_backend(backend) + if backend.__class__.__name__ == "TensorflowBackend": + u, d, v = np.linalg.svd(backend.np.reshape(state, (2, 2))) + else: + u, d, v = backend.np.linalg.svd(backend.np.reshape(state, (2, 2))) if not np.allclose(d, [1, 0]): # pragma: no cover raise_error( ValueError, @@ -95,7 +108,7 @@ def schmidt_decompose(state): return u[:, 0], v[0] -def calculate_single_qubit_unitaries(psi): +def calculate_single_qubit_unitaries(psi, backend=None): """Calculates local unitaries that maps a maximally entangled basis to the magic basis. See Lemma 1 of Appendix A in arXiv:quant-ph/0011050. @@ -106,48 +119,66 @@ def calculate_single_qubit_unitaries(psi): Returns: (ndarray, ndarray): Local unitaries UA and UB that map the given basis to the magic basis. 
""" - - # TODO: Handle the case where psi is not real in the magic basis - psi_magic = np.dot(np.conj(magic_basis).T, psi) - if not np.allclose(psi_magic.imag, np.zeros_like(psi_magic)): # pragma: no cover + backend = _check_backend(backend) + psi_magic = backend.np.matmul(backend.np.conj(backend.cast(magic_basis)).T, psi) + if not np.allclose( + backend.to_numpy(psi_magic).imag, np.zeros_like(psi_magic) + ): # pragma: no cover raise_error(NotImplementedError, "Given state is not real in the magic basis.") - psi_bar = np.copy(psi).T + psi_bar = backend.cast(psi.T, copy=True) # find e and f by inverting (A3), (A4) ef = (psi_bar[0] + 1j * psi_bar[1]) / np.sqrt(2) e_f_ = (psi_bar[0] - 1j * psi_bar[1]) / np.sqrt(2) - e, f = schmidt_decompose(ef) - e_, f_ = schmidt_decompose(e_f_) + e, f = schmidt_decompose(ef, backend=backend) + e_, f_ = schmidt_decompose(e_f_, backend=backend) # find exp(1j * delta) using (A5a) - ef_ = np.kron(e, f_) - phase = 1j * np.sqrt(2) * np.dot(np.conj(ef_), psi_bar[2]) - - # construct unitaries UA, UB using (A6a), (A6b) - ua = np.tensordot([1, 0], np.conj(e), axes=0) + phase * np.tensordot( - [0, 1], np.conj(e_), axes=0 + ef_ = backend.np.kron(e, f_) + phase = ( + 1j + * np.sqrt(2) + * backend.np.sum(backend.np.multiply(backend.np.conj(ef_), psi_bar[2])) ) - ub = np.tensordot([1, 0], np.conj(f), axes=0) + np.conj(phase) * np.tensordot( - [0, 1], np.conj(f_), axes=0 + v0 = backend.cast(np.asarray([1, 0])) + v1 = backend.cast(np.asarray([0, 1])) + # construct unitaries UA, UB using (A6a), (A6b) + ua = backend.np.tensordot(v0, backend.np.conj(e), 0) + phase * backend.np.tensordot( + v1, backend.np.conj(e_), 0 ) + ub = backend.np.tensordot(v0, backend.np.conj(f), 0) + backend.np.conj( + phase + ) * backend.np.tensordot(v1, backend.np.conj(f_), 0) return ua, ub -def calculate_diagonal(unitary, ua, ub, va, vb): +def calculate_diagonal(unitary, ua, ub, va, vb, backend=None): """Calculates Ud matrix that can be written as exp(-iH). See Eq. (A1) in arXiv:quant-ph/0011050. Ud is diagonal in the magic and Bell basis. 
""" + backend = _check_backend(backend) # normalize U_A, U_B, V_A, V_B so that detU_d = 1 # this is required so that sum(lambdas) = 0 # and Ud can be written as exp(-iH) - det = np.linalg.det(unitary) ** (1 / 16) + if backend.__class__.__name__ == "TensorflowBackend": + det = np.linalg.det(unitary) ** (1 / 16) + else: + det = backend.np.linalg.det(unitary) ** (1 / 16) ua *= det ub *= det va *= det vb *= det - u_dagger = np.transpose(np.conj(np.kron(ua, ub))) - v_dagger = np.transpose(np.conj(np.kron(va, vb))) + u_dagger = backend.np.transpose( + backend.np.conj( + backend.np.kron( + ua, + ub, + ) + ), + (1, 0), + ) + v_dagger = backend.np.transpose(backend.np.conj(backend.np.kron(va, vb)), (1, 0)) ud = u_dagger @ unitary @ v_dagger return ua, ub, ud, va, vb @@ -155,12 +186,17 @@ def calculate_diagonal(unitary, ua, ub, va, vb): def magic_decomposition(unitary, backend=None): """Decomposes an arbitrary unitary to (A1) from arXiv:quant-ph/0011050.""" backend = _check_backend(backend) + unitary = backend.cast(unitary) psi, eigvals = calculate_psi(unitary, backend=backend) - psi_tilde = np.conj(np.sqrt(eigvals)) * np.dot(unitary, psi) - va, vb = calculate_single_qubit_unitaries(psi) - ua_dagger, ub_dagger = calculate_single_qubit_unitaries(psi_tilde) - ua, ub = np.transpose(np.conj(ua_dagger)), np.transpose(np.conj(ub_dagger)) - return calculate_diagonal(unitary, ua, ub, va, vb) + psi_tilde = backend.np.conj(backend.np.sqrt(eigvals)) * backend.np.matmul( + unitary, psi + ) + va, vb = calculate_single_qubit_unitaries(psi, backend=backend) + ua_dagger, ub_dagger = calculate_single_qubit_unitaries(psi_tilde, backend=backend) + ua, ub = backend.np.transpose( + backend.np.conj(ua_dagger), (1, 0) + ), backend.np.transpose(backend.np.conj(ub_dagger), (1, 0)) + return calculate_diagonal(unitary, ua, ub, va, vb, backend=backend) def to_bell_diagonal(ud, bell_basis=bell_basis, backend=None): @@ -168,69 +204,76 @@ def to_bell_diagonal(ud, bell_basis=bell_basis, backend=None): backend = _check_backend(backend) ud = backend.cast(ud) - bell_basis = backend.cast(bell_basis, dtype=bell_basis.dtype) + bell_basis = backend.cast(bell_basis) - ud_bell = np.transpose(np.conj(bell_basis)) @ ud @ bell_basis - ud_diag = np.diag(ud_bell) - if not np.allclose(np.diag(ud_diag), ud_bell): # pragma: no cover + ud_bell = ( + backend.np.transpose(backend.np.conj(bell_basis), (1, 0)) @ ud @ bell_basis + ) + ud_diag = backend.np.diag(ud_bell) + if not np.allclose(backend.np.diag(ud_diag), ud_bell): # pragma: no cover return None - uprod = np.prod(ud_diag) + uprod = backend.np.prod(ud_diag) if not np.allclose(uprod, 1): # pragma: no cover return None return ud_diag -def calculate_h_vector(ud_diag): +def calculate_h_vector(ud_diag, backend=None): """Finds h parameters corresponding to exp(-iH). See Eq. (4)-(5) in arXiv:quant-ph/0307177. 
""" - lambdas = -np.angle(ud_diag) + backend = _check_backend(backend) + lambdas = -backend.np.angle(ud_diag) hx = (lambdas[0] + lambdas[2]) / 2.0 hy = (lambdas[1] + lambdas[2]) / 2.0 hz = (lambdas[0] + lambdas[1]) / 2.0 return hx, hy, hz -def cnot_decomposition(q0, q1, hx, hy, hz): +def cnot_decomposition(q0, q1, hx, hy, hz, backend=None): """Performs decomposition (6) from arXiv:quant-ph/0307177.""" - u3 = -1j * matrices.H + backend = _check_backend(backend) + h = backend.cast(H) + u3 = backend.cast(-1j * matrices.H) # use corrected version from PRA paper (not arXiv) - u2 = -u3 @ gates.RX(0, 2 * hx - np.pi / 2).matrix(NumpyBackend()) + u2 = -u3 @ gates.RX(0, 2 * hx - np.pi / 2).matrix(backend) # add an extra exp(-i pi / 4) global phase to get exact match - v2 = np.exp(-1j * np.pi / 4) * gates.RZ(0, 2 * hz).matrix(NumpyBackend()) - v3 = gates.RZ(0, -2 * hy).matrix(NumpyBackend()) - w = (matrices.I - 1j * matrices.X) / np.sqrt(2) + v2 = np.exp(-1j * np.pi / 4) * gates.RZ(0, 2 * hz).matrix(backend) + v3 = gates.RZ(0, -2 * hy).matrix(backend) + w = backend.cast((matrices.I - 1j * matrices.X) / np.sqrt(2)) # change CNOT to CZ using Hadamard gates return [ gates.H(q1), gates.CZ(q0, q1), gates.Unitary(u2, q0), - gates.Unitary(H @ v2 @ H, q1), + gates.Unitary(h @ v2 @ h, q1), gates.CZ(q0, q1), gates.Unitary(u3, q0), - gates.Unitary(H @ v3 @ H, q1), + gates.Unitary(h @ v3 @ h, q1), gates.CZ(q0, q1), gates.Unitary(w, q0), - gates.Unitary(np.conj(w).T @ H, q1), + gates.Unitary(backend.np.conj(w).T @ h, q1), ] -def cnot_decomposition_light(q0, q1, hx, hy): +def cnot_decomposition_light(q0, q1, hx, hy, backend=None): """Performs decomposition (24) from arXiv:quant-ph/0307177.""" - w = (matrices.I - 1j * matrices.X) / np.sqrt(2) - u2 = gates.RX(0, 2 * hx).matrix(NumpyBackend()) - v2 = gates.RZ(0, -2 * hy).matrix(NumpyBackend()) + backend = _check_backend(backend) + h = backend.cast(H) + w = backend.cast((matrices.I - 1j * matrices.X) / np.sqrt(2)) + u2 = gates.RX(0, 2 * hx).matrix(backend) + v2 = gates.RZ(0, -2 * hy).matrix(backend) # change CNOT to CZ using Hadamard gates return [ - gates.Unitary(np.conj(w).T, q0), - gates.Unitary(H @ w, q1), + gates.Unitary(backend.np.conj(w).T, q0), + gates.Unitary(h @ w, q1), gates.CZ(q0, q1), gates.Unitary(u2, q0), - gates.Unitary(H @ v2 @ H, q1), + gates.Unitary(h @ v2 @ h, q1), gates.CZ(q0, q1), gates.Unitary(w, q0), - gates.Unitary(np.conj(w).T @ H, q1), + gates.Unitary(backend.np.conj(w).T @ h, q1), ] @@ -253,31 +296,31 @@ def two_qubit_decomposition(q0, q1, unitary, backend=None): u4, v4, ud, u1, v1 = magic_decomposition(unitary, backend=backend) ud_diag = to_bell_diagonal(ud, backend=backend) - hx, hy, hz = calculate_h_vector(ud_diag) + hx, hy, hz = calculate_h_vector(ud_diag, backend=backend) hx, hy, hz = float(hx), float(hy), float(hz) if np.allclose([hx, hy, hz], [0, 0, 0]): u4, v4, ud, u1, v1 = magic_decomposition(unitary, backend=backend) gatelist = [gates.Unitary(u4 @ u1, q0), gates.Unitary(v4 @ v1, q1)] elif np.allclose(hz, 0): - gatelist = cnot_decomposition_light(q0, q1, hx, hy) + gatelist = cnot_decomposition_light(q0, q1, hx, hy, backend=backend) if ud is None: return gatelist g0, g1 = gatelist[:2] - gatelist[0] = gates.Unitary(g0.parameters[0] @ u1, q0) - gatelist[1] = gates.Unitary(g1.parameters[0] @ v1, q1) + gatelist[0] = gates.Unitary(backend.cast(g0.parameters[0]) @ u1, q0) + gatelist[1] = gates.Unitary(backend.cast(g1.parameters[0]) @ v1, q1) g0, g1 = gatelist[-2:] gatelist[-2] = gates.Unitary(u4 @ g0.parameters[0], q0) gatelist[-1] = 
gates.Unitary(v4 @ g1.parameters[0], q1) else: - cnot_dec = cnot_decomposition(q0, q1, hx, hy, hz) + cnot_dec = cnot_decomposition(q0, q1, hx, hy, hz, backend=backend) if ud is None: return cnot_dec gatelist = [ gates.Unitary(u1, q0), - gates.Unitary(H @ v1, q1), + gates.Unitary(backend.cast(H) @ v1, q1), ] gatelist.extend(cnot_dec[1:]) g0, g1 = gatelist[-2:] diff --git a/tests/conftest.py b/tests/conftest.py index 618423bbb6..f320321576 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,7 +11,14 @@ from qibo.backends import construct_backend # backends to be tested -BACKENDS = ["numpy", "tensorflow", "qibojit-numba", "qibojit-cupy", "qibojit-cuquantum"] +BACKENDS = [ + "numpy", + "tensorflow", + "pytorch", + "qibojit-numba", + "qibojit-cupy", + "qibojit-cuquantum", +] # multigpu configurations to be tested (only with qibojit-cupy) ACCELERATORS = [ {"/GPU:0": 1, "/GPU:1": 1}, @@ -47,7 +54,7 @@ def pytest_runtest_setup(item): plat = sys.platform if supported_platforms and plat not in supported_platforms: # pragma: no cover # case not covered by workflows - pytest.skip("Cannot run test on platform {}.".format(plat)) + pytest.skip(f"Cannot run test on platform {plat}.") def pytest_configure(config): diff --git a/tests/test_backends.py b/tests/test_backends.py index a4714bd378..61ed409f49 100644 --- a/tests/test_backends.py +++ b/tests/test_backends.py @@ -108,7 +108,7 @@ def test_control_matrix_unitary(backend): u = np.random.random((2, 2)) gate = gates.Unitary(u, 0).controlled_by(1) matrix = backend.control_matrix(gate) - target_matrix = np.eye(4, dtype=backend.dtype) + target_matrix = np.eye(4, dtype=np.complex128) target_matrix[2:, 2:] = u backend.assert_allclose(matrix, target_matrix) diff --git a/tests/test_backends_clifford.py b/tests/test_backends_clifford.py index 6ee566e52a..9af8192633 100644 --- a/tests/test_backends_clifford.py +++ b/tests/test_backends_clifford.py @@ -1,10 +1,18 @@ +"""Tests for Clifford backend.""" + from itertools import product import numpy as np import pytest from qibo import Circuit, gates, set_backend -from qibo.backends import CliffordBackend, GlobalBackend, NumpyBackend +from qibo.backends import ( + CliffordBackend, + GlobalBackend, + NumpyBackend, + PyTorchBackend, + TensorflowBackend, +) from qibo.backends.clifford import _get_engine_name from qibo.noise import DepolarizingError, NoiseModel, PauliError from qibo.quantum_info.random_ensembles import random_clifford @@ -13,14 +21,37 @@ def construct_clifford_backend(backend): - if backend.__class__.__name__ in ["TensorflowBackend", "CuQuantumBackend"]: + if ( + isinstance(backend, (TensorflowBackend, PyTorchBackend)) + or backend.__class__.__name__ == "CuQuantumBackend" + ): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend.name) - pytest.skip("Clifford backend not defined for the this engine.") + pytest.skip("Clifford backend not defined for this engine.") return CliffordBackend(_get_engine_name(backend)) +def test_set_backend(backend): + clifford_bkd = construct_clifford_backend(backend) + platform = _get_engine_name(backend) + set_backend("clifford", platform=platform) + assert isinstance(GlobalBackend(), CliffordBackend) + global_platform = GlobalBackend().platform + assert global_platform == platform + + +def test_global_backend(backend): + construct_clifford_backend(backend) + set_backend(backend.name, platform=backend.platform) + clifford_bkd = CliffordBackend() + target = ( + GlobalBackend().name if backend.name == "numpy" else GlobalBackend().platform 
+ ) + assert clifford_bkd.platform == target + set_backend("numpy") + + THETAS_1Q = [ th + 2 * i * np.pi for i in range(2) for th in [0, np.pi / 2, np.pi, 3 * np.pi / 2] ] @@ -117,12 +148,17 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): backend.set_seed(2024) nqubits, nshots = 3, 200 clifford_bkd = construct_clifford_backend(backend) + c = random_clifford(nqubits, seed=1, backend=backend) c.density_matrix = True c_copy = c.copy() c.add(gates.M(*MEASURED_QUBITS)) c_copy.add(gates.M(*MEASURED_QUBITS)) + + numpy_bkd.set_seed(2024) numpy_result = numpy_bkd.execute_circuit(c, nshots=nshots) + + clifford_bkd.set_seed(2024) clifford_result = clifford_bkd.execute_circuit(c_copy, nshots=nshots) backend.assert_allclose(backend.cast(numpy_result.state()), clifford_result.state()) @@ -154,10 +190,15 @@ def test_random_clifford_circuit(backend, prob_qubits, binary): backend.assert_allclose(np_count / nshots, clif_count / nshots, atol=1e-1) -def test_collapsing_measurements(backend): +@pytest.mark.parametrize("seed", [2024]) +def test_collapsing_measurements(backend, seed): + backend.set_seed(2024) clifford_bkd = construct_clifford_backend(backend) - gate_queue = random_clifford(3, density_matrix=True, backend=backend).queue - measured_qubits = np.random.choice(range(3), size=2, replace=False) + gate_queue = random_clifford( + 3, density_matrix=True, seed=seed, backend=backend + ).queue + local_state = np.random.default_rng(seed) + measured_qubits = local_state.choice(range(3), size=2, replace=False) c1 = Circuit(3) c2 = Circuit(3, density_matrix=True) for i, g in enumerate(gate_queue): @@ -169,8 +210,13 @@ def test_collapsing_measurements(backend): c2.add(g) c1.add(gates.M(*range(3))) c2.add(gates.M(*range(3))) - clifford_res = clifford_bkd.execute_circuit(c1, nshots=1000) - numpy_res = numpy_bkd.execute_circuit(c2, nshots=1000) + + clifford_bkd.set_seed(seed) + clifford_res = clifford_bkd.execute_circuit(c1, nshots=100) + + numpy_bkd.set_seed(seed) + numpy_res = numpy_bkd.execute_circuit(c2, nshots=100) + backend.assert_allclose( clifford_res.probabilities(), backend.cast(numpy_res.probabilities()), atol=1e-1 ) @@ -214,20 +260,11 @@ def test_bitflip_noise(backend): ) -def test_set_backend(backend): - clifford_bkd = construct_clifford_backend(backend) - platform = _get_engine_name(backend) - set_backend("clifford", platform=platform) - assert isinstance(GlobalBackend(), CliffordBackend) - global_platform = GlobalBackend().platform - assert global_platform == platform - - @pytest.mark.parametrize("seed", [2024]) def test_noise_channels(backend, seed): - clifford_bkd = construct_clifford_backend(backend) - backend.set_seed(seed) + + clifford_bkd = construct_clifford_backend(backend) clifford_bkd.set_seed(seed) noise = NoiseModel() @@ -243,6 +280,7 @@ def test_noise_channels(backend, seed): c = noise.apply(c) c_copy = noise.apply(c_copy) + numpy_bkd.set_seed(2024) numpy_result = numpy_bkd.execute_circuit(c) clifford_result = clifford_bkd.execute_circuit(c_copy) diff --git a/tests/test_backend_qibotn.py b/tests/test_backends_qibotn.py similarity index 92% rename from tests/test_backend_qibotn.py rename to tests/test_backends_qibotn.py index a18eff8184..ae850afefa 100644 --- a/tests/test_backend_qibotn.py +++ b/tests/test_backends_qibotn.py @@ -12,3 +12,5 @@ def test_backend_qibotn(): qibo.set_backend(backend="qibotn", platform="qutensornet", runcard=None) assert isinstance(GlobalBackend(), QuimbBackend) + + qibo.set_backend("numpy") diff --git a/tests/test_callbacks.py 
b/tests/test_callbacks.py index 4c19b859a9..5ff73f66a5 100644 --- a/tests/test_callbacks.py +++ b/tests/test_callbacks.py @@ -306,10 +306,14 @@ def test_overlap(backend, nqubits, density_matrix, seed): if density_matrix: final_overlap = overlap.apply_density_matrix(backend, state1) - target_overlap = np.trace(np.transpose(np.conj(state0)) @ state1) + target_overlap = np.trace( + np.transpose(np.conj(backend.to_numpy(state0))) @ backend.to_numpy(state1) + ) else: final_overlap = overlap.apply(backend, state1) - target_overlap = np.abs(np.sum(np.conj(state0) * state1)) + target_overlap = np.abs( + np.sum(np.conj(backend.to_numpy(state0)) * backend.to_numpy(state1)) + ) backend.assert_allclose(final_overlap, target_overlap) diff --git a/tests/test_cirq.py b/tests/test_cirq.py index 0069bdec9d..636581656d 100644 --- a/tests/test_cirq.py +++ b/tests/test_cirq.py @@ -97,7 +97,7 @@ def assert_cirq_gates_equivalent(qibo_gate, cirq_gate): gatename, theta, targets = pieces else: # pragma: no cover # case not tested because it fails - raise RuntimeError("Cirq gate parsing failed with {}.".format(pieces)) + raise RuntimeError(f"Cirq gate parsing failed with {pieces}.") qubits = list(int(x) for x in targets.replace(" ", "").split(",")) targets = (qubits.pop(),) diff --git a/tests/test_derivative.py b/tests/test_derivative.py index 60ea06244e..a1c6e97e0e 100644 --- a/tests/test_derivative.py +++ b/tests/test_derivative.py @@ -2,6 +2,7 @@ import pytest from qibo import Circuit, gates, hamiltonians +from qibo.backends.pytorch import PyTorchBackend from qibo.derivative import finite_differences, parameter_shift from qibo.symbols import Z @@ -31,6 +32,7 @@ def circuit(nqubits=1): [(1, [-8.51104358e-02, -5.20075970e-01, 0]), (0.5, [-0.02405061, -0.13560379, 0])], ) def test_standard_parameter_shift(backend, nshots, atol, scale_factor, grads): + # initializing the circuit c = circuit(nqubits=1) backend.set_seed(42) @@ -55,34 +57,45 @@ def test_standard_parameter_shift(backend, nshots, atol, scale_factor, grads): circuit=c, hamiltonian=c, parameter_index=0, nshots=nshots ) - # executing all the procedure - grad_0 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=0, - scale_factor=scale_factor, - nshots=nshots, - ) - grad_1 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=1, - scale_factor=scale_factor, - nshots=nshots, - ) - grad_2 = parameter_shift( - circuit=c, - hamiltonian=test_hamiltonian, - parameter_index=2, - scale_factor=scale_factor, - nshots=nshots, - ) + if isinstance(backend, PyTorchBackend): + with pytest.raises(NotImplementedError) as excinfo: + grad = parameter_shift( + circuit=c, hamiltonian=test_hamiltonian, parameter_index=0 + ) + assert ( + str(excinfo.value) + == "PyTorchBackend for the parameter shift rule is not supported." 
+ ) + + else: + # executing all the procedure + grad_0 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=0, + scale_factor=scale_factor, + nshots=nshots, + ) + grad_1 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=1, + scale_factor=scale_factor, + nshots=nshots, + ) + grad_2 = parameter_shift( + circuit=c, + hamiltonian=test_hamiltonian, + parameter_index=2, + scale_factor=scale_factor, + nshots=nshots, + ) - # check of known values - # calculated using tf.GradientTape - backend.assert_allclose(grad_0, grads[0], atol=atol) - backend.assert_allclose(grad_1, grads[1], atol=atol) - backend.assert_allclose(grad_2, grads[2], atol=atol) + # check of known values + # calculated using tf.GradientTape + backend.assert_allclose(grad_0, grads[0], atol=atol) + backend.assert_allclose(grad_1, grads[1], atol=atol) + backend.assert_allclose(grad_2, grads[2], atol=atol) @pytest.mark.parametrize("step_size", [10**-i for i in range(5, 10, 1)]) diff --git a/tests/test_gates_gates.py b/tests/test_gates_gates.py index caabc73e57..77eda97cc0 100644 --- a/tests/test_gates_gates.py +++ b/tests/test_gates_gates.py @@ -93,11 +93,14 @@ def test_sx(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) + np_final_state_decompose = backend.to_numpy(final_state_decompose) + np_obs = backend.to_numpy(observable) + np_target_state = backend.to_numpy(target_state) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) - @ observable - @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + np.transpose(np.conj(np_final_state_decompose)) + @ np_obs + @ np_final_state_decompose, + np.transpose(np.conj(np_target_state)) @ np_obs @ np_target_state, ) assert gates.SX(0).qasm_label == "sx" @@ -130,11 +133,14 @@ def test_sxdg(backend): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, backend=backend) + np_final_state_decompose = backend.to_numpy(final_state_decompose) + np_obs = backend.to_numpy(observable) + np_target_state = backend.to_numpy(target_state) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) - @ observable - @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + np.transpose(np.conj(np_final_state_decompose)) + @ np_obs + @ np_final_state_decompose, + np.transpose(np.conj(np_target_state)) @ np_obs @ np_target_state, ) assert gates.SXDG(0).qasm_label == "sxdg" @@ -280,8 +286,8 @@ def test_ry(backend, theta): phase = np.exp(1j * theta / 2.0) gate = np.array([[phase.real, -phase.imag], [phase.imag, phase.real]]) - gate = backend.cast(gate, dtype=gate.dtype) - target_state = gate @ initial_state + gate = backend.cast(gate, dtype="complex128") + target_state = gate @ backend.cast(initial_state, dtype="complex128") backend.assert_allclose(final_state, target_state) @@ -427,12 +433,13 @@ def test_u3(backend, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ 
backend.cast(target_state), ) - assert gates.U3(0, theta, phi, lam).qasm_label == "u3" assert not gates.U3(0, theta, phi, lam).clifford assert gates.U3(0, theta, phi, lam).unitary @@ -518,10 +525,12 @@ def test_cy(backend, controlled_by, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ backend.cast(target_state), ) assert gates.CY(0, 1).qasm_label == "cy" @@ -562,10 +571,12 @@ def test_cz(backend, controlled_by, seed_state, seed_observable): # testing random expectation value due to global phase difference observable = random_hermitian(2**nqubits, seed=seed_observable, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) + @ observable + @ backend.cast(target_state), ) assert gates.CZ(0, 1).qasm_label == "cz" @@ -747,7 +758,7 @@ def test_fswap(backend): [0, 1, 0, 0], [0, 0, 0, -1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -809,7 +820,7 @@ def test_sycamore(backend): [0, -1j, 0, 0], [0, 0, 0, np.exp(-1j * np.pi / 6)], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -949,7 +960,7 @@ def test_rzx(backend): [0, 0, cos, 1j * sin], [0, 0, 1j * sin, cos], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -990,7 +1001,7 @@ def test_rxxyy(backend): [0, -1j * sin, cos, 0], [0, 0, 0, 1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) target_state = matrix @ initial_state @@ -1000,10 +1011,10 @@ def test_rxxyy(backend): backend.assert_allclose(final_state, target_state) # testing random expectation value due to global phase difference backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) with pytest.raises(NotImplementedError): @@ -1075,7 +1086,7 @@ def test_givens(backend): [0, np.sin(theta), np.cos(theta), 0], [0, 0, 0, 1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1115,7 +1126,7 @@ def test_rbs(backend): [0, -np.sin(theta), np.cos(theta), 0], [0, 0, 0, 1], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1154,7 +1165,7 @@ def test_ecr(backend): [1, -1j, 0, 0], [-1j, 1, 0, 0], ], - dtype=backend.dtype, + dtype=np.complex128, ) / np.sqrt(2) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1163,10 +1174,10 @@ def test_ecr(backend): # testing random expectation value due to global phase difference 
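# (The decomposed circuit can differ from the target state by a global phase e^{i*phi}; expectation values cancel it, since conj(e^{i*phi}) * e^{i*phi} = 1 inside <psi|O|psi>, so these tests compare <psi|O|psi> for a random Hermitian O rather than raw amplitudes.)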
observable = random_hermitian(2**nqubits, backend=backend) backend.assert_allclose( - np.transpose(np.conj(final_state_decompose)) + backend.cast(np.transpose(np.conj(final_state_decompose))) @ observable @ final_state_decompose, - np.transpose(np.conj(target_state)) @ observable @ target_state, + backend.cast(np.transpose(np.conj(target_state))) @ observable @ target_state, ) with pytest.raises(NotImplementedError): @@ -1218,7 +1229,7 @@ def test_deutsch(backend): [0, 0, 0, 0, 0, 0, 1j * cos, sin], [0, 0, 0, 0, 0, 0, sin, 1j * cos], ], - dtype=backend.dtype, + dtype=np.complex128, ) matrix = backend.cast(matrix, dtype=matrix.dtype) @@ -1245,6 +1256,7 @@ def test_unitary(backend, nqubits): def test_unitary_initialization(backend): + matrix = np.random.random((4, 4)) gate = gates.Unitary(matrix, 0, 1) backend.assert_allclose(gate.parameters[0], matrix) diff --git a/tests/test_hamiltonians.py b/tests/test_hamiltonians.py index a00bb53828..c0089cc59b 100644 --- a/tests/test_hamiltonians.py +++ b/tests/test_hamiltonians.py @@ -4,9 +4,10 @@ import pytest from qibo import Circuit, gates, hamiltonians +from qibo.quantum_info.random_ensembles import random_density_matrix, random_statevector from qibo.symbols import I, Z -from .utils import random_complex, random_sparse_matrix +from .utils import random_sparse_matrix def test_hamiltonian_init(backend): @@ -50,7 +51,7 @@ def transformation_b(a, b): def transformation_c(a, b, use_eye=False): c1 = dtype(4.5) if use_eye: - return a + c1 * backend.matrices.I(a.shape[0]) - b + return a + c1 * backend.to_numpy(backend.matrices.I(a.shape[0])) - b else: return a + c1 - b @@ -58,7 +59,7 @@ def transformation_d(a, b, use_eye=False): c1 = dtype(10.5) c2 = dtype(2) if use_eye: - return c1 * backend.matrices.I(a.shape[0]) - a + c2 * b + return c1 * backend.to_numpy(backend.matrices.I(a.shape[0])) - a + c2 * b else: return c1 - a + c2 * b @@ -69,6 +70,9 @@ def transformation_d(a, b, use_eye=False): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") + mH1 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) mH2 = random_sparse_matrix(backend, 64, sparse_type=sparse_type) H1 = hamiltonians.Hamiltonian(6, mH1, backend=backend) @@ -98,6 +102,8 @@ def test_hamiltonian_addition(backend, sparse_type): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") H1 = hamiltonians.Hamiltonian( 6, random_sparse_matrix(backend, 64, sparse_type=sparse_type), @@ -142,6 +148,8 @@ def test_hamiltonian_operation_errors(backend): @pytest.mark.parametrize("sparse_type", [None, "coo", "csr", "csc", "dia"]) def test_hamiltonian_matmul(backend, sparse_type): """Test matrix multiplication between Hamiltonians.""" + if backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") if sparse_type is None: nqubits = 3 H1 = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) @@ -184,21 +192,22 @@ def test_hamiltonian_matmul_states(backend, sparse_type): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") nqubits = 3 nstates = 2**nqubits matrix = 
random_sparse_matrix(backend, nstates, sparse_type) H = hamiltonians.Hamiltonian(nqubits, matrix, backend=backend) - hm = backend.to_numpy(H.matrix) - v = random_complex(2**nqubits, dtype=hm.dtype) - m = random_complex((2**nqubits, 2**nqubits), dtype=hm.dtype) + hm = H.matrix + v = random_statevector(2**nqubits, backend=backend) + v = backend.cast(v, dtype=hm.dtype) + m = random_density_matrix(2**nqubits, backend=backend) + m = backend.cast(m, dtype=hm.dtype) Hv = H @ backend.cast(v) Hm = H @ backend.cast(m) - backend.assert_allclose(Hv, hm.dot(v), atol=1e-7) # needs atol for cuquantum - backend.assert_allclose(Hm, (hm @ m)) - - Hstate = H @ backend.cast(v) - backend.assert_allclose(Hstate, hm.dot(v)) + backend.assert_allclose(Hv, hm @ v, atol=1e-7) # needs atol for cuquantum + backend.assert_allclose(Hm, hm @ m) @@ -220,18 +229,22 @@ def test_hamiltonian_expectation(backend, dense, density_matrix, sparse_type): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") h = hamiltonians.Hamiltonian( 6, random_sparse_matrix(backend, 64, sparse_type), backend=backend ) matrix = backend.to_numpy(h.matrix) if density_matrix: - state = random_complex((2**h.nqubits, 2**h.nqubits)) + state = random_density_matrix(2**h.nqubits, backend=backend) + state = backend.to_numpy(state) state = state + state.T.conj() norm = np.trace(state) target_ev = np.trace(matrix.dot(state)).real else: - state = random_complex(2**h.nqubits) + state = random_statevector(2**h.nqubits, backend=backend) + state = backend.to_numpy(state) norm = np.sum(np.abs(state) ** 2) target_ev = np.sum(state.conj() * matrix.dot(state)).real @@ -241,7 +254,7 @@ def test_hamiltonian_expectation_errors(backend): h = hamiltonians.XXZ(nqubits=3, delta=0.5, backend=backend) - state = random_complex((4, 4, 4)) + state = np.random.rand(4, 4, 4) + 1j * np.random.rand(4, 4, 4) with pytest.raises(ValueError): h.expectation(state) with pytest.raises(TypeError): @@ -279,7 +292,7 @@ def test_hamiltonian_expectation_from_samples(backend): def test_hamiltonian_expectation_from_samples_errors(backend): - obs = random_complex((4, 4)) + obs = random_density_matrix(4, backend=backend) h = hamiltonians.Hamiltonian(2, obs, backend=backend) with pytest.raises(NotImplementedError): h.expectation_from_samples(None, qubit_map=None) @@ -304,6 +317,8 @@ def test_hamiltonian_eigenvalues(backend, dtype, sparse_type, dense): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse H1 = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -379,6 +394,8 @@ def test_hamiltonian_ground_state(backend, sparse_type, dense): else: if backend.name == "tensorflow": pytest.skip("Tensorflow does not support operations with sparse matrices.") + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse H = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -411,6 +428,8 @@ def construct_hamiltonian(): pytest.skip( "Tensorflow does not support operations with sparse matrices."
) + elif backend.name == "pytorch": + pytest.skip("Pytorch does not support operations with sparse matrices.") from scipy import sparse ham = hamiltonians.XXZ(nqubits=5, delta=0.5, backend=backend) @@ -432,7 +451,7 @@ def test_hamiltonian_energy_fluctuation(backend): ham = hamiltonians.XXZ(nqubits=2, backend=backend) # take ground state and zero state ground_state = ham.ground_state() - zero_state = np.ones(2**2) / np.sqrt(2**2) + zero_state = backend.np.ones(2**2) / np.sqrt(2**2) # collect energy fluctuations gs_energy_fluctuation = ham.energy_fluctuation(ground_state) zs_energy_fluctuation = ham.energy_fluctuation(zero_state) diff --git a/tests/test_hamiltonians_symbolic.py b/tests/test_hamiltonians_symbolic.py index e2b214108f..d9a3b597e4 100644 --- a/tests/test_hamiltonians_symbolic.py +++ b/tests/test_hamiltonians_symbolic.py @@ -5,10 +5,9 @@ import sympy from qibo import Circuit, gates, hamiltonians +from qibo.quantum_info.random_ensembles import random_density_matrix, random_statevector from qibo.symbols import I, Y, Z -from .utils import random_complex - def symbolic_tfim(nqubits, h=1.0): """Constructs symbolic Hamiltonian for TFIM.""" @@ -229,15 +228,11 @@ def test_symbolic_hamiltonian_hamiltonianmatmul(backend, nqubits, calcterms, cal @pytest.mark.parametrize("density_matrix", [False, True]) @pytest.mark.parametrize("calcterms", [False, True]) def test_symbolic_hamiltonian_matmul(backend, nqubits, density_matrix, calcterms): - if density_matrix: - # from qibo.core.states import MatrixState - shape = (2**nqubits, 2**nqubits) - # state = MatrixState.from_tensor(random_complex(shape)) - else: - # from qibo.core.states import VectorState - shape = (2**nqubits,) - # state = VectorState.from_tensor(random_complex(shape)) - state = random_complex(shape) + state = ( + random_density_matrix(2**nqubits, backend=backend) + if density_matrix + else random_statevector(2**nqubits, backend=backend) + ) local_ham = hamiltonians.SymbolicHamiltonian( symbolic_tfim(nqubits, h=1.0), backend=backend ) @@ -265,12 +260,12 @@ def test_symbolic_hamiltonian_state_expectation( _ = local_ham.dense dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) + 2 - state = backend.cast(random_complex((2**nqubits,))) + state = random_statevector(2**nqubits, backend=backend) + local_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(local_ev, target_ev) - state = random_complex((2**nqubits,)) local_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(local_ev, target_ev) @@ -296,27 +291,26 @@ def test_symbolic_hamiltonian_state_expectation_different_nqubits( dense_matrix = np.kron(backend.to_numpy(dense_ham.matrix), np.eye(4)) dense_ham = hamiltonians.Hamiltonian(5, dense_matrix, backend=backend) + state = random_statevector(2**5, backend=backend) + if give_nqubits: - state = backend.cast(random_complex((2**5,))) local_ev = local_ham.expectation(state) target_ev = dense_ham.expectation(state) backend.assert_allclose(local_ev, target_ev) - state = random_complex((2**5,)) local_ev = local_ham.expectation(state) target_ev = dense_ham.expectation(state) backend.assert_allclose(local_ev, target_ev) else: - state = backend.cast(random_complex((2**5,))) with pytest.raises(ValueError): local_ev = local_ham.expectation(state) - state = random_complex((2**5,)) with pytest.raises(ValueError): local_ev = local_ham.expectation(state) def 
test_hamiltonian_expectation_from_samples(backend): """Test Hamiltonian expectation value calculation.""" + backend.set_seed(0) obs0 = 2 * Z(0) * Z(1) + Z(0) * Z(2) obs1 = 2 * Z(0) * Z(1) + Z(0) * Z(2) * I(3) h0 = hamiltonians.SymbolicHamiltonian(obs0, backend=backend) @@ -355,10 +349,12 @@ def test_symbolic_hamiltonian_abstract_symbol_ev(backend, density_matrix, calcte local_ham = hamiltonians.SymbolicHamiltonian(form, backend=backend) if calcterms: _ = local_ham.terms - if density_matrix: - state = backend.cast(random_complex((4, 4))) - else: - state = backend.cast(random_complex((4,))) + + state = ( + random_density_matrix(4, backend=backend) + if density_matrix + else random_statevector(4, backend=backend) + ) local_ev = local_ham.expectation(state) target_ev = local_ham.dense.expectation(state) backend.assert_allclose(local_ev, target_ev) diff --git a/tests/test_hamiltonians_trotter.py b/tests/test_hamiltonians_trotter.py index a3e22551d3..a438a61bd0 100644 --- a/tests/test_hamiltonians_trotter.py +++ b/tests/test_hamiltonians_trotter.py @@ -7,8 +7,6 @@ from qibo.backends import NumpyBackend from qibo.quantum_info import random_hermitian, random_statevector -from .utils import random_complex - @pytest.mark.parametrize("nqubits", [3, 4]) @pytest.mark.parametrize("model", ["TFIM", "XXZ", "Y", "MaxCut"]) @@ -80,15 +78,15 @@ def test_trotter_hamiltonian_operator_add_and_sub(backend, nqubits=3): @pytest.mark.parametrize("nqubits,normalize", [(3, False), (4, False)]) def test_trotter_hamiltonian_matmul(backend, nqubits, normalize): """Test Trotter Hamiltonian expectation value.""" + state = random_statevector(2**nqubits, backend=backend) + local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False, backend=backend) dense_ham = hamiltonians.TFIM(nqubits, h=1.0, backend=backend) - state = backend.cast(random_complex((2**nqubits,))) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) - state = random_complex((2**nqubits,)) trotter_ev = local_ham.expectation(state, normalize) target_ev = dense_ham.expectation(state, normalize) backend.assert_allclose(trotter_ev, target_ev) diff --git a/tests/test_measurements_probabilistic.py b/tests/test_measurements_probabilistic.py index ebe8ec84f3..6b018c3f08 100644 --- a/tests/test_measurements_probabilistic.py +++ b/tests/test_measurements_probabilistic.py @@ -65,6 +65,7 @@ def test_unbalanced_probabilistic_measurement(backend, use_samples): decimal_frequencies = backend.test_regressions( "test_unbalanced_probabilistic_measurement" ) + assert sum(result.frequencies().values()) == 1000 assert_result(backend, result, decimal_frequencies=decimal_frequencies) @@ -145,10 +146,9 @@ def test_measurementresult_apply_bitflips(backend, i, p0, p1): c = models.Circuit(3) c.add(gates.M(*range(3))) - state = np.zeros(8) - state[0] = 1.0 + state = backend.zero_state(3) result = CircuitResult(state, c.measurements, backend) - result._samples = np.zeros((10, 3), dtype="int32") + result._samples = backend.cast(np.zeros((10, 3)), dtype="int32") backend.set_seed(123) noisy_samples = result.apply_bitflips(p0, p1) targets = backend.test_regressions("test_measurementresult_apply_bitflips") diff --git a/tests/test_models_circuit_features.py b/tests/test_models_circuit_features.py index d3e7a5f0a4..2fb310bfea 100644 --- a/tests/test_models_circuit_features.py +++ b/tests/test_models_circuit_features.py @@ -328,12 +328,17 @@ def test_repeated_execute_probs_and_freqs(backend,
nqubits): if nqubits == 1 else Counter({"11": 674, "10": 155, "01": 154, "00": 41}) ) + elif backend.__class__.__name__ == "PyTorchBackend": + test_frequencies = ( + Counter({"1": 817, "0": 207}) + if nqubits == 1 + else Counter({"11": 664, "01": 162, "10": 166, "00": 32}) + ) else: test_frequencies = ( Counter({"1": 790, "0": 234}) if nqubits == 1 else Counter({"11": 618, "10": 169, "01": 185, "00": 52}) ) - for key in dict(test_frequencies).keys(): backend.assert_allclose(result.frequencies()[key], test_frequencies[key]) diff --git a/tests/test_models_dbi.py b/tests/test_models_dbi.py index 71193bd6d9..c52d607456 100644 --- a/tests/test_models_dbi.py +++ b/tests/test_models_dbi.py @@ -103,7 +103,12 @@ def test_hyperopt_step(backend, nqubits): def test_energy_fluctuations(backend): h0 = np.array([[1, 0], [0, -1]]) + h0 = backend.cast(h0, dtype=backend.dtype) + state = np.array([1, 0]) + state = backend.cast(state, dtype=backend.dtype) + dbi = DoubleBracketIteration(Hamiltonian(1, matrix=h0, backend=backend)) energy_fluctuation = dbi.energy_fluctuation(state=state) + assert energy_fluctuation == 0 diff --git a/tests/test_models_encodings.py b/tests/test_models_encodings.py index 98e2ed7143..1d60c7b584 100644 --- a/tests/test_models_encodings.py +++ b/tests/test_models_encodings.py @@ -127,17 +127,14 @@ def test_unary_encoder(backend, nqubits, architecture, kind): # sampling random data in interval [-1, 1] sampler = np.random.default_rng(1) data = 2 * sampler.random(nqubits) - 1 - data = backend.cast(data, dtype=data.dtype) - - if kind is not None: - data = kind(data) + data = kind(data) if kind is not None else backend.cast(data, dtype=data.dtype) circuit = unary_encoder(data, architecture=architecture) state = backend.execute_circuit(circuit).state() indexes = np.flatnonzero(state) - state = np.real(state[indexes]) + state = backend.np.real(state[indexes]) - backend.assert_allclose(state, data / backend.calculate_norm(data, order=2)) + backend.assert_allclose(state, backend.cast(data) / backend.calculate_norm(data, 2)) @pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)]) diff --git a/tests/test_models_variational.py b/tests/test_models_variational.py index ee269e58df..54c731aabd 100644 --- a/tests/test_models_variational.py +++ b/tests/test_models_variational.py @@ -101,6 +101,8 @@ def myloss(parameters, circuit, target): @pytest.mark.parametrize(test_names, test_values) def test_vqe(backend, method, options, compile, filename): """Performs a VQE circuit minimization test.""" + if backend.name == "pytorch": + pytest.skip("Skipping VQE test for pytorch backend.") if (method == "sgd" or compile) and backend.name != "tensorflow": pytest.skip("Skipping SGD test for unsupported backend.") if method != "sgd" and backend.name == "tensorflow": @@ -138,7 +140,7 @@ def test_vqe(backend, method, options, compile, filename): assert_regression_fixture(backend, params, filename) # test energy fluctuation - state = np.ones(2**nqubits) / np.sqrt(2**nqubits) + state = backend.np.ones(2**nqubits) / np.sqrt(2**nqubits) energy_fluctuation = v.energy_fluctuation(state) assert energy_fluctuation >= 0 backend.set_threads(n_threads) @@ -305,6 +307,8 @@ def __call__(self, x): @pytest.mark.parametrize(test_names, test_values) def test_aavqe(backend, method, options, compile, filename): """Performs an AAVQE circuit minimization test.""" + if backend.name == "pytorch": + pytest.skip("Skipping AAVQE test for pytorch backend.") nqubits = 4 layers = 1 circuit = models.Circuit(nqubits) diff --git
a/tests/test_quantum_info_clifford.py b/tests/test_quantum_info_clifford.py index 497de1ebaa..9166d3ad0c 100644 --- a/tests/test_quantum_info_clifford.py +++ b/tests/test_quantum_info_clifford.py @@ -5,7 +5,7 @@ import pytest from qibo import Circuit, gates -from qibo.backends import CliffordBackend, TensorflowBackend +from qibo.backends import CliffordBackend, PyTorchBackend, TensorflowBackend from qibo.backends.clifford import _get_engine_name from qibo.quantum_info._clifford_utils import ( _cnot_cost, @@ -17,22 +17,24 @@ def construct_clifford_backend(backend): - if isinstance(backend, TensorflowBackend): + if ( + isinstance(backend, (TensorflowBackend, PyTorchBackend)) + or backend.__class__.__name__ == "CuQuantumBackend" + ): with pytest.raises(NotImplementedError): clifford_backend = CliffordBackend(backend.name) - else: - return CliffordBackend(_get_engine_name(backend)) + pytest.skip("Clifford backend not defined for this engine.") + + return CliffordBackend(_get_engine_name(backend)) @pytest.mark.parametrize("nqubits", [2, 10, 50, 100]) def test_clifford_from_symplectic_matrix(backend, nqubits): clifford_backend = construct_clifford_backend(backend) - if not clifford_backend: - return - engine = _get_engine_name(backend) + symplectic_matrix = clifford_backend.zero_state(nqubits) - clifford_1 = Clifford(symplectic_matrix, engine=engine) - clifford_2 = Clifford(symplectic_matrix[:-1], engine=engine) + clifford_1 = Clifford(symplectic_matrix, engine=_get_engine_name(backend)) + clifford_2 = Clifford(symplectic_matrix[:-1], engine=_get_engine_name(backend)) for clifford in [clifford_1, clifford_2]: backend.assert_allclose( @@ -62,8 +64,7 @@ def test_clifford_from_circuit(backend, measurement): @pytest.mark.parametrize("algorithm", ["AG04", "BM20"]) @pytest.mark.parametrize("nqubits", [1, 2, 3, 10, 50]) def test_clifford_to_circuit(backend, nqubits, algorithm, seed): - if backend.__class__.__name__ == "TensorflowBackend": - pytest.skip("CliffordBackend not defined for Tensorflow engine.") + clifford_backend = construct_clifford_backend(backend) clifford = random_clifford(nqubits, seed=seed, backend=backend) @@ -115,6 +116,8 @@ def test_clifford_initialization(backend, nqubits): if backend.__class__.__name__ == "TensorflowBackend": pytest.skip("CliffordBackend not defined for Tensorflow engine.") + elif backend.__class__.__name__ == "PyTorchBackend": + pytest.skip("CliffordBackend not defined for PyTorch engine.") clifford_backend = construct_clifford_backend(backend) @@ -310,22 +313,19 @@ def test_clifford_samples_frequencies(backend, binary): def test_clifford_samples_error(backend): + clifford_backend = construct_clifford_backend(backend) + c = random_clifford(1, backend=backend) - if isinstance(backend, TensorflowBackend): - with pytest.raises(NotImplementedError): - clifford_backend = CliffordBackend(backend) - else: - obj = Clifford.from_circuit(c, engine=_get_engine_name(backend)) - with pytest.raises(RuntimeError) as excinfo: - obj.samples() - assert str(excinfo.value) == "No measurement provided." + obj = Clifford.from_circuit(c, engine=_get_engine_name(backend)) + with pytest.raises(RuntimeError) as excinfo: + obj.samples() + assert str(excinfo.value) == "No measurement provided."
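# For reference, a minimal sketch of the engine guard these tests exercise, using only calls that appear in this diff (CliffordBackend takes an engine name; Tensorflow/PyTorch/CuQuantum engines raise NotImplementedError at construction). Illustrative sketch, not part of the change set:
#
#     from qibo.backends import CliffordBackend
#
#     try:
#         clifford_bkd = CliffordBackend("numpy")  # numpy-compatible engine
#     except NotImplementedError:  # e.g. a "tensorflow" or "pytorch" engine
#         clifford_bkd = None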
@pytest.mark.parametrize("deep", [False, True]) @pytest.mark.parametrize("nqubits", [1, 10, 100]) def test_clifford_copy(backend, nqubits, deep): - if backend.__class__.__name__ == "TensorflowBackend": - pytest.skip("CliffordBackend not defined for Tensorflow engine.") + clifford_backend = construct_clifford_backend(backend) circuit = random_clifford(nqubits, backend=backend) clifford = Clifford.from_circuit(circuit, engine=_get_engine_name(backend)) @@ -344,7 +344,7 @@ def test_clifford_copy(backend, nqubits, deep): @pytest.mark.parametrize("pauli_2", ["Z", "Y", "Y"]) @pytest.mark.parametrize("pauli_1", ["X", "Y", "Z"]) -def test_one_qubit_paulis_string_product(backend, pauli_1, pauli_2): +def test_one_qubit_paulis_string_product(pauli_1, pauli_2): products = { "XY": "iZ", "YZ": "iX", @@ -379,7 +379,7 @@ def test_one_qubit_paulis_string_product(backend, pauli_1, pauli_2): [["iY", "iX"], "iZ"], ], ) -def test_string_product(backend, operators, target): +def test_string_product(operators, target): product = _string_product(operators) assert product == target diff --git a/tests/test_quantum_info_entropies.py b/tests/test_quantum_info_entropies.py index 3cf8b2d721..5013f4f70d 100644 --- a/tests/test_quantum_info_entropies.py +++ b/tests/test_quantum_info_entropies.py @@ -54,6 +54,7 @@ def test_shannon_entropy_errors(backend): @pytest.mark.parametrize("base", [2, 10, np.e, 5]) def test_shannon_entropy(backend, base): prob_array = [1.0, 0.0] + prob_array = backend.cast(prob_array, dtype=np.float64) result = shannon_entropy(prob_array, base, backend=backend) backend.assert_allclose(result, 0.0) @@ -115,6 +116,9 @@ def test_classical_relative_entropy(backend, base, kind): if kind is not None: prob_p, prob_q = kind(prob_p), kind(prob_q) + else: + prob_p = np.real(backend.cast(prob_p)) + prob_q = np.real(backend.cast(prob_q)) divergence = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) @@ -168,7 +172,9 @@ def test_classical_renyi_entropy(backend, alpha, base, kind): if alpha == 0.0: target = np.log2(len(prob_dist)) / np.log2(base) elif alpha == 1: - target = shannon_entropy(prob_dist, base=base, backend=backend) + target = shannon_entropy( + backend.cast(prob_dist, dtype=np.float64), base=base, backend=backend + ) elif alpha == 2: target = -1 * np.log2(np.sum(prob_dist**2)) / np.log2(base) elif alpha == np.inf: @@ -178,6 +184,8 @@ def test_classical_renyi_entropy(backend, alpha, base, kind): if kind is not None: prob_dist = kind(prob_dist) + else: + prob_dist = np.real(backend.cast(prob_dist)) renyi_ent = classical_renyi_entropy(prob_dist, alpha, base=base, backend=backend) @@ -261,7 +269,12 @@ def test_classical_relative_renyi_entropy(backend, alpha, base, kind): if alpha == 0.5: target = -2 * np.log2(np.sum(np.sqrt(prob_p * prob_q))) / np.log2(base) elif alpha == 1.0: - target = classical_relative_entropy(prob_p, prob_q, base=base, backend=backend) + target = classical_relative_entropy( + np.real(backend.cast(prob_p)), + np.real(backend.cast(prob_q)), + base=base, + backend=backend, + ) elif alpha == np.inf: target = np.log2(max(prob_p / prob_q)) / np.log2(base) else: @@ -273,6 +286,9 @@ def test_classical_relative_renyi_entropy(backend, alpha, base, kind): if kind is not None: prob_p, prob_q = kind(prob_p), kind(prob_q) + else: + prob_p = np.real(backend.cast(prob_p)) + prob_q = np.real(backend.cast(prob_q)) divergence = classical_relative_renyi_entropy( prob_p, prob_q, alpha=alpha, base=base, backend=backend @@ -326,12 +342,16 @@ def 
test_classical_tsallis_entropy(backend, alpha, base, kind): prob_dist /= np.sum(prob_dist) if alpha == 1.0: - target = shannon_entropy(prob_dist, base=base, backend=backend) + target = shannon_entropy( + np.real(backend.cast(prob_dist)), base=base, backend=backend + ) else: target = (1 / (1 - alpha)) * (np.sum(prob_dist**alpha) - 1) if kind is not None: prob_dist = kind(prob_dist) + else: + prob_dist = np.real(backend.cast(prob_dist)) backend.assert_allclose( classical_tsallis_entropy(prob_dist, alpha=alpha, base=base, backend=backend), diff --git a/tests/test_quantum_info_random.py b/tests/test_quantum_info_random.py index 0749723637..27c250765e 100644 --- a/tests/test_quantum_info_random.py +++ b/tests/test_quantum_info_random.py @@ -56,7 +56,8 @@ def test_uniform_sampling_U3(backend, seed): ] ) expectation_values = backend.cast(expectation_values) - expectation_values = np.mean(expectation_values, axis=0) + + expectation_values = backend.np.mean(expectation_values, axis=0) backend.assert_allclose(expectation_values[0], expectation_values[1], atol=1e-1) backend.assert_allclose(expectation_values[0], expectation_values[2], atol=1e-1) @@ -150,42 +151,38 @@ def test_random_hermitian(backend): backend.assert_allclose(all(eigenvalues <= 1), True) -def test_random_unitary(backend): +@pytest.mark.parametrize("measure", [None, "haar"]) +def test_random_unitary(backend, measure): with pytest.raises(TypeError): dims = np.array([1]) - random_unitary(dims, backend=backend) + random_unitary(dims, measure=measure, backend=backend) with pytest.raises(TypeError): dims = 2 - measure = 1 - random_unitary(dims, measure, backend=backend) + random_unitary(dims, measure=1, backend=backend) with pytest.raises(ValueError): dims = 0 - random_unitary(dims, backend=backend) + random_unitary(dims, measure=measure, backend=backend) with pytest.raises(ValueError): dims = 2 random_unitary(dims, measure="gaussian", backend=backend) with pytest.raises(TypeError): dims = 2 - random_unitary(dims=2, seed=0.1, backend=backend) + random_unitary(dims=2, measure=measure, seed=0.1, backend=backend) # tests if operator is unitary (measure == "haar") dims = 4 - matrix = random_unitary(dims, backend=backend) + matrix = random_unitary(dims, measure=measure, backend=backend) matrix_dagger = np.transpose(np.conj(matrix)) - matrix_inv = np.linalg.inv(matrix) + matrix_inv = ( + backend.np.inverse(matrix) + if backend.name == "pytorch" + else np.linalg.inv(matrix) + ) norm = float( backend.calculate_norm_density_matrix(matrix_inv - matrix_dagger, order=2) ) backend.assert_allclose(norm < PRECISION_TOL, True) - # tests if operator is unitary (measure == None) - dims, measure = 4, None - matrix = random_unitary(dims, measure, backend=backend) - matrix_dagger = np.transpose(np.conj(matrix)) - matrix_inv = np.linalg.inv(matrix) - norm = float(backend.calculate_norm(matrix_inv - matrix_dagger, order=2)) - backend.assert_allclose(norm < PRECISION_TOL, True) - @pytest.mark.parametrize("order", ["row", "column"]) @pytest.mark.parametrize("rank", [None, 4]) @@ -465,8 +462,9 @@ def test_random_pauli( ) else: matrix = np.transpose(matrix, (1, 0, 2, 3)) - matrix = [reduce(np.kron, row) for row in matrix] - matrix = reduce(np.dot, matrix) + matrix = [reduce(backend.np.kron, row) for row in matrix] + dot = backend.np.matmul if backend.name == "pytorch" else np.dot + matrix = reduce(dot, matrix) if subset is None: backend.assert_allclose( @@ -560,7 +558,7 @@ def test_random_stochastic_matrix(backend): # tests if matrix is row-stochastic dims = 4 
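# (Row-stochastic means every row of the matrix is a probability distribution: non-negative entries with each row summing to 1, which the sum_rows checks below verify to within PRECISION_TOL. A hypothetical numpy-only illustration of the property, with `m` standing in for the generated matrix:
#
#     m = np.random.rand(4, 4)
#     m /= m.sum(axis=1, keepdims=True)  # normalize each row to sum to 1
#     assert np.allclose(m.sum(axis=1), 1.0)
# )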
diff --git a/tests/test_quantum_info_superoperator_transformations.py b/tests/test_quantum_info_superoperator_transformations.py
index 5923181db9..9cb316e1ae 100644
--- a/tests/test_quantum_info_superoperator_transformations.py
+++ b/tests/test_quantum_info_superoperator_transformations.py
@@ -396,8 +396,8 @@ def test_choi_to_kraus(
     test_a0 = backend.cast(test_a0, dtype=test_a0.dtype)
     test_a1 = backend.cast(test_a1, dtype=test_a1.dtype)
 
-    test_kraus_left = backend.cast(test_kraus_left, dtype=test_kraus_left.dtype)
-    test_kraus_right = backend.cast(test_kraus_right, dtype=test_kraus_right.dtype)
+    test_kraus_left = backend.cast(test_kraus_left, dtype=backend.dtype)
+    test_kraus_right = backend.cast(test_kraus_right, dtype=backend.dtype)
 
     state = random_density_matrix(2, backend=backend)
@@ -790,8 +790,8 @@ def test_pauli_to_choi(backend, normalize, order, pauli_order, test_superop):
     dim = int(np.sqrt(test_pauli.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype)
-    test_superop = backend.cast(test_superop, dtype=test_superop.dtype)
+    test_pauli = backend.cast(test_pauli, dtype=backend.dtype)
+    test_superop = backend.cast(test_superop, dtype=backend.dtype)
     choi_super_op = pauli_to_choi(
         test_pauli / aux, normalize, order, pauli_order, backend=backend
     )
@@ -813,7 +813,7 @@ def test_pauli_to_kraus(backend, normalize, order, pauli_order, test_a0, test_a1):
     dim = int(np.sqrt(test_pauli.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype)
+    test_pauli = backend.cast(test_pauli, dtype=backend.dtype)
 
     kraus_ops, _ = pauli_to_kraus(
         test_pauli / aux,
@@ -852,8 +852,8 @@ def test_pauli_to_chi(backend, normalize, order, pauli_order):
     dim = int(np.sqrt(test_pauli.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
-    test_pauli = backend.cast(test_pauli / aux, dtype=test_pauli.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
+    test_pauli = backend.cast(test_pauli / aux, dtype=backend.dtype)
 
     chi_matrix = pauli_to_chi(
         test_pauli, normalize, order, pauli_order, backend=backend
@@ -881,12 +881,12 @@ def test_pauli_to_stinespring(
     test_a1,
 ):
     test_pauli = pauli_superop(pauli_order)
-    test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype)
+    test_pauli = backend.cast(test_pauli, dtype=backend.dtype)
 
     dim = 2**nqubits
     aux = dim**2 if normalize is False else dim
 
-    test_a0 = backend.cast(test_a0, dtype=test_a0.dtype)
-    test_a1 = backend.cast(test_a1, dtype=test_a1.dtype)
+    test_a0 = backend.cast(test_a0, dtype=backend.dtype)
+    test_a1 = backend.cast(test_a1, dtype=backend.dtype)
 
     stinespring = pauli_to_stinespring(
         test_pauli,
@@ -936,8 +936,8 @@ def test_chi_to_choi(backend, normalize, order, pauli_order, test_superop):
     dim = int(np.sqrt(test_chi.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
-    test_superop = backend.cast(test_superop, dtype=test_superop.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
+    test_superop = backend.cast(test_superop, dtype=backend.dtype)
 
     axes = [1, 2] if order == "row" else [0, 3]
     test_choi = np.swapaxes(np.reshape(test_superop, [2] * 4), *axes).reshape([4, 4])
@@ -958,8 +958,8 @@ def test_chi_to_liouville(backend, normalize, order, pauli_order, test_superop):
     dim = int(np.sqrt(test_chi.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
-    test_superop = backend.cast(test_superop, dtype=test_superop.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
+    test_superop = backend.cast(test_superop, dtype=backend.dtype)
 
     super_op = chi_to_liouville(
         test_chi / aux, normalize, order, pauli_order, backend=backend
@@ -977,8 +977,8 @@ def test_chi_to_pauli(backend, normalize, order, pauli_order):
     dim = int(np.sqrt(test_pauli.shape[0]))
     aux = 1.0 if normalize else dim**2
 
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
-    test_pauli = backend.cast(test_pauli, dtype=test_pauli.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
+    test_pauli = backend.cast(test_pauli, dtype=backend.dtype)
 
     pauli_op = chi_to_pauli(
         test_chi / aux, normalize, order, pauli_order, backend=backend
@@ -997,7 +997,7 @@ def test_chi_to_kraus(backend, normalize, order, pauli_order, test_a0, test_a1):
     dim = int(np.sqrt(test_chi.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
 
     kraus_ops, _ = chi_to_kraus(
         test_chi / aux, normalize, order=order, pauli_order=pauli_order, backend=backend
@@ -1034,12 +1034,12 @@ def test_chi_to_stinespring(
     backend, normalize, order, pauli_order, validate_cp, nqubits, test_a0, test_a1
 ):
     test_chi = chi_superop(pauli_order)
-    test_chi = backend.cast(test_chi, dtype=test_chi.dtype)
+    test_chi = backend.cast(test_chi, dtype=backend.dtype)
 
     dim = int(np.sqrt(test_chi.shape[0]))
     aux = dim**2 if normalize == False else dim
 
-    test_a0 = backend.cast(test_a0, dtype=test_a0.dtype)
-    test_a1 = backend.cast(test_a1, dtype=test_a1.dtype)
+    test_a0 = backend.cast(test_a0, dtype=backend.dtype)
+    test_a1 = backend.cast(test_a1, dtype=backend.dtype)
 
     stinespring = chi_to_stinespring(
         test_chi,
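The recurring change in this file swaps `dtype=<fixture>.dtype` for `dtype=backend.dtype`,
the backend's working (complex) dtype, so real-valued test fixtures do not collide with
complex intermediate results later in each test. A short sketch using qibo's
`NumpyBackend` (the printed dtypes assume the default `complex128` precision):

```python
import numpy as np

from qibo.backends import NumpyBackend

backend = NumpyBackend()

test_superop = np.eye(4)  # real-valued fixture
print(test_superop.dtype)  # float64

# Casting with backend.dtype promotes the fixture to the dtype every
# other operator in the test pipeline already uses.
test_superop = backend.cast(test_superop, dtype=backend.dtype)
print(test_superop.dtype)  # complex128
```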
diff --git a/tests/test_quantum_info_utils.py b/tests/test_quantum_info_utils.py
index 0d845cf2f3..bc93c230cf 100644
--- a/tests/test_quantum_info_utils.py
+++ b/tests/test_quantum_info_utils.py
@@ -167,8 +167,12 @@ def test_hellinger(backend, validate, kind):
         backend.calculate_norm(np.sqrt(prob_p) - np.sqrt(prob_q)) / np.sqrt(2)
     )
 
-    if kind is not None:
-        prob_p, prob_q = list(prob_p), list(prob_q)
+    prob_p = (
+        kind(prob_p) if kind is not None else backend.cast(prob_p, dtype=prob_p.dtype)
+    )
+    prob_q = (
+        kind(prob_q) if kind is not None else backend.cast(prob_q, dtype=prob_q.dtype)
+    )
 
     distance = hellinger_distance(prob_p, prob_q, validate=validate, backend=backend)
     fidelity = hellinger_fidelity(prob_p, prob_q, validate=validate, backend=backend)
diff --git a/tests/test_result.py b/tests/test_result.py
index f85a0c1a64..53d274d896 100644
--- a/tests/test_result.py
+++ b/tests/test_result.py
@@ -98,5 +98,5 @@ def test_circuitresult_dump_load(backend, agnostic_load):
     loaded_freq = loaded_res.frequencies()
     for state, f in freq.items():
         assert loaded_freq[state] == f
-    assert np.sum(result.state() - loaded_res.state()) == 0
+    assert backend.np.sum(result.state() - loaded_res.state()) == 0
     remove("tmp.npy")
diff --git a/tests/test_transpiler_unitary_decompositions.py b/tests/test_transpiler_unitary_decompositions.py
index 9d1bde2f3e..c72591c72c 100644
--- a/tests/test_transpiler_unitary_decompositions.py
+++ b/tests/test_transpiler_unitary_decompositions.py
@@ -107,14 +107,18 @@ def test_ud_eigenvalues(backend, seed):
         magic_decomposition(unitary, backend=backend)
     else:
         ua, ub, ud, va, vb = magic_decomposition(unitary, backend=backend)
-
-        unitary_recon = np.kron(ua, ub) @ ud @ np.kron(va, vb)
+        # Check kron
+        unitary_recon = backend.np.kron(ua, ub) @ ud @ backend.np.kron(va, vb)
         backend.assert_allclose(unitary_recon, unitary)
 
-        ud_bell = np.transpose(np.conj(bell_basis)) @ ud @ bell_basis
-        ud_diag = np.diag(ud_bell)
-        backend.assert_allclose(np.diag(ud_diag), ud_bell, atol=PRECISION_TOL)
-        backend.assert_allclose(np.prod(ud_diag), 1)
+        ud_bell = (
+            backend.np.transpose(backend.np.conj(backend.cast(bell_basis)), (1, 0))
+            @ ud
+            @ backend.cast(bell_basis)
+        )
+        ud_diag = backend.np.diag(ud_bell)
+        backend.assert_allclose(backend.np.diag(ud_diag), ud_bell, atol=PRECISION_TOL)
+        backend.assert_allclose(backend.np.prod(ud_diag), 1)
 
 
 @pytest.mark.parametrize("seed", [None, 10, np.random.default_rng(10)])
@@ -189,9 +193,9 @@ def test_two_qubit_decomposition_bell_unitary(backend, hz_zero):
     hx, hy, hz = (2 * np.random.random(3) - 1) * np.pi
     if hz_zero:
         hz = 0
-    unitary = bell_unitary(hx, hy, hz)
+    unitary = backend.cast(bell_unitary(hx, hy, hz))
     c = Circuit(2)
-    c.add(two_qubit_decomposition(0, 1, unitary))
+    c.add(two_qubit_decomposition(0, 1, unitary, backend=backend))
     final_matrix = c.unitary(backend)
     backend.assert_allclose(final_matrix, unitary, atol=PRECISION_TOL)
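The rewritten Bell-basis assertions check that `ud` is diagonal in the magic basis and
that its diagonal entries multiply to one. A NumPy-only sketch of the diagonality
property being asserted, with a random unitary `basis` standing in for `bell_basis`
(all names illustrative):

```python
import numpy as np

# Build a random unitary basis and a matrix that is diagonal in it.
rng = np.random.default_rng(0)
basis, _ = np.linalg.qr(rng.random((4, 4)) + 1j * rng.random((4, 4)))
phases = np.exp(2j * np.pi * rng.random(4))
ud = basis @ np.diag(phases) @ basis.conj().T

# Conjugating back by the basis must recover a diagonal matrix.
ud_bell = basis.conj().T @ ud @ basis
ud_diag = np.diag(ud_bell)
assert np.allclose(np.diag(ud_diag), ud_bell)  # off-diagonal part vanishes
```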
diff --git a/tests/utils.py b/tests/utils.py
index 080eb35a63..5d77b812dd 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -2,13 +2,6 @@
 from scipy import sparse
 
 
-def random_complex(shape, dtype=None):
-    x = np.random.random(shape) + 1j * np.random.random(shape)
-    if dtype is None:
-        return x
-    return x.astype(dtype)
-
-
 def random_sparse_matrix(backend, n, sparse_type=None):
     if backend.name == "tensorflow":
         nonzero = int(0.1 * n * n)
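`random_complex` is deleted above as dead code, the tests having moved to the
`qibo.quantum_info` random-matrix generators seen earlier in this diff. For reference,
an equivalent standalone sketch of the removed helper, should an ad-hoc complex array
ever be needed again:

```python
import numpy as np


def random_complex(shape, dtype=None):
    """Uniformly random complex array; mirrors the deleted tests/utils.py helper."""
    x = np.random.random(shape) + 1j * np.random.random(shape)
    return x if dtype is None else x.astype(dtype)


print(random_complex((2, 2)).dtype)  # complex128
```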