From 364acbfb7b229e8f2185573997f4426957d88517 Mon Sep 17 00:00:00 2001
From: simone bordoni
Date: Mon, 16 Sep 2024 16:55:07 +0400
Subject: [PATCH] merge master

---
 doc/source/code-examples/test.py | 38 ++++++++++++++++++++++++++++++++++++++
 src/qibo/backends/pytorch.py     |  6 +++--
 2 files changed, 42 insertions(+), 2 deletions(-)
 create mode 100644 doc/source/code-examples/test.py

diff --git a/doc/source/code-examples/test.py b/doc/source/code-examples/test.py
new file mode 100644
index 0000000000..090ae0d9fd
--- /dev/null
+++ b/doc/source/code-examples/test.py
@@ -0,0 +1,38 @@
+import qibo
+
+qibo.set_backend("pytorch")
+import torch
+
+from qibo import gates, models
+
+torch.set_anomaly_enabled(True)
+
+# Optimization parameters
+nepochs = 1
+optimizer = torch.optim.Adam
+target_state = torch.ones(4, dtype=torch.complex128) / 2.0
+
+# Define circuit ansatz
+params = torch.rand(2, dtype=torch.float64, requires_grad=True)
+print(params)
+optimizer = optimizer([params])
+c = models.Circuit(2)
+c.add(gates.RX(0, params[0]))
+c.add(gates.RY(1, params[1]))
+gate = gates.RY(0, params[1])
+
+print("Gate", gate.matrix())
+print(torch.norm(gate.matrix()).grad)
+
+# for _ in range(nepochs):
+#     optimizer.zero_grad()
+#     c.set_parameters(params)
+#     final_state = c().state()
+#     print("state", final_state)
+#     fidelity = torch.abs(torch.sum(torch.conj(target_state) * final_state))
+#     loss = 1 - fidelity
+#     loss.backward()
+#     optimizer.step()
+# print("state", final_state)
+# print("params", params)
+# print("loss", loss.grad)
diff --git a/src/qibo/backends/pytorch.py b/src/qibo/backends/pytorch.py
index 392aeed405..e57977a1ee 100644
--- a/src/qibo/backends/pytorch.py
+++ b/src/qibo/backends/pytorch.py
@@ -1,5 +1,7 @@
 """PyTorch backend."""
 
+from typing import Optional
+
 import numpy as np
 
 from qibo import __version__
@@ -85,7 +87,7 @@ def cast(
         x,
         dtype=None,
         copy: bool = False,
-        requires_grad: bool = None,
+        requires_grad: Optional[bool] = None,
     ):
         """Casts input as a Torch tensor of the specified dtype.
 
@@ -117,7 +119,6 @@ def cast(
         # check if dtype is an integer to remove gradients
         if dtype in [self.np.int32, self.np.int64, self.np.int8, self.np.int16]:
             requires_grad = False
-
         if isinstance(x, self.np.Tensor):
             x = x.to(dtype)
         elif isinstance(x, list) and all(isinstance(row, self.np.Tensor) for row in x):
@@ -128,6 +129,7 @@ def cast(
 
         if copy:
             return x.clone()
+        print("Casting", x)
         return x
 
     def is_sparse(self, x):
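
Note (not part of the patch): the commented-out block in test.py reassembles into the minimal sketch below of the intended optimization loop. The epoch count is illustrative, and the sketch assumes gradients propagate from the circuit state back to params through the pytorch backend, which is exactly what this patch is probing.

import torch

import qibo
from qibo import gates, models

qibo.set_backend("pytorch")

# Target state and trainable parameters, as in test.py.
target_state = torch.ones(4, dtype=torch.complex128) / 2.0
params = torch.rand(2, dtype=torch.float64, requires_grad=True)
optimizer = torch.optim.Adam([params])

# Two-qubit ansatz matching the one defined in test.py.
c = models.Circuit(2)
c.add(gates.RX(0, params[0]))
c.add(gates.RY(1, params[1]))

for _ in range(100):  # hypothetical number of epochs
    optimizer.zero_grad()
    c.set_parameters(params)
    final_state = c().state()
    # Fidelity with the target state; maximizing it minimizes the loss.
    fidelity = torch.abs(torch.sum(torch.conj(target_state) * final_state))
    loss = 1 - fidelity
    loss.backward()
    optimizer.step()

print("params", params)
print("loss", float(loss))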