Fix pytorch gradients #1450

Merged: 50 commits merged into master from fix_autodiff on Oct 10, 2024

Changes from 1 commit

Commits (50)
0f800b2  create pr (Simone-Bordoni, Aug 26, 2024)
5281882  torch example (Simone-Bordoni, Sep 10, 2024)
ff9fab6  Merge branch 'master' into vqe_example (Simone-Bordoni, Sep 10, 2024)
b02222c  Merge branch 'master' into vqe_example (Simone-Bordoni, Sep 16, 2024)
364acbf  merge master (Simone-Bordoni, Sep 16, 2024)
9c96d41  refactor casting parameters (Simone-Bordoni, Sep 17, 2024)
b0d805c  gradients passing but value is zero... (Simone-Bordoni, Sep 17, 2024)
0e193a4  working gradient (Simone-Bordoni, Sep 17, 2024)
9ab0e99  Merge branch 'master' into fix_autodiff (Simone-Bordoni, Sep 23, 2024)
061a43d  solved errors (Simone-Bordoni, Sep 23, 2024)
3ce8a2d  solve errors (Simone-Bordoni, Sep 24, 2024)
21b08c1  fixed last tests (Simone-Bordoni, Sep 24, 2024)
cba7a01  fix coverage (Simone-Bordoni, Sep 25, 2024)
ad25c2c  improve coverage (Simone-Bordoni, Sep 25, 2024)
af6cdf0  Update src/qibo/backends/numpy.py (renatomello, Sep 26, 2024)
53931be  Update src/qibo/backends/numpy.py (renatomello, Sep 26, 2024)
502f0a2  Update src/qibo/gates/gates.py (renatomello, Sep 26, 2024)
3fdf63b  Update src/qibo/gates/gates.py (renatomello, Sep 26, 2024)
2f80f99  improve docstring (renatomello, Sep 26, 2024)
3cf4a8c  Merge branch 'fix_autodiff' of github.com:qiboteam/qibo into fix_auto… (renatomello, Sep 26, 2024)
612d138  Merge branch 'master' into fix_autodiff (renatomello, Sep 26, 2024)
5577d51  some corrections by renato (Simone-Bordoni, Sep 30, 2024)
23d8204  Merge branch 'master' into fix_autodiff (renatomello, Sep 30, 2024)
b77723c  corrections by renato (Simone-Bordoni, Sep 30, 2024)
e3d96f3  added test with gradients (Simone-Bordoni, Sep 30, 2024)
0a76d55  solve errors (Simone-Bordoni, Sep 30, 2024)
4cf7cdc  corrections by andrea (Simone-Bordoni, Oct 1, 2024)
e57a0b1  example of Andrea's suggestion (renatomello, Oct 1, 2024)
2c871d0  other corrections (Simone-Bordoni, Oct 1, 2024)
c790480  fix torch test (Simone-Bordoni, Oct 1, 2024)
524fd6c  use infidelity from quantum info in test (Simone-Bordoni, Oct 1, 2024)
77f29c9  test gradients only on linux (Simone-Bordoni, Oct 2, 2024)
23877f3  Merge branch 'master' into fix_autodiff (renatomello, Oct 2, 2024)
43db105  Merge branch 'fix_autodiff' of github.com:qiboteam/qibo into fix_auto… (renatomello, Oct 2, 2024)
a7a481e  corrections by andrea (Simone-Bordoni, Oct 3, 2024)
e7b7217  more corrections (Simone-Bordoni, Oct 3, 2024)
3b731fd  restore error in circuit quasm (Simone-Bordoni, Oct 3, 2024)
7e9c1e8  remove requires_grad from cast (Simone-Bordoni, Oct 3, 2024)
c6efb1f  Merge branch 'master' into fix_autodiff (renatomello, Oct 4, 2024)
2e2787b  Merge branch 'fix_autodiff' of github.com:qiboteam/qibo into fix_auto… (renatomello, Oct 4, 2024)
b48928a  fix tests (Simone-Bordoni, Oct 4, 2024)
96f6b75  Merge branch 'master' into fix_autodiff (renatomello, Oct 7, 2024)
699fd26  Merge branch 'master' into fix_autodiff (renatomello, Oct 7, 2024)
0b77687  Merge branch 'master' into fix_autodiff (renatomello, Oct 7, 2024)
571c701  Merge branch 'master' into fix_autodiff (renatomello, Oct 8, 2024)
acdc560  fix merge (renatomello, Oct 8, 2024)
0be1e8c  Merge branch 'master' into fix_autodiff (renatomello, Oct 9, 2024)
c01cd33  last corrections by andrea (Simone-Bordoni, Oct 9, 2024)
71bd989  Merge branch 'master' into fix_autodiff (renatomello, Oct 9, 2024)
3f22d8d  fix test (Simone-Bordoni, Oct 9, 2024)
src/qibo/backends/numpy.py (0 additions, 3 deletions)

@@ -167,7 +167,6 @@ def matrix_fused(self, fgate):
     def apply_gate(self, gate, state, nqubits):
         state = self.np.reshape(state, nqubits * (2,))
         matrix = gate.matrix(self)
-        print("matrix applied:", matrix)
         if gate.is_controlled_by:
             matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,))
             ncontrol = len(gate.control_qubits)
@@ -437,10 +436,8 @@ def execute_circuit(self, circuit, initial_state=None, nshots=1000):
         if initial_state is None:
             state = self.zero_state(nqubits)

-        print("state before circuit execution:", state)
         for gate in circuit.queue:
             state = gate.apply(self, state, nqubits)
-        print("state during circuit execution:", state)

         if circuit.has_unitary_channel:
             # here we necessarily have `density_matrix=True`, otherwise
src/qibo/backends/pytorch.py (23 additions, 12 deletions)

@@ -121,23 +121,34 @@ def matrix_parametrized(self, gate):
         name = gate.__class__.__name__
         _matrix = getattr(self.matrices, name)
         if name == "GeneralizedRBS":
+            for parameter in ["theta", "phi"]:
+                if not isinstance(gate.init_kwargs[parameter], self.np.Tensor):
+                    gate.init_kwargs[parameter] = self.cast_parameter(
+                        gate.init_kwargs[parameter], trainable=gate.trainable
+                    )
+                elif gate.init_kwargs[parameter].requires_grad == True:
+                    gate.trainable = True
+                else:
+                    gate.trainable = False
             _matrix = _matrix(
                 qubits_in=gate.init_args[0],
                 qubits_out=gate.init_args[1],
-                theta=self.cast_parameter(
-                    gate.init_kwargs["theta"], trainable=gate.trainable
-                ),
-                phi=self.cast_parameter(
-                    gate.init_kwargs["phi"], trainable=gate.trainable
-                ),
+                theta=gate.init_kwargs["theta"],
+                phi=gate.init_kwargs["phi"],
             )
         else:
-            parameters = (
-                self.cast_parameter(param, trainable=gate.trainable)
-                for param in gate.parameters
-            )
-            _matrix = _matrix(*parameters)
-            print("parameterized matrix:", _matrix)
+            if not isinstance(gate.parameters[0], self.np.Tensor):
+                parameters = tuple(
+                    self.cast_parameter(param, trainable=gate.trainable)
+                    for param in gate.parameters
+                )
+                gate.parameters = parameters
+            elif gate.parameters[0].requires_grad == True:
+                gate.trainable = True
+                print("gate.parameters:", *gate.parameters)
+            else:
+                gate.trainable = False
+            _matrix = _matrix(*gate.parameters)
         return _matrix

     def cast_parameter(self, x, trainable):
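Note on the change above: the old code re-cast every parameter through cast_parameter, which creates a fresh tensor, so gradients no longer reached the tensors the user supplied. The new branch casts only raw numbers and, for parameters that are already torch tensors, leaves them untouched and derives gate.trainable from their requires_grad flag. A minimal standalone sketch of that dispatch pattern (resolve_parameter is a hypothetical helper for illustration, not qibo API):

import torch

def resolve_parameter(param, trainable=True):
    # Hypothetical helper mirroring the dispatch above: cast raw numbers,
    # but leave user-supplied tensors untouched so autograd stays connected.
    if not isinstance(param, torch.Tensor):
        # Plain Python number: cast to a (possibly trainable) float64 tensor.
        cast = torch.tensor(param, dtype=torch.float64, requires_grad=trainable)
        return cast, trainable
    # Already a tensor: re-casting would sever the graph; read its flag instead.
    return param, param.requires_grad

theta = torch.rand(1, dtype=torch.float64, requires_grad=True)
same_theta, trainable = resolve_parameter(theta)
assert same_theta is theta and trainable  # no copy; gradient tracking intact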
tests/test_torch_gradients.py (6 additions, 6 deletions)

@@ -9,21 +9,21 @@
 nepochs = 2
 optimizer = torch.optim.Adam
 target_state = torch.ones(2, dtype=torch.complex128) / 2.0
-# Define circuit ansatz
-params = torch.tensor(torch.rand(1, dtype=torch.float64), requires_grad=True)
-print(params)
+params = torch.rand(1, dtype=torch.float64, requires_grad=True)
+print("Initial params", params)
 c = models.Circuit(1)
-c.add(gates.RX(0, params[0]))
+gate = gates.RX(0, params)
+c.add(gate)

 optimizer = optimizer([params])
 for _ in range(nepochs):
     optimizer.zero_grad()
     c.set_parameters(params)
     final_state = c().state()
-    print("final state:", final_state)
     fidelity = torch.abs(torch.sum(torch.conj(target_state) * final_state))
     loss = 1 - fidelity
     loss.backward()
     print("loss:", loss)
+    print("params.grad:", params.grad)
     optimizer.step()
-print(params)
+print("Final parameters:", params)