diff --git a/src/qibo/models/circuit.py b/src/qibo/models/circuit.py
index a7c264e1ac..35d18100a6 100644
--- a/src/qibo/models/circuit.py
+++ b/src/qibo/models/circuit.py
@@ -1177,10 +1177,7 @@ def to_qasm(self):
             qubits = ",".join(f"q[{i}]" for i in gate.qubits)
             if isinstance(gate, gates.ParametrizedGate):
-                if any(x.__class__.__name__ == "Tensor" for x in gate.parameters):
-                    params = (str(x.detach().item()) for x in gate.parameters)
-                else:
-                    params = (str(x) for x in gate.parameters)
+                params = (str(float(x)) for x in gate.parameters)
                 name = f"{gate.qasm_label}({', '.join(params)})"
             else:
                 name = gate.qasm_label
 
diff --git a/src/qibo/models/error_mitigation.py b/src/qibo/models/error_mitigation.py
index 336f1f5acb..60a7da94af 100644
--- a/src/qibo/models/error_mitigation.py
+++ b/src/qibo/models/error_mitigation.py
@@ -330,6 +330,7 @@ def _curve_fit(
     if backend.name == "pytorch":
         # pytorch has some problems with the `scipy.optim.curve_fit` function
         # thus we use a `torch.optim` optimizer
+        params.requires_grad = True
         loss = lambda pred, target: backend.np.mean((pred - target) ** 2)
         optimizer = backend.np.optim.LBFGS(
             [params], lr=lr, max_iter=max_iter, tolerance_grad=tolerance_grad
@@ -431,8 +432,6 @@ def CDR(
         len(signature(model).parameters) - 1
     )  # first arg is the input and the *params afterwards
     params = backend.cast(local_state.random(nparams), backend.precision)
-    if backend.name == "pytorch":
-        params.requires_grad = True
     optimal_params = _curve_fit(
         backend,
         model,
@@ -554,8 +553,6 @@ def vnCDR(
         -1, len(noise_levels)
     )
     params = backend.cast(local_state.random(len(noise_levels)), backend.precision)
-    if backend.name == "pytorch":
-        params.requires_grad = True
     optimal_params = _curve_fit(
         backend,
         model,