Fix executing time-dependent blocks with convenience functions
vytautas-a committed Jul 1, 2024
1 parent 4863f37 commit 90b97ae
Showing 4 changed files with 23 additions and 23 deletions.
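For context: the convenience functions in qadence/execution.py (run, sample, expectation) previously built the PyQTorch backend without a differentiation mode and, per the commit title, failed to execute blocks with a time-dependent generator. The sketch below is not part of the commit; it mirrors the test added at the bottom of this diff. The generator block, its feature parameters "x"/"y", and the example values are assumptions standing in for the pytest fixture the test uses.

import torch
from pyqtorch.utils import SolverType

from qadence import HamEvo, QuantumCircuit, QuantumModel, Register, run

# Assumption: `qadence_generator` is a time-dependent generator block over 2 qubits
# with feature parameters "x" and "y" (the test obtains it from a fixture).
duration = 1.0
config = {"ode_solver": SolverType.DP5_SE, "n_steps_hevo": 500}
values = {"x": torch.tensor(0.5), "y": torch.tensor(1.2)}  # illustrative values

hamevo = HamEvo(qadence_generator, 0.0, duration=duration)

# Route 1: explicit QuantumModel, which the existing test already exercised.
model = QuantumModel(QuantumCircuit(Register(2), hamevo), configuration=config)
state_from_model = model.run(values=values)

# Route 2: the run() convenience function, fixed by this commit to force
# DiffMode.AD when the backend is PyQTorch.
state_from_run = run(hamevo, values=values, configuration=config)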
10 changes: 1 addition & 9 deletions qadence/backends/pyqtorch/convert_ops.py
@@ -26,7 +26,6 @@
from torch import dtype as torch_dtype
from torch.nn import Module

- from qadence import stringify
from qadence.backends.utils import (
    finitediff,
    pyqify,
@@ -48,7 +47,7 @@
    block_to_tensor,
)
from qadence.blocks.primitive import ProjectorBlock
- from qadence.blocks.utils import parameters, uuid_to_expression
+ from qadence.blocks.utils import parameters
from qadence.operations import (
    U,
    multi_qubit_gateset,
@@ -316,13 +315,6 @@ def forward(
        state: Tensor,
        values: dict[str, Tensor],
    ) -> Tensor:
-         # convert values dict keys from uuids to expression strings if needed
-         uuid_dict = uuid_to_expression(self.block.generator)  # type: ignore [arg-type]
-         if list(uuid_dict.keys())[0] in values:
-             orig_param_values = values["orig_param_values"]
-             values = {stringify(v): values[k] for k, v in uuid_dict.items()}
-             values["orig_param_values"] = orig_param_values
-
        if getattr(self.block.generator, "is_time_dependent", False):  # type: ignore [union-attr]

            def Ht(t: Tensor | float) -> Tensor:
1 change: 0 additions & 1 deletion qadence/blocks/embedding.py
@@ -140,7 +140,6 @@ def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
            gate_lvl_params: ParamDictType = {}
            for uuid, e in uuid_to_expr.items():
                gate_lvl_params[uuid] = embedded_params[e]
-             gate_lvl_params.update({"orig_param_values": inputs})
            return gate_lvl_params
        else:
            out = {stringify(k): v for k, v in embedded_params.items()}
14 changes: 11 additions & 3 deletions qadence/execution.py
@@ -71,7 +71,10 @@ def _(
    endianness: Endianness = Endianness.BIG,
    configuration: Union[BackendConfiguration, dict, None] = None,
) -> Tensor:
-     bknd = backend_factory(backend, configuration=configuration)
+     diff_mode = None
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
    conv = bknd.convert(circuit)
    with no_grad():
        return bknd.run(
@@ -147,7 +150,10 @@ def _(
    endianness: Endianness = Endianness.BIG,
    configuration: Union[BackendConfiguration, dict, None] = None,
) -> list[Counter]:
-     bknd = backend_factory(backend, configuration=configuration)
+     diff_mode = None
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
    conv = bknd.convert(circuit)
    return bknd.sample(
        circuit=conv.circuit,
@@ -242,7 +248,9 @@ def _(
    configuration: Union[BackendConfiguration, dict, None] = None,
) -> Tensor:
    observable = observable if isinstance(observable, list) else [observable]
-     bknd = backend_factory(backend, configuration=configuration, diff_mode=diff_mode)
+     if backend == BackendName.PYQTORCH:
+         diff_mode = DiffMode.AD
+     bknd = backend_factory(backend, diff_mode=diff_mode, configuration=configuration)
    conv = bknd.convert(circuit, observable)

    def _expectation() -> Tensor:
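All three dispatchers above apply the same pattern: select DiffMode.AD whenever the backend is PyQTorch before calling backend_factory, so the circuit runs through the differentiable backend rather than the bare one. A rough equivalent outside the convenience wrappers is sketched below; the circuit and values are placeholders, and the top-level imports are assumed to be the public qadence entry points.

import torch

from qadence import BackendName, DiffMode, QuantumCircuit, backend_factory


def run_time_dependent(circuit: QuantumCircuit, values: dict) -> torch.Tensor:
    # Mirror of what the patched helpers now do for PyQTorch: request the AD
    # differentiation engine instead of the bare backend.
    bknd = backend_factory(
        BackendName.PYQTORCH, diff_mode=DiffMode.AD, configuration={"n_steps_hevo": 500}
    )
    conv = bknd.convert(circuit)
    with torch.no_grad():
        return bknd.run(circuit=conv.circuit, param_values=conv.embedding_fn(conv.params, values))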
21 changes: 11 additions & 10 deletions tests/backends/pyq/test_time_dependent_generator.py
@@ -9,13 +9,7 @@
from metrics import MIDDLE_ACCEPTANCE
from pyqtorch.utils import SolverType

- from qadence import (
-     AbstractBlock,
-     HamEvo,
-     QuantumCircuit,
-     QuantumModel,
-     Register,
- )
+ from qadence import AbstractBlock, HamEvo, QuantumCircuit, QuantumModel, Register, run


@pytest.mark.parametrize("ode_solver", [SolverType.DP5_SE, SolverType.KRYLOV_SE])
@@ -29,19 +23,26 @@ def test_time_dependent_generator(
    duration = 1.0
    n_steps = 500

-     # simulate with qadence HamEvo
+     # simulate with qadence HamEvo using QuantumModel
    hamevo = HamEvo(qadence_generator, 0.0, duration=duration)
    reg = Register(2)
    circ = QuantumCircuit(reg, hamevo)
    model = QuantumModel(circ, configuration={"ode_solver": ode_solver, "n_steps_hevo": n_steps})
-     state_qadence = model.run(
+     state_qadence0 = model.run(
        values={"x": torch.tensor(feature_param_x), "y": torch.tensor(feature_param_y)}
    )

+     state_qadence1 = run(
+         hamevo,
+         values={"x": torch.tensor(feature_param_x), "y": torch.tensor(feature_param_y)},
+         configuration={"ode_solver": ode_solver, "n_steps_hevo": n_steps},
+     )
+
    # simulate with qutip
    t_points = np.linspace(0, duration, n_steps)
    psi_0 = qutip.basis(4, 0)
    result = qutip.sesolve(qutip_generator, psi_0, t_points)
    state_qutip = torch.tensor(result.states[-1].full().T)

-     assert torch.allclose(state_qadence, state_qutip, atol=MIDDLE_ACCEPTANCE)
+     assert torch.allclose(state_qadence0, state_qutip, atol=MIDDLE_ACCEPTANCE)
+     assert torch.allclose(state_qadence1, state_qutip, atol=MIDDLE_ACCEPTANCE)
