[Feature] Apply AD to parametric observables in PSR routine
dominikandreasseitz committed Dec 13, 2023
1 parent c64ed02 commit 378cdd7
Showing 2 changed files with 45 additions and 14 deletions.
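The gist of the change: circuit (gate) parameters keep their parameter-shift rules, while trainable parameters of the observable, previously rejected with a ValueError, are now differentiated by ordinary autograd on the backend expectation. Below is a minimal, qadence-free sketch of that dispatch, assuming a single RX(theta) rotation measured against the observable phi * Z; the names expectation, psr, ad and param_to_grad_fn are illustrative, not qadence API.

import math
from typing import Callable

import torch


# Toy stand-in for backend.expectation: a single RX(theta) rotation measured against
# the observable phi * Z, for which <phi * Z> = phi * cos(theta).
def expectation(values: dict[str, torch.Tensor]) -> torch.Tensor:
    return values["phi"] * torch.cos(values["theta"])


# Parameter-shift rule for a gate parameter whose generator has eigenvalues +-1/2 (e.g. RX).
def psr(values: dict[str, torch.Tensor], name: str) -> torch.Tensor:
    shift = math.pi / 2
    up = expectation({**values, name: values[name] + shift})
    down = expectation({**values, name: values[name] - shift})
    return (up - down) / 2


# AD fallback, mirroring ad_expectation in the diff below: differentiate the expectation
# value with respect to an observable parameter via torch.autograd.grad.
def ad(values: dict[str, torch.Tensor], name: str) -> torch.Tensor:
    expval = expectation(values)
    return torch.autograd.grad(expval, values[name], torch.ones_like(expval), create_graph=True)[0]


# The dispatch this commit introduces: circuit parameters -> PSR, observable parameters -> AD.
param_to_grad_fn: dict[str, Callable] = {"theta": psr, "phi": ad}

values = {"theta": torch.rand(1, requires_grad=True), "phi": torch.rand(1, requires_grad=True)}
grads = {name: fn(values, name) for name, fn in param_to_grad_fn.items()}
# PSR and AD agree on the gate parameter; the observable parameter is handled by AD alone.
assert torch.allclose(grads["theta"], ad(values, "theta"), atol=1e-6)

In the commit itself, the PSR callables are built from uuid_to_eigen over the circuit block, and the AD fallback (ad_expectation) closes over the backend expectation function passed in as bknd_expfn.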
qadence/engines/torch/differentiable_expectation.py (39 changes: 26 additions & 13 deletions)
@@ -19,12 +19,15 @@
 from qadence.blocks.utils import uuid_to_block, uuid_to_eigen
 from qadence.circuit import QuantumCircuit
 from qadence.extensions import get_gpsr_fns
+from qadence.logger import get_logger
 from qadence.measurements import Measurements
 from qadence.mitigations import Mitigations
 from qadence.ml_tools import promote_to_tensor
 from qadence.noise import Noise
 from qadence.types import DiffMode, Endianness
 
+logger = get_logger(__name__)
+
 
 class PSRExpectation(Function):
     """Overloads the PyTorch AD system to perform parameter shift rule on quantum circuits."""
@@ -190,7 +193,11 @@ def psr(self, psr_fn: Callable, **psr_args: int | float | None) -> Tensor:
         if isinstance(self.observable, ConvertedObservable):
             self.observable = [self.observable]
         param_to_psr = self.construct_rules(
-            self.circuit.abstract, [o.abstract for o in self.observable], psr_fn, **psr_args
+            self.circuit,
+            [o for o in self.observable],
+            psr_fn,
+            bknd_expfn=self.backend.expectation,
+            **psr_args,
         )
 
         # Select the subset of all parameters for which PSR apply
@@ -202,14 +209,15 @@ def psr(self, psr_fn: Callable, **psr_args: int | float | None) -> Tensor:
     # Make PSR construction a static method to avoid unhashability issues.
     @staticmethod
     def construct_rules(
-        circuit: QuantumCircuit,
-        observable: list[AbstractBlock],
+        circuit: ConvertedCircuit,
+        observable: list[ConvertedObservable],
         psr_fn: Callable,
+        bknd_expfn: Callable,
         **psr_args: int | float | None,
     ) -> dict[str, Callable]:
         """Create a mapping between parameters and PSR functions."""
 
-        uuid_to_eigs = uuid_to_eigen(circuit.block)
+        uuid_to_eigs = uuid_to_eigen(circuit.abstract.block)
         # We currently rely on implicit ordering to match the PSR to the parameter,
         # because we want to cache PSRs.
 
@@ -218,18 +226,23 @@ def construct_rules(
             if eigenvalues is None:
                 raise ValueError(
                     f"Eigenvalues are not defined for param_id {param_id}\n"
                     # f"of type {type(block)}.\n"
                     "PSR cannot be defined in that case."
                 )
 
             param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args)
 
+        def ad_expectation(exp_fn: Callable, param_name: str, param_values: dict) -> Tensor:
+            expval = bknd_expfn(circuit, observable, param_values)
+            return torch.autograd.grad(
+                expval, param_values[param_name], torch.ones_like(expval), create_graph=True
+            )[0]
+
         for obs in observable:
-            for param_id, _ in uuid_to_eigen(obs).items():
-                # We need the embedded fixed params of the observable in the param_values dict
-                # to be able to call expectation. Since torch backward requires
-                # a list of param_ids and values of equal length, we need to pass them to PSR too.
-                # Since they are constants their gradients are 0.
-                param_to_psr[param_id] = lambda x: torch.tensor([0.0], requires_grad=False)
+            for param_id, _ in uuid_to_block(obs.abstract).items():
+                # Trainable parameters in the observable can only be differentiated using AD.
+                param_to_psr[param_id] = lambda exp_fn, param_values, param_id: ad_expectation(
+                    exp_fn=exp_fn, param_name=param_id, param_values=param_values
+                )
         return param_to_psr
 
@@ -365,15 +378,15 @@ def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
 
     def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable:
         if observable is not None and observable.is_parametric:
-            raise ValueError("PSR cannot be applied to a parametric observable.")
+            logger.info("PSR cannot be applied to a parametric observable. Using AD.")
         return self.backend.observable(observable, n_qubits)
 
     def convert(
         self,
         circuit: QuantumCircuit,
         observable: list[AbstractBlock] | AbstractBlock | None = None,
     ) -> Converted:
-        if self.diff_mode != DiffMode.AD and observable is not None:
+        if self.diff_mode == DiffMode.ADJOINT and observable is not None:
             msg = (
                 f"Differentiation mode '{self.diff_mode}' does not support parametric observables."
             )
tests/backends/test_gpsr.py (20 changes: 19 additions & 1 deletion)
@@ -8,7 +8,7 @@
 import torch
 from metrics import GPSR_ACCEPTANCE, PSR_ACCEPTANCE
 
-from qadence import DiffMode, Parameter, QuantumCircuit
+from qadence import DiffMode, Parameter, QuantumCircuit, backend_factory
 from qadence.analog import add_background_hamiltonian
 from qadence.backends.pyqtorch import Backend as PyQBackend
 from qadence.blocks import add, chain
@@ -235,3 +235,21 @@ def test_expectation_psr(n_qubits: int, batch_size: int, n_obs: int, circuit_fn:
     assert torch.allclose(
         dexpval_xxtheta, dexpval_psr_xxtheta, atol=atol
     ), "d3f/dx2dtheta not equal."
+
+
+def test_diff_parametric_obs() -> None:
+    obs = Parameter("phi") * Z(0)
+    block = RX(0, "theta")
+    phi_val = torch.rand(1, requires_grad=True)
+    grads = {}
+    for diff_mode in ["ad", "gpsr"]:
+        bknd = backend_factory("pyqtorch", diff_mode)
+        conv = bknd.convert(QuantumCircuit(1, block), obs)
+        expval = bknd.expectation(
+            conv.circuit,
+            conv.observable,  # type: ignore[arg-type]
+            param_values=conv.embedding_fn(conv.params, {"phi": phi_val}),
+        )
+        dfdphi = torch.autograd.grad(expval, phi_val, torch.ones_like(expval), create_graph=True)[0]
+        grads[diff_mode] = dfdphi
+    assert torch.allclose(grads["ad"], grads["gpsr"])
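
For the circuit and observable used in this test, the compared gradient has a closed form: RX(theta)|0> gives <Z> = cos(theta), so d<phi * Z>/dphi = cos(theta) regardless of phi. A quick qadence-free sanity check of that value (a sketch, not part of the test file):

import torch

theta = torch.rand(1)
phi = torch.rand(1, requires_grad=True)
expval = phi * torch.cos(theta)  # <phi * Z(0)> on RX(theta)|0>
(dfdphi,) = torch.autograd.grad(expval, phi, torch.ones_like(expval))
assert torch.allclose(dfdphi, torch.cos(theta))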
