diff --git a/.github/workflows/build_docs.yml b/.github/workflows/build_docs.yml new file mode 100644 index 000000000..bb9e693de --- /dev/null +++ b/.github/workflows/build_docs.yml @@ -0,0 +1,56 @@ +name: Build documentation + +on: + push: + branches: + - main + pull_request: + branches: + - main + workflow_dispatch: {} + +concurrency: + group: docs-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + test_qadence_ubuntu: + name: Test Qadence docs (ubuntu) + runs-on: ubuntu-22.04 + steps: + + - uses: actions/checkout@v4 + + - name: Install JetBrains Mono font + run: | + sudo apt install -y wget unzip fontconfig + wget https://download.jetbrains.com/fonts/JetBrainsMono-2.304.zip + unzip JetBrainsMono-2.304.zip -d JetBrainsMono + mkdir -p /usr/share/fonts/truetype/jetbrains + cp JetBrainsMono/fonts/ttf/*.ttf /usr/share/fonts/truetype/jetbrains/ + fc-cache -f -v + + - name: Install graphviz + run: sudo apt-get install -y graphviz + + - name: Set up Python 3.10 + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install Hatch + run: | + pip install hatch + + - name: Build docs + if: ${{ !startsWith(github.ref, 'refs/tags/v') }} + run: | + hatch -v run docs:build + + - name: Deploy docs + if: startsWith(github.ref, 'refs/tags/v') + run: | + echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + git config user.name "GitHub Actions" + git config user.email "actions@github.com" + mike deploy --push --update-aliases $VERSION latest diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a709539df..9d15a2ec9 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,6 +1,9 @@ -name: Lint and type check Qadence.
+name: linting and type check on: + push: + branches: + - main pull_request: {} workflow_dispatch: {} @@ -10,7 +13,8 @@ jobs: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v4 + - name: Checkout Qadence + uses: actions/checkout@v4 - name: Set up Python 3.x uses: actions/setup-python@v4 with: diff --git a/.github/workflows/test_all.yml b/.github/workflows/test_all.yml index 8feec9215..3bcb04534 100644 --- a/.github/workflows/test_all.yml +++ b/.github/workflows/test_all.yml @@ -14,12 +14,15 @@ jobs: test_qadence_ubuntu: name: Test Qadence (ubuntu) runs-on: ubuntu-22.04 + strategy: + matrix: + python-version: ["3.9", "3.10"] steps: - uses: actions/checkout@v4 - - name: Select Python 3.10 + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: - python-version: '3.10' + python-version: ${{ matrix.python-version }} - name: Install Hatch run: | pip install hatch diff --git a/.github/workflows/test_docs.yml b/.github/workflows/test_docs.yml deleted file mode 100644 index d131ade9e..000000000 --- a/.github/workflows/test_docs.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: Run Qadence docs tests. 
- -on: - push: - branches: - - main - pull_request: - branches: - - main - paths: - - docs - workflow_dispatch: {} - -concurrency: - group: ${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - test_qadence_ubuntu: - name: Test Qadence (ubuntu) - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v4 - - name: Select Python 3.10 - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - name: Install Hatch - run: | - pip install hatch - - name: Run doc tests - run: | - hatch -e docs run test-docs diff --git a/.github/workflows/test_examples.yml b/.github/workflows/test_examples_notebooks.yml similarity index 73% rename from .github/workflows/test_examples.yml rename to .github/workflows/test_examples_notebooks.yml index cdb4eae7d..51c74eff9 100644 --- a/.github/workflows/test_examples.yml +++ b/.github/workflows/test_examples_notebooks.yml @@ -1,4 +1,4 @@ -name: Run Qadence example tests. +name: Run Qadence example and notebook tests. on: push: @@ -9,8 +9,10 @@ on: - main paths: - examples - paths-ignore: - - examples/notebooks + - notebooks + schedule: + # 03:00 every Saturday morning + - cron: '0 3 * * 6' workflow_dispatch: {} concurrency: @@ -33,3 +35,6 @@ jobs: - name: Run example tests run: | hatch -v run test-examples + - name: Run notebooks tests + run: | + hatch -v run test-notebooks diff --git a/.github/workflows/test_fast.yml b/.github/workflows/test_fast.yml index 5eb5bcea3..603a0ba0f 100644 --- a/.github/workflows/test_fast.yml +++ b/.github/workflows/test_fast.yml @@ -1,4 +1,4 @@ -name: Run Qadence fast tests. 
+name: fast tests on: push: @@ -14,18 +14,27 @@ concurrency: cancel-in-progress: true jobs: - test_qadence_ubuntu: + test_qadence_ubuntu_310: name: Test Qadence (ubuntu) - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest + container: + image: pasqalofficial/qadence_fast_310:latest steps: - - uses: actions/checkout@v4 - - name: Select Python 3.10 - uses: actions/setup-python@v4 + - name: Checkout Qadence + uses: actions/checkout@v3 + - name: Set up Python 3.10 + uses: actions/setup-python@v3 with: - python-version: '3.10' + python-version: "3.10" - name: Install Hatch run: | pip install hatch - name: Run fast tests run: | hatch -v run test -m "not slow" + - name: Upload coverage data + uses: actions/upload-artifact@v3 + with: + name: "coverage-data" + path: .coverage.* + if-no-files-found: ignore diff --git a/.github/workflows/test_notebooks.yml b/.github/workflows/test_notebooks.yml deleted file mode 100644 index a7b11190d..000000000 --- a/.github/workflows/test_notebooks.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: Run Qadence notebooks tests. 
- -on: - push: - branches: - - main - pull_request: - branches: - - main - paths: - - examples/notebooks - workflow_dispatch: {} - -concurrency: - group: ${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - test_qadence_ubuntu: - name: Test Qadence (ubuntu) - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v4 - - name: Select Python 3.10 - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - name: Install Hatch - run: | - pip install hatch - - name: Run notebooks tests - run: | - hatch -v run test-notebooks diff --git a/.gitignore b/.gitignore index 376db79f3..9d6144216 100644 --- a/.gitignore +++ b/.gitignore @@ -49,12 +49,28 @@ runs/ # Mkdocs site/ -# converted notebooks -docs/**/*.py - # pt files *.pt # event files events.out.tfevents.* /examples/notebooks/onboarding_sandbox.ipynb + +# latex +*.aux +*.lof +*.log +*.lot +*.fls +*.out +*.toc +*.fmt +*.fot +*.cb +*.cb2 +*.lb +*.pdf +*.ps +*.dvi + +*.gv diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 52afd857a..9feb2c2ea 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,13 +9,13 @@ repos: args: ['--maxkb=600'] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: "v0.0.288" + rev: "v0.0.291" hooks: - id: ruff - args: [--fix, --show-fixes, --show-source] + args: [--fix, --show-fixes, --show-source, --exclude, examples/draw.py] - repo: https://github.com/ambv/black - rev: 23.7.0 + rev: 23.9.1 hooks: - id: black diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..edf83de80 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,33 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of 
experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..f02ad471d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,90 @@ +# How to contribute + +We're grateful for your interest in participating in Qadence. Please follow our guidelines to ensure a smooth contribution process. + +## Reporting an issue or proposing a feature + +Your course of action will depend on your objective, but generally, you should start by creating an issue. If you've discovered a bug or have a feature you'd like to see added to **qadence**, feel free to create an issue on [qadence's GitHub issue tracker](https://github.com/pasqal-io/qadence/issues). Here are some steps to take: + +1. Quickly search the existing issues using relevant keywords to ensure your issue hasn't been addressed already. +2. If your issue is not listed, create a new one. Try to be as detailed and clear as possible in your description. + +- If you're merely suggesting an improvement or reporting a bug, that's already excellent! We thank you for it. Your issue will be listed and, hopefully, addressed at some point. +- However, if you're willing to be the one solving the issue, that would be even better! In such instances, you would proceed by preparing a [Pull Request](#submitting-a-pull-request). + +## Submitting a pull request + +We're excited that you're eager to contribute to Qadence. To contribute, fork the `main` branch of qadence repository and once you are satisfied with your feature and all the tests pass create a [Pull Request](https://github.com/pasqal-io/qadence/pulls). + +Here's the process for making a contribution: + +Click the "Fork" button at the upper right corner of the [repo page](https://github.com/pasqal-io/qadence) to create a new GitHub repo at `https://github.com/USERNAME/qadence`, where `USERNAME` is your GitHub ID. 
Then, `cd` into the directory where you want to place your new fork and clone it: + +```shell +git clone https://github.com/USERNAME/qadence.git +``` + +Next, navigate to your new qadence fork directory and mark the main qadence repository as the `upstream`: + +```shell +git remote add upstream https://github.com/pasqal-io/qadence.git +``` + +## Setting up your development environment + +We recommended to use `hatch` for managing environments: + +To develop within qadence, use: +```shell +pip install hatch +hatch -v shell +``` + +To run qadence tests, use: + +```shell +hatch -e tests run test +``` + +If you don't want to use `hatch`, you can use the environment manager of your +choice (e.g. Conda) and execute the following: + +```shell +pip install pytest +pip install -e . +pytest +``` + +### Useful things for your workflow: linting and testing + +Use `pre-commit` to lint your code and run the unit tests before pushing a new commit. + +Using `hatch`, it's simply: + +```shell +hatch -e tests run pre-commit run --all-files +hatch -e tests run test +``` + +Our CI/CD pipeline will also test if the documentation can be built correctly. To test it locally, please run: + +```shell +hatch -e docs run mkdocs build --clean --strict +``` + +Without `hatch`, `pip` install those libraries first: +"mkdocs", +"mkdocs-material", +"mkdocstrings", +"mkdocstrings-python", +"mkdocs-section-index", +"mkdocs-jupyter", +"mkdocs-exclude", +"markdown-exec" + + +And then: + +```shell + mkdocs build --clean --strict +``` diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..65d2954a4 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include qadence/gpsr/dfdx_expressions.pkl +include qadence/gpsr/dfdx_variance_expressions.pkl diff --git a/README.md b/README.md new file mode 100644 index 000000000..0cad241e6 --- /dev/null +++ b/README.md @@ -0,0 +1,74 @@ +# Qadence + +**Qadence** is a Python package that provides a simple interface to build _**digital-analog quantum +programs**_ with tunable qubit interaction defined on _**arbitrary register topologies**_ realizable on neutral atom devices. 
+ +[![pre-commit](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/lint.yml) +[![tests](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml/badge.svg)](https://github.com/pasqal-io/qadence/actions/workflows/test_fast.yml) +[![Build documentation](https://github.com/pasqal-io/qadence/actions/workflows/build_docs.yml/badge.svg)](https://pasqal-io.github.io/qadence/latest) + +Documentation can be found [here](https://pasqal-io.github.io/qadence/latest). + +## Feature highlights + +* A [block-based system](tutorials/getting_started.md) for composing _**complex digital-analog + programs**_ in a flexible and scalable manner, inspired by the Julia quantum SDK + [Yao.jl](https://github.com/QuantumBFS/Yao.jl) and functional programming concepts. + +* A [simple interface](digital_analog_qc/analog-basics.md) to work with _**interacting neutral-atom qubit systems**_ + using [arbitrary register topologies](tutorials/register.md). + +* An intuitive [expression-based system](tutorials/parameters.md) developed on top of the symbolic library [Sympy](https://www.sympy.org/en/index.html) to construct _**parametric quantum programs**_ easily. + +* [High-order generalized parameter shift rules](advanced_tutorials/differentiability.md) for _**differentiating parametrized quantum operations**_. + +* Out-of-the-box _**automatic differentiability**_ of quantum programs with [PyTorch](https://pytorch.org/) integration. + +* _**Efficient execution**_ on a variety of different purpose backends: from state vector simulators to tensor network emulators and real devices. + +## Installation guide + +Qadence can be installed from PyPI with `pip` as follows: + +```bash +pip install qadence +``` + +The default backend for Qadence is [PyQTorch](https://github.com/pasqal-io/pyqtorch), a differentiable state vector simulator for digital-analog simulation.
It is possible to install additional backends and the circuit visualization library using the following extras: + +* `braket`: the [Braket](https://github.com/amazon-braket/amazon-braket-sdk-python) backend. +* `pulser`: the [Pulser](https://github.com/pasqal-io/Pulser) backend for composing, simulating and executing pulse sequences for neutral-atom quantum devices. +* `visualization`: to display diagrammatically quantum circuits. + +by running: + +```bash +pip install qadence[braket, pulser, visualization] +``` + +!!! warning + In order to correctly install the `visualization` extra, the `graphviz` package needs to be installed + in your system: + + ```bash + # on Ubuntu + sudo apt install graphviz + + # on MacOS + brew install graphviz + + # via conda + conda install python-graphviz + ``` + +## Citation + +If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry: + +``` +@misc{qadence2023pasqal, + url = {https://github.com/pasqal-io/qadence}, + title = {Qadence: {A} {D}igital-analog quantum programming interface.}, + year = {2023} +} +``` diff --git a/docs/advanced_tutorials/custom-models.md b/docs/advanced_tutorials/custom-models.md new file mode 100644 index 000000000..f27e13158 --- /dev/null +++ b/docs/advanced_tutorials/custom-models.md @@ -0,0 +1,137 @@ +In `qadence`, the `QuantumModel` is the central class point for executing +`QuantumCircuit`s. The idea of a `QuantumModel` is to decouple the backend +execution from the management of circuit parameters and desired quantum +computation output. 
+ +In the following, we create a custom `QuantumModel` instance which introduces +some additional optimizable parameters: +* an adjustable scaling factor in front of the observable to measured +* adjustable scale and shift factors to be applied to the model output before returning the result + +This can be easily done using PyTorch flexible model definition, and it will +automatically work with the rest of `qadence` infrastructure. + + +```python exec="on" source="material-block" session="custom-model" +import torch +from qadence import QuantumModel, QuantumCircuit + + +class CustomQuantumModel(QuantumModel): + + def __init__(self, circuit: QuantumCircuit, observable, backend="pyqtorch", diff_mode="ad"): + super().__init__(circuit, observable=observable, backend=backend, diff_mode=diff_mode) + + self.n_qubits = circuit.n_qubits + + # define some additional parameters which will scale and shift (variationally) the + # output of the QuantumModel + # you can use all torch machinery for building those + self.scale_out = torch.nn.Parameter(torch.ones(1)) + self.shift_out = torch.nn.Parameter(torch.ones(1)) + + # override the forward pass of the model + # the forward pass is the output of your QuantumModel and in this case + # it's the (scaled) expectation value of the total magnetization with + # a variable coefficient in front + def forward(self, values: dict[str, torch.Tensor]) -> torch.Tensor: + + # scale the observable + res = self.expectation(values) + + # scale and shift the result before returning + return self.shift_out + res * self.scale_out +``` + +The custom model can be used like any other `QuantumModel`: +```python exec="on" source="material-block" result="json" session="custom-model" +from qadence import Parameter, RX, CNOT, QuantumCircuit +from qadence import chain, kron, hamiltonian_factory, Z +from sympy import acos + +def quantum_circuit(n_qubits): + + x = Parameter("x", trainable=False) + fm = kron(RX(i, acos(x) * (i+1)) for i in range(n_qubits)) + + 
ansatz = kron(RX(i, f"theta{i}") for i in range(n_qubits)) + ansatz = chain(ansatz, CNOT(0, n_qubits-1)) + + block = chain(fm, ansatz) + block.tag = "circuit" + return QuantumCircuit(n_qubits, block) + +n_qubits = 4 +batch_size = 10 +circuit = quantum_circuit(n_qubits) +observable = hamiltonian_factory(n_qubits, detuning=Z) # Total magnetization + +model = CustomQuantumModel(circuit, observable, backend="pyqtorch") + +values = {"x": torch.rand(batch_size)} +res = model(values) +print("Model output: ", res) +assert len(res) == batch_size +``` + + +## Quantum model with wavefunction overlaps + +`QuantumModel`'s can also use different quantum operations in their forward +pass, such as wavefunction overlaps described [here](../tutorials/overlap.md). Beware that the resulting overlap tensor +has to be differentiable to apply gradient-based optimization. This is only applicable to the `"EXACT"` overlap method. + +Here we show how to use overlap calculation when fitting a parameterized quantum circuit to act as a standard Hadamard gate. 
+ +```python exec="on" source="material-block" result="json" session="custom-model" +from qadence import RY, RX, H, Overlap + +# create a quantum model which acts as an Hadamard gate after training +class LearnHadamard(QuantumModel): + def __init__( + self, + train_circuit: QuantumCircuit, + target_circuit: QuantumCircuit, + backend="pyqtorch", + ): + super().__init__(circuit=train_circuit, backend=backend) + self.overlap_fn = Overlap(train_circuit, target_circuit, backend=backend, method="exact", diff_mode='ad') + + def forward(self): + return self.overlap_fn() + + # compute the wavefunction of the associated train circuit + def wavefunction(self): + return model.overlap_fn.run({}) + + +train_circuit = QuantumCircuit(1, chain(RX(0, "phi"), RY(0, "theta"))) +target_circuit = QuantumCircuit(1, H(0)) + +model = LearnHadamard(train_circuit, target_circuit) + +# get the overlap between model and target circuit wavefunctions +print(model()) +``` + +This model can then be trained with the standard Qadence helper functions. 
+ +```python exec="on" source="material-block" result="json" session="custom-model" +from qadence import run +from qadence.ml_tools import train_with_grad, TrainConfig + +criterion = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=1e-1) + +def loss_fn(model: LearnHadamard, _unused) -> tuple[torch.Tensor, dict]: + loss = criterion(torch.tensor([[1.0]]), model()) + return loss, {} + +config = TrainConfig(max_iter=2500) +model, optimizer = train_with_grad( + model, None, optimizer, config, loss_fn=loss_fn +) + +wf_target = run(target_circuit) +assert torch.allclose(wf_target, model.wavefunction(), atol=1e-2) +``` diff --git a/docs/advanced_tutorials/differentiability.md b/docs/advanced_tutorials/differentiability.md new file mode 100644 index 000000000..bb6034fe6 --- /dev/null +++ b/docs/advanced_tutorials/differentiability.md @@ -0,0 +1,167 @@ +# Differentiability + +Many application in quantum computing and quantum machine learning more specifically requires the differentiation +of a quantum circuit with respect to its parameters. + +In Qadence, we perform quantum computations via the `QuantumModel` interface. The derivative of the outputs of quantum +models with respect to feature and variational parameters in the quantum circuit can be implemented in Qadence +with two different modes: + +- Automatic differentiation (AD) mode [^1]. This mode allows to differentiation both +`run()` and `expectation()` methods of the `QuantumModel` and it is the fastest +available differentiation method. Under the hood, it is based on the PyTorch autograd engine wrapped by +the `DifferentiableBackend` class. This mode is not working on quantum devices. +- Generalized parameter shift rule (GPSR) mode. This is general implementation of the well known parameter + shift rule algorithm [^2] which works for arbitrary quantum operations [^3]. 
This mode is only applicable to
  the `expectation()` method of `QuantumModel`, but it is compatible with execution on quantum devices.
It can be shown that the final expression for the derivative of $f(x)$ is then given by the following expression:
+
+$\begin{equation}
+\frac{{\rm d}f\left(x\right)}{{\rm d}x}=\overset{S}{\underset{s=1}{\sum}}\Delta_{s}R_{s},
+\end{equation}$
+
+where $S$ is the number of unique non-zero spectral gaps and $R_s$ are real quantities that are solutions of a system of linear equations
+
+$\begin{equation}
+\begin{cases}
+F_{1} & =4\overset{S}{\underset{s=1}{\sum}}{\rm sin}\left(\frac{\delta_{1}\Delta_{s}}{2}\right)R_{s},\\
+F_{2} & =4\overset{S}{\underset{s=1}{\sum}}{\rm sin}\left(\frac{\delta_{2}\Delta_{s}}{2}\right)R_{s},\\
+ & ...\\
+F_{S} & =4\overset{S}{\underset{s=1}{\sum}}{\rm sin}\left(\frac{\delta_{S}\Delta_{s}}{2}\right)R_{s}.
+\end{cases}
+\end{equation}$
+ +```python exec="on" source="material-block" session="differentiability" +from qadence import (FeatureParameter, HamEvo, X, I, Z, + hamiltonian_factory, QuantumCircuit, + QuantumModel, BackendName, DiffMode) +import torch + +n_qubits = 2 + +# define differentiation parameter +x = FeatureParameter("x") + +# define generator and HamEvo block +generator = X(0) + X(1) + 0.2 * (Z(0) + I(1)) * (I(0) + Z(1)) +block = HamEvo(generator, x) + +# create quantum circuit +circuit = QuantumCircuit(n_qubits, block) + +# create total magnetization cost operator +obs = hamiltonian_factory(n_qubits, detuning=Z) + +# create models with AD and GPSR differentiation engines +model_ad = QuantumModel(circuit, obs, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD) +model_gpsr = QuantumModel(circuit, obs, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR) + +# generate value for circuit's parameter +xs = torch.linspace(0, 2*torch.pi, 100, requires_grad=True) +values = {"x": xs} + +# calculate function f(x) +exp_val_ad = model_ad.expectation(values) +exp_val_gpsr = model_gpsr.expectation(values) + +# calculate derivative df/dx using the PyTorch +# autograd engine +dexpval_x_ad = torch.autograd.grad( + exp_val_ad, values["x"], torch.ones_like(exp_val_ad), create_graph=True +)[0] +dexpval_x_gpsr = torch.autograd.grad( + exp_val_gpsr, values["x"], torch.ones_like(exp_val_gpsr), create_graph=True +)[0] +``` + +We can plot the resulting derivatives and see that in both cases they coincide. 
+ +```python exec="on" source="material-block" session="differentiability" +import matplotlib.pyplot as plt + +# plot f(x) and df/dx derivatives calculated using AD and GPSR +# differentiation engines +fig, ax = plt.subplots() +ax.scatter(xs.detach().numpy(), + exp_val_ad.detach().numpy(), + label="f(x)") +ax.scatter(xs.detach().numpy(), + dexpval_x_ad.detach().numpy(), + label="df/dx AD") +ax.scatter(xs.detach().numpy(), + dexpval_x_gpsr.detach().numpy(), + s=5, + label="df/dx GPSR") +plt.legend() +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide +``` + +### Low-level control on the shift values + +In order to get a finer control over the GPSR differentiation engine we can use the low-level Qadence API to define a `DifferentiableBackend`. + +```python exec="on" source="material-block" session="differentiability" +from qadence import DifferentiableBackend +from qadence.backends.pyqtorch import Backend as PyQBackend + +# define differentiable quantum backend +quantum_backend = PyQBackend() +conv = quantum_backend.convert(circuit, obs) +pyq_circ, pyq_obs, embedding_fn, params = conv +diff_backend = DifferentiableBackend(quantum_backend, diff_mode=DiffMode.GPSR, shift_prefac=0.2) + +# calculate function f(x) +expval = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) +``` + +Here we passed an additional argument `shift_prefac` to the `DifferentiableBackend` instance that governs the magnitude of shifts $\delta\equiv\alpha\delta^\prime$ shown in equation (2) above. In this relation $\delta^\prime$ is set internally and $\alpha$ is the value passed by `shift_prefac` and the resulting shift value $\delta$ is then used in all the following GPSR calculations. 
Tuning the parameter $\alpha$ is useful to improve results
+when the generator $\hat{G}$ of the quantum operation is a dense matrix, for example in a complex `HamEvo` operation; if many entries of this matrix are sufficiently larger than 0, the operation is equivalent to a strongly interacting system. In such cases, the parameter $\alpha$ should be gradually lowered in order to achieve exact derivative values.
+ +In the following function we generate the Hamiltonian with the format above. + +```python exec="on" source="material-block" html="1" session="vqe" +from typing import Iterable +from qadence import X, Y, Z, I, add +def make_hamiltonian(hamilt: Iterable, nqubits: int): + + nb_terms = len(hamilt[0]) + blocks = [] + + for iter in range(nb_terms): + block = kron(gate(qubit) for gate,qubit in zip(hamilt[1][iter], hamilt[2][iter])) + blocks.append(hamilt[0][iter] * block) + + return add(*blocks) + + +nqbits = 2 + +# Hamiltonian definition using the convention outlined above +hamilt_R07 = [ + [0.2976, 0.3593, -0.4826,0.5818, 0.0896, 0.0896], + [[I,I],[Z],[Z],[Z,Z],[X,X],[Y,Y]], + [[0,1],[0],[1],[0,1],[0,1],[0,1]] +] + +hamiltonian = make_hamiltonian(hamilt_R07, nqbits) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(hamiltonian)) # markdown-exec: hide +``` + +Let's now create a `QuantumCircuit` representing the variational ansatz and plug +it into a `QuantumModel` instance. From there, it is very easy to compute the +energy by simply evaluating the expectation value of the Hamiltonian operator. + +```python exec="on" source="material-block" result="json" session="vqe" +from qadence import QuantumCircuit, QuantumModel + +ansatz = QuantumCircuit(nqbits, UCC_ansatz_H2()) +model = QuantumModel(ansatz, observable=hamiltonian, backend="pyqtorch", diff_mode="ad") + +values={} +out = model.expectation(values) +print(out) +``` +Let's now resent the parameters and set them randomly before starting the optimization loop. 
+ +```python exec="on" source="material-block" result="json" session="vqe" +init_params = torch.rand(model.num_vparams) +model.reset_vparams(init_params) + +n_epochs = 100 +lr = 0.05 +optimizer = torch.optim.Adam(model.parameters(), lr=lr) +for i in range(n_epochs): + optimizer.zero_grad() + out=model.expectation({}) + out.backward() + optimizer.step() + +print("Ground state energy =", out.item(), "Hatree") +``` + +## Unrestricted Hamiltonian + +This result is in line with what obtained in the reference paper. Let's now +perform the same calculations but with a standard hardware efficient ansatz +(i.e. not specifically tailored for the H2 molecule) and with an unrestricted +Hamiltonian on 4 qubits. The values of the coefficients are taken from BK Hamiltonian, page 28[^2]. + +```python exec="on" source="material-block" html="1" session="vqe" +from qadence import hea + +nqbits = 4 + +gates = [[I,I,I,I],[Z],[Z],[Z],[Z,Z],[Z,Z],[Z,Z],[X,Z,X],[Y,Z,Y],[Z,Z,Z],[Z,Z,Z],[Z,Z,Z],[Z,X,Z,X],[Z,Y,Z,Y],[Z,Z,Z,Z]] +qubits = [[0,1,2,3],[0],[1],[2],[0,1],[0,2],[1,3],[2,1,0],[2,1,0],[2,1,0],[3,2,0],[3,2,1],[3,2,1,0],[3,2,1,0],[3,2,1,0]] +coeffs = [ + -0.81261,0.171201,0.16862325,- 0.2227965,0.171201,0.12054625,0.17434925 ,0.04532175,0.04532175,0.165868 , + 0.12054625,-0.2227965 ,0.04532175 ,0.04532175,0.165868 +] + +hamilt_R074_bis = [coeffs,gates,qubits] +Hamiltonian_bis = make_hamiltonian(hamilt_R074_bis, nqbits) +ansatz_bis = QuantumCircuit(4, hea(nqbits)) + +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz_bis)) # markdown-exec: hide +``` +```python exec="on" source="material-block" result="json" session="vqe" +model = QuantumModel(ansatz_bis, observable=Hamiltonian_bis, backend="pyqtorch", diff_mode="ad") + +values={} +out=model.expectation(values) + +# initialize some random initial parameters +init_params = torch.rand(model.num_vparams) +model.reset_vparams(init_params) + +n_epochs = 100 +lr = 0.05 +optimizer = 
torch.optim.Adam(model.parameters(), lr=lr) +for i in range(n_epochs): + + optimizer.zero_grad() + out=model.expectation(values) + out.backward() + optimizer.step() + if (i+1) % 10 == 0: + print(f"Epoch {i+1} - Loss: {out.item()}") + +print("Ground state energy =", out.item(),"a.u") +``` + +In a.u, the final ground state energy is a bit higher the expected -1.851 a.u +(see page 33 of the reference paper mentioned above). Increasing the ansatz +depth is enough to reach the desired accuracy. + + +## References + +[^1]: [Seeley et al.](https://arxiv.org/abs/1208.5986) - The Bravyi-Kitaev transformation for quantum computation of electronic structure diff --git a/docs/backends/backend.md b/docs/backends/backend.md new file mode 100644 index 000000000..967fea42f --- /dev/null +++ b/docs/backends/backend.md @@ -0,0 +1 @@ +### ::: qadence.backend diff --git a/docs/backends/braket.md b/docs/backends/braket.md new file mode 100644 index 000000000..23e5bf133 --- /dev/null +++ b/docs/backends/braket.md @@ -0,0 +1,5 @@ +## Braket Digital backend + +### ::: qadence.backends.braket.backend + +### ::: qadence.backends.braket.convert_ops diff --git a/docs/backends/differentiable.md b/docs/backends/differentiable.md new file mode 100644 index 000000000..3341e5a23 --- /dev/null +++ b/docs/backends/differentiable.md @@ -0,0 +1 @@ +### ::: qadence.backends.pytorch_wrapper diff --git a/docs/backends/pulser.md b/docs/backends/pulser.md new file mode 100644 index 000000000..460562277 --- /dev/null +++ b/docs/backends/pulser.md @@ -0,0 +1,16 @@ +The **Pulser backend** features a basic integration with the pulse-level programming +interface Pulser. This backend offers for now few simple operations +which are translated into a valid, non time-dependent pulse sequence. 
In particular, one has access to: + +* analog rotations: `AnalogRx` and `AnalogRy` blocks +* free evolution blocks (basically no pulse, just interaction): `AnalogWait` block +* a block for creating entangled states: `AnalogEntanglement` +* digital rotation `Rx` and `Ry` + +### ::: qadence.backends.pulser.backend + +### ::: qadence.backends.pulser.devices + +### ::: qadence.backends.pulser.pulses + +### ::: qadence.backends.pulser.convert_ops diff --git a/docs/backends/pyqtorch.md b/docs/backends/pyqtorch.md new file mode 100644 index 000000000..7ac7edfb2 --- /dev/null +++ b/docs/backends/pyqtorch.md @@ -0,0 +1,10 @@ +Fast differentiable statevector emulator based on PyTorch. The code is open source, +hosted on [Github](https://github.com/pasqal-io/PyQ) and maintained by Pasqal. + +### ::: qadence.backends.pyqtorch.backend + options: + inherited_members: true + +### ::: qadence.backends.pyqtorch.config + +### ::: qadence.backends.pyqtorch.convert_ops diff --git a/docs/css/mkdocstrings.css b/docs/css/mkdocstrings.css new file mode 100644 index 000000000..7c7c6d292 --- /dev/null +++ b/docs/css/mkdocstrings.css @@ -0,0 +1,24 @@ +/* Indentation. */ +div.doc-contents:not(.first) { + padding-left: 25px; + border-left: 4px solid rgba(230, 230, 230); + margin-bottom: 80px; + } + +/* Justified text */ +.md-content p { + text-align: justify; +} + +/* Avoid breaking parameters name, etc. in table cells. */ +td code { + word-break: normal !important; +} + +:root { + --md-primary-fg-color: #00704a; +} + +.md-content p { + text-align: justify; +} diff --git a/docs/development/architecture.md b/docs/development/architecture.md new file mode 100644 index 000000000..452430705 --- /dev/null +++ b/docs/development/architecture.md @@ -0,0 +1,175 @@ +Qadence as a software library mixes functional and object-oriented programming. We do that by maintaining core objects and operating on them with functions. 
+ +Furthermore, Qadence strives at keeping the lower level abstraction layers for automatic differentiation and quantum computation +fully stateless while only the frontend layer which is the main user-facing interface is stateful. + +!!! note "**Code design philosopy**" + Functional, stateless core with object-oriented, stateful user interface. + +## Abstraction layers + +In Qadence there are 4 main objects spread across 3 different levels of abstraction: + +* **Frontend layer**: The user facing layer and encompasses two objects: + * [`QuantumCircuit`][qadence.circuit.QuantumCircuit]: A class representing an abstract quantum + circuit not tight not any particular framework. Parameters are represented symbolically using + `sympy` expressions. + * [`QuantumModel`][qadence.models.QuantumModel]: The models are higher-level abstraction + providing an interface for executing different kinds of common quantum computing models such + quantum neural networks (QNNs), quantum kernels etc. + +* **Differentiation layer**: Intermediate layer has the purpose of integrating quantum + computation with a given automatic differentiation engine. It is meant to be purely stateless and + contains one object: + * [`DifferentiableBackend`][qadence.backends.pytorch_wrapper.DifferentiableBackend]: + An abstract class whose concrete implementation wraps a quantum backend and make it + automatically differentiable using different engines (e.g. PyTorch or Jax). + Note, that today only PyTorch is supported but there is plan to add also a Jax + differentiable backend which will require some changes in the base class implementation. + +* **Quantum layer**: The lower-level layer which directly interfaces with quantum emulators + and processing units. 
It is meant to be purely stateless and it contains one base object which is + specialized for each supported backend: + * [`Backend`][qadence.backend.Backend]: An abstract class whose concrete implementation + enables the execution of quantum circuit with a variety of quantum backends (normally non + automatically differentiable by default) such as PyQTorch, Pulser or Braket. + + +## Main components + +### `QuantumCircuit` + +We consider `QuantumCircuit` to be an abstract object, i.e. it is not tied to any backend. However, it blocks are even more abstract. This is because we consider `QuantumCircuit`s "real", whereas the blocks are largely considered just syntax. + +Unitary `QuantumCircuits` (this encompasses digital, or gate-based, circuits as well as analog circuits) are constructed by [`PrimitiveBlocks`] using a syntax that allows you to execute them in sequence, dubbed `ChainBlock` in the code, or in parallel (i.e. at the same time) where applicable, dubbed `KronBlock` in the code. +Notice that this differs from other packages by providing more control of the layout of the circuit than conventional packages like Qiskit, and from Yao where the blocks are the primary type. + +### `QuantumModel` + +`QuantumModel`s are meant to be the main entry point for quantum computations in `qadence`. In general, they take one or more +quantum circuit as input and they wrap all the necessary boiler plate code to make the circuit executable and differentiable +on the chosen backend. + +Models are meant to be specific for a certain kind of quantum problem or algorithm and you can easily create new ones starting +from the base class `QuantumModel`, as explained in the [custom model tutorial](../advanced_tutorials/custom-models.md). Currently, Qadence offers +a `QNN` model class which provides convenient methods to work with quantum neural networks with multi-dimensional inputs +and outputs. 
+ +### `DifferentiableBackend` + +The differentiable backend is a thin wrapper which takes as input a `QuantumCircuit` instance and a chosen quantum backend and make the circuit execution routines (expectation value, overalap, etc.) differentiable. Currently, the only implemented differentiation engine is PyTorch but it is easy to add support to another one like Jax. + +### Quantum `Backend` + +For execution the primary object is the `Backend`. Backends maintain the same user-facing interface, and internally connects to other libraries to execute circuits. Those other libraries can execute the code on QPUs and local or cloud-based emulators. The `Backends` use PyTorch tensors to represent data and leverages PyTorchs autograd to help compute derivatives of circuits. + +## Symbolic parameters + +To illustrate how parameters work in Qadence, let's consider the following simple block composed of just two rotations: + +```python exec="on" source="material-block" session="architecture" +import sympy +from qadence import Parameter, RX + +param = Parameter("phi", trainable=False) +block = RX(0, param) * RX(1, sympy.acos(param)) +``` + +The rotation angles assigned to `RX` (and to any Qadence quantum operation) are defined as arbitrary expressions of `Parameter`'s. `Parameter` is a subclass of `sympy.Symbol`, thus fully interoperable with it. + +To assign values of the parameter `phi` in a quantum model, one should use a dictionary containing the a key with parameter name and the corresponding values values: + +```python exec="on" source="material-block" session="architecture" +import torch +from qadence import run + +values = {"phi": torch.rand(10)} +wf = run(block, values=values) +``` + +This is the only interface for parameter assignment exposed to the user. Under the hood, parameters applied to every quantum operation are identified in different ways: + +* By default, with a stringified version of the `sympy` expression supplied to the quantum operation. 
Notice that multiple operations can have the same expression. + +* In certain case, e.g. for constructing parameter shift rules, one must access a *unique* identifier of the parameter for each quantum operation. Therefore, Qadence also creates unique identifiers for each parametrized operation (see the [`ParamMap`][qadence.parameters.ParamMap] class). + +By default, when one constructs a new backend, the parameter identifiers are the `sympy` expressions +which are used when converting an abstract block into a native circuit for the chosen backend. +However, one can use the unique identifiers as parameter names by setting the private flag +`_use_gate_params` to `True` in the backend configuration +[`BackendConfiguration`][qadence.backend.BackendConfiguration]. +This is automatically set when PSR differentiation is selected (see next section for more details). + +You can see the logic for choosing the parameter identifier in [`get_param_name`][qadence.backend.BackendConfiguration.get_param_name]. + +## Differentiation with parameter shift rules (PSR) + +In Qadence, parameter shift rules are implemented by extending the PyTorch autograd engine using custom `Function` +objects. The implementation is based on this PyTorch [guide](https://pytorch.org/docs/stable/notes/extending.html). + +A custom PyTorch `Function` looks like this: + +```python +import torch +from torch.autograd import Function + +class CustomFunction(Function): + + # forward pass implementation giving the output of the module + @staticmethod + def forward(ctx, inputs: torch.Tensor, params: torch.Tensor): + ctx.save_for_backward(inputs, params) + ... + + # backward pass implementation giving the derivative of the module + # with respect to the parameters. This must return the whole vector-jacobian + # product to integrate within the autograd engine + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + inputs, params = ctx.saved_tensors + ... 
+``` + +The class [`PSRExpectation`][qadence.backends.pytorch_wrapper.PSRExpectation] implements parameter shift rules for all parameters using +a custom function as the one above. There are a few implementation details to keep in mind if you want +to modify the PSR code: + +* **PyTorch `Function` only works with tensor arguments**. Parameters in Qadence are passed around as + dictionaries with parameter names as keys and current parameter values (tensors) + as values. This works for both variational and feature parameters. However, the `Function` class + only work with PyTorch tensors as input, not dictionaries. Therefore, the forward pass of + `PSRExpectation` accepts one argument `param_keys` with the + parameter keys and a variadic positional argument `param_values` with the parameter values one by + one. The dictionary is reconstructed within the `forward()` pass body. + +* **Higher-order derivatives with PSR**. Higher-order PSR derivatives can be tricky. Parameter shift + rules calls, under the hood, the `QuantumBackend` expectation value routine that usually yield a + non-differentiable output. Therefore, a second call to the backward pass would not work. However, + Qadence employs a very simple trick to make higher-order derivatives work: instead of using + directly the expectation value of the quantum backend, the PSR backward pass uses the PSR forward + pass itself as expectation value function (see the code below). In this way, multiple calls to the + backward pass are allowed since the `expectation_fn` routine is always differentiable by + definition. Notice that this implementation is simple but suboptimal since, in some corner cases, + higher-order derivates might include some repeated terms that, with this implementation, are + always recomputed. 
+ +```python +# expectation value used in the PSR backward pass +def expectation_fn(params: dict[str, Tensor]) -> Tensor: + return PSRExpectation.apply( + ctx.expectation_fn, + ctx.param_psrs, + params.keys(), + *params.values(), + ) +``` + +* **Operation parameters must be uniquely identified for PSR to work**. Parameter shift rules work at the level of individual quantum operations. This means that, given a parameter `x`, one needs to sum the contributions from shifting the parameter values of **all** the operation where the parameter `x` appears. When constructing the PSR rules, one must access a unique parameter identifier for each operation even if the corresponding user-facing parameter is the same. Therefore, when PSR differentiation is selected, the flag `_use_gate_params` is automatically set to `True` in the backend configuration [`BackendConfiguration`][qadence.backend.BackendConfiguration] (see previous section). + +* **PSR must not be applied to observable parameters**. In Qadence, Pauli observables can also be parametrized. However, the tunable parameters of observables are purely classical and should not be included in the differentiation with PSRs. However, the quantum expectation value depends on them, thus they still need to enter into the PSR evaluation. To solve this issue, the code sets the `requires_grad` attribute of all observable parameters to `False` when constructing the PSRs for the circuit as in the snippet below: + +```python +for obs in observable: + for param_id, _ in uuid_to_eigen(obs).items(): + param_to_psr[param_id] = lambda x: torch.tensor([0.0], requires_grad=False) +``` diff --git a/docs/development/contributing.md b/docs/development/contributing.md new file mode 100644 index 000000000..f02ad471d --- /dev/null +++ b/docs/development/contributing.md @@ -0,0 +1,90 @@ +# How to contribute + +We're grateful for your interest in participating in Qadence. Please follow our guidelines to ensure a smooth contribution process. 
+ +## Reporting an issue or proposing a feature + +Your course of action will depend on your objective, but generally, you should start by creating an issue. If you've discovered a bug or have a feature you'd like to see added to **qadence**, feel free to create an issue on [qadence's GitHub issue tracker](https://github.com/pasqal-io/qadence/issues). Here are some steps to take: + +1. Quickly search the existing issues using relevant keywords to ensure your issue hasn't been addressed already. +2. If your issue is not listed, create a new one. Try to be as detailed and clear as possible in your description. + +- If you're merely suggesting an improvement or reporting a bug, that's already excellent! We thank you for it. Your issue will be listed and, hopefully, addressed at some point. +- However, if you're willing to be the one solving the issue, that would be even better! In such instances, you would proceed by preparing a [Pull Request](#submitting-a-pull-request). + +## Submitting a pull request + +We're excited that you're eager to contribute to Qadence. To contribute, fork the `main` branch of qadence repository and once you are satisfied with your feature and all the tests pass create a [Pull Request](https://github.com/pasqal-io/qadence/pulls). + +Here's the process for making a contribution: + +Click the "Fork" button at the upper right corner of the [repo page](https://github.com/pasqal-io/qadence) to create a new GitHub repo at `https://github.com/USERNAME/qadence`, where `USERNAME` is your GitHub ID. 
Then, `cd` into the directory where you want to place your new fork and clone it: + +```shell +git clone https://github.com/USERNAME/qadence.git +``` + +Next, navigate to your new qadence fork directory and mark the main qadence repository as the `upstream`: + +```shell +git remote add upstream https://github.com/pasqal-io/qadence.git +``` + +## Setting up your development environment + +We recommended to use `hatch` for managing environments: + +To develop within qadence, use: +```shell +pip install hatch +hatch -v shell +``` + +To run qadence tests, use: + +```shell +hatch -e tests run test +``` + +If you don't want to use `hatch`, you can use the environment manager of your +choice (e.g. Conda) and execute the following: + +```shell +pip install pytest +pip install -e . +pytest +``` + +### Useful things for your workflow: linting and testing + +Use `pre-commit` to lint your code and run the unit tests before pushing a new commit. + +Using `hatch`, it's simply: + +```shell +hatch -e tests run pre-commit run --all-files +hatch -e tests run test +``` + +Our CI/CD pipeline will also test if the documentation can be built correctly. To test it locally, please run: + +```shell +hatch -e docs run mkdocs build --clean --strict +``` + +Without `hatch`, `pip` install those libraries first: +"mkdocs", +"mkdocs-material", +"mkdocstrings", +"mkdocstrings-python", +"mkdocs-section-index", +"mkdocs-jupyter", +"mkdocs-exclude", +"markdown-exec" + + +And then: + +```shell + mkdocs build --clean --strict +``` diff --git a/docs/development/draw.md b/docs/development/draw.md new file mode 100644 index 000000000..803101bf0 --- /dev/null +++ b/docs/development/draw.md @@ -0,0 +1,210 @@ +# `qadence.draw` example plots + +Mostly for quick, manual checking of correct plotting output. 
+ +```python exec="on" source="material-block" html="1" +from qadence import X, Y, kron +from qadence.draw import display + +b = kron(X(0), Y(1)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(b)) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import X, Y, chain +from qadence.draw import display + +b = chain(X(0), Y(0)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(b)) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import X, Y, chain +from qadence.draw import display + +b = chain(X(0), Y(1)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(b)) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import X, Y, add +from qadence.draw import display + +b = add(X(0), Y(1), X(2)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(b)) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import CNOT, RX, HamEvo, X, Y, Z, chain, kron + +rx = kron(RX(3,0.5), RX(2, "x")) +rx.tag = "rx" +gen = chain(Z(i) for i in range(4)) + +# `chain` puts things in sequence +block = chain( + kron(X(0), Y(1), rx), + CNOT(2,3), + HamEvo(gen, 10) +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block)) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import feature_map, hea, chain + +block = chain(feature_map(4, fm_type="tower"), hea(4,2)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block)) # markdown-exec: hide +``` + + +## Developer documentation + +This section contains examples in pure graphviz that can be used to understand roughly what is done +in the actual drawing backend. 
+ +```python exec="on" source="material-block" result="json" session="draw-dev" +import graphviz + +font_name = "Sans-Serif" +font_size = "8" + +graph_attr = { + "rankdir": "LR", # LR = left to right, TB = top to bottom + "nodesep": "0.1", # In inches, tells distance between nodes without edges + "compound": "true", # Needed to draw properly edges in hamevo when content is hidden + "splines": "false", # Needed to draw control gates vertical lines one over the other +} # These are the default values for graphs + +node_attr = { + "shape": "box", # 'box' for normal nodes, 'point' for control gates or 'plaintext' for starting nodes (the qubit label). + "style": "rounded", # Unfortunately we can't specify the radius of the rounded, at least for this version + "fontname": font_name, + "fontsize": font_size, + "width": "0.1", # In inches, it doesn't get tinier than the label font. + "height": "0.1" # In inches, it doesn't get tinier than the label font. +} # These are the defaults values that can be overridden at node declaration. + +default_cluster_attr = { + "fontname": font_name, + "fontsize": font_size, + "labelloc": "b", # location of cluster label. 
b as bottom, t as top + "style": "rounded" +} # These are the defaults values that can be overridden at sub graph declaration + +hamevo_cluster_attr = { + "label": "HamEvo(t=10)" +} +hamevo_cluster_attr.update(default_cluster_attr) + +h = graphviz.Graph(graph_attr=graph_attr, node_attr=node_attr) +h.node("Hello World!") +h +``` + +```python exec="on" source="material-block" result="json" session="draw-dev" +# Define graph +h = graphviz.Graph(node_attr=node_attr, graph_attr=graph_attr) + +# Add start and end nodes +for i in range(4): + h.node(f's{i}', shape="plaintext", label=f'{i}', group=f"{i}") + h.node(f'e{i}', style='invis', group=f"{i}") + +# Add nodes +h.node('X', group="0") +h.node('Y', group="1") + +# Add hamevo and its nodes +hamevo = graphviz.Graph(name='cluster_hamevo', graph_attr=hamevo_cluster_attr) +for i in range(4): + hamevo.node(f'z{i}', shape="box", style="invis", label=f'{i}', group=f"{i}") +h.subgraph(hamevo) + +# Add rx gates cluster and its nodes +cluster_attr = {"label": "RX gates"} +cluster_attr.update(default_cluster_attr) +cluster = graphviz.Graph(name="cluster_0", graph_attr=cluster_attr) +cluster.node('RX(x)', group="2") +cluster.node('RX(0.5)', group="3") +h.subgraph(cluster) + +h.node('cnot0', label='', shape='point', width='0.1', group='0') +h.node('cnot1', label='X', group='1') +h.node('cnot2', label='', shape='point', width='0.1', group='2') +h.node('cnot3', label='', shape='point', width='0.1', group='3') + +# Add edges +h.edge('s0', 'X') +h.edge('X', 'cnot0') +h.edge('cnot0', 'z0', lhead='cluster_hamevo') +h.edge('z0', 'e0', ltail='cluster_hamevo') +h.edge('s1', 'Y') +h.edge('Y', 'cnot1') +h.edge('cnot1', 'z1', lhead='cluster_hamevo') +h.edge('z1', 'e1', ltail='cluster_hamevo') +h.edge('s2', 'RX(x)') +h.edge('RX(x)', 'cnot2') +h.edge('cnot2', 'z2', lhead='cluster_hamevo') +h.edge('z2', 'e2', ltail='cluster_hamevo') +h.edge('s3', 'RX(0.5)') +h.edge('RX(0.5)', 'cnot3') +h.edge('cnot3', 'z3', lhead='cluster_hamevo') +h.edge('z3', 
'e3', ltail='cluster_hamevo') +h.edge('cnot1', 'cnot0', constraint='false') # constraint: false is needed to draw vertical edges +h.edge('cnot1', 'cnot2', constraint='false') # constraint: false is needed to draw vertical edges +h.edge('cnot1', 'cnot3', constraint='false') # constraint: false is needed to draw vertical edges +h +``` + +### Example of cluster of clusters +```python exec="on" source="material-block" result="json" session="draw-dev" +# Define graph +h = graphviz.Graph(node_attr=node_attr, graph_attr=graph_attr) + +# Define start and end nodes +for i in range(4): + h.node(f's{i}', shape="plaintext", label=f'{i}', group=f"{i}") + h.node(f'e{i}', style='invis', group=f"{i}") + +# Define outer cluster +cluster_attr = {"label": "Outer cluster"} +cluster_attr.update(default_cluster_attr) +outer_cluster = graphviz.Graph(name="cluster_outer", graph_attr=cluster_attr) + +# Define inner cluster 1 and its nodes +cluster_attr = {"label": "Inner cluster 1"} +cluster_attr.update(default_cluster_attr) +inner1_cluster = graphviz.Graph(name="cluster_inner1", graph_attr=cluster_attr) +inner1_cluster.node("a0", group="0") +inner1_cluster.node("a1", group="1") +outer_cluster.subgraph(inner1_cluster) + +# Define inner cluster 2 and its nodes +cluster_attr = {"label": "Inner cluster 2"} +cluster_attr.update(default_cluster_attr) +inner2_cluster = graphviz.Graph(name="cluster_inner2", graph_attr=cluster_attr) +inner2_cluster.node("a2", group="2") +inner2_cluster.node("a3", group="3") +outer_cluster.subgraph(inner2_cluster) + +# This has to be done here, after inner clusters definitions +h.subgraph(outer_cluster) + +# Define more nodes +for i in range(4): + h.node(f"b{i}", group=f"{i}") + +for i in range(4): + h.edge(f's{i}', f'a{i}') + h.edge(f'a{i}', f'b{i}') + h.edge(f'b{i}', f'e{i}') + +h +``` diff --git a/docs/digital_analog_qc/analog-basics.md b/docs/digital_analog_qc/analog-basics.md new file mode 100644 index 000000000..f4f310b67 --- /dev/null +++ 
b/docs/digital_analog_qc/analog-basics.md @@ -0,0 +1,200 @@ +# Digital-Analog Emulation + +!!! note "TL;DR: Automatic emulation in the `pyqtorch` backend" + + All analog blocks are automatically translated to their emulated version when running them + with the `pyqtorch` backend (by calling `add_interaction` on them under the hood): + + ```python exec="on" source="material-block" result="json" + import torch + from qadence import Register, AnalogRX, sample + + reg = Register.from_coordinates([(0,0), (0,5)]) + print(sample(reg, AnalogRX(torch.pi))) + ``` + + +Qadence includes primitives for the simple construction of ising-like +Hamiltonians to account for the interaction among qubits. This allows to +simulate systems closer to real quantum computing platforms such as +neutral atoms. The constructed Hamiltonians are of the form + +$$ +\mathcal{H} = \sum_{i} \frac{\hbar\Omega}{2} \hat\sigma^x_i - \sum_{i} \hbar\delta \hat n_i + \mathcal{H}_{int}, +$$ + + +where $\hat n = \frac{1-\hat\sigma_z}{2}$, and $\mathcal{H}_{int}$ is a pair-wise interaction term. + + +We currently have two central operations that can be used to compose analog programs. + +- [`WaitBlock`][qadence.blocks.analog.WaitBlock] for interactions +- [`ConstantAnalogRotation`][qadence.blocks.analog.ConstantAnalogRotation] + +Both are _time-independent_ and can be emulated by calling `add_interaction`. + +To compose analog blocks you can use `chain` and `kron` as usual with the following restrictions: + +- [`AnalogChain`][qadence.blocks.analog.AnalogChain]s can only be constructed from AnalogKron blocks + or _**globally supported**_ primitive, analog blocks. +- [`AnalogKron`][qadence.blocks.analog.AnalogKron]s can only be constructed from _**non-global**_, + analog blocks with the _**same duration**_. 
+ +The `wait` operation can be emulated with an *Ising* or an $XY$-interaction: + +```python exec="on" source="material-block" result="json" +from qadence import Register, wait, add_interaction, run + +block = wait(duration=3000) +print(block) + +print("") # markdown-exec: hide +reg = Register.from_coordinates([(0,0), (0,5)]) # we need atomic distances +emulated = add_interaction(reg, block, interaction="XY") # or: interaction="Ising" +print(emulated.generator) +``` + + +The `AnalogRot` constructor can create any constant (in time), analog rotation. + +```python exec="on" source="material-block" result="json" +import torch +from qadence import AnalogRot, AnalogRX + +# implement a global RX rotation +block = AnalogRot( + duration=1000., # [ns] + omega=torch.pi, # [rad/μs] + delta=0, # [rad/μs] + phase=0, # [rad] +) +print(block) + +# or use the short hand +block = AnalogRX(torch.pi) +print(block) +``` + +Analog blocks can also be `chain`ed, and `kron`ed like all other blocks, but with two small caveats: + +```python exec="on" source="material-block" +import torch +from qadence import AnalogRot, kron, chain, wait + +# only blocks with the same `duration` can be `kron`ed +kron( + wait(duration=1000, qubit_support=(0,1)), + AnalogRot(duration=1000, omega=2.0, qubit_support=(2,3)) +) + +# only blocks with `"global"` or the same qubit support can be `chain`ed +chain(wait(duration=200), AnalogRot(duration=300, omega=2.0)) +``` + +!!! note "Composing digital & analog blocks" + You can also compose digital and analog blocks where the additional restrictions of `chain`/`kron` + only apply to composite blocks which only contain analog blocks. For more details/examples, see + [`AnalogChain`][qadence.blocks.analog.AnalogChain] and [`AnalogKron`][qadence.blocks.analog.AnalogKron]. + + +## Fitting a simple function + +Just as most other blocks, analog blocks can be parametrized, and thus we can build a +small ansatz which can fit a sine wave. 
When using the `pyqtorch` backend the +`add_interaction` function is called automatically. As usual, we can choose which +differentiation backend we want to use: autodiff or parameter shift rule (PSR). + +First we define an ansatz block and an observable +```python exec="on" source="material-block" session="sin" +import torch +from qadence import Register, FeatureParameter, VariationalParameter +from qadence import AnalogRX, AnalogRZ, Z +from qadence import wait, chain, add + +pi = torch.pi + +# two qubit register +reg = Register.from_coordinates([(0, 0), (0, 12)]) + +# analog ansatz with input parameter +t = FeatureParameter("t") +block = chain( + AnalogRX(pi / 2), + AnalogRZ(t), + wait(1000 * VariationalParameter("theta", value=0.5)), + AnalogRX(pi / 2), +) + +# observable +obs = add(Z(i) for i in range(reg.n_qubits)) +``` + +```python exec="on" session="sin" +def plot(ax, x, y, **kwargs): + xnp = x.detach().cpu().numpy().flatten() + ynp = y.detach().cpu().numpy().flatten() + ax.plot(xnp, ynp, **kwargs) + +def scatter(ax, x, y, **kwargs): + xnp = x.detach().cpu().numpy().flatten() + ynp = y.detach().cpu().numpy().flatten() + ax.scatter(xnp, ynp, **kwargs) +``` + +Then we define the dataset we want to train on and plot the initial prediction. 
+```python exec="on" source="material-block" html="1" result="json" session="sin" +import matplotlib.pyplot as plt +from qadence import QuantumCircuit, QuantumModel + +# define quantum model; including digital-analog emulation +circ = QuantumCircuit(reg, block) +model = QuantumModel(circ, obs, diff_mode="gpsr") + +x_train = torch.linspace(0, 6, steps=30) +y_train = -0.64 * torch.sin(x_train + 0.33) + 0.1 +y_pred_initial = model.expectation({"t": x_train}) + +fig, ax = plt.subplots() +scatter(ax, x_train, y_train, label="Training points", marker="o", color="green") +plot(ax, x_train, y_pred_initial, label="Initial prediction") +plt.legend() +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(fig)) # markdown-exec: hide +``` + +The rest is the usual PyTorch training routine. +```python exec="on" source="material-block" html="1" result="json" session="sin" +mse_loss = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=5e-2) + + +def loss_fn(x_train, y_train): + return mse_loss(model.expectation({"t": x_train}).squeeze(), y_train) + + +# train +n_epochs = 200 + +for i in range(n_epochs): + optimizer.zero_grad() + + loss = loss_fn(x_train, y_train) + loss.backward() + optimizer.step() + + # if (i + 1) % 10 == 0: + # print(f"Epoch {i+1:0>3} - Loss: {loss.item()}\n") + +# visualize +y_pred = model.expectation({"t": x_train}) + +fig, ax = plt.subplots() +scatter(ax, x_train, y_train, label="Training points", marker="o", color="green") +plot(ax, x_train, y_pred_initial, label="Initial prediction") +plot(ax, x_train, y_pred, label="Final prediction") +plt.legend() +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(fig)) # markdown-exec: hide +assert loss_fn(x_train, y_train) < 0.05 # markdown-exec: hide +``` diff --git a/docs/digital_analog_qc/analog-qubo.md b/docs/digital_analog_qc/analog-qubo.md new file mode 100644 index 000000000..301adac53 --- /dev/null +++ 
b/docs/digital_analog_qc/analog-qubo.md @@ -0,0 +1,195 @@ +In this notebook we solve a quadratic unconstrained optimization problem with +`qadence` emulated analog interface using the QAOA variational algorithm. The +problem is detailed in the Pulser documentation +[here](https://pulser.readthedocs.io/en/stable/tutorials/qubo.html). + + +??? note "Construct QUBO register (defines `qubo_register_coords` function)" + Before we start we have to define a register that fits into our device. + ```python exec="on" source="material-block" session="qubo" + import torch + import numpy as np + from scipy.optimize import minimize + from scipy.spatial.distance import pdist, squareform + + from pulser.devices import Chadoq2 + + seed = 0 + np.random.seed(seed) + torch.manual_seed(seed) + + + def qubo_register_coords(Q): + """Compute coordinates for register.""" + bitstrings = [np.binary_repr(i, len(Q)) for i in range(len(Q) ** 2)] + costs = [] + # this takes exponential time with the dimension of the QUBO + for b in bitstrings: + z = np.array(list(b), dtype=int) + cost = z.T @ Q @ z + costs.append(cost) + zipped = zip(bitstrings, costs) + sort_zipped = sorted(zipped, key=lambda x: x[1]) + + def evaluate_mapping(new_coords, *args): + """Cost function to minimize. 
Ideally, the pairwise + distances are conserved""" + Q, shape = args + new_coords = np.reshape(new_coords, shape) + new_Q = squareform(Chadoq2.interaction_coeff / pdist(new_coords) ** 6) + return np.linalg.norm(new_Q - Q) + + shape = (len(Q), 2) + costs = [] + np.random.seed(0) + x0 = np.random.random(shape).flatten() + res = minimize( + evaluate_mapping, + x0, + args=(Q, shape), + method="Nelder-Mead", + tol=1e-6, + options={"maxiter": 200000, "maxfev": None}, + ) + return [(x, y) for (x, y) in np.reshape(res.x, (len(Q), 2))] + ``` + + +## Define and solve QUBO + +```python exec="on" source="material-block" session="qubo" +import matplotlib.pyplot as plt +import numpy as np +import torch + +from qadence import add_interaction, chain +from qadence import QuantumModel, QuantumCircuit, AnalogRZ, AnalogRX, Register + +seed = 0 +np.random.seed(seed) +torch.manual_seed(seed) +``` + +The QUBO is defined by weighted connections `Q` and a cost function. + +```python exec="on" source="material-block" session="qubo" +def cost_colouring(bitstring, Q): + z = np.array(list(bitstring), dtype=int) + cost = z.T @ Q @ z + return cost + + +def cost_fn(counter, Q): + cost = sum(counter[key] * cost_colouring(key, Q) for key in counter) + return cost / sum(counter.values()) # Divide by total samples + + +Q = np.array( + [ + [-10.0, 19.7365809, 19.7365809, 5.42015853, 5.42015853], + [19.7365809, -10.0, 20.67626392, 0.17675796, 0.85604541], + [19.7365809, 20.67626392, -10.0, 0.85604541, 0.17675796], + [5.42015853, 0.17675796, 0.85604541, -10.0, 0.32306662], + [5.42015853, 0.85604541, 0.17675796, 0.32306662, -10.0], + ] +) +``` + +Build a register from graph extracted from the QUBO exactly +as you would do with Pulser. +```python exec="on" source="material-block" session="qubo" +reg = Register.from_coordinates(qubo_register_coords(Q)) +``` + +The analog circuit is composed of two global rotations per layer. 
The first +rotation corresponds to the mixing Hamiltonian and the second one to the +embedding Hamiltonian. Subsequently we add the Ising interaction term to +emulate the analog circuit. This uses a principal quantum number n=70 for the +Rydberg level under the hood. +```python exec="on" source="material-block" result="json" session="qubo" +from qadence.transpile.emulate import ising_interaction + +LAYERS = 2 +block = chain(*[AnalogRX(f"t{i}") * AnalogRZ(f"s{i}") for i in range(LAYERS)]) + +emulated = add_interaction( + reg, block, interaction=lambda r, ps: ising_interaction(r, ps, rydberg_level=70) +) +print(emulated) +``` + +Sample the model to get the initial solution. +```python exec="on" source="material-block" session="qubo" +model = QuantumModel(QuantumCircuit(reg, emulated), backend="pyqtorch", diff_mode='gpsr') +initial_counts = model.sample({}, n_shots=1000)[0] +``` + +The loss function is defined by averaging over the evaluated bitstrings. +```python exec="on" source="material-block" session="qubo" +def loss(param, *args): + Q = args[0] + param = torch.tensor(param) + model.reset_vparams(param) + C = model.sample({}, n_shots=1000)[0] + return cost_fn(C, Q) +``` +Here we use a gradient-free optimization loop for reaching the optimal solution. 
+```python exec="on" source="material-block" result="json" session="qubo" +# +for i in range(20): + try: + res = minimize( + loss, + args=Q, + x0=np.random.uniform(1, 10, size=2 * LAYERS), + method="COBYLA", + tol=1e-8, + options={"maxiter": 20}, + ) + except Exception: + pass + +# sample the optimal solution +model.reset_vparams(res.x) +optimal_count_dict = model.sample({}, n_shots=1000)[0] +print(optimal_count_dict) +``` + +```python exec="on" source="material-block" html="1" session="qubo" +fig, axs = plt.subplots(1, 2, figsize=(12, 4)) + +# known solutions to the QUBO +solution_bitstrings=["01011", "00111"] + +n_to_show = 20 +xs, ys = zip(*sorted( + initial_counts.items(), + key=lambda item: item[1], + reverse=True +)) +colors = ["r" if x in solution_bitstrings else "g" for x in xs] + +axs[0].set_xlabel("bitstrings") +axs[0].set_ylabel("counts") +axs[0].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors) +axs[0].tick_params(axis="x", labelrotation=90) +axs[0].set_title("Initial solution") + +xs, ys = zip(*sorted(optimal_count_dict.items(), + key=lambda item: item[1], + reverse=True +)) +# xs = list(xs) # markdown-exec: hide +# assert (xs[0] == "01011" and xs[1] == "00111") or (xs[1] == "01011" and xs[0] == "00111"), print(f"{xs=}") # markdown-exec: hide + +colors = ["r" if x in solution_bitstrings else "g" for x in xs] + +axs[1].set_xlabel("bitstrings") +axs[1].set_ylabel("counts") +axs[1].bar(xs[:n_to_show], ys[:n_to_show], width=0.5, color=colors) +axs[1].tick_params(axis="x", labelrotation=90) +axs[1].set_title("Optimal solution") +plt.tight_layout() +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(fig)) # markdown-exec: hide +``` diff --git a/docs/digital_analog_qc/daqc-basics.md b/docs/digital_analog_qc/daqc-basics.md new file mode 100644 index 000000000..d20bf9ad9 --- /dev/null +++ b/docs/digital_analog_qc/daqc-basics.md @@ -0,0 +1,28 @@ +# Digital-Analog Quantum Computation + +_**Digital-analog quantum 
computation**_ (DAQC) is a universal quantum computing +paradigm[^1], based on two primary computations: + +- Fast single-qubit operations (digital). +- Multi-partite entangling operations acting on all qubits (analog). + +The DAQC paradigm is typically implemented on quantum computing hardware based on neutral atoms where both these computations are realizable. + +## Digital-Analog Emulation + +Qadence simplifies the execution of DAQC programs on either emulated or real neutral-atom devices +by providing a simplified interface for customizing interactions and interfacing +with pulse-level programming in `Pulser`[^3]. + +## Digital-Analog Transformation + +Furthermore, the essence of digital-analog computation is the ability to represent any analog operation, _i.e._ any arbitrary Hamiltonian, using an +auxiliary device-amenable Hamiltonian, such as the ubiquitous Ising model[^2]. This is at the core of the DAQC implementation in Qadence. + +## References + +[^1]: [Dodd _et al._, Universal quantum computation and simulation using any entangling Hamiltonian and local unitaries, PRA 65, 040301 (2002).](https://arxiv.org/abs/quant-ph/0106064) + +[^2]: [Parra-Rodriguez _et al._, Digital-Analog Quantum Computation, PRA 101, 022305 (2020).](https://arxiv.org/abs/1812.03637) + +[^3]: [Pulser: An open-source package for the design of pulse sequences in programmable neutral-atom arrays](https://pulser.readthedocs.io/en/stable/) diff --git a/docs/digital_analog_qc/daqc-cnot.md b/docs/digital_analog_qc/daqc-cnot.md new file mode 100644 index 000000000..a3e3db380 --- /dev/null +++ b/docs/digital_analog_qc/daqc-cnot.md @@ -0,0 +1,265 @@ +# DAQC Transform + +Digital-analog quantum computing focuses on using simple digital gates combined with more complex and device-dependent analog interactions to represent quantum programs. Such techniques have been shown to be universal for quantum computation [^1].
However, while this approach may have advantages when adapting quantum programs to real devices, known quantum algorithms are very often expressed in a fully digital paradigm. As such, it is also important to have concrete ways to transform from one paradigm to another. + +In this tutorial we will exemplify this transformation starting with the representation of a simple digital CNOT using the universality of the Ising Hamiltonian [^2]. + +## CNOT with CPHASE + +Let's look at a single example of how the digital-analog transformation can be used to perform a CNOT on two qubits inside a register of globally interacting qubits. + +First, note that the CNOT can be decomposed with two Hadamard and a CPHASE gate with $\phi=\pi$: + + +```python exec="on" source="material-block" result="json" session="daqc-cnot" +import torch +import qadence as qd + +from qadence.draw import display +from qadence import X, I, Z, H, N, CPHASE, CNOT, HamEvo +from qadence.draw import html_string # markdown-exec: hide + +n_qubits = 2 + +# CNOT gate +cnot_gate = CNOT(0, 1) + +# CNOT decomposed +phi = torch.pi +cnot_decomp = qd.chain(H(1), CPHASE(0, 1, phi), H(1)) + +init_state = qd.product_state("10") + +print(qd.sample(n_qubits, block = cnot_gate, state = init_state, n_shots = 100)) +print(qd.sample(n_qubits, block = cnot_decomp, state = init_state, n_shots = 100)) +``` + +The CPHASE gate is fully diagonal, and can be implemented by exponentiating an Ising-like Hamiltonian, or *generator*, + +$$\text{CPHASE}(i,j,\phi)=\text{exp}\left(-i\phi \mathcal{H}_\text{CP}(i, j)\right)$$ + +$$\begin{aligned} +\mathcal{H}_\text{CP}&=-\frac{1}{4}(I_i-Z_i)(I_j-Z_j)\\ +&=-N_iN_j +\end{aligned}$$ + +where we used the number operator $N_i = \frac{1}{2}(I_i-Z_i)$, leading to an Ising-like interaction $N_iN_j$ that is common in neutral-atom systems. Let's rebuild the CNOT using this evolution. 
+ +```python exec="on" source="material-block" session="daqc-cnot" +# Hamiltonian for the CPHASE gate +h_cphase = (-1.0) * qd.kron(N(0), N(1)) + +# Exponentiating the Hamiltonian +cphase_evo = HamEvo(h_cphase, phi) + +# Check that we have the CPHASE gate: +cphase_matrix = qd.block_to_tensor(CPHASE(0, 1, phi)) +cphase_evo_matrix = qd.block_to_tensor(cphase_evo) + +assert torch.allclose(cphase_matrix, cphase_evo_matrix) +``` + +Now that we have checked the generator of the CPHASE gate, we can use it to apply the CNOT: + + +```python exec="on" source="material-block" result="json" session="daqc-cnot" +# CNOT with Hamiltonian Evolution +cnot_evo = qd.chain( + H(1), + cphase_evo, + H(1) +) + +init_state = qd.product_state("10") + +print(qd.sample(n_qubits, block = cnot_gate, state = init_state, n_shots = 100)) +print(qd.sample(n_qubits, block = cnot_evo, state = init_state, n_shots = 100)) +``` + +Thus, a CNOT gate can be applied by combining a few single-qubit gates together with a 2-qubit Ising interaction between the control and the target qubit. This is important because it now allows us to exemplify the usage of the Ising transform proposed in the DAQC paper [^2]. In the paper, the transform is described for $ZZ$ interactions. In `qadence` it works both with $ZZ$ and $NN$ interactions. + +## CNOT in an interacting system of 3 qubits + +Consider a simple experimental setup with $n=3$ interacting qubits in a triangular grid. For simplicity let's consider that all qubits interact with each other with an Ising ($NN$) interaction of constant strength $g_\text{int}$. 
The Hamiltonian for the system can be written by summing this interaction over all pairs: + +$$\mathcal{H}_\text{sys}=\sum_{i=0}^{n}\sum_{j=0}^{i-1}g_\text{int}N_iN_j,$$ + +which in this case leads to only three interaction terms, + +$$\mathcal{H}_\text{sys}=g_\text{int}(N_0N_1+N_1N_2+N_0N_2)$$ + +This generator can be easily built: + + +```python exec="on" source="material-block" result="json" session="daqc-cnot" +n_qubits = 3 + +g_int = 1.0 + +interaction_list = [] +for i in range(n_qubits): + for j in range(i): + interaction_list.append(g_int * qd.kron(N(i), N(j))) + +h_sys = qd.add(*interaction_list) + +print(h_sys) +``` + +Now let's consider that the experimental system is fixed, and we cannot isolate the qubits from each other. All we can do is the following: + +- Turn on or off the global system Hamiltonian. +- Perform single-qubit rotations on individual qubits. + +How can we perform a CNOT on two specific qubits of our choice? + +To perform a *fully digital* CNOT we would need to isolate the control and target qubit from the third one and have those interact to implement the gate directly. While this may be relatively simple for a 3-qubit system, the experimental burden becomes much greater when we start going into the dozens of qubits. + +However, with the digital-analog paradigm that is not the case! In fact, we can represent the two qubit Ising interaction required for the CNOT by combining the global system Hamiltonian with a specific set of single-qubit rotations. The full details of this transformation are described in the DAQC paper [^2], and it is available in `qadence` by calling the `daqc_transform` function. 
+ +The `daqc_transform` function will essentially return a program that represents the evolution of an Hamiltonian $H_\text{target}$ (*target Hamiltonian*) for a specified time $t_f$ by using only the evolution of an Hamiltonian $H_\text{build}$ (*build Hamiltonian*) for specific intervals of time together with specific single-qubit $X$ rotations. Currently, in `qadence` it is available for resource and target Hamiltonians composed only of $ZZ$ or $NN$ interactions. The generators are parsed by the `daqc_transform` function, the appropriate type is automatically determined, and the appropriate single-qubit detunings and global phases are applied. + +Let's exemplify it for our CNOT problem: + + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +# The target operation +i = 0 # Control +j = 1 # Target +k = 2 # The extra qubit + +# CNOT on control and target, Identity on the extra qubit +cnot_target = qd.kron(CNOT(i, j), I(k)) + +# The two-qubit Ising (NN) interaction for the CPHASE +h_int = (-1.0) * qd.kron(N(i), N(j)) + +# Transforming the two-qubit Ising interaction using only our system Hamiltonian +transformed_ising = qd.daqc_transform( + n_qubits = 3, # Total number of qubits in the transformation + gen_target = h_int, # The target Ising generator + t_f = torch.pi, # The target evolution time + gen_build = h_sys, # The building block Ising generator to be used + strategy = "sDAQC", # Currently only sDAQC is implemented + ignore_global_phases = False # Global phases from mapping between Z and N +) + +# display(transformed_ising) +print(html_string(transformed_ising)) # markdown-exec: hide +``` + +The circuit above actually only uses two evolutions of the global Hamiltonian. In the displayed circuit also see other instances of `HamEvo` which account for global-phases and single-qubit detunings related to the mapping between the $Z$ and $N$ operator. 
Optionally, the application of the global phases can also be ignored, as shown in the input of `daqc_transform`. This will not create exactly the same state or operator matrix in tensor form, but in practice they will be equivalent. + +In general, the mapping of a $n$-qubit Ising Hamiltonian will require at most $n(n-1)$ evolutions. The transformed circuit performs these evolutions for specific times that are computed from the solution of a linear system of equations involving the set of interactions in the target and build Hamiltonians. + +In this case the mapping is exact, since we used the *step-wise* DAQC technique (sDAQC). In *banged* DAQC (bDAQC) the mapping is not exact, but is easier to implement on a physical device with always-on interactions such as neutral-atom systems. Currently, only the sDAQC technique is available in `qadence`. + +Just as before, we can check that using the transformed Ising circuit we exactly recover the CPHASE gate: + + +```python exec="on" source="material-block" session="daqc-cnot" +# CPHASE on (i, j), Identity on third qubit: +cphase_matrix = qd.block_to_tensor(qd.kron(CPHASE(i, j, phi), I(k))) + +# CPHASE using the transformed circuit: +cphase_evo_matrix = qd.block_to_tensor(transformed_ising) + +# Will fail if global phases are ignored: +assert torch.allclose(cphase_matrix, cphase_evo_matrix) +``` + +And we can now build the CNOT gate: + +```python exec="on" source="material-block" result="json" session="daqc-cnot" +cnot_daqc = qd.chain( + H(j), + transformed_ising, + H(j) +) + +# And finally run the CNOT on a specific 3-qubit initial state: +init_state = qd.product_state("101") + +# Check we get an equivalent wavefunction (will still pass if global phases are ignored) +wf_cnot = qd.run(n_qubits, block = cnot_target, state = init_state) +wf_daqc = qd.run(n_qubits, block = cnot_daqc, state = init_state) +assert qd.equivalent_state(wf_cnot, wf_daqc) + +# Visualize the CNOT bit-flip: +print(qd.sample(n_qubits, block = 
cnot_target, state = init_state, n_shots = 100)) +print(qd.sample(n_qubits, block = cnot_daqc, state = init_state, n_shots = 100)) +``` + +And we are done! We have effectively performed a CNOT operation on our desired target qubits by using only the global interaction of the system as the building block Hamiltonian, together with single-qubit rotations. Going through the trouble of decomposing a single digital gate into its Ising Hamiltonian is certainly not very practical, but it serves as a proof of principle for the potential of this technique to represent universal quantum computation. In the next example, we will see it applied to the digital-analog Quantum Fourier Transform. + +## Technical details on the DAQC transformation + +- The mapping between target generator and final circuit is performed by solving a linear system of size $n(n-1)$ where $n$ is the number of qubits, so it can be computed *efficiently* (i.e., with a polynomial cost in the number of qubits). +- The linear system to be solved is actually not invertible for $n=4$ qubits. This is a very specific edge case requiring a workaround, which is currently not yet implemented. +- As mentioned, the final circuit has at most $n(n-1)$ slices, so there is at most a polynomial overhead in circuit depth. + +Finally, and most importantly for its usage: + +- The target Hamiltonian should be *sufficiently* represented in the building block Hamiltonian.
+ +To illustrate this point, consider the following target and build Hamiltonians: + +```python exec="on" source="material-block" session="daqc-cnot" +# Interaction between qubits 0 and 1 +gen_target = 1.0 * (Z(0) @ Z(1)) + +# Fixed interaction between qubits 1 and 2, and customizable between 0 and 1 +def gen_build(g_int): + return g_int * (Z(0) @ Z(1)) + 1.0 * (Z(1) @ Z(2)) +``` + +And now we perform the DAQC transform by setting `g_int = 1.0`, matching the target Hamiltonian: + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +transformed_ising = qd.daqc_transform( + n_qubits = 3, + gen_target = gen_target, + t_f = 1.0, + gen_build = gen_build(g_int = 1.0), +) + +# display(transformed_ising) +print(html_string(transformed_ising)) # markdown-exec: hide +``` + +And we get the transformed circuit. What if our build Hamiltonian has a very weak interaction between qubits 0 and 1? + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +transformed_ising = qd.daqc_transform( + n_qubits = 3, + gen_target = gen_target, + t_f = 1.0, + gen_build = gen_build(g_int = 0.001), +) + +# display(transformed_ising) +print(html_string(transformed_ising)) # markdown-exec: hide +``` + +As we can see, to represent the same interaction between 0 and 1, the slices using the build Hamiltonian need to evolve for much longer, since the target interaction is not sufficiently represented in the building block Hamiltonian. 
+ +In the limit where that interaction is not present at all, the transform will not work: + + +```python exec="on" source="material-block" result="json" session="daqc-cnot" +try: + transformed_ising = qd.daqc_transform( + n_qubits = 3, + gen_target = gen_target, + t_f = 1.0, + gen_build = gen_build(g_int = 0.0), + ) +except ValueError as error: + print("Error:", error) +``` + +## References + +[^1]: [Dodd et al., Universal quantum computation and simulation using any entangling Hamiltonian and local unitaries, PRA 65, 040301 (2002).](https://arxiv.org/abs/quant-ph/0106064) + +[^2]: [Parra-Rodriguez et al., Digital-Analog Quantum Computation, PRA 101, 022305 (2020).](https://arxiv.org/abs/1812.03637) diff --git a/docs/digital_analog_qc/daqc-qft.md b/docs/digital_analog_qc/daqc-qft.md new file mode 100644 index 000000000..5b6936efe --- /dev/null +++ b/docs/digital_analog_qc/daqc-qft.md @@ -0,0 +1,270 @@ +# Digital-Analog QFT (Advanced) + +Following the work in the DAQC paper [^1], the authors also proposed an algorithm using this technique to perform the well-known Quantum Fourier Transform [^2]. In this tutorial we will go over how the Ising transform used in the DAQC technique can be used to recreate the results for the DA-QFT. + +## The (standard) digital QFT + +The standard Quantum Fourier Transform can be easily built in `qadence` by calling the `qft` function. 
It accepts three arguments: + +- `reverse_in` (default `False`): reverses the order of the input qubits +- `swaps_out` (default `False`): swaps the qubit states at the output +- `inverse` (default `False`): performs the inverse QFT + + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +import torch +import qadence as qd + +from qadence.draw import display +from qadence import X, I, Z, H, CPHASE, CNOT, HamEvo +from qadence.draw import html_string # markdown-exec: hide + +n_qubits = 4 + +qft_circuit = qd.qft(n_qubits) + +# display(qft_circuit) +print(html_string(qft_circuit)) # markdown-exec: hide +``` + +Most importantly, the circuit has a layered structure. The QFT for $n$ qubits has a total of $n$ layers, and each layer starts with a Hadamard gate on the first qubit and then builds a ladder of `CPHASE` gates. Let's see how we can easily build a function to replicate this circuit. + +```python exec="on" source="material-block" session="daqc-cnot" +def qft_layer(n_qubits, layer_ix): + qubit_range = range(layer_ix + 1, n_qubits) + # CPHASE ladder + cphases = [] + for j in qubit_range: + angle = torch.pi / (2 ** (j - layer_ix)) + cphases.append(CPHASE(j, layer_ix, angle)) + # Return Hadamard followed by CPHASEs + return qd.chain(H(layer_ix), *cphases) +``` + +With the layer function we can easily write the full QFT: + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +def qft_digital(n_qubits): + return qd.chain(qft_layer(n_qubits, i) for i in range(n_qubits)) + +qft_circuit = qft_digital(4) + +# display(qft_circuit) +print(html_string(qft_circuit)) # markdown-exec: hide +``` + +## Decomposing the CPHASE ladder + +As we already saw in the [previous DAQC tutorial](daqc-cnot.md), the CPHASE gate has a well-known decomposition into an Ising Hamiltonian. For the CNOT example, we used the decomposition into $NN$ interactions. 
However, here we will use the decomposition into $ZZ$ interactions to be consistent with the description in the original DA-QFT paper [^2]. The decomposition is the following: + +$$\text{CPHASE}(i,j,\phi)=\text{exp}\left(-i\phi H_\text{CP}(i, j)\right)$$ + +$$\begin{aligned} +H_\text{CP}&=-\frac{1}{4}(I_i-Z_i)(I_j-Z_j)\\ +&=-\frac{1}{4}(I_iI_j-Z_i-Z_j)-\frac{1}{4}Z_iZ_j +\end{aligned}$$ + +where the terms in $(I_iI_j-Z_i-Z_j)$ represents single-qubit rotations, while the interaction is given by the Ising term $Z_iZ_j$. + +Just as we did for the CNOT, to build the DA-QFT we need to write the CPHASE ladder as an Ising Hamiltonian. To do so, we again write the Hamiltonian consisting of the single-qubit rotations from all CPHASEs in the layer, as well as the Hamiltonian for the two-qubit Ising interactions so that we can then use the DAQC transformation. The full mathematical details for this are written in the paper [^2], and below we write the necessary code for it, using the same notation as in the paper, including indices running from 1 to N. + + +```python exec="on" source="material-block" session="daqc-cnot" +# The angle of the CPHASE used in the single-qubit rotations: +def theta(k): + """Eq. (16) from [^2].""" + return torch.pi / (2 ** (k + 1)) + +# The angle of the CPHASE used in the two-qubit interactions: +def alpha(c, k, m): + """Eq. (16) from [^2].""" + return torch.pi / (2 ** (k - m + 2)) if c == m else 0.0 +``` + +The first two functions represent the angles of the various `CPHASE` gates that will be used to build the qubit Hamiltonians representing each QFT layer. In the `alpha` function we include an implicit kronecker delta between the indices `m` and `c`, following the conventions and equations written in the paper [^2]. This is simply because when building the Hamiltonian the paper sums through all possible $n(n-1)$ interacting pairs, but only the pairs that are connected by a `CPHASE` in each QFT layer should have a non-zero interaction. 
+ + +```python exec="on" source="material-block" session="daqc-cnot" +# Building the generator for the single-qubit rotations +def build_sqg_gen(n_qubits, m): + """Generator in Eq. (13) from [^2] without the Hadamard.""" + k_sqg_range = range(2, n_qubits - m + 2) + sqg_gen_list = [] + for k in k_sqg_range: + sqg_gen = qd.kron(I(j) for j in range(n_qubits)) - Z(k+m-2) - Z(m-1) + sqg_gen_list.append(theta(k) * sqg_gen) + return sqg_gen_list + +# Building the generator for the two-qubit interactions +def build_tqg_gen(n_qubits, m): + """Generator in Eq. (14) from [^2].""" + k_tqg_range = range(2, n_qubits + 1) + tqg_gen_list = [] + for k in k_tqg_range: + for c in range(1, k): + tqg_gen = qd.kron(Z(c-1), Z(k-1)) + tqg_gen_list.append(alpha(c, k, m) * tqg_gen) + return tqg_gen_list +``` + +There's a lot to process in the above functions, and it might be worth taking some time to go through them with the help of the description in [^2]. + +Let's convince ourselves that they are doing what they are supposed to: perform one layer of the QFT using a decomposition of the CPHASE gates into an Ising Hamiltonian. 
We start by defining the function that will produce a given QFT layer: + + +```python exec="on" source="material-block" session="daqc-cnot" +def qft_layer_decomposed(n_qubits, layer_ix): + m = layer_ix + 1 # Paper index convention + + # Step 1: + # List of generator terms for the single-qubit rotations + sqg_gen_list = build_sqg_gen(n_qubits, m) + # Exponentiate the generator for single-qubit rotations: + sq_rotations = HamEvo(qd.add(*sqg_gen_list), -1.0) + + # Step 2: + # List of generator for the two-qubit interactions + ising_gen_list = build_tqg_gen(n_qubits, m) + # Exponentiating the Ising interactions: + ising_cphase = HamEvo(qd.add(*ising_gen_list), -1.0) + + # Add the explicit Hadamard to start followed by the Hamiltonian evolutions + if len(sqg_gen_list) > 0: + return qd.chain(H(layer_ix), sq_rotations, ising_cphase) + else: + # If the generator lists are empty returns just the Hadamard of the final layer + return H(layer_ix) +``` + +And now we build a layer of the QFT for both the digital and the decomposed case and check that they match: + +```python exec="on" source="material-block" session="daqc-cnot" +n_qubits = 3 +layer_ix = 0 + +# Building the layer with the digital QFT: +digital_layer_block = qft_layer(n_qubits, layer_ix) + +# Building the layer with the Ising decomposition: +decomposed_layer_block = qft_layer_decomposed(n_qubits, layer_ix) + +# Check that we get the same block in matrix form: +block_digital_matrix = qd.block_to_tensor(digital_layer_block) +block_decomposed_matrix = qd.block_to_tensor(decomposed_layer_block) + +assert torch.allclose(block_digital_matrix, block_decomposed_matrix) +``` + +## Performing the DAQC transformation + +We now have all the ingredients to build the Digital-Analog QFT: + +- In the [previous DAQC tutorial](daqc-cnot.md) we have learned about transforming an arbitrary Ising Hamiltonian into a program executing only a fixed, system-specific one. 
+- In this tutorial we have so far learned how to "extract" the arbitrary Ising Hamiltonian being used in each QFT layer. + +All that is left for us to do is to specify our system Hamiltonian, apply the DAQC transform, and build the Digital-Analog QFT layer function. + +For simplicity, we will once again consider an all-to-all Ising Hamiltonian with a constant interaction strength, but this step generalizes so any other Hamiltonian (given the limitations already discussed in the [previous DAQC tutorial](daqc-cnot.md)). + +```python exec="on" source="material-block" session="daqc-cnot" +def h_sys(n_qubits, g_int = 1.0): + interaction_list = [] + for i in range(n_qubits): + for j in range(i): + interaction_list.append(g_int * qd.kron(Z(i), Z(j))) + return qd.add(*interaction_list) +``` + +Now, all we have to do is re-write the qft layer function but replace Step 2. with the transformed evolution: + +```python exec="on" source="material-block" session="daqc-cnot" +def qft_layer_DAQC(n_qubits, layer_ix): + m = layer_ix + 1 # Paper index convention + + # Step 1: + # List of generator terms for the single-qubit rotations + sqg_gen_list = build_sqg_gen(n_qubits, m) + # Exponentiate the generator for single-qubit rotations: + sq_rotations = HamEvo(qd.add(*sqg_gen_list), -1.0) + + # Step 2: + # List of generator for the two-qubit interactions + ising_gen_list = build_tqg_gen(n_qubits, m) + # Transforming the target generator with DAQC: + gen_target = qd.add(*ising_gen_list) + + transformed_ising = qd.daqc_transform( + n_qubits = n_qubits, # Total number of qubits in the transformation + gen_target = gen_target, # The target Ising generator + t_f = -1.0, # The target evolution time + gen_build = h_sys(n_qubits), # The building block Ising generator to be used + ) + + # Add the explicit Hadamard to start followed by the Hamiltonian evolutions + if len(sqg_gen_list) > 0: + return qd.chain(H(layer_ix), sq_rotations, transformed_ising) + else: + # If the generator lists are 
empty returns just the Hadamard of the final layer + return H(layer_ix) +``` + +And finally, to convince ourselves that the results are correct, let's build the full DA-QFT and compare it with the digital version: + +```python exec="on" source="material-block" html="1" session="daqc-cnot" +def qft_digital_analog(n_qubits): + return qd.chain(qft_layer_DAQC(n_qubits, i) for i in range(n_qubits)) + +n_qubits = 3 + +digital_qft_block = qft_digital(n_qubits) + +daqc_qft_block = qft_digital_analog(n_qubits) + +# Check that we get the same block in matrix form: +block_digital_matrix = qd.block_to_tensor(digital_qft_block) +block_daqc_matrix = qd.block_to_tensor(daqc_qft_block) + +assert torch.allclose(block_digital_matrix, block_daqc_matrix) +``` + +And we can now display the program for the DA-QFT: + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" + +# display(daqc_qft_block) +print(html_string(daqc_qft_block)) # markdown-exec: hide +``` + +## The DA-QFT in `qadence`: + +The digital-analog QFT is available directly by using the `strategy` argument in the QFT: + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +n_qubits = 3 + +qft_circuit = qd.qft(n_qubits, strategy = qd.Strategy.SDAQC) + +# display(qft_circuit) +print(html_string(qft_circuit)) # markdown-exec: hide +``` + +Just like with the `daqc_transform`, we can pass a different build Hamiltonian to it for the analog blocks, including one composed of $NN$ interactions: + +```python exec="on" source="material-block" html="1" result="json" session="daqc-cnot" +from qadence import hamiltonian_factory, Interaction + +n_qubits = 3 + +gen_build = hamiltonian_factory(n_qubits, interaction = Interaction.NN) + +qft_circuit = qd.qft(n_qubits, strategy = qd.Strategy.SDAQC, gen_build = gen_build) + +# display(qft_circuit) +print(html_string(qft_circuit)) # markdown-exec: hide +``` + +## References + +[^1]: [Parra-Rodriguez et al., Digital-Analog 
Quantum Computation. PRA 101, 022305 (2020).](https://arxiv.org/abs/1812.03637) + +[^2]: [Martin, Ana, et al. Digital-analog quantum algorithm for the quantum Fourier transform. Phys. Rev. Research 2.1, 013012 (2020).](https://arxiv.org/abs/1906.07635) diff --git a/docs/digital_analog_qc/pulser-basic.md b/docs/digital_analog_qc/pulser-basic.md new file mode 100644 index 000000000..a51a65684 --- /dev/null +++ b/docs/digital_analog_qc/pulser-basic.md @@ -0,0 +1,299 @@ +!!! warning + This tutorial needs to be fixed. + + +Qadence offers a direct interface with Pulser[^1], an open-source pulse-level interface written in Python and specifically designed for programming neutral atom quantum computers. + +Using directly Pulser requires deep knowledge on pulse-level programming and on how neutral atom devices work. Qadence abstracts out this complexity by using the familiar block-based interface for building pulse sequences in Pulser while leaving the possibility +to directly manipulate them if required. + +!!! note + The Pulser backend is still experimental and the interface might change in the future. + +Let's see it in action. 
+ +## Default qubit interaction + +When simulating pulse sequences written using Pulser, the underlying Hamiltonian it +constructs is equivalent to a digital-analog quantum computing program with the following interaction +Hamiltonian (see [digital-analog emulation](analog-basics.md) for more details): + +$$ +\mathcal{H}_{int} = \sum_{i str: + buffer = StringIO() + fig.savefig(buffer, format="svg") + return buffer.getvalue() + + +def hardware_efficient_ansatz(n_qubits: int = 2, depth: int = 1) -> AbstractBlock: + return hea(n_qubits=n_qubits, depth=depth) + + +def digital_analog_ansatz( + h_generator: AbstractBlock, n_qubits: int = 2, depth: int = 1, t_evo: float = 1.0 +) -> AbstractBlock: + time_evolution = HamEvo(h_generator, t_evo) + + it = itertools.count() + ops = [] + for _ in range(depth): + layer = kron( + *[ + chain(*(gate(n, f"theta{next(it)}") for gate in [RX, RY, RX])) + for n in range(n_qubits) + ] + ) + ops.append(chain(layer, time_evolution)) + return chain(*ops) + + +def qcl_circuit(n_qubits: int = 2, depth: int = 1, use_digital_analog: bool = False): + # Chebyshev feature map with input parameter defined as non trainable + phi = Parameter("phi", trainable=False) + fm = chain(*[RY(i, phi) for i in range(n_qubits)]) + tag(fm, "feature_map") + + if not use_digital_analog: + # hardware-efficient ansatz + ansatz = hardware_efficient_ansatz(n_qubits=n_qubits, depth=depth) + else: + # Hamiltonian evolution ansatz (digital-analog) + t_evo = 3.0 # length of the time evolution + h_generator = add( + *[Z(i) for i in range(n_qubits)] + ) # use total magnetization as Hamiltonian + ansatz = digital_analog_ansatz(h_generator, n_qubits=n_qubits, depth=depth, t_evo=t_evo) + + tag(ansatz, "ansatz") + + # add a final fixed layer or rotations + fixed_layer = chain(*[RY(i, np.pi / 2) for i in range(n_qubits)]) + tag(fixed_layer, "fixed") + + blocks = [fm, ansatz, fixed_layer] + return QuantumCircuit(n_qubits, *blocks) + + +def qcl_training_data( + fn: Callable, domain: 
tuple = (0, 2 * np.pi), n_teacher: int = 100 +) -> tuple[torch.tensor, torch.tensor]: + start, end = domain + x_rand_np = np.sort(np.random.uniform(low=start, high=end, size=n_teacher)) + y_rand_np = fn(x_rand_np) + + x_rand = torch.tensor(x_rand_np) + y_rand = torch.tensor(y_rand_np) + + return x_rand, y_rand + + +def qcl_train_model( + model: QNN, x_train: torch.Tensor, y_train: torch.Tensor, n_epochs: int = 50, lr: float = 1.0 +) -> QNN: + mse_loss = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + + print(f"Initial loss: {mse_loss(model(x_train), y_train)}") + + for i in range(n_epochs): + optimizer.zero_grad() + + loss = mse_loss(model(x_train), y_train) + loss.backward() + optimizer.step() + + if (i + 1) % 10 == 0: + print(f"Epoch {i+1} training - Loss: {loss.item()}") + + return model diff --git a/docs/environment.yml b/docs/environment.yml new file mode 100644 index 000000000..52d7743a8 --- /dev/null +++ b/docs/environment.yml @@ -0,0 +1,19 @@ +name: readthedocs +channels: + - defaults +dependencies: + - python=3.10 + - python-graphviz + - pip + - pip: + - markdown-exec + - mkdocs-exclude + - mkdocs-jupyter + - mkdocs-material + - mkdocs-section-index==0.3.6 + - mkdocs==1.5.2 + - mkdocstrings + - mkdocstrings-python + - -e ../ + - pulser>=0.12.0 + - amazon-braket-sdk diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..94cdd545b --- /dev/null +++ b/docs/index.md @@ -0,0 +1,162 @@ +**Qadence** is a Python package that provides a simple interface to build _**digital-analog quantum +programs**_ with tunable qubit interaction defined on _**arbitrary register topologies**_ realizable on neutral atom devices. + +## Feature highlights + +* A [block-based system](tutorials/getting_started.md) for composing _**complex digital-analog + programs**_ in a flexible and scalable manner, inspired by the Julia quantum SDK + [Yao.jl](https://github.com/QuantumBFS/Yao.jl) and functional programming concepts. 
+ +* A [simple interface](digital_analog_qc/analog-basics.md) to work with _**interacting neutral-atom qubit systems**_ + using [arbitrary registers topologies](tutorials/register.md). + +* An intuitive [expression-based system](tutorials/parameters.md) developed on top of the symbolic library [Sympy](https://www.sympy.org/en/index.html) to construct _**parametric quantum programs**_ easily. + +* [High-order generalized parameter shift rules](advanced_tutorials/differentiability.md) for _**differentiating parametrized quantum operations**_. + +* Out-of-the-box _**automatic differentiability**_ of quantum programs with [PyTorch](https://pytorch.org/) integration. + +* _**Efficient execution**_ on a variety of different purpose backends: from state vector simulators to tensor network emulators and real devices. + +In following are some examples of Qadence possibilites in the digital, analog and digital-analog paradigms. + +## Sampling the canonical Bell state + +This example illustrates how to prepare a [Bell state](https://en.wikipedia.org/wiki/Bell_state) using digital gates and sampling from the outcome bitstring distribution: + +```python exec="on" source="material-block" result="json" +import torch # markdown-exec: hide +torch.manual_seed(0) # markdown-exec: hide +from qadence import CNOT, H, chain, sample + +# Preparing a Bell state by composing a Hadamard and CNOT gates in sequence. +bell_state = chain(H(0), CNOT(0,1)) + +# Sample with 100 shots. 
+samples = sample(bell_state, n_shots=100) +print(f"samples = {samples}") # markdown-exec: hide +from qadence.divergences import js_divergence # markdown-exec: hide +from collections import Counter # markdown-exec: hide +js = js_divergence(samples[0], Counter({"00":50, "11":50})) # markdown-exec: hide +assert js < 0.005 # markdown-exec: hide +``` + +## Analog emulation of a perfect state transfer + +This next example showcases the construction and sampling of a system that admits a perfect state transfer between the two edge qubits of a three qubit register laid out in a +line. This relies on time-evolving a Hamiltonian for a custom defined qubit interation until $t=\frac{\pi}{\sqrt 2}$. + +```python exec="on" source="material-block" result="json" +from torch import pi +from qadence import X, Y, HamEvo, Register, product_state, sample, add + +# Define the qubit-qubit interaction term. +def interaction(i, j): + return 0.5 * (X(i) @ X(j) + Y(i) @ Y(j)) # Compose gates in parallel and sum their contribution. + +# Initial state with left-most qubit in the 1 state. +init_state = product_state("100") + +# Define a register of 3 qubits laid out in a line. +register = Register.line(n_qubits=3) + +# Define an interaction Hamiltonian by summing interactions on indexed qubits. +# hamiltonian = interaction(0, 1) + interaction(1, 2) +hamiltonian = add(interaction(*edge) for edge in register.edges) + +# Define and time-evolve the Hamiltonian until t=pi/sqrt(2). +t = pi/(2**0.5) # Dimensionless. +evolution = HamEvo(hamiltonian, t) + +# Sample with 100 shots. 
+samples = sample(register, evolution, state=init_state, n_shots=100) +print(f"{samples = }") # markdown-exec: hide +from collections import Counter # markdown-exec: hide +assert samples[0] == Counter({"001": 100}) # markdown-exec: hide +``` + +## Digital-analog example + +This final example deals with the construction and sampling of an Ising Hamiltonian that includes a distance-based interaction between qubits and a global analog block of rotations around the $X$-axis. Here, _global_ has to be understood as applied to the whole register for qubits. + +```python exec="on" source="material-block" result="json" +from torch import pi +from qadence import Register, AnalogRX, sample + +# Global analog RX block. +block = AnalogRX(pi) + +# Almost non-interacting qubits as too far apart. +register = Register.from_coordinates([(0,0), (0,15)]) # Dimensionless. +samples = sample(register, block) +print(f"distance = 15: {samples = }") # markdown-exec: hide +from collections import Counter # markdown-exec: hide +from qadence.divergences import js_divergence # markdown-exec: hide +js = js_divergence(samples[0], Counter({"11": 100})) # markdown-exec: hide +assert js < 0.01 # markdown-exec: hide + +# Interacting qubits as close together. +register = Register.from_coordinates([(0,0), (0,5)]) +samples = sample(register, AnalogRX(pi)) +print(f"distance = 5: {samples = }") # markdown-exec: hide +js = js_divergence(samples[0], Counter({"01":33, "10":33, "00":33, "11":1})) # markdown-exec: hide +assert js < 0.05 # markdown-exec: hide``` +``` + +## Further resources + +For a more comprehensive introduction and advanced topics, please have a look at the following tutorials: + +* [Quantum state conventions](tutorials/state_conventions.md) used throughout Qadence. +* [Basic tutorials](tutorials/getting_started.md) for first hands-on. +* [Digital-analog basics](digital_analog_qc/analog-basics.md) to build quantum programs in the digital-analog paradigm. 
+* [Parametric quantum circuits](tutorials/parameters.md) for the generation and manipulation of parametric programs. +* [Advanced features](advanced_tutorials/differentiability.md) about low-level backend interface and differentiablity. +* [`QuantumModel`](advanced_tutorials/custom-models.md) for defining custom models. + +## Installation guide + +Qadence can be installed from PyPI with `pip` as follows: + +```bash +pip install qadence +``` + +The default backend for Qadence is [PyQTorch](https://github.com/pasqal-io/pyqtorch), a differentiable state vector simulator for digital-analog simulation. It is possible to install additional backends and the circuit visualization library using the following extras: + +* `braket`: the [Braket](https://github.com/amazon-braket/amazon-braket-sdk-python) backend. +* `pulser`: the [Pulser](https://github.com/pasqal-io/Pulser) backend for composing, simulating and executing pulse sequences for neutral-atom quantum devices. +* `visualization`: to display diagrammatically quantum circuits. + +by running: + +```bash +pip install qadence[braket, pulser, visualization] +``` + +!!! 
warning + In order to correctly install the `visualization` extra, the `graphviz` package needs to be installed + in your system: + + ```bash + # on Ubuntu + sudo apt install graphviz + + # on MacOS + brew install graphviz + + # via conda + conda install python-graphviz + ``` + +## Citation + +If you use Qadence for a publication, we kindly ask you to cite our work using the following BibTex entry: + +``` +@misc{qadence2023pasqal, + url = {https://github.com/pasqal-io/qadence}, + title = {Qadence: {A} {D}igital-analog quantum programming interface.}, + year = {2023} +} +``` diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js new file mode 100644 index 000000000..fd764a732 --- /dev/null +++ b/docs/javascripts/mathjax.js @@ -0,0 +1,16 @@ +window.MathJax = { + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + } +}; + +document$.subscribe(() => { + MathJax.typesetPromise() +}) diff --git a/docs/models.md b/docs/models.md new file mode 100644 index 000000000..0c7e4e49e --- /dev/null +++ b/docs/models.md @@ -0,0 +1,3 @@ +::: qadence.models.quantum_model + +::: qadence.models.qnn diff --git a/docs/qadence/blocks.md b/docs/qadence/blocks.md new file mode 100644 index 000000000..15a441853 --- /dev/null +++ b/docs/qadence/blocks.md @@ -0,0 +1,44 @@ +`qadence` offers a block-based system to construct quantum circuits in a flexible manner. + +::: qadence.blocks.abstract + +## Primitive blocks + +::: qadence.blocks.primitive + + +## Analog blocks + +To learn how to use analog blocks and how to mix digital & analog blocks, check out the +[digital-analog section](../digital_analog_qc/analog-basics.md) of the documentation. 
Examples of how to use digital-analog blocks can be found in the
`examples` folder of the qadence repo:
+ +### ::: qadence.ml_tools.config + +### ::: qadence.ml_tools.parameters + +### ::: qadence.ml_tools.optimize_step + +### ::: qadence.ml_tools.train_grad + +### ::: qadence.ml_tools.train_no_grad diff --git a/docs/qadence/operations.md b/docs/qadence/operations.md new file mode 100644 index 000000000..61aea5e97 --- /dev/null +++ b/docs/qadence/operations.md @@ -0,0 +1,155 @@ + +Operations are common [`PrimitiveBlocks`][qadence.blocks.primitive.PrimitiveBlock], these are often +called *gates* elsewhere. + +## Constant blocks + +::: qadence.operations.X + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.Y + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.Z + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.I + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.H + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.S + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.SDagger + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.SWAP + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.T + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.TDagger + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.CNOT + options: + show_root_heading: true + show_root_full_path: false + +!!! 
warning "CY gate not implemented" + +::: qadence.operations.CZ + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.CPHASE + options: + show_root_heading: true + show_root_full_path: false + +--- + +## Parametrized blocks + +::: qadence.operations.RX + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.RY + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.RZ + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.CRX + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.CRY + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.CRZ + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.PHASE + options: + show_root_heading: true + show_root_full_path: false + +--- + +## Hamiltonian Evolution + +::: qadence.operations.HamEvo + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.AnalogSWAP + options: + show_root_heading: true + show_root_full_path: false +!!! 
warning "AnalogSWAP should be turned into a proper analog block" + +--- + +## Analog blocks + +::: qadence.operations.AnalogRX + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.AnalogRY + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.AnalogRZ + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.AnalogRot + options: + show_root_heading: true + show_root_full_path: false + +::: qadence.operations.wait + options: + show_root_heading: true + show_root_full_path: false diff --git a/docs/qadence/parameters.md b/docs/qadence/parameters.md new file mode 100644 index 000000000..8b71f3891 --- /dev/null +++ b/docs/qadence/parameters.md @@ -0,0 +1,8 @@ +## Parameters + +### ::: qadence.parameters + + +## Parameter embedding + +::: qadence.blocks.embedding diff --git a/docs/qadence/quantumcircuit.md b/docs/qadence/quantumcircuit.md new file mode 100644 index 000000000..ad14cac36 --- /dev/null +++ b/docs/qadence/quantumcircuit.md @@ -0,0 +1,5 @@ +## QuantumCircuit + +The abstract `QuantumCircuit` is the key object in Qadence, as it is what can be executed. 
+ +### ::: qadence.circuit diff --git a/docs/qadence/register.md b/docs/qadence/register.md new file mode 100644 index 000000000..a01484a33 --- /dev/null +++ b/docs/qadence/register.md @@ -0,0 +1,3 @@ +## Quantum Registers + +### ::: qadence.register diff --git a/docs/qadence/serialization.md b/docs/qadence/serialization.md new file mode 100644 index 000000000..a0a193c5d --- /dev/null +++ b/docs/qadence/serialization.md @@ -0,0 +1,3 @@ +## Serialization + +### ::: qadence.serialization diff --git a/docs/qadence/states.md b/docs/qadence/states.md new file mode 100644 index 000000000..980305c15 --- /dev/null +++ b/docs/qadence/states.md @@ -0,0 +1,3 @@ +## State Preparation Routines + +### ::: qadence.states diff --git a/docs/qadence/transpile.md b/docs/qadence/transpile.md new file mode 100644 index 000000000..4999ba43b --- /dev/null +++ b/docs/qadence/transpile.md @@ -0,0 +1,9 @@ +Contains functions that operate on blocks and circuits to `transpile` them to new blocks/circuits. + +::: qadence.transpile.transpile + +::: qadence.transpile.block + +::: qadence.transpile.circuit + +::: qadence.transpile.emulate diff --git a/docs/qadence/types.md b/docs/qadence/types.md new file mode 100644 index 000000000..f6592f127 --- /dev/null +++ b/docs/qadence/types.md @@ -0,0 +1,3 @@ +## Qadence Types + +### ::: qadence.types diff --git a/docs/qml/index.md b/docs/qml/index.md new file mode 100644 index 000000000..6a873df74 --- /dev/null +++ b/docs/qml/index.md @@ -0,0 +1,92 @@ +Variational algorithms on noisy devices and quantum machine learning (QML) [^1] in particular are +the target applications for Qadence. For this purpose, the +library offers both flexible symbolic expressions for the +quantum circuit parameters via `sympy` (see [here](../tutorials/parameters.md) for more +details) and native automatic differentiation via integration with +[PyTorch](https://pytorch.org/) deep learning framework. 
+ +Qadence symbolic parameter interface allows creating +arbitrary feature maps to encode classical data into quantum circuits +with an arbitrary non-linear function embedding for the input values: + +```python exec="on" source="material-block" html="1" result="json" session="qml" +import qadence as qd +from qadence.operations import * +import torch +from sympy import acos + +n_qubits = 4 + +fp = qd.FeatureParameter("phi") +feature_map = qd.kron(RX(i, 2 * acos(fp)) for i in range(n_qubits)) + +# the key in the dictionary must correspond to +# the name assigned to the feature parameter +inputs = {"phi": torch.rand(3)} +samples = qd.sample(feature_map, values=inputs) +print(samples) +``` + +The [`constructors.feature_map`][qadence.constructors.feature_map] module provides +convenience functions to build commonly used feature maps where the input parameter +is encoded in the single-qubit gates rotation angle. + +Furthermore, Qadence is natively integrated with PyTorch automatic differentiation engine thus +Qadence quantum models can be used seamlessly in a PyTorch workflow. + +Let's create a quantum neural network model using the feature map just defined, a +digital-analog variational ansatz and a simple observable $X(0) \otimes X(1)$. We +use the convenience `QNN` quantum model abstraction. 
+ +```python exec="on" source="material-block" result="json" session="qml" +ansatz = qd.hea(n_qubits, strategy="sDAQC") +circuit = qd.QuantumCircuit(n_qubits, feature_map, ansatz) +observable = qd.kron(X(0), X(1)) + +model = qd.QNN(circuit, observable) + +# NOTE: the `QNN` is a torch.nn.Module +assert isinstance(model, torch.nn.Module) +``` + +Differentiation works the same way as any other PyTorch module: + +```python exec="on" source="material-block" html="1" result="json" session="qml" +values = {"phi": torch.rand(10, requires_grad=True)} + +# the forward pass of the quantum model returns the expectation +# value of the input observable +out = model(values) +print(f"Quantum model output: {out}") + +# you can compute the gradient with respect to inputs using +# PyTorch autograd differentiation engine +dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0] +print(f"First-order derivative w.r.t. the feature parameter: {dout}") + +# you can also call directly a backward pass to compute derivatives with respect +# to the variational parameters and use it for implementing variational +# optimization +out.sum().backward() +``` + +To run QML on real devices, Qadence offers generalized parameter shift rules (GPSR) [^2] +for arbitrary quantum operations which can be selected when constructing the +`QNN` model: + +```python exec="on" source="material-block" html="1" result="json" session="qml" +model = qd.QNN(circuit, observable, diff_mode="gpsr") +out = model(values) + +dout = torch.autograd.grad(out, values["phi"], torch.ones_like(out), create_graph=True)[0] +print(f"First-order derivative w.r.t. the feature parameter: {dout}") +``` + +See [here](../advanced_tutorials/differentiability.md) for more details on how the parameter +shift rules implementation works in Qadence. 
+ +## References + +[^1]: Schuld, Petruccione, Machine learning on Quantum Computers, Springer Nature (2021) + +[^2]: [Kyriienko et al., General quantum circuit differentiation rules](https://arxiv.org/abs/2108.01218) diff --git a/docs/qml/qaoa.md b/docs/qml/qaoa.md new file mode 100644 index 000000000..9700ffcfa --- /dev/null +++ b/docs/qml/qaoa.md @@ -0,0 +1,170 @@ +In this tutorial, we show how to solve the maximum cut (MaxCut) combinatorial +optimization problem on a graph using the Quantum Approximate Optimization +Algorithm (QAOA[^1]), introduced in 2014. This showcases the flexibility of +Qadence for implementing variational algorithms without classical input +data. + +Given an arbitrary graph, the MaxCut problem consists in finding a cut +partitioning the nodes into two sets, such that the number of edges that are in the +cut is maximized. This is a very common combinatorial problem; the interested +reader can refer to this introduction. +Let's first generate a random graph using the `networkx` library. + +```python exec="on" source="material-block" html="1" session="qaoa" +import numpy as np +import networkx as nx +import matplotlib.pyplot as plt + +# ensure reproducibility +seed = 10 +np.random.seed(seed) + +n_nodes = 8 +graph = nx.gnp_random_graph(n_nodes, 0.5) + +plt.clf() # markdown-exec: hide +nx.draw(graph) +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide +``` + +The goal of the MaxCut algorithm is to maximize the following cost function: + +$$ +\mathcal{C}(p) = \sum_{\alpha}^m \mathcal{C}_{\alpha}(p) +$$ + +where $p$ is the given partition of the graph, $\alpha$ is an index over the edges and $\mathcal{C}_{\alpha}(p)$ is written such that if the nodes connected by the $\alpha$ edge are in the same set, it returns $0$, otherwise it returns $1$. + +## The QAOA quantum circuit + +Let's see how to solve this problem using a parametrized quantum circuit. 
The +QAOA algorithm requires a circuit with two main components: + +* the cost component is a circuit generated by a diagonal Hamiltonian which + encodes the cost function described above into a quantum circuit. +* the mixing component is a simple set of single qubit rotations with adjustable + angles which are tuned during the classical optimization loop + +First, construct the generators associated with the edges of the given graph. These +will be used both in the definition of the loss function of our problem and in +constructing the quantum circuit. + +```python exec="on" source="material-block" session="qaoa" +from qadence import kron, Z + +zz_ops = [kron(Z(edge[0]), Z(edge[1])) for edge in graph.edges()] +``` + +Let's now define the QAOA quantum circuits with the cost and mixing components. +```python exec="on" source="material-block" html="1" session="qaoa" +from qadence import Zero, I, HamEvo, tag, chain, QuantumCircuit, RX + +n_qubits = graph.number_of_nodes() +n_layers = 2 + +cost_ham = Zero() +for op in zz_ops: + cost_ham += 0.5 * op +cost_ham = 0.5 * kron(I(i) for i in range(n_qubits)) - cost_ham + +layers = [] +for layer in range(n_layers): + + # cost layer with digital decomposition + cost_layer = HamEvo(cost_ham, f"g{layer}").digital_decomposition() + cost_layer = tag(cost_layer, "cost") + + # mixing layer with single qubit rotations + mixing_layer = kron(RX(i, f"b{layer}{i}") for i in range(n_qubits)) + mixing_layer = tag(mixing_layer, "mixing") + + # putting all together in a single ChainBlock + layers.append(chain(cost_layer, mixing_layer)) + +final_b = chain(*layers) + +circuit = QuantumCircuit(n_qubits, final_b) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(circuit)) # markdown-exec: hide +``` +Here we used the `digital_decomposition()` method provided by Qadence for +obtaining the set of gates corresponding to the Hamiltonian evolution operation +in the cost layer. 
+ +## Train the QAOA circuit to solve MaxCut + +Now that we have the circuit, we can create the associated Qadence `QuantumModel` +and train it using standard gradient based optimization. Notice that we give the +full list of edge generators since the loss function to be minimized reads: + +$$ +\mathcal{L} = \sum_{i,j}^{N_{\mathcal{E}}} \frac{1}{2} \left(1 - \langle \psi | \sigma_i^z \sigma_j^z | \psi \rangle \right) +$$ + +where $\psi(\beta, \gamma)$ is the wavefunction obtained by propagating the QAOA +quantum circuit and the sum runs over the edges of the graph $N_{\mathcal{E}}$. + +```python exec="on" source="material-block" result="json" session="qaoa" +import torch +from qadence import QuantumModel + +model = QuantumModel(circuit, backend="pyqtorch", observable=zz_ops, diff_mode='gpsr') + +_ = torch.manual_seed(seed) + +def loss_function(_model: QuantumModel): +    expval_ops = _model.expectation().squeeze() +    # this corresponds to the MaxCut cost by definition +    # with negative sign in front to perform maximization +    expval = 0.0 +    for val in expval_ops: +        expval += 0.5 * (1 - val) +    return -1.0 * expval + +# initialize the parameters to random values +model.reset_vparams(torch.rand(model.num_vparams)) +initial_loss = loss_function(model) +print(f"Initial loss: {initial_loss}") + +# train the model +n_epochs = 100 +lr = 1.0 + +optimizer = torch.optim.Adagrad(model.parameters(), lr=lr) + +for i in range(n_epochs): +    optimizer.zero_grad() +    loss = loss_function(model) +    loss.backward() +    optimizer.step() +    if (i+1) % (n_epochs // 10) == 0: +        print(f"MaxCut cost at iteration {i+1}: {-loss.item()}") +``` +## Results + +Given the optimized model, we now need to sample the resulting quantum state to +recover the bitstring with the highest probability which corresponds to the maximum +cut of the graph. 
+```python exec="on" source="material-block" html="1" session="qaoa" +samples = model.sample(n_shots=100)[0] +most_frequent = max(samples, key=samples.get) + +print(f"Most frequently sampled bitstring corresponding to the maximum cut: {most_frequent}") + +# let's now draw the cut obtained with the QAOA procedure +colors = [] +labels = {} +for node, b in zip(graph.nodes(), most_frequent): +    colors.append("green") if int(b) == 0 else colors.append("red") +    labels[node] = "A" if int(b) == 0 else "B" + +plt.clf() # markdown-exec: hide +nx.draw_networkx(graph, node_color=colors, with_labels=True, labels=labels) +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide +``` + +## References + +[^1]: [Farhi et al.](https://arxiv.org/abs/1411.4028) - A Quantum Approximate Optimization Algorithm diff --git a/docs/qml/qcl.md b/docs/qml/qcl.md new file mode 100644 index 000000000..a06309fba --- /dev/null +++ b/docs/qml/qcl.md @@ -0,0 +1,141 @@ +In this tutorial, we show how to apply `qadence` for solving a basic quantum +machine learning application: fitting a simple function with the +quantum circuit learning (QCL) algorithm. + +Quantum circuit learning [^1] is a supervised quantum machine learning algorithm that uses +parametrized quantum neural networks to learn the behavior of an arbitrary +mathematical function starting from some training data extracted from it. + +For this tutorial, we show how to fit the $\sin(x)$ function in the domain $[-1, 1]$. + +Let's start with defining training and test data. 
+ +```python exec="on" source="material-block" session="qcl" result="json" +from typing import Callable + +import torch + +# make sure all tensors are kept on the same device +# only available from PyTorch 2.0 +device = "cuda" if torch.cuda.is_available() else "cpu" +torch.set_default_device(device) + +# notice that the domain does not include 1 and -1 +# this avoids a singularity in the rotation angles +# when encoding the domain points into the quantum circuit +# with a non-linear transformation (see below) +def qcl_training_data( +    domain: tuple = (-0.99, 0.99), n_points: int = 100 +) -> tuple[torch.Tensor, torch.Tensor]: + +    start, end = domain + +    x_rand, _ = torch.sort(torch.DoubleTensor(n_points).uniform_(start, end)) +    y_rand = torch.sin(x_rand) + +    return x_rand, y_rand + +test_frac = 0.25 +x, y = qcl_training_data() +n_test = int(len(x) * test_frac) +x_train, y_train = x[0:len(x)-n_test], y[0:len(x)-n_test] +x_test, y_test = x[len(x)-n_test:], y[len(x)-n_test:] +``` + +## Train the QCL model + +Qadence provides the [`QNN`][qadence.models.qnn.QNN] convenience constructor to build a quantum neural network. +The `QNN` class needs a circuit and a list of observables; both the number of feature parameters and the number +of observables in the list must be equal to the number of desired outputs of the quantum neural network. 
+ +As observable, we use the total qubit magnetization leveraging a convenience constructor provided by `qadence`: + +$$ +\hat{O} = \sum_i^N \hat{\sigma}_i^z +$$ + +```python exec="on" source="material-block" session="qcl" result="json" +import sympy +import qadence as qd +from qadence.operations import RX + +n_qubits = 8 + +# create a simple feature map with a non-linear parameter transformation +feature_param = qd.FeatureParameter("phi") +feature_map = qd.kron(RX(i, feature_param) for i in range(n_qubits)) +feature_map = qd.tag(feature_map, "feature_map") + +# create a digital-analog variational ansatz using Qadence convenience constructors +ansatz = qd.hea(n_qubits, depth=n_qubits, strategy=qd.Strategy.SDAQC) +ansatz = qd.tag(ansatz, "ansatz") + +# total magnetization observable +observable = qd.hamiltonian_factory(n_qubits, detuning = qd.Z) + +circuit = qd.QuantumCircuit(n_qubits, feature_map, ansatz) +model = qd.QNN(circuit, [observable]) +expval = model(values=torch.rand(10)) +print(expval) +``` + +The QCL algorithm uses the output of the quantum neural network as a tunable +function approximator. We can use standard PyTorch code for training the QNN +using a mean-square error loss, the Adam optimizer and also train on the GPU +if any is available: + +```python exec="on" source="material-block" session="qcl" result="json" + +# train the model +n_epochs = 200 +lr = 0.5 + +input_values = {"phi": x_train} +mse_loss = torch.nn.MSELoss() # standard PyTorch loss function +optimizer = torch.optim.Adam(model.parameters(), lr=lr) # standard PyTorch Adam optimizer + +print(f"Initial loss: {mse_loss(model(input_values), y_train)}") + +y_pred_initial = model({"phi": x_test}) + +running_loss = 0.0 +for i in range(n_epochs): + +    optimizer.zero_grad() + +    loss = mse_loss(model(input_values), y_train) +    loss.backward() +    optimizer.step() + +    if (i+1) % 20 == 0: +        print(f"Epoch {i+1} - Loss: {loss.item()}") +``` + +The quantum model is now trained on the training data points. 
Let's see how well it fits the +function on the test set. + +```python exec="on" source="material-block" session="qcl" result="json" +import matplotlib.pyplot as plt + +y_pred = model({"phi": x_test}) + +# convert all the results to numpy arrays for plotting +x_train_np = x_train.cpu().detach().numpy().flatten() +y_train_np = y_train.cpu().detach().numpy().flatten() +x_test_np = x_test.cpu().detach().numpy().flatten() +y_pred_initial_np = y_pred_initial.cpu().detach().numpy().flatten() +y_pred_np = y_pred.cpu().detach().numpy().flatten() + +fig, _ = plt.subplots() +plt.scatter(x_train_np, y_train_np, label="Training points", marker="o", color="orange") +plt.plot(x_test_np, y_pred_initial_np, label="Initial prediction", color="green", alpha=0.5) +plt.plot(x_test_np, y_pred_np, label="Final prediction") +plt.legend() +from docs import docsutils as du # markdown-exec: hide +print(du.fig_to_html(fig)) # markdown-exec: hide +``` + +## References + +[^1]: [Mitarai et al., Quantum Circuit Learning](https://arxiv.org/abs/1803.00745) diff --git a/docs/tutorials/backends.md b/docs/tutorials/backends.md new file mode 100644 index 000000000..509dd4237 --- /dev/null +++ b/docs/tutorials/backends.md @@ -0,0 +1,197 @@ +Backends allow execution of Qadence abstract quantum circuits. They could be chosen from a variety of simulators, emulators and hardware +and can enable circuit [differentiability](https://en.wikipedia.org/wiki/Automatic_differentiation). The primary way to interact and configure +a backend is via the high-level API `QuantumModel`. + +!!! note "Not all backends are equivalent" + Not all backends support the same set of operations, especially while executing analog blocks. + Qadence will throw descriptive errors in such cases. 
+ +## Execution backends + +[_**PyQTorch**_](https://github.com/pasqal-io/PyQ): An efficient, large-scale simulator designed for +quantum machine learning, seamlessly integrated with the popular [PyTorch](https://pytorch.org/) deep learning framework for automatic differentiability. +It also offers analog computing for time-independent pulses. See [`PyQTorchBackend`][qadence.backends.pyqtorch.backend.Backend]. + +[_**Pulser**_](https://github.com/pasqal-io/Pulser): A Python library for pulse-level/analog control of +neutral atom devices. Execution via [QuTiP](https://qutip.org/). See [`PulserBackend`][qadence.backends.pulser.backend.Backend]. + +[_**Braket**_](https://github.com/aws/amazon-braket-sdk-python): A Python SDK for interacting with +quantum devices on Amazon Braket. Currently, only the devices with the digital interface of Amazon Braket +are supported and execution is performed using the local simulator. Execution on remote simulators and +quantum processing units will be available soon. See [`BraketBackend`][qadence.backends.braket.backend.Backend] + +_**More**_: Proprietary Qadence extensions provide more high-performance backends based on tensor networks or differentiation engines. +For more enquiries, please contact: [`info@pasqal.com`](mailto:info@pasqal.com). + +## Differentiation backend + +The [`DifferentiableBackend`][qadence.backends.pytorch_wrapper.DifferentiableBackend] class enables different differentiation modes +for the given backend. This can be chosen from two types: + +- Automatic differentiation (AD): available for PyTorch based backends (PyQTorch). +- Parameter Shift Rules (PSR): available for all backends. See this [section](/advanced_tutorials/differentiability) for more information on differentiability and PSR. + +In practice, only a `diff_mode` should be provided in the `QuantumModel`. 
Please note that `diff_mode` defaults to `None`: + +```python exec="on" source="material-block" result="json" session="diff-backend" +import sympy +import torch +from qadence import Parameter, RX, RZ, Z, CNOT, QuantumCircuit, QuantumModel, chain, BackendName, DiffMode + +x = Parameter("x", trainable=False) +y = Parameter("y", trainable=False) +fm = chain( + RX(0, 3 * x), + RX(0, x), + RZ(1, sympy.exp(y)), + RX(0, 3.14), + RZ(1, "theta") +) + +ansatz = CNOT(0, 1) +block = chain(fm, ansatz) + +circuit = QuantumCircuit(2, block) + +observable = Z(0) + +# DiffMode.GPSR is available for any backend. +# DiffMode.AD is only available for natively differentiable backends. +model = QuantumModel(circuit, observable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + +# Get some values for the feature parameters. +values = {"x": (x := torch.tensor([0.5], requires_grad=True)), "y": torch.tensor([0.1])} + +# Compute expectation. +exp = model.expectation(values) + +# Differentiate the expectation wrt x. +dexp_dx = torch.autograd.grad(exp, x, torch.ones_like(exp)) +print(f"{dexp_dx = }") # markdown-exec: hide +``` + +## Low-level `backend_factory` interface + +Every backend in Qadence inherits from the abstract `Backend` class: +[`Backend`](../backends/backend.md) and implement the following methods: + +- [`run`][qadence.backend.Backend.run]: propagate the initial state according to the quantum circuit and return the final wavefunction object. +- [`sample`][qadence.backend.Backend.sample]: sample from a circuit. +- [`expectation`][qadence.backend.Backend.expectation]: computes the expectation of a circuit given an observable. +- [`convert`][qadence.backend.Backend.convert]: convert the abstract `QuantumCircuit` object to its backend-native representation including a backend specific parameter embedding function. + +Backends are purely functional objects which take as input the values for the circuit +parameters and return the desired output from a call to a method. 
In order to use a backend directly, +*embedded* parameters must be supplied as they are returned by the backend specific embedding function. + +Here is a simple demonstration of the use of the Braket backend to execute a circuit in non-differentiable mode: + +```python exec="on" source="material-block" session="low-level-braket" +from qadence import QuantumCircuit, FeatureParameter, RX, RZ, CNOT, hea, chain + +# Construct a feature map. +x = FeatureParameter("x") +z = FeatureParameter("y") +fm = chain(RX(0, 3 * x), RZ(1, z), CNOT(0, 1)) + +# Construct a circuit with an hardware-efficient ansatz. +circuit = QuantumCircuit(3, fm, hea(3,1)) +``` + +The abstract `QuantumCircuit` can now be converted to its native representation via the Braket +backend. + +```python exec="on" source="material-block" result="json" session="low-level-braket" +from qadence import backend_factory + +# Use only Braket in non-differentiable mode: +backend = backend_factory("braket") + +# The `Converted` object +# (contains a `ConvertedCircuit` with the original and native representation) +conv = backend.convert(circuit) +print(f"{conv.circuit.original = }") # markdown-exec: hide +print(f"{conv.circuit.native = }") # markdown-exec: hide +``` + +Additionally, `Converted` contains all fixed and variational parameters, as well as an embedding +function which accepts feature parameters to construct a dictionary of *circuit native parameters*. 
+These are needed as each backend uses a different representation of the circuit parameters: + +```python exec="on" source="material-block" result="json" session="low-level-braket" +import torch + +# Contains fixed parameters and variational (from the HEA) +conv.params +print("conv.params = {") # markdown-exec: hide +for k, v in conv.params.items(): print(f"  {k}: {v}") # markdown-exec: hide +print("}") # markdown-exec: hide + +inputs = {"x": torch.tensor([1., 1.]), "y":torch.tensor([2., 2.])} + +# get all circuit parameters (including feature params) +embedded = conv.embedding_fn(conv.params, inputs) +print("embedded = {") # markdown-exec: hide +for k, v in embedded.items(): print(f"  {k}: {v}") # markdown-exec: hide +print("}") # markdown-exec: hide +``` + +Note that above the parameter keys have changed as they now address the keys on the +Braket device. A more readable embedding is provided by the PyQTorch backend: + +```python exec="on" source="material-block" result="json" session="low-level-braket" +from qadence import BackendName, DiffMode +pyq_backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + +# the `Converted` object +# (contains a `ConvertedCircuit` with the original and native representation) +pyq_conv = pyq_backend.convert(circuit) +embedded = pyq_conv.embedding_fn(pyq_conv.params, inputs) +print("embedded = {") # markdown-exec: hide +for k, v in embedded.items(): print(f"  {k}: {v}") # markdown-exec: hide +print("}") # markdown-exec: hide +``` + +With the embedded parameters, `QuantumModel` methods are accessible: + +```python exec="on" source="material-block" result="json" session="low-level-braket" +embedded = conv.embedding_fn(conv.params, inputs) +samples = backend.run(conv.circuit, embedded) +print(f"{samples = }") +``` + +## Lower-level: the `Backend` representation + +If there is a requirement to work with a specific backend, it is possible to access _**directly the native circuit**_. 
+For example, Braket noise features can be imported which are not exposed directly by Qadence. + +```python exec="on" source="material-block" session="low-level-braket" +from braket.circuits import Noise + +# Get the native Braket circuit with the given parameters +inputs = {"x": torch.rand(1), "y":torch.rand(1)} +embedded = conv.embedding_fn(conv.params, inputs) +native = backend.assign_parameters(conv.circuit, embedded) + +# Define a noise channel +noise = Noise.Depolarizing(probability=0.1) + +# Add noise to every gate in the circuit +native.apply_gate_noise(noise) +``` + +In order to run this noisy circuit, the density matrix simulator is needed in Braket: + +```python exec="on" source="material-block" result="json" session="low-level-braket" +from braket.devices import LocalSimulator + +device = LocalSimulator("braket_dm") +result = device.run(native, shots=1000).result().measurement_counts +print(result) +``` +```python exec="on" source="material-block" result="json" session="low-level-braket" +print(conv.circuit.native.diagram()) +``` +```python exec="on" source="material-block" result="json" session="low-level-braket" +print(native.diagram()) +``` diff --git a/docs/tutorials/getting_started.md b/docs/tutorials/getting_started.md new file mode 100644 index 000000000..bace4776d --- /dev/null +++ b/docs/tutorials/getting_started.md @@ -0,0 +1,204 @@ +Quantum programs in Qadence are constructed via a block-system, with an emphasis on composability of +*primitive* blocks to obtain larger, *composite* blocks. This functional approach is different from other frameworks +which follow a more object-oriented way to construct circuits and express programs. + +??? 
note "How to visualize blocks" + + There are two ways to display blocks in a Python interpreter: either as a tree in ASCII format using `print`: + + ```python exec="on" source="material-block" result="json" + from qadence import X, Y, kron + + kron_block = kron(X(0), Y(1)) + print(kron_block) + ``` + + Or using the visualization package which opens an interactive window: + + ```python exec="on" source="material-block" html="1" + from qadence import X, Y, kron + #from visualization import display + + kron_block = kron(X(0), Y(1)) + #display(kron_block) + + from qadence.draw import html_string # markdown-exec: hide + from qadence import chain # markdown-exec: hide + print(html_string(kron(X(0), Y(1))), size="2,2") # markdown-exec: hide + ``` + +## Primitive blocks + +A [`PrimitiveBlock`][qadence.blocks.primitive.PrimitiveBlock] represents a digital or an analog time-evolution quantum operation applied to a qubit support. +Programs can always be decomposed down into a sequence of `PrimitiveBlock` elements. + +Two canonical examples of digital primitive blocks are the parametrized `RX` and the `CNOT` gates: + +```python exec="on" source="material-block" html="1" +from qadence import RX + +# A rotation gate on qubit 0 with a fixed numerical parameter. +rx_gate = RX(0, 0.5) + +from qadence.draw import html_string # markdown-exec: hide +from qadence import chain # markdown-exec: hide +print(html_string(chain(rx_gate), size="2,2")) # markdown-exec: hide +``` + +```python exec="on" source="material-block" html="1" +from qadence import CNOT + +# A CNOT gate with control on qubit 0 and target on qubit 1. +cnot_gate = CNOT(0, 1) +from qadence.draw import html_string # markdown-exec: hide +from qadence import chain # markdown-exec: hide +print(html_string(chain(cnot_gate), size="2,2")) # markdown-exec: hide +``` + +A list of all instances of primitive blocks (also referred to as *operations*) can be found [here](../qadence/operations.md). 
+ +## Composite Blocks + +Programs can be expressed by composing blocks to result in a larger [`CompositeBlock`][qadence.blocks.composite.CompositeBlock] using three fundamental operations: +_chain_, _kron_, and _add_. + +- [**chain**][qadence.blocks.utils.chain] applies a set of blocks in sequence on the *same or overlapping qubit supports* and results in a `ChainBlock` type. +It is akin to applying a matrix product of the sub-blocks with the `*` operator. + +```python exec="on" source="material-block" html="1" session="i-xx" +from qadence import X, chain + +# Chaining on the same qubit using a call to the function. +chain_x = chain(X(0), X(0)) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(chain_x, size="2,2")) # markdown-exec: hide +``` +```python exec="on" source="material-block" html="1" session="i-xx" +# Chaining on different qubits using the operator overload. +# Identical to the kron operation. +chain_xx = X(0) * X(1) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(chain_xx, size="2,2")) # markdown-exec: hide +``` + +- [**kron**][qadence.blocks.utils.kron] applies a set of blocks in parallel (simultaneously) on *disjoint qubit support* and results in a `KronBlock` type. This is akin to applying a tensor product of the sub-blocks with the `@` operator. + +```python exec="on" source="material-block" html="1" session="i-xx" +from qadence import X, kron + +kron_xx = kron(X(0), X(1)) # Equivalent to X(0) @ X(1) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(kron_xx, size="2,2")) # markdown-exec: hide +``` + +For the digital case, it should be noted that `kron` and `chain` are semantically equivalent up to the diagrammatic representation as `chain` implicitly fills blank wires with identities. +However, Qadence also supports *analog* blocks, for which composing sequentially or in parallel becomes non-equivalent. 
More +about analog blocks can be found in the [digital-analog](/digital_analog_qc/analog-basics) section. + +- [**add**][qadence.blocks.utils.add] sums the corresponding matrix of +each sub-block and results in a `AddBlock` type which can be used to construct Pauli operators. +Please note that `AddBlock` can give rise to non-unitary computations that might not be supported by all backends. + +??? note "Get the matrix of a block" + It is always possible to retrieve the matrix representation of a block by calling the `block.tensor()` method. + Please note that the returned tensor contains a batch dimension for the purposes of block parametrization. + + ```python exec="on" source="material-block" result="json" session="i-xx" + print(f"X(0) * X(0) tensor = {chain_x.tensor()}") # markdown-exec: hide + print(f"X(0) @ X(1) tensor = {chain_xx.tensor()}") # markdown-exec: hide + ``` + +```python exec="on" source="material-block" result="json" +from qadence import X, Z + +xz = X(0) + Z(0) +print(xz.tensor()) +``` + +Finally, it is possible to tag blocks with human-readable names: + +```python exec="on" source="material-block" html="1" session="getting_started" +from qadence import X, Y, CNOT, kron, chain, tag + +xy = kron(X(0), Y(1)) +tag(xy, "subblock") + +composite_block = kron(xy, CNOT(3,4)) +final_block = chain(composite_block, composite_block) + +from qadence.draw import html_string # markdown-exec: hide +print(html_string(final_block, size="4,4")) # markdown-exec: hide +``` + +## Block execution + +To quickly run quantum operations and access wavefunctions, samples or expectation values of +observables, one can use the convenience functions `run`, `sample` and `expectation`. 
The following +example shows an execution workflow with the natively available `PyQTorch` backend: + +```python exec="on" source="material-block" result="json" session="index" +from qadence import chain, add, H, Z, run, sample, expectation + +n_qubits = 2 +block = chain(H(0), H(1)) + +# Compute the wavefunction. +# Please check the documentation for other available backends. +wf = run(block) +print(f"{wf = }") # markdown-exec: hide + +# Sample the resulting wavefunction with a given number of shots. +xs = sample(block, n_shots=1000) +print(f"{xs = }") # markdown-exec: hide + +# Compute an expectation based on an observable of Pauli-Z operators. +obs = add(Z(i) for i in range(n_qubits)) +ex = expectation(block, obs) +print(f"{ex = }") # markdown-exec: hide +``` + +More fine-grained control and better performance is provided via the high-level `QuantumModel` abstraction. + +## Execution via `QuantumCircuit` and `QuantumModel` + +Quantum programs in Qadence are constructed in two steps: + +1. Build a [`QuantumCircuit`][qadence.circuit.QuantumCircuit] which ties together a composite block and a register. +2. Define a [`QuantumModel`](/tutorials/quantummodels) which differentiates, compiles and executes the circuit. + +`QuantumCircuit` is a central class in Qadence and circuits are abstract +objects from the actual hardware/simulator that they are expected to be executed on. +They require to specify the `Register` of resources to execute your program on. Previous examples +were already using `QuantumCircuit` with a `Register` that fits the qubit support for the given block. + +```python exec="on" source="material-block" result="json" +from qadence import QuantumCircuit, Register, H, chain + +# NOTE: Run a block which supports two qubits +# on a register of three qubits. +register = Register(3) +circuit = QuantumCircuit(register, chain(H(0), H(1))) +print(f"circuit = {circuit}") # markdown-exec: hide +``` + +!!! 
note "Registers and qubit supports" + Registers can also be constructed from qubit coordinates to create arbitrary register + topologies. See details in the [digital-analog](/digital_analog_qc/analog-basics.md) section. + Qubit supports are subsets of the circuit register tied to blocks. + + +`QuantumModel` is another central class in Qadence. It specifies a [Backend](/tutorials/backend.md) for +the differentiation, compilation and execution of the abstract circuit. + +```python exec="on" source="material-block" result="json" +from qadence import BackendName, DiffMode, QuantumCircuit, QuantumModel, Register, H, chain + +reg = Register(3) +circ = QuantumCircuit(reg, chain(H(0), H(1))) +model = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + +xs = model.sample(n_shots=100) +print(f"{xs = }") # markdown-exec: hide +``` + +For more details on `QuantumModel`, see [here](/tutorials/quantummodels). diff --git a/docs/tutorials/hamiltonians.md b/docs/tutorials/hamiltonians.md new file mode 100644 index 000000000..2695ed7e4 --- /dev/null +++ b/docs/tutorials/hamiltonians.md @@ -0,0 +1,132 @@ +# Constructing arbitrary Hamiltonians + +At the heart of digital-analog quantum computing is the description and execution of analog blocks, which represent a set of interacting qubits under some interaction Hamiltonian. +For this purpose, Qadence relies on the [`hamiltonian_factory`](../qadence/constructors.md) function to create arbitrary Hamiltonian blocks to be used as generators of `HamEvo` or as observables to be measured. + +## Arbitrary all-to-all Hamiltonians + +Arbitrary all-to-all interaction Hamiltonians can be easily created by passing the number of qubits in the first argument. The type of `interaction` can be chosen from the available ones in the [`Interaction`](../qadence/types.md) enum type. 
+ +```python exec="on" source="material-block" result="json" session="hamiltonians" +from qadence import hamiltonian_factory +from qadence import N, X, Y, Z +from qadence import Interaction + +n_qubits = 3 + +hamilt = hamiltonian_factory(n_qubits, interaction=Interaction.ZZ) + +print(hamilt) # markdown-exec: hide +``` + +Single-qubit terms can also be added by passing the respective operator directly to the `detuning` argument. For example, the total magnetization is commonly used as an observable to be measured: + +```python exec="on" source="material-block" result="json" session="hamiltonians" +total_mag = hamiltonian_factory(n_qubits, detuning = Z) +print(total_mag) # markdown-exec: hide +``` + +For further customization, arbitrary coefficients can be passed as arrays to the `interaction_strength` and `detuning_strength` arguments for the two-qubits and single-qubit terms respectively. + +```python exec="on" source="material-block" result="json" session="hamiltonians" +n_qubits = 3 + +hamilt = hamiltonian_factory( + n_qubits, + interaction=Interaction.ZZ, + detuning=Z, + interaction_strength=[0.5, 0.2, 0.1], + detuning_strength=[0.1, 0.5, -0.3] +) +print(hamilt) # markdown-exec: hide +``` + +!!! 
warning "Ordering interaction strengths matters" + + When passing interaction strengths as an array, the ordering must be identical to the one + obtained from the `edge` property of a Qadence [`Register`](register.md): + + ```python exec="on" source="material-block" result="json" session="hamiltonians" + from qadence import Register + + print(Register(n_qubits).edges) + ``` + +For one more example, let's create a transverse-field Ising model, + +```python exec="on" source="material-block" result="json" session="hamiltonians" +n_qubits = 4 +n_edges = int(0.5 * n_qubits * (n_qubits - 1)) + +z_terms = [1.0] * n_qubits +zz_terms = [2.0] * n_edges + +zz_ham = hamiltonian_factory( + n_qubits, + interaction=Interaction.ZZ, + detuning=Z, + interaction_strength=zz_terms, + detuning_strength=z_terms +) + +x_terms = [-1.0] * n_qubits +x_ham = hamiltonian_factory(n_qubits, detuning = X, detuning_strength = x_terms) + +transverse_ising = zz_ham + x_ham + +print(transverse_ising) # markdown-exec: hide +``` + +!!! note "Random interaction coefficients" + Random interaction coefficients can be chosen between -1 and 1 by simply passing `random_strength = True` instead of `detuning_strength` + and `interaction_strength`. + + +## Arbitrary Hamiltonian topologies + +Arbitrary interaction topologies can be created using the Qadence [`Register`](register.md). +Simply pass the register with the desired topology as the first argument to the `hamiltonian_factory`: + +```python exec="on" source="material-block" result="json" session="hamiltonians" +from qadence import Register + +reg = Register.square(qubits_side=2) + +square_hamilt = hamiltonian_factory(reg, interaction=Interaction.NN) +print(square_hamilt) # markdown-exec: hide +``` + +Custom Hamiltonian coefficients can also be added to the register beforehand using the `"strength"` key.
+ +```python exec="on" source="material-block" result="json" session="hamiltonians" + +reg = Register.square(qubits_side = 2) + +for i, edge in enumerate(reg.edges): + reg.edges[edge]["strength"] = (0.5 * i) ** 2 + +square_hamilt = hamiltonian_factory(reg, interaction=Interaction.NN) +print(square_hamilt) # markdown-exec: hide +``` + +Alternatively, if the register already stores interaction or detuning strengths, it is possible to override them in the Hamiltonian creation by using `force_update = True`. + + +## Adding variational parameters + +Finally, fully parameterized Hamiltonians can be created by passing a string to the strength arguments: + + +```python exec="on" source="material-block" result="json" session="hamiltonians" +n_qubits = 3 + +nn_ham = hamiltonian_factory( + n_qubits, + interaction=Interaction.NN, + detuning=N, + interaction_strength="c", + detuning_strength="d" +) + +print(nn_ham) # markdown-exec: hide +``` diff --git a/docs/tutorials/ml_tools.md b/docs/tutorials/ml_tools.md new file mode 100644 index 000000000..652c216dc --- /dev/null +++ b/docs/tutorials/ml_tools.md @@ -0,0 +1,154 @@ +`qadence` also offers an out-of-the-box training routine called `train_with_grad` +for optimizing fully-differentiable models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters (i.e., inputs). Feel free to [refresh your memory about different parameter types](/tutorials/parameters). + +## ML tools Basics + +`train_with_grad` performs training, logging/printing loss metrics and storing intermediate checkpoints of models. + +As every other training routine commonly used in Machine Learning, it requires +`model`, `data` and an `optimizer` as input arguments. +However, in addition, it requires a `loss_fn` and a `TrainConfig`. +A `loss_fn` is required to be a function which expects both a model and data and returns a tuple of (loss, metrics: dict), where `metrics` is a dict of scalars which can be customized too.
+ +```python exec="on" source="material-block" result="json" +import torch +from itertools import count +cnt = count() +criterion = torch.nn.MSELoss() + +def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + +``` + +The `TrainConfig` [qadence.ml_tools.config] tells `train_with_grad` what batch_size should be used, how many epochs to train, in which intervals to print/log metrics and how often to store intermediate checkpoints. + +```python exec="on" source="material-block" result="json" +from qadence.ml_tools import TrainConfig + +batch_size = 5 +n_epochs = 100 + +config = TrainConfig( + folder="some_path/", + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) + +``` +## Fitting a function with a QNN using ml_tools + +Let's look at a complete example of how to use `train_with_grad` now. + +```python exec="on" source="material-block" result="json" +from pathlib import Path +import torch +from itertools import count +from qadence.constructors import hamiltonian_factory, hea, feature_map +from qadence import chain, Parameter, QuantumCircuit, Z +from qadence.models import QNN +from qadence.ml_tools import train_with_grad, TrainConfig +import matplotlib.pyplot as plt +
+n_qubits = 2 +fm = feature_map(n_qubits) +ansatz = hea(n_qubits=n_qubits, depth=3) +observable = hamiltonian_factory(n_qubits, detuning=Z) +circuit = QuantumCircuit(n_qubits, fm, ansatz) + +model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") +batch_size = 1 +input_values = {"phi": torch.rand(batch_size, requires_grad=True)} +pred = model(input_values) + +cnt = count() +criterion = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + +def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss =
criterion(out, y) + return loss, {} + +tmp_path = Path("/tmp") + +n_epochs = 5 + +config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) + +batch_size = 25 + +x = torch.linspace(0, 1, batch_size).reshape(-1, 1) +y = torch.sin(x) + +train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) + +plt.plot(y.numpy()) +plt.plot(model(input_values).detach().numpy()) + +``` + +For users who want to use the low-level API of `qadence`, here is the example from above +written without `train_with_grad`. + +## Fitting a function - Low-level API + +```python exec="on" source="material-block" result="json" +from pathlib import Path +import torch +from itertools import count +from qadence.constructors import hamiltonian_factory, hea, feature_map +from qadence import chain, Parameter, QuantumCircuit, Z +from qadence.models import QNN +from qadence.ml_tools import train_with_grad, TrainConfig + +n_qubits = 2 +fm = feature_map(n_qubits) +ansatz = hea(n_qubits=n_qubits, depth=3) +observable = hamiltonian_factory(n_qubits, detuning=Z) +circuit = QuantumCircuit(n_qubits, fm, ansatz) + +model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") +batch_size = 1 +input_values = {"phi": torch.rand(batch_size, requires_grad=True)} +pred = model(input_values) + +criterion = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) +n_epochs=50 +cnt = count() + +tmp_path = Path("/tmp") + +config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) + +x = torch.linspace(0, 1, batch_size).reshape(-1, 1) +y = torch.sin(x) + +for i in range(n_epochs): + out = model(x) + loss = criterion(out, y) + loss.backward() + optimizer.step() + +``` diff --git a/docs/tutorials/overlap.md b/docs/tutorials/overlap.md new file mode 100644 index 000000000..36c5a3d39 --- /dev/null +++ b/docs/tutorials/overlap.md @@ -0,0 +1,75 @@ 
+Qadence offers convenience functions for computing the overlap between the +wavefunctions generated by two quantum circuits $U$ and $W$ as: + +$$ +S = |\langle \psi_U | \psi_W \rangle|^2 \quad \textrm{where} \quad \psi_U = U|\psi_0\rangle +$$ + +Here is an example on how to compute the overlap between two very simple parametric circuits +consisting of a single `RX` rotation on different qubits. The overlap is expected to be +non-zero only when the rotation angle is different from $\pi \; \textrm{mod}\; 2\pi$ for both rotations: + +```python exec="on" source="material-block" result="json" session="overlap" +import torch +import numpy as np +from qadence import Overlap, OverlapMethod, QuantumCircuit, H, RX, X, FeatureParameter, hea + + +# Create two quantum circuits +# with a single qubit rotation on two random qubits +n_qubits = 4 +qubits = np.random.choice(list(range(n_qubits)), n_qubits, replace=True) + +phi = FeatureParameter("phi") +circuit_bra = QuantumCircuit(n_qubits, RX(qubits[0], phi)) + +psi = FeatureParameter("psi") +circuit_ket = QuantumCircuit(n_qubits, RX(qubits[1], psi)) + +# Values for the feature parameters +values_bra = {"phi": torch.Tensor([torch.pi / 2, torch.pi])} +values_ket = {"psi": torch.Tensor([torch.pi / 2, torch.pi])} + +# Calculate overlap by assigning values to the given bra and ket circuits +ovrlp = Overlap(circuit_bra, circuit_ket) +ovrlp = ovrlp(bra_param_values=values_bra, ket_param_values=values_ket) + +print("Overlap with exact method:\n", ovrlp) # markdown-exec: hide +``` + +The `Overlap` class above inherits from `QuantumModel` and is executed through its inherited forward method +for the given input parameter values. By default, +the overlap is computed exactly by performing the dot product of the wavefunction propagated +from bra and ket circuits. + +However, it is possible to choose a different method from the `OverlapMethod` enumeration +to be passed via the `overlap_method` argument in the `Overlap` initializer. 
+Currently, one can choose from: + +* `EXACT`: exact computation using the wavefunction matrix representation. Does not work with real devices since it assumes access to the complete qubit system wavefunction. +* `COMPUTE_UNCOMPUTE`: exact or sampling-based computation using bra $U$ and ket $W^{\dagger}$ unitaries. +* `SWAP_TEST`: exact or sampling-based computation using the SWAP test method. +* `HADAMARD_TEST`: exact or sampling-based computation using the Hadamard test method. +* `JENSEN_SHANNON`: compute the overlap using the Jensen-Shannon divergence of the two +probability distributions obtained by sampling the propagated circuits. This will yield a different +result than the other methods. + +All methods (except for the `EXACT` method) take an optional `n_shots` argument which can be used +to perform shot-based calculations. + +!!! warning + If you select a finite number of shots, the overlap is not differentiable. Therefore, + it cannot be used as output of a quantum model if gradients are required. + +```python exec="on" source="material-block" result="json" session="overlap" +# Calculate overlap with SWAP test +ovrlp = Overlap(circuit_bra, circuit_ket, method=OverlapMethod.SWAP_TEST) +ovrlp_ha = ovrlp(values_bra, values_ket) +print("Overlap with SWAP test:\n", ovrlp_ha) # markdown-exec: hide + +# Calculate overlap with SWAP test +# using a finite number of shots +ovrlp = Overlap(circuit_bra, circuit_ket, method=OverlapMethod.SWAP_TEST) +ovrlp_ha = ovrlp(values_bra, values_ket, n_shots=10_000) +print("Overlap with SWAP test with finite number of shots:\n", ovrlp_ha) # markdown-exec: hide +``` diff --git a/docs/tutorials/parameters.md b/docs/tutorials/parameters.md new file mode 100644 index 000000000..ea8929626 --- /dev/null +++ b/docs/tutorials/parameters.md @@ -0,0 +1,325 @@ +Qadence base `Parameter` type is a subtype of `sympy.Symbol`. 
There are three kinds of parameter subtypes used: + +- _**Fixed Parameter**_: A constant with a fixed, non-trainable value (_e.g._ $\dfrac{\pi}{2}$). +- _**Variational Parameter**_: A trainable parameter which can be optimized. +- _**Feature Parameter**_: A non-trainable parameter which can be used to encode classical data into a quantum state. + +## Fixed Parameters + +To pass a fixed parameter to a gate (or any parametrizable block), one can simply use either Python numeric types or values wrapped in +a `torch.Tensor`. + +```python exec="on" source="material-block" result="json" +from torch import pi +from qadence import RX, run + +# Let's use a torch type. +block = RX(0, pi) +wf = run(block) +print(f"{wf = }") # markdown-exec: hide + +# Let's pass a simple float. +block = RX(0, 1.) +wf = run(block) +print(f"{wf = }") # markdown-exec: hide +``` + +## Variational Parameters + +To parametrize a block by an angle `theta`, either a Python `string` or an instance of `VariationalParameter` can be passed instead of a numeric type to the gate constructor: + +```python exec="on" source="material-block" result="json" +from qadence import RX, run, VariationalParameter + +block = RX(0, "theta") +# This is equivalent to: +block = RX(0, VariationalParameter("theta")) + +wf = run(block) +print(f"{wf = }") # markdown-exec: hide +``` + +In the first case in the above example, `theta` is automatically inferred as a `VariationalParameter` (_i.e._ trainable). It is initialized to a random value for the purposes of execution. In the context of a `QuantumModel`, there is no need to pass a value for `theta` to the `run` method since it is stored within the underlying model parameter dictionary.
+ +## Feature Parameters + +`FeatureParameter` types (_i.e._ inputs), always need to be provided with a value or a batch of values as a dictionary: + +```python exec="on" source="material-block" result="json" +from torch import tensor +from qadence import RX, run, FeatureParameter + +block = RX(0, FeatureParameter("phi")) + +wf = run(block, values={"phi": tensor([1., 2.])}) +print(f"{wf = }") # markdown-exec: hide +``` + +Now, `run` returns a batch of states, one for every provided angle which coincides with the value of the particular `FeatureParameter`. + +## Multiparameter Expressions + +However, an angle can itself be an expression `Parameter` types of any kind. +As such, any sympy expression `expr: sympy.Basic` consisting of a combination of free symbols (_i.e._ `sympy` types) and Qadence `Parameter` can +be passed to a block, including trigonometric functions. + +```python exec="on" source="material-block" result="json" +from torch import tensor +from qadence import RX, Parameter, run, FeatureParameter +from sympy import sin + +theta, phi = Parameter("theta"), FeatureParameter("phi") +block = RX(0, sin(theta+phi)) + +# Remember, to run the block, only FeatureParameter values have to be provided: +values = {"phi": tensor([1.0, 2.0])} +wf = run(block, values=values) +print(f"{wf = }") # markdown-exec: hide +``` + +## Parameters Redundancy + +Parameters are uniquely defined by their name and redundancy is allowed in composite blocks to +assign the same value to different blocks. + +```python exec="on" source="material-block" result="json" +import torch +from qadence import RX, RY, run, chain, kron + +block = chain( + kron(RX(0, "phi"), RY(1, "theta")), + kron(RX(0, "phi"), RY(1, "theta")), +) + +wf = run(block) # Same random initialization for all instances of phi and theta. 
+print(f"{wf = }") # markdown-exec: hide +``` + +## Parametrized Circuits + +Now, let's have a look at the construction of a variational ansatz which composes `FeatureParameter` and `VariationalParameter` types: + +```python exec="on" source="material-block" html="1" +import sympy +from qadence import RX, RY, RZ, CNOT, Z, run, chain, kron, FeatureParameter, VariationalParameter + +phi = FeatureParameter("phi") +theta = VariationalParameter("theta") + +block = chain( + kron( + RX(0, phi/theta), + RY(1, theta*2), + RZ(2, sympy.cos(phi)), + ), + kron( + RX(0, phi), + RY(1, theta), + RZ(2, phi), + ), + kron( + RX(0, phi), + RY(1, theta), + RZ(2, phi), + ), + kron( + RX(0, phi + theta), + RY(1, theta**2), + RZ(2, sympy.cos(phi)), + ), + chain(CNOT(0,1), CNOT(1,2)) +) +block.tag = "Rotations" + +obs = 2*kron(*map(Z, range(3))) +block = chain(block, obs) + +from qadence.draw import html_string # markdown-exec: hide +print(html_string(block, size="4,4")) # markdown-exec: hide +``` + +Please note the different colors for the parametrization with different types. The default palette assigns light blue for `VariationalParameter`, light green for `FeatureParameter` and shaded red for observables. + +## Parametrized QuantumModels + +As a quick reminder: `FeatureParameter` are used for data input and data encoding into a quantum state. +`VariationalParameter` are trainable parameters in a variational ansatz. When used within a [`QuantumModel`][qadence.models.quantum_model.QuantumModel], an abstract quantum circuit is made differentiable with respect to both variational and feature +parameters which are uniquely identified by their name. + +```python exec="on" source="material-block" session="parametrized-models" +from qadence import FeatureParameter, Parameter, VariationalParameter + +# Feature parameters are non-trainable parameters. +# Their primary use is input data encoding. 
+fp = FeatureParameter("x") +assert fp == Parameter("x", trainable=False) + +# Variational parameters are trainable parameters. +# Their primary use is for optimization. +vp = VariationalParameter("y") +assert vp == Parameter("y", trainable=True) +``` + +Let's construct a parametric quantum circuit. + +```python exec="on" source="material-block" result="json" session="parametrized-models" +from qadence import QuantumCircuit, RX, RY, chain, kron + +theta = VariationalParameter("theta") +phi = FeatureParameter("phi") + +block = chain( + kron(RX(0, theta), RY(1, theta)), + kron(RX(0, phi), RY(1, phi)), +) + +circuit = QuantumCircuit(2, block) +unique_params = circuit.unique_parameters + +print(f"{unique_params = }") # markdown-exec: hide +``` + +In the circuit above, four parameters are defined but only two unique names. Therefore, there will be only one +variational parameter to be optimized. + +The `QuantumModel` class also provides convenience methods to manipulate parameters. + +```python exec="on" source="material-block" result="json" session="parametrized-models" +from qadence import QuantumModel, BackendName, DiffMode + +model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) +num_vparams = model.num_vparams +vparams_values = model.vparams + +print(f"{num_vparams = }") # markdown-exec: hide +print(f"{vparams_values = }") # markdown-exec: hide +``` + +!!! note "Only provide feature parameter values to the quantum model" + In order to `run` the variational circuit _**only feature parameter values**_ have to be provided. + Variational parameters are stored in the model itself. If multiple feature parameters are present, + values must be provided in batches of same length. 
+ + ```python exec="on" source="material-block" result="json" session="parametrized-models" + import torch + + values = {"phi": torch.rand(3)} # theta does not appear here + wf = model.run(values) + print(f"{wf = }") # markdown-exec: hide + ``` + +## Standard constructors + +The unique parameter identification is relevant when using built-in Qadence block +constructors in the `qadence.constructors` module such as feature maps and hardware +efficient ansatze (HEA). + +```python exec="on" source="material-block" result="json" session="parametrized-constructors" +from qadence import QuantumCircuit, hea + +n_qubits = 4 +depth = 2 + +hea1 = hea(n_qubits=n_qubits, depth=depth) +circuit = QuantumCircuit(n_qubits, hea1) +num_unique_parameters = circuit.num_unique_parameters +print(f"Unique parameters with a single HEA: {num_unique_parameters}") # markdown-exec: hide +``` +```python exec="on" html="1" session="parametrized-constructors" +from qadence.draw import html_string +print(html_string(circuit)) +``` + +A new circuit can be created by adding another identical HEA. As expected, the number of unique parameters +is the same. + +```python exec="on" source="material-block" result="json" session="parametrized-constructors" +hea2 = hea(n_qubits=n_qubits, depth=depth) + +circuit = QuantumCircuit(n_qubits, hea1, hea2) +num_unique_params_two_heas = circuit.num_unique_parameters +print(f"Unique parameters with two stacked HEAs: {num_unique_params_two_heas}") # markdown-exec: hide +``` +```python exec="on" html="1" session="parametrized-constructors" +from qadence.draw import html_string # markdown-exec: hide +print(html_string(circuit)) # markdown-exec: hide +``` + +!!! 
warning "Avoid non-unique names by prefixing" + A parameter prefix for each HEA can be passed as follows: + + ```python exec="on" source="material-block" result="json" session="parametrized-constructors" + hea1 = hea(n_qubits=n_qubits, depth=depth, param_prefix="p1") + hea2 = hea(n_qubits=n_qubits, depth=depth, param_prefix="p2") + + circuit = QuantumCircuit(n_qubits, hea1, hea2) + n_params_two_heas = circuit.num_unique_parameters + print(f"Unique parameters with two stacked HEAs: {n_params_two_heas}") # markdown-exec: hide + ``` + ```python exec="on" html="1" session="parametrized-constructors" + from qadence.draw import html_string # markdown-exec: hide + print(html_string(circuit)) # markdown-exec: hide + ``` + +The `hea` function will be further explored in the [QML Constructors tutorial](qml_tools.md). + +## Parametric observables + +In Qadence, one can define quantum observables with classical optimizable parameters to +improve the convergence of QML calculations. This is particularly useful for differentiable quantum circuits. + +```python exec="on" source="material-block" session="parametrized-constructors" +from qadence import VariationalParameter, Z, add, tag + +s = VariationalParameter("s") +observable = add(s * Z(i) for i in range(n_qubits)) +``` + +Now, a quantum model can be created with the parametric observable. +The observable variational parameters are included among the model ones. + +```python exec="on" source="material-block" result="json" session="parametrized-constructors" +from qadence import QuantumModel, QuantumCircuit + +circuit = QuantumCircuit(n_qubits, hea(n_qubits, depth)) +model = QuantumModel(circuit, observable=observable) +print(f"Variational parameters = {model.vparams}") # markdown-exec: hide +``` + +One optimization step (forward and backward pass) can be performed using built-in `torch` functionalities. 
Variational parameters +can be checked to have been updated accordingly: + +```python exec="on" source="material-block" result="json" session="parametrized-constructors" +import torch + +mse_loss = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters()) + +# Compute forward & backward pass +optimizer.zero_grad() +loss = mse_loss(model.expectation({}), torch.zeros(1)) +loss.backward() + +# Update the parameters and check the parameters. +optimizer.step() +print(f"Variational parameters = {model.vparams}") # markdown-exec: hide +``` + +## Non-unitary circuits + +Qadence allows to compose with non-unitary blocks. +Here is an example of a non-unitary block as a sum of Pauli operators with complex coefficients. + +!!! warning "Currently, only the `PyQTorch` backend fully supports execution with non-unitary circuits." + +```python exec="on" source="material-block" result="json" session="non-unitary" +from qadence import QuantumModel, QuantumCircuit, Z, X +c1 = 2.0 +c2 = 2.0 + 2.0j + +block = c1 * Z(0) + c2 * X(1) + c1 * c2 * (Z(2) + X(3)) +circuit = QuantumCircuit(4, block) + +model = QuantumModel(circuit) # BackendName.PYQTORCH and DiffMode.AD by default. +print(f"wf = {model.run({})}") # markdown-exec: hide +``` diff --git a/docs/tutorials/qml_tools.md b/docs/tutorials/qml_tools.md new file mode 100644 index 000000000..cb213051d --- /dev/null +++ b/docs/tutorials/qml_tools.md @@ -0,0 +1,254 @@ +# Quantum Machine Learning Constructors + +Besides the [arbitrary Hamiltonian constructors](hamiltonians.md), Qadence also provides a complete set of program constructors useful for digital-analog quantum machine learning programs. 
+ +## Feature Maps + +A few feature maps are directly available for feature loading, + +```python exec="on" source="material-block" result="json" session="fms" +from qadence import feature_map + +n_qubits = 3 + +fm = feature_map(n_qubits, fm_type="fourier") +print(f"Fourier = {fm}") # markdown-exec: hide + +fm = feature_map(n_qubits, fm_type="chebyshev") +print(f"Chebyshev {fm}") # markdown-exec: hide + +fm = feature_map(n_qubits, fm_type="tower") +print(f"Tower {fm}") # markdown-exec: hide +``` + +## Hardware-Efficient Ansatz + +Ansatze blocks for quantum machine-learning are typically built following the Hardware-Efficient Ansatz formalism (HEA). Both fully digital and digital-analog HEAs can easily be built with the `hea` function. By default, the digital version is returned: + +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import hea +from qadence.draw import display + +n_qubits = 3 +depth = 2 + +ansatz = hea(n_qubits, depth) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="4,4")) # markdown-exec: hide +``` + +As seen above, the rotation layers are automatically parameterized, and the prefix `"theta"` can be changed with the `param_prefix` argument. + +Furthermore, both the single-qubit rotations and the two-qubit entangler can be customized with the `operations` and `entangler` argument. The operations can be passed as a list of single-qubit rotations, while the entangler should be either `CNOT`, `CZ`, `CRX`, `CRY`, `CRZ` or `CPHASE`. 
+ +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import RX, RY, CPHASE + +ansatz = hea( + n_qubits=n_qubits, + depth=depth, + param_prefix="phi", + operations=[RX, RY, RX], + entangler=CPHASE +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="4,4")) # markdown-exec: hide +``` + +Having a truly *hardware-efficient* ansatz means that the entangling operation can be chosen according to each device's native interactions. Besides digital operations, in Qadence it is also possible to build digital-analog HEAs with the entanglement produced by the natural evolution of a set of interacting qubits, as is natural in neutral atom devices. As with other digital-analog functions, this can be controlled with the `strategy` argument which can be chosen from the [`Strategy`](../qadence/types.md) enum type. Currently, only `Strategy.DIGITAL` and `Strategy.SDAQC` are available. By default, calling `strategy = Strategy.SDAQC` will use a global entangling Hamiltonian with Ising-like NN interactions and constant interaction strength inside a `HamEvo` operation, + +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import Strategy + +ansatz = hea( + n_qubits, + depth=depth, + strategy=Strategy.SDAQC +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="4,4")) # markdown-exec: hide +``` + +Note that, by default, only the time-parameter is automatically parameterized when building a digital-analog HEA. However, as described in the [Hamiltonians tutorial](hamiltonians.md), arbitrary interaction Hamiltonians can be easily built with the `hamiltonian_factory` function, with both customized or fully parameterized interactions, and these can be directly passed as the `entangler` for a customizable digital-analog HEA. 
+ +```python exec="on" source="material-block" html="1" session="ansatz" +from qadence import hamiltonian_factory, Interaction, N, Register, hea + +# Build a parameterized neutral-atom Hamiltonian following a honeycomb_lattice: +register = Register.honeycomb_lattice(1, 1) + +entangler = hamiltonian_factory( + register, + interaction=Interaction.NN, + detuning=N, + interaction_strength="e", + detuning_strength="n" +) + +# Build a fully parameterized Digital-Analog HEA: +n_qubits = register.n_qubits +depth = 2 + +ansatz = hea( + n_qubits=register.n_qubits, + depth=depth, + operations=[RX, RY, RX], + entangler=entangler, + strategy=Strategy.SDAQC +) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(ansatz, size="4,4")) # markdown-exec: hide +``` +Qadence also offers a out-of-the-box training routine called `train_with_grad` +for optimizing fully-differentiable models like `QNN`s and `QuantumModel`s containing either *trainable* and/or *non-trainable* parameters (i.e., inputs). Feel free to [refresh your memory about different parameter types](/tutorials/parameters). + +## Machine Learning Tools + +`train_with_grad` performs training, logging/printing loss metrics and storing intermediate checkpoints of models. + +As every other training routine commonly used in Machine Learning, it requires +`model`, `data` and an `optimizer` as input arguments. +However, in addition, it requires a `loss_fn` and a `TrainConfig`. +A `loss_fn` is required to be a function which expects both a model and data and returns a tuple of (loss, metrics: ``), where `metrics` is a dict of scalars which can be customized too. 
+ +```python exec="on" source="material-block" result="json" +import torch +from itertools import count +cnt = count() +criterion = torch.nn.MSELoss() + +def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + +``` + +The `TrainConfig` [qadence.ml_tools.config] tells `train_with_grad` what batch_size should be used, how many epochs to train, in which intervals to print/log metrics and how often to store intermediate checkpoints. + +```python exec="on" source="material-block" result="json" +from qadence.ml_tools import TrainConfig + +batch_size = 5 +n_epochs = 100 + +config = TrainConfig( + folder="some_path/", + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) +``` +## Fitting a funtion with a QNN using `ml_tools` + +Let's look at a complete example of how to use `train_with_grad` now. + +```python exec="on" source="material-block" result="json" +from pathlib import Path +import torch +from itertools import count +from qadence.constructors import hamiltonian_factory, hea, feature_map +from qadence import chain, Parameter, QuantumCircuit, Z +from qadence.models import QNN +from qadence.ml_tools import train_with_grad, TrainConfig +import matplotlib.pyplot as plt + +n_qubits = 2 +fm = feature_map(n_qubits) +ansatz = hea(n_qubits=n_qubits, depth=3) +observable = hamiltonian_factory(n_qubits, detuning=Z) +circuit = QuantumCircuit(n_qubits, fm, ansatz) + +model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") +batch_size = 1 +input_values = {"phi": torch.rand(batch_size, requires_grad=True)} +pred = model(input_values) + +cnt = count() +criterion = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + +def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = 
criterion(out, y) + return loss, {} + +tmp_path = Path("/tmp") + +n_epochs = 5 + +config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) + +batch_size = 25 + +x = torch.linspace(0, 1, batch_size).reshape(-1, 1) +y = torch.sin(x) + +train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) + +plt.plot(y.numpy()) +plt.plot(model(input_values).detach().numpy()) + +``` + +For users who want to use the low-level API of `qadence`, here is the example from above +written without `train_with_grad`. + +## Fitting a function - Low-level API + +```python exec="on" source="material-block" result="json" +from pathlib import Path +import torch +from itertools import count +from qadence.constructors import hamiltonian_factory, hea, feature_map +from qadence import chain, Parameter, QuantumCircuit, Z +from qadence.models import QNN +from qadence.ml_tools import train_with_grad, TrainConfig + +n_qubits = 2 +fm = feature_map(n_qubits) +ansatz = hea(n_qubits=n_qubits, depth=3) +observable = hamiltonian_factory(n_qubits, detuning=Z) +circuit = QuantumCircuit(n_qubits, fm, ansatz) + +model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") +batch_size = 1 +input_values = {"phi": torch.rand(batch_size, requires_grad=True)} +pred = model(input_values) + +criterion = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) +n_epochs=50 +cnt = count() + +tmp_path = Path("/tmp") + +config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, +) + +x = torch.linspace(0, 1, batch_size).reshape(-1, 1) +y = torch.sin(x) + +for i in range(n_epochs): + out = model(x) + loss = criterion(out, y) + loss.backward() + optimizer.step() + +``` diff --git a/docs/tutorials/quantummodels.md b/docs/tutorials/quantummodels.md new file mode 100644 index 000000000..42e3e8295 --- /dev/null +++ b/docs/tutorials/quantummodels.md 
@@ -0,0 +1,90 @@ +A quantum program can be expressed and executed using the [`QuantumModel`][qadence.models.quantum_model.QuantumModel] type. +It serves three primary purposes: + +_**Parameter handling**_: by conveniently handling and embedding the two parameter types that Qadence supports: +*feature* and *variational* (see more details in [this section](parameters.md)). + +_**Differentiability**_: by enabling a *differentiable backend* that supports two differentiable modes: automated differentiation (AD) and parameter shift rule (PSR). +The former is used to differentiate non-gate parameters and enabled for PyTorch-based simulators only. The latter is used to differentiate gate parameters and is enabled for all backends. + +_**Execution**_: by defining which backend the program is expected to be executed on. Qadence supports circuit compilation to the native backend representation. + +!!! note "Backends" + Quantum models can execute on a number of different purpose backends: simulators, emulators or real hardware. + By default, Qadence executes on the [PyQTorch](https://github.com/pasqal-io/PyQ) backend which + implements a state vector simulator. Other choices include the [Pulser](https://pulser.readthedocs.io/en/stable/) + backend (pulse sequences on programmable neutral atom arrays). For more information see + [backend tutorial](backends.md). + +The base `QuantumModel` exposes the following methods: + +* `QuantumModel.run()`: To extract the wavefunction after circuit execution. Not supported by all backends. +* `QuantumModel.sample()`: Sample a bitstring from the resulting quantum state after circuit execution. Supported by all backends. +* `QuantumModel.expectation()`: Compute the expectation value of an observable. + +Every `QuantumModel` is an instance of a +[`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html) that enables differentiability for +its `expectation` method. 
+ +Upon construction of the model, a compiled version of the abstract `QuantumCircuit` is +created: + +```python exec="on" source="material-block" result="json" session="quantum-model" +from qadence import QuantumCircuit, QuantumModel, RX, Z, chain, BackendName, Parameter + +# Construct a parametrized abstract circuit. +# At this point we cannot run anything yet. + +x = Parameter("x") + +n_qubits = 2 +block = chain(RX(0, x), RX(1, x)) +circuit = QuantumCircuit(n_qubits, block) +observable = Z(0) + +# Construct a QuantumModel which will compile +# the abstract circuit to targetted backend. +# By default, diff_mode=DiffMode.AD. +model = QuantumModel(circuit, observable, backend=BackendName.PYQTORCH) + +# The converted circuit is a private attribute and should not +# manually be tampered with, but we can at least verify its there +# by printing it. +print(model._circuit.native) + +from pyqtorch.modules import QuantumCircuit as PyQCircuit # markdown-exec: hide +assert isinstance(model._circuit.native, PyQCircuit) # markdown-exec: hide +``` + +Now, the wavefunction, sample, or expectation value are computable by passing a batch of values : + +```python exec="on" source="material-block" result="json" session="quantum-model" +import torch + +# Set a batch of random parameter values. +values = {"x": torch.rand(3)} + +wf = model.run(values) +print(f"{wf = }") # markdown-exec: hide + +xs = model.sample(values, n_shots=100) +print(f"{xs = }") # markdown-exec: hide + +ex = model.expectation(values) +print(f"{ex = }") # markdown-exec: hide +``` + +You can also measure multiple observables by passing a list of blocks. + +```python exec="on" source="material-block" result="json" session="quantum-model" +# By default, backend=BackendName.PYQTORCH. 
+model = QuantumModel(circuit, [Z(0), Z(1)]) +ex = model.expectation(values) +print(f"{ex = }") # markdown-exec: hide +``` + +### Quantum Neural Network (QNN) + +The `QNN` is a subclass of the `QuantumModel` geared towards quantum machine learning and parameter optimisation. See the [ML +Tools](/tutorials/ml_tools) section or the [`QNN` API reference][qadence.models.QNN] for more detailed +information, and the [parametric program tutorial](parameters.md) for parameterization. diff --git a/docs/tutorials/register.md b/docs/tutorials/register.md new file mode 100644 index 000000000..9a1ddfee1 --- /dev/null +++ b/docs/tutorials/register.md @@ -0,0 +1,122 @@ +In Qadence, quantum programs can be executed by specifying the layout of a register of resources as a lattice. +Built-in [`Register`][qadence.register.Register] types can be used or constructed for arbitrary topologies. +Common register topologies are available and illustrated in the plot below. + +```python exec="on" html="1" +import numpy as np +import matplotlib.pyplot as plt +from qadence.register import LatticeTopology, Register + +argss = [ + (("line", 4), (-1,4), (-2,2)), + (("square", 3), (-2,2), (-2,2)), + (("circle", 8), (-1.5,1.5), (-1.5,1.5)), + (("rectangular_lattice", 2, 3), (-1,3), (-1.5,2.0)), + (("triangular_lattice", 2, 3), (-2,3), (-2,3)), + (("honeycomb_lattice", 2, 3), (-1,7), (-1,7)), + (("all_to_all", 7), (-1.3,1.3), (-1.3,1.3)), +] +# make sure that we are plotting all different constructors +assert len(argss) == len(LatticeTopology)-1 + +s = np.sqrt(len(argss)) +width, height = int(np.floor(s)), int(np.ceil(s)) +while width * height < len(argss): + height += 1 + +fig, axs = plt.subplots(width, height, figsize=(width*5.5, height*2.6)) +fig.suptitle("Predefined register topolgies") +axs = axs.flatten() +for i, (args, xl, yl) in enumerate(argss): + reg = Register.lattice(*args) + plt.sca(axs[i]) + reg.draw() + axs[i].set_title(f"{args[0]}") + axs[i].set(aspect="equal") + axs[i].set_xlim(*xl) 
+ axs[i].set_ylim(*yl) +# make rest of plots invisible +for i in range(len(argss), len(axs)): + ax = axs[i] + ax.set_xticks([]) + ax.set_yticks([]) + ax.spines['top'].set_visible(False) + ax.spines['bottom'].set_visible(False) + ax.spines['left'].set_visible(False) + ax.spines['right'].set_visible(False) +plt.tight_layout() +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(fig)) # markdown-exec: hide +``` + +## Building and drawing registers + +Built-in topologies are directly accessible in the `Register`: + +```python exec="on" source="material-block" html="1" +from qadence import Register + +reg = Register.honeycomb_lattice(2, 3) +import matplotlib.pyplot as plt # markdown-exec: hide +plt.clf() # markdown-exec: hide +reg.draw() +from docs import docsutils # markdown-exec: hide +fig = plt.gcf() # markdown-exec: hide +fig.set_size_inches(3, 3) # markdown-exec: hide +print(docsutils.fig_to_html(plt.gcf())) # markdown-exec: hide +``` + +Arbitrarily shaped registers can be constructed by providing coordinates. + +!!! note "Registers defined from coordinates" + `Register` constructed via the `from_coordinates` method do not define edges in the connectivity graph. + +```python exec="on" source="material-block" html="1" +import numpy as np +from qadence import Register + +reg = Register.from_coordinates( + [(x, np.sin(x)) for x in np.linspace(0, 2*np.pi, 10)] +) + +import matplotlib.pyplot as plt # markdown-exec: hide +plt.clf() # markdown-exec: hide +reg.draw() +fig = plt.gcf() # markdown-exec: hide +fig.set_size_inches(4, 2) # markdown-exec: hide +plt.tight_layout() # markdown-exec: hide +from docs import docsutils # markdown-exec: hide +print(docsutils.fig_to_html(fig)) # markdown-exec: hide +``` + +!!! warning "Units for qubit coordinates" + Qubits coordinates in Qadence are *dimensionless* but converted to the required unit when executed on a backend. + For instance, [Pulser](https://github.com/pasqal-io/Pulser) uses $\mu \textrm{m}$. 
+ +## Connectivity graphs + +Register topology is often asssumed in simulations to be an all-to-all qubit connectivity. +When running on real devices that enable the [digital-analog](/digital_analog_qc/index.md) computing paradigm, +qubit interaction must be specified either by specifying distances between qubits, +or by defining edges in the register connectivity graph. + +It is possible to access the abstract graph nodes and edges to work with if needed as in the [perfect state +transfer](/#perfect-state-transfer) example. + +```python exec="on" source="material-block" result="json" session="reg-usage" +from qadence import Register + +reg = Register.rectangular_lattice(2,3) +print(f"{reg.nodes = }") # markdown-exec: hide +print(f"{reg.edges = }") # markdown-exec: hide +``` + +It is possible to customize qubit interaction through the [`add_interaction`][qadence.transpile.emulate.add_interaction] method. +In that case, `Register.coords` are accessible from the concrete graph: + + +```python exec="on" source="material-block" result="json" session="reg-usage" +print(f"{reg.coords = }") # markdown-exec: hide +``` + +More details about their usage in the digital-analog paradigm can be found in the [digital-analog basics](/digital_analog_qc/analog-basics) section. diff --git a/docs/tutorials/serializ_and_prep.md b/docs/tutorials/serializ_and_prep.md new file mode 100644 index 000000000..a0fe5600d --- /dev/null +++ b/docs/tutorials/serializ_and_prep.md @@ -0,0 +1,61 @@ +Qadence offers convenience functions for serializing and deserializing any +quantum program. This is useful for storing quantum programs and +sending them for execution over the network via an API. + +!!! note + Qadence currently uses a custom JSON serialization as interchange format. Support for QASM + format for digital quantum programs is currently under consideration. 
+ +* `serialize/deserialize`: serialize and deserialize a Qadence object into a dictionary +* `save/load`: save and load a Qadence object to a file with one of the supported + formats. Currently, these are `.json` and the PyTorch-compatible `.pt` format. + +Let's start with serialization into a dictionary. + +```python exec="on" source="material-block" session="seralize_2" +import torch +from qadence import QuantumCircuit, QuantumModel, DiffMode +from qadence import chain, hamiltonian_factory, feature_map, hea, Z +from qadence.serialization import serialize, deserialize + +n_qubits = 4 + +my_block = chain(feature_map(n_qubits, param="x"), hea(n_qubits, depth=2)) +obs = hamiltonian_factory(n_qubits, detuning=Z) + +# Use the block defined above to create a quantum circuit +# serialize/deserialize it +qc = QuantumCircuit(n_qubits, my_block) +qc_dict = serialize(qc) +qc_deserialized = deserialize(qc_dict) +assert qc == qc_deserialized + +# Let's wrap it in a QuantumModel +# and serialize it +qm = QuantumModel(qc, obs, diff_mode=DiffMode.AD) +qm_dict = serialize(qm) +qm_deserialized = deserialize(qm_dict) + +# Check if the loaded QuantumModel returns the same expectation +values = {"x": torch.rand(10)} +assert torch.allclose(qm.expectation(values=values), qm_deserialized.expectation(values=values)) +``` + + +Finally, we can save the quantum circuit and the model with the two supported formats. 
+ +```python exec="on" source="material-block" session="seralize_2" +from qadence.serialization import serialize, deserialize, save, load, SerializationFormat +qc_fname = "circuit" +save(qc, folder=".", file_name=qc_fname, format=SerializationFormat.PT) +loaded_qc = load(f"{qc_fname}.pt") +assert qc == loaded_qc + +qm_fname = "model" +save(qm, folder=".", file_name=qm_fname, format=SerializationFormat.JSON) +model = load(f"{qm_fname}.json") +assert isinstance(model, QuantumModel) +import os # markdown-exec: hide +os.remove(f"{qc_fname}.pt") # markdown-exec: hide +os.remove(f"{qm_fname}.json") # markdown-exec: hide +``` diff --git a/docs/tutorials/state_conventions.md b/docs/tutorials/state_conventions.md new file mode 100644 index 000000000..5ccd8cfbb --- /dev/null +++ b/docs/tutorials/state_conventions.md @@ -0,0 +1,153 @@ +# State Conventions + +Here is an overview of the state conventions used in Qadence together with practical examples. + +## Qubit register order + +Qubit registers in quantum computing are often indexed in increasing or decreasing order from left to right. In Qadence, the convention is qubit indexation in increasing order. For example, a register of four qubits in bra-ket notation reads: + +$$|q_0, q_1, q_2, q_3\rangle$$ + +Furthermore, when displaying a quantum circuit, qubits are ordered from top to bottom. + +## Basis state order + +Basis state ordering refers to how basis states are ordered when considering the conversion from bra-ket notation to the standard linear algebra basis. In Qadence, basis states are ordered in the following manner: + +$$ +\begin{align} +|00\rangle = [1, 0, 0, 0]^T\\ +|01\rangle = [0, 1, 0, 0]^T\\ +|10\rangle = [0, 0, 1, 0]^T\\ +|11\rangle = [0, 0, 0, 1]^T +\end{align} +$$ + +## Endianness + +Endianness refers to the storage convention for binary information (in *bytes*) in a classical memory register. In quantum computing, information is either stored in bits or in qubits. 
The most commonly used conventions are: + +- A **big-endian** system stores the **most significant bit** of a binary word at the smallest memory address. +- A **little-endian** system stores the **least significant bit** of a binary word at the smallest memory address. + +Given the register convention in Qadence, the integer $2$ written in binary big-endian as $10$ can be encoded in a qubit register in both big-endian as $|10\rangle$ or little-endian as $|01\rangle$. + +The convention for Qadence is **big-endian**. + +## Quantum states + +In practical scenarios, conventions regarding *register order*, *basis state order* and *endianness* are very much intertwined, and identical results can be obtained by fixing or varying any of them. In Qadence, we assume that qubit ordering and basis state ordering is fixed, and allow an `endianness` argument that can be passed to control the expected result. Here are a few examples: + +A simple and direct way to exemplify the endianness convention is using convenience functions for state preparation. + +!!! note "Bitstring convention as inputs" + When a bitstring is passed as input to a function for state preparation, it has to be understood in + **big-endian** convention. + +```python exec="on" source="material-block" result="json" session="end-0" +from qadence import Endianness, product_state + +# The state |10>, the 3rd basis state. +state_big = product_state("10", endianness=Endianness.BIG) # or just "Big" + +# The state |01>, the 2nd basis state. +state_little = product_state("10", endianness=Endianness.LITTLE) # or just "Little" + +print(f"State in big endian = {state_big}") # markdown-exec: hide +print(f"State in little endian = {state_little}") # markdown-exec: hide +``` + +Here, a bitword expressed as a Python string to encode the integer 2 in big-endian is used to create the respective basis state in both conventions. 
However, note that the same results can be obtained by fixing the endianness convention as big-endian (thus creating the state $|10\rangle$ in both cases), and changing the basis state ordering. A similar argument holds for fixing both endianness and basis state ordering and simply changing the qubit index order. + +Another example where endianness directly comes into play is when *measuring* a register. A big- or little-endian measurement will choose the first or the last qubit, respectively, as the most significant bit. Let's see this in an example: + +```python exec="on" source="material-block" result="json" session="end-0" +from qadence import I, H, sample + +# Create superposition state: |00> + |01> (normalized) +block = I(0) @ H(1) # Identity on qubit 0, Hadamard on qubit 1 + +# Generate bitword samples following both conventions +# Samples "00" and "01" +result_big = sample(block, endianness=Endianness.BIG) +# Samples "00" and "10" +result_little = sample(block, endianness=Endianness.LITTLE) + +print(f"Sample in big endian = {result_big}") # markdown-exec: hide +print(f"Sample in little endian = {result_little}") # markdown-exec: hide +``` + +In Qadence, endianness can be flipped for many relevant objects: + +```python exec="on" source="material-block" result="json" session="end-0" +from qadence import invert_endianness + +# Equivalent to sampling in little-endian. +flip_big_sample = invert_endianness(result_big) +print(f"Flipped sample = {flip_big_sample}") # markdown-exec: hide + +# Equivalent to a state created in little-endian. +flip_big_state = invert_endianness(state_big) +print(f"Flipped state = {flip_big_state}") # markdown-exec: hide +``` + +## Quantum operations + +When looking at the matricial form of quantum operations, the usage of the term *endianness* becomes slightly abusive. To exemplify, we may consider the `CNOT` operation with `control = 0` and `target = 1`. 
This operation is often described with two different matrices: + +$$ +\text{CNOT(0, 1)} = +\begin{bmatrix} +1 & 0 & 0 & 0 \\ +0 & 1 & 0 & 0 \\ +0 & 0 & 0 & 1 \\ +0 & 0 & 1 & 0 \\ +\end{bmatrix} +\qquad +\text{or} +\qquad +\text{CNOT(0, 1)} = +\begin{bmatrix} +1 & 0 & 0 & 0 \\ +0 & 0 & 0 & 1 \\ +0 & 0 & 1 & 0 \\ +0 & 1 & 0 & 0 \\ +\end{bmatrix} +$$ + +The difference can be easily explained either by considering a different ordering of the qubit indices, or a different ordering of the basis states. In Qadence, both can be retrieved through the `endianness` argument: + +```python exec="on" source="material-block" result="json" session="end-0" +from qadence import block_to_tensor, CNOT + +matrix_big = block_to_tensor(CNOT(0, 1), endianness=Endianness.BIG) +print("CNOT matrix in big endian =\n") # markdown-exec: hide +print(f"{matrix_big.detach()}\n") # markdown-exec: hide +matrix_little = block_to_tensor(CNOT(0, 1), endianness=Endianness.LITTLE) +print("CNOT matrix in little endian =\n") # markdown-exec: hide +print(f"{matrix_little.detach()}") # markdown-exec: hide +``` + +## Backends + +An important part of having clear state conventions is that we need to make sure our results are consistent accross different computational backends, which may have their own conventions. In Qadence, this is taken care for automatically: by calling operations for different backends, the result is expected to be equivalent up to qubit ordering. 
+ +```python exec="on" source="material-block" result="json" session="end-0" +import warnings # markdown-exec: hide +warnings.filterwarnings("ignore") # markdown-exec: hide +from qadence import BackendName, RX, run, sample +import torch + +# RX(pi/4) on qubit 1 +n_qubits = 2 +op = RX(1, torch.pi/4) + +print("Same sampling order in big endian:\n") # markdown-exec: hide +print(f"On PyQTorch = {sample(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.PYQTORCH)}") # markdown-exec: hide +print(f"On Braket = {sample(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.BRAKET)}") # markdown-exec: hide +print(f"On Pulser = {sample(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.PULSER)}\n") # markdown-exec: hide +print("Same wavefunction order:\n") # markdown-exec: hide +print(f"On PyQTorch = {run(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.PYQTORCH)}") # markdown-exec: hide +print(f"On Braket = {run(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.BRAKET)}") # markdown-exec: hide +print(f"On Pulser = {run(n_qubits, op, endianness=Endianness.BIG, backend=BackendName.PULSER)}") # markdown-exec: hide +``` diff --git a/docs/tutorials/state_init.md b/docs/tutorials/state_init.md new file mode 100644 index 000000000..3d2738b03 --- /dev/null +++ b/docs/tutorials/state_init.md @@ -0,0 +1,195 @@ +# State initialization + +Qadence offers convenience routines for preparing initial quantum states. +These routines are divided into two approaches: + +- As a dense matrix. +- From a suitable quantum circuit. This is available for every backend and it should be added +in front of the desired quantum circuit to simulate. + +Let's illustrate the usage of the state preparation routine. + +```python exec="on" source="material-block" result="json" session="seralize" +from qadence import random_state, product_state, is_normalized, StateGeneratorType + +# Random initial state. 
+# the default `type` is StateGeneratorType.HaarMeasureFast +state = random_state(n_qubits=2, type=StateGeneratorType.RANDOM_ROTATIONS) +print("Random initial state generated with rotations:\n") # markdown-exec: hide +print(f"state = {state.detach().numpy().flatten()}\n") # markdown-exec: hide + +# Check the normalization. +assert is_normalized(state) + +# Product state from a given bitstring. +# NB: Qadence follows the big endian convention. +state = product_state("01") +print("Product state corresponding to bitstring '01':\n") # markdown-exec: hide +print(f"state = {state.detach().numpy().flatten()}") # markdown-exec: hide +``` + +Now we see how to generate the product state corresponding to the one above with +a suitable quantum circuit. + +```python exec="on" source="material-block" html="1" +from qadence import product_block, tag, hea, QuantumCircuit +from qadence.draw import display + +state_prep_block = product_block("01") +display(state_prep_block) + +# Let's now prepare a circuit. +n_qubits = 4 + +state_prep_block = product_block("0001") +tag(state_prep_block, "Prep block") + +circuit_block = tag(hea(n_qubits, depth = 2), "Circuit block") + +qc_with_state_prep = QuantumCircuit(n_qubits, state_prep_block, circuit_block) +from qadence.draw import html_string # markdown-exec: hide +print(html_string(qc_with_state_prep), size="4,4") # markdown-exec: hide +``` +Several standard quantum states can be conveniently initialized in Qadence, both in statevector form as well as in block form as shown in following. + +## State vector initialization + +Qadence offers a number of constructor functions for state vector preparation. 
+ +```python exec="on" source="material-block" result="json" session="states" +from qadence import uniform_state, zero_state, one_state + +n_qubits = 3 +batch_size = 2 + +uniform_state = uniform_state(n_qubits, batch_size) +zero_state = zero_state(n_qubits, batch_size) +one_state = one_state(n_qubits, batch_size) +print("Uniform state = \n") # markdown-exec: hide +print(f"{uniform_state}") # markdown-exec: hide +print("Zero state = \n") # markdown-exec: hide +print(f"{zero_state}") # markdown-exec: hide +print("One state = \n") # markdown-exec: hide +print(f"{one_state}") # markdown-exec: hide +``` + +As already seen, product states can be easily created, even in batches: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import product_state, rand_product_state + +# From a bitsring "100" +prod_state = product_state("100", batch_size) +print("Product state = \n") # markdown-exec: hide +print(f"{prod_state}\n") # markdown-exec: hide + +# Or a random product state +rand_state = rand_product_state(n_qubits, batch_size) +print("Random state = \n") # markdown-exec: hide +print(f"{rand_state}") # markdown-exec: hide +``` + +Creating a GHZ state: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import ghz_state + +ghz = ghz_state(n_qubits, batch_size) + +print("GHZ state = \n") # markdown-exec: hide +print(f"{ghz}") # markdown-exec: hide +``` + +Creating a random state uniformly sampled from a Haar measure: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import random_state + +rand_haar_state = random_state(n_qubits, batch_size) + +print("Random state from Haar = \n") # markdown-exec: hide +print(f"{rand_haar_state}") # markdown-exec: hide +``` + +Custom initial states can then be passed to either `run`, `sample` and `expectation` through the `state` argument + +```python exec="on" source="material-block" result="json" session="states" +from 
qadence import random_state, product_state, CNOT, run + +init_state = product_state("10") +final_state = run(CNOT(0, 1), state=init_state) + +print(f"Final state = {final_state}") # markdown-exec: hide +``` + +## Block initialization + +Not all backends support custom statevector initialization, however previous utility functions have their counterparts to initialize the respective blocks: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import uniform_block, one_block + +n_qubits = 3 + +uniform_block = uniform_block(n_qubits) +print(uniform_block) # markdown-exec: hide + +one_block = one_block(n_qubits) +print(one_block) # markdown-exec: hide +``` + +Similarly, for product states: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import product_block, rand_product_block + +product_block = product_block("100") +print(product_block) # markdown-exec: hide + +rand_product_block = rand_product_block(n_qubits) +print(rand_product_block) # markdown-exec: hide +``` + +And GHZ states: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import ghz_block + +ghz_block = ghz_block(n_qubits) +print(ghz_block) # markdown-exec: hide +``` + +Initial state blocks can simply be chained at the start of a given circuit. + +## Utility functions + +Some state vector utility functions are also available. 
We can easily create the probability mass function of a given statevector using `torch.distributions.Categorical` + +```python exec="on" source="material-block" result="json" session="states" +from qadence import random_state, pmf + +n_qubits = 3 + +state = random_state(n_qubits) +distribution = pmf(state) +print(distribution) # markdown-exec: hide +``` + +We can also check if a state is normalized: + +```python exec="on" source="material-block" result="json" session="states" +from qadence import random_state, is_normalized + +state = random_state(n_qubits) +print(is_normalized(state)) +``` + +Or normalize a state: + +```python exec="on" source="material-block" result="json" session="states" +import torch +from qadence import normalize, is_normalized + +state = torch.tensor([[1, 1, 1, 1]], dtype = torch.cdouble) +print(normalize(state)) +``` diff --git a/examples/backends/README.md b/examples/backends/README.md new file mode 100644 index 000000000..8737d0438 --- /dev/null +++ b/examples/backends/README.md @@ -0,0 +1 @@ +Here we show how to use the backends diff --git a/examples/backends/differentiable_backend.py b/examples/backends/differentiable_backend.py new file mode 100644 index 000000000..11a2b5250 --- /dev/null +++ b/examples/backends/differentiable_backend.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import numpy as np +import sympy +import torch + +from qadence import ( + CNOT, + RX, + RY, + DifferentiableBackend, + Parameter, + QuantumCircuit, + chain, + total_magnetization, +) +from qadence.backends.pyqtorch.backend import Backend as PyQTorchBackend + +torch.manual_seed(42) + + +def circuit(n_qubits): + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + theta = Parameter("theta") + + fm = chain(RX(0, 3 * x), RY(1, sympy.exp(x)), RX(0, theta), RY(1, np.pi / 2)) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +if __name__ == "__main__": + 
torch.manual_seed(42) + n_qubits = 2 + batch_size = 5 + + # Making circuit with AD + circ = circuit(n_qubits) + observable = total_magnetization(n_qubits=n_qubits) + quantum_backend = PyQTorchBackend() + diff_backend = DifferentiableBackend(quantum_backend, diff_mode="ad") + diff_circ, diff_obs, embed, params = diff_backend.convert(circ, observable) + + # Running for some inputs + values = {"x": torch.rand(batch_size, requires_grad=True)} + wf = diff_backend.run(diff_circ, embed(params, values)) + expval = diff_backend.expectation(diff_circ, diff_obs, embed(params, values)) + dexpval_x = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), create_graph=True + )[0] + dexpval_xx = torch.autograd.grad( + dexpval_x, values["x"], torch.ones_like(dexpval_x), create_graph=True + )[0] + dexpval_xxtheta = torch.autograd.grad( + dexpval_xx, + list(params.values())[0], + torch.ones_like(dexpval_xx), + retain_graph=True, + )[0] + dexpval_theta = torch.autograd.grad(expval, list(params.values())[0], torch.ones_like(expval))[ + 0 + ] + + # Now running stuff for PSR + diff_backend = DifferentiableBackend(quantum_backend, diff_mode="gpsr") + expval = diff_backend.expectation(diff_circ, diff_obs, embed(params, values)) + dexpval_psr_x = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), create_graph=True + )[0] + dexpval_psr_xx = torch.autograd.grad( + dexpval_psr_x, values["x"], torch.ones_like(dexpval_psr_x), create_graph=True + )[0] + dexpval_psr_xxtheta = torch.autograd.grad( + dexpval_psr_xx, + list(params.values())[0], + torch.ones_like(dexpval_psr_xx), + retain_graph=True, + )[0] + dexpval_psr_theta = torch.autograd.grad( + expval, list(params.values())[0], torch.ones_like(expval) + )[0] + + print(f"Derivative with respect to 'x' with AD: {dexpval_x}") + print(f"Derivative with respect to 'x' with PSR: {dexpval_psr_x}") + print(f"Derivative with respect to 'xx' with AD: {dexpval_xx}") + print(f"Derivative with respect to 'xx' with PSR: 
{dexpval_psr_xx}") + print(f"Derivative with respect to 'xx, theta' with AD: {dexpval_xxtheta}") + print(f"Derivative with respect to 'xx, theta' with PSR: {dexpval_psr_xxtheta}") + print(f"Derivative with respect to 'theta' with ad: {dexpval_theta}") + print(f"Derivative with respect to 'theta' with PSR: {dexpval_psr_theta}") diff --git a/examples/backends/low_level/README.md b/examples/backends/low_level/README.md new file mode 100644 index 000000000..35e5a9539 --- /dev/null +++ b/examples/backends/low_level/README.md @@ -0,0 +1,6 @@ +These examples show how to use the backends directly. That is, how to use +qadence to define the `QuantumCircuit` but have it executed directly in backend without +using the autodiff wrapper. + +Although it is straight-forward, this shouldn't be necessary for most usecases. +Please shoot us a quick message before using this approach for a project. diff --git a/examples/backends/low_level/braket_digital.py b/examples/backends/low_level/braket_digital.py new file mode 100644 index 000000000..ba5785cfa --- /dev/null +++ b/examples/backends/low_level/braket_digital.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +import numpy as np +import sympy +from braket.circuits import Noise +from braket.devices import LocalSimulator + +from qadence import ( + CNOT, + RX, + RZ, + Parameter, + QuantumCircuit, + backend_factory, + chain, + total_magnetization, +) +from qadence.backend import BackendName +from qadence.backends.pytorch_wrapper import DiffMode + +# def circuit(n_qubits): +# # make feature map with input parameters +# fm = chain(RX(0, 3 * x), RZ(1, z), CNOT(0, 1)) +# fm = set_trainable(fm, value=False) + +# # make trainable ansatz +# ansatz = [] +# for i, q in enumerate(range(n_qubits)): +# ansatz.append( +# chain( +# RX(q, f"theta_0{i}"), +# RZ(q, f"theta_1{i}"), +# RX(q, f"theta_2{i}"), +# ) +# ) +# ansatz = kron(ansatz[0], ansatz[1]) +# ansatz *= CNOT(0, 1) + +# block = chain(fm, ansatz) +# circ = 
QuantumCircuit(n_qubits=n_qubits, blocks=block) +# return circ + + +def circuit(n_qubits): + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=False) + + fm = chain(RX(0, 3 * x), RZ(1, sympy.exp(y)), RX(0, np.pi / 2), RZ(1, "theta")) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + return circ + + +if __name__ == "__main__": + import torch + + torch.manual_seed(10) + + n_qubits = 2 + circ = circuit(n_qubits) + + observable = total_magnetization(n_qubits=n_qubits) + braket_backend = backend_factory(backend=BackendName.BRAKET, diff_mode=DiffMode.GPSR) + + batch_size = 1 + values = { + "x": torch.rand(batch_size, requires_grad=True), + "y": torch.rand(batch_size, requires_grad=True), + } + + # you can unpack the conversion result or just use conv.circuit, etc. + conv = braket_backend.convert(circ, observable) + (braket_circuit, braket_observable, embed, params) = conv + + wf = braket_backend.run(braket_circuit, embed(params, values)) + expval = braket_backend.expectation(braket_circuit, braket_observable, embed(params, values)) + dexpval_braket = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), retain_graph=True + )[0] + + pyq_backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + conv = pyq_backend.convert(circ, observable) + + wf = pyq_backend.run(conv.circuit, conv.embedding_fn(conv.params, values)) + expval = pyq_backend.expectation( + conv.circuit, conv.observable, conv.embedding_fn(conv.params, values) + ) + dexpval_pyq = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), retain_graph=True + )[0] + + assert torch.allclose(dexpval_braket, dexpval_pyq, atol=1e-4, rtol=1e-4) + + # sample + samples = braket_backend.sample(braket_circuit, embed(params, values), n_shots=1000) + print(f"Samples: {samples}") + + ## use the backend with the low-level interface + + # retrieve parameters + params 
= embed(params, values) + + # use the native representation directly + native = braket_circuit.native + + # define a noise channel + noise = Noise.Depolarizing(probability=0.1) + + # add noise to every gate in the circuit + native.apply_gate_noise(noise) + + # use density matrix simulator for noise simulations + device = LocalSimulator("braket_dm") + native = braket_backend.assign_parameters(braket_circuit, params) + result = device.run(native, shots=1000).result().measurement_counts + print("With noise") + print(result) + print("Noisy circuit") + + # obtain the braket diagram + print(native.diagram()) diff --git a/examples/backends/low_level/overlap.py b/examples/backends/low_level/overlap.py new file mode 100644 index 000000000..d6e8bc106 --- /dev/null +++ b/examples/backends/low_level/overlap.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +import numpy as np +import torch + +from qadence import ( + RX, + RY, + BackendName, + FeatureParameter, + H, + Overlap, + OverlapMethod, + QuantumCircuit, + QuantumModel, + VariationalParameter, + chain, + kron, + tag, +) + +n_qubits = 1 + +# prepare circuit for bras +param_bra = FeatureParameter("phi") +block_bra = kron(*[RX(qubit, param_bra) for qubit in range(n_qubits)]) +fm_bra = tag(block_bra, tag="feature-map-bra") +circuit_bra = QuantumCircuit(n_qubits, fm_bra) + +# prepare circuit for kets +param_ket = FeatureParameter("psi") +block_ket = kron(*[RX(qubit, param_ket) for qubit in range(n_qubits)]) +fm_ket = tag(block_ket, tag="feature-map-ket") +circuit_ket = QuantumCircuit(n_qubits, fm_ket) + +# values for circuits +values_bra = {"phi": torch.Tensor([np.pi, np.pi / 4, np.pi / 3])} +values_ket = {"psi": torch.Tensor([np.pi, np.pi / 2, np.pi / 5])} + +backend_name = BackendName.PYQTORCH + +# calculate overlap with exact method +ovrlp = Overlap(circuit_bra, circuit_ket, backend=backend_name, method=OverlapMethod.EXACT) +ovrlp_exact = ovrlp(values_bra, values_ket) +print("Exact overlap:\n", ovrlp_exact) + +# 
calculate overlap with shots +ovrlp = Overlap(circuit_bra, circuit_ket, backend=backend_name, method=OverlapMethod.JENSEN_SHANNON) +ovrlp_js = ovrlp(values_bra, values_ket, n_shots=10000) +print("Jensen-Shannon overlap:\n", ovrlp_js) + + +class LearnHadamard(QuantumModel): + def __init__( + self, + train_circuit: QuantumCircuit, + target_circuit: QuantumCircuit, + backend: BackendName = BackendName.PYQTORCH, + ): + super().__init__(circuit=train_circuit, backend=backend) + + self.overlap_fn = Overlap( + train_circuit, target_circuit, backend=backend, method=OverlapMethod.EXACT + ) + + def forward(self): + return self.overlap_fn() + + +phi = VariationalParameter("phi") +theta = VariationalParameter("theta") + +train_circuit = QuantumCircuit(1, chain(RX(0, phi), RY(0, theta))) +target_circuit = QuantumCircuit(1, H(0)) + +model = LearnHadamard(train_circuit, target_circuit) + + +# Applies the Hadamard on the 0 state +print("BEFORE TRAINING:") +print(model.overlap_fn.ket_model.run({}).detach()) +print(model.overlap_fn.run({}).detach()) +print() + +optimizer = torch.optim.Adam(model.parameters(), lr=0.25) +loss_criterion = torch.nn.MSELoss() +n_epochs = 1000 +loss_save = [] + +for i in range(n_epochs): + optimizer.zero_grad() + loss = loss_criterion(torch.tensor([[1.0]]), model()) + loss.backward() + optimizer.step() + loss_save.append(loss.item()) + + +# Applies the Hadamard on the 0 state +print("AFTER TRAINING:") +print(model.overlap_fn.ket_model.run({}).detach()) +print(model.overlap_fn.run({}).detach()) diff --git a/examples/backends/low_level/pyq.py b/examples/backends/low_level/pyq.py new file mode 100644 index 000000000..e5b2b87be --- /dev/null +++ b/examples/backends/low_level/pyq.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import numpy as np +import sympy +import torch + +from qadence import CNOT, RX, RZ, Parameter, QuantumCircuit, chain, total_magnetization +from qadence.backends.pyqtorch.backend import Backend as PyQTorchBackend + 
+torch.manual_seed(42) + + +def circuit(n_qubits): + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=False) + theta = Parameter("theta") + + fm = chain(RX(0, 3 * x), RX(1, sympy.exp(y)), RX(0, theta), RZ(1, np.pi / 2)) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +if __name__ == "__main__": + torch.manual_seed(42) + n_qubits = 2 + batch_size = 5 + + # Making circuit with AD + circ = circuit(n_qubits) + observable = total_magnetization(n_qubits=n_qubits) + backend = PyQTorchBackend() + pyq_circ, pyq_obs, embed, params = backend.convert(circ, observable) + + batch_size = 5 + values = { + "x": torch.rand(batch_size, requires_grad=True), + "y": torch.rand(batch_size, requires_grad=True), + } + + wf = backend.run(pyq_circ, embed(params, values)) + samples = backend.sample(pyq_circ, embed(params, values)) + expval = backend.expectation(pyq_circ, pyq_obs, embed(params, values)) + dexpval_x = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), retain_graph=True + )[0] + dexpval_y = torch.autograd.grad( + expval, values["y"], torch.ones_like(expval), retain_graph=True + )[0] + + print(f"Statevector: {wf}") + print(f"Samples: {samples}") + print(f"Gradient w.r.t. 'x': {dexpval_x}") + print(f"Gradient w.r.t. 
'y': {dexpval_y}") diff --git a/examples/digital-analog/fit-sin.py b/examples/digital-analog/fit-sin.py new file mode 100644 index 000000000..75cfc2b4d --- /dev/null +++ b/examples/digital-analog/fit-sin.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +import sys +from timeit import timeit + +import matplotlib.pyplot as plt +import torch + +from qadence import ( + AnalogRX, + AnalogRZ, + FeatureParameter, + QuantumCircuit, + QuantumModel, + Register, + VariationalParameter, + Z, + add, + chain, + expectation, + wait, +) +from qadence.backends.pytorch_wrapper import DiffMode + +pi = torch.pi +SHOW_PLOTS = sys.argv[1] == "show" if len(sys.argv) == 2 else False + + +def plot(x, y, **kwargs): + xnp = x.detach().cpu().numpy().flatten() + ynp = y.detach().cpu().numpy().flatten() + return plt.plot(xnp, ynp, **kwargs) + + +def scatter(x, y, **kwargs): + xnp = x.detach().cpu().numpy().flatten() + ynp = y.detach().cpu().numpy().flatten() + return plt.scatter(xnp, ynp, **kwargs) + + +# two qubit register +reg = Register.from_coordinates([(0, 0), (0, 12)]) + +# analog ansatz with input parameter +t = FeatureParameter("t") + +block = chain( + AnalogRX(pi / 2), + AnalogRZ(t), + # NOTE: for a better fit, manually set delta + # AnalogRot(duration=1000 / (6 * torch.pi) * t, delta=6 * torch.pi), # RZ + wait(1000 * VariationalParameter("theta", value=0.5)), + AnalogRX(pi / 2), +) + +# observable +obs = add(Z(i) for i in range(reg.n_qubits)) + + +# define problem +x_train = torch.linspace(0, 6, steps=30) +y_train = -0.64 * torch.sin(x_train + 0.33) + 0.1 + +y_pred_initial = expectation(reg, block, obs, values={"t": x_train}) + + +# define quantum model; including digital-analog emulation +circ = QuantumCircuit(reg, block) +model = QuantumModel(circ, obs, diff_mode=DiffMode.GPSR) + +mse_loss = torch.nn.MSELoss() +optimizer = torch.optim.Adam(model.parameters(), lr=5e-2) + + +def loss_fn(x_train, y_train): + return mse_loss(model.expectation({"t": x_train}).squeeze(), 
y_train) + + +print(loss_fn(x_train, y_train)) +print(timeit(lambda: loss_fn(x_train, y_train), number=5)) + +# train +n_epochs = 200 + +for i in range(n_epochs): + optimizer.zero_grad() + + loss = loss_fn(x_train, y_train) + loss.backward() + optimizer.step() + + if (i + 1) % 10 == 0: + print(f"Epoch {i+1:0>3} - Loss: {loss.item()}") + +# visualize +y_pred = model.expectation({"t": x_train}) + +plt.figure() +scatter(x_train, y_train, label="Training points", marker="o", color="green") +plot(x_train, y_pred_initial, label="Initial prediction") +plot(x_train, y_pred, label="Final prediction") + + +plt.legend() +if SHOW_PLOTS: + plt.show() + +assert loss_fn(x_train, y_train) < 0.05 diff --git a/examples/digital-analog/qubo.py b/examples/digital-analog/qubo.py new file mode 100644 index 000000000..41f688d3a --- /dev/null +++ b/examples/digital-analog/qubo.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +import random + +import matplotlib.pyplot as plt +import numpy as np +import torch +from pulser.devices import Chadoq2 +from scipy.optimize import minimize +from scipy.spatial.distance import pdist, squareform + +from qadence import ( + AnalogRX, + AnalogRZ, + QuantumCircuit, + QuantumModel, + Register, + add_interaction, + chain, +) +from qadence.transpile.emulate import ising_interaction + +SHOW_PLOTS = False +torch.manual_seed(0) +np.random.seed(0) +random.seed(0) + + +def qubo_register_coords(Q): + """Compute coordinates for register.""" + bitstrings = [np.binary_repr(i, len(Q)) for i in range(len(Q) ** 2)] + costs = [] + # this takes exponential time with the dimension of the QUBO + for b in bitstrings: + z = np.array(list(b), dtype=int) + cost = z.T @ Q @ z + costs.append(cost) + zipped = zip(bitstrings, costs) + sort_zipped = sorted(zipped, key=lambda x: x[1]) + print(sort_zipped[:3]) + + def evaluate_mapping(new_coords, *args): + """Cost function to minimize. 
Ideally, the pairwise + distances are conserved""" + Q, shape = args + new_coords = np.reshape(new_coords, shape) + new_Q = squareform(Chadoq2.interaction_coeff / pdist(new_coords) ** 6) + return np.linalg.norm(new_Q - Q) + + shape = (len(Q), 2) + costs = [] + np.random.seed(0) + x0 = np.random.random(shape).flatten() + res = minimize( + evaluate_mapping, + x0, + args=(Q, shape), + method="Nelder-Mead", + tol=1e-6, + options={"maxiter": 200000, "maxfev": None}, + ) + return [(x, y) for (x, y) in np.reshape(res.x, (len(Q), 2))] + + +def cost_colouring(bitstring, Q): + z = np.array(list(bitstring), dtype=int) + cost = z.T @ Q @ z + return cost + + +def cost(counter, Q): + cost = sum(counter[key] * cost_colouring(key, Q) for key in counter) + return cost / sum(counter.values()) # Divide by total samples + + +def plot_distribution(counter, solution_bitstrings=["01011", "00111"], ax=None): + if ax is None: + _, ax = plt.subplots(figsize=(12, 6)) + + xs, ys = zip(*sorted(counter.items(), key=lambda item: item[1], reverse=True)) + colors = ["r" if x in solution_bitstrings else "g" for x in xs] + + ax.set_xlabel("bitstrings") + ax.set_ylabel("counts") + ax.bar(xs, ys, width=0.5, color=colors) + ax.tick_params(axis="x", labelrotation=90) + return ax + + +fig, ax = plt.subplots(1, 2, figsize=(12, 4)) + +Q = np.array( + [ + [-10.0, 19.7365809, 19.7365809, 5.42015853, 5.42015853], + [19.7365809, -10.0, 20.67626392, 0.17675796, 0.85604541], + [19.7365809, 20.67626392, -10.0, 0.85604541, 0.17675796], + [5.42015853, 0.17675796, 0.85604541, -10.0, 0.32306662], + [5.42015853, 0.85604541, 0.17675796, 0.32306662, -10.0], + ] +) + + +LAYERS = 2 +reg = Register.from_coordinates(qubo_register_coords(Q)) +block = chain(*[AnalogRX(f"t{i}") * AnalogRZ(f"s{i}") for i in range(LAYERS)]) +emulated = add_interaction( + reg, block, interaction=lambda r, ps: ising_interaction(r, ps, rydberg_level=70) +) +model = QuantumModel(QuantumCircuit(reg, emulated), diff_mode="gpsr") +cnts = 
model.sample({}, n_shots=1000)[0] + +plot_distribution(cnts, ax=ax[0]) + + +def loss(param, *args): + Q = args[0] + param = torch.tensor(param) + model.reset_vparams(param) + C = model.sample({}, n_shots=1000)[0] + return cost(C, Q) + + +scores = [] +params = [] +for repetition in range(30): + try: + res = minimize( + loss, + args=Q, + x0=np.random.uniform(1, 10, size=2 * LAYERS), + method="Nelder-Mead", + tol=1e-5, + options={"maxiter": 20}, + ) + scores.append(res.fun) + params.append(res.x) + except Exception as e: + pass + +model.reset_vparams(params[np.argmin(scores)]) +optimal_count_dict = model.sample({}, n_shots=1000)[0] +plot_distribution(optimal_count_dict, ax=ax[1]) +plt.tight_layout() + +if SHOW_PLOTS: + plt.show() + +xs, _ = zip(*sorted(optimal_count_dict.items(), key=lambda item: item[1], reverse=True)) +assert (xs[0] == "01011" and xs[1] == "00111") or (xs[1] == "01011" and xs[0] == "00111"), f"{xs}" diff --git a/examples/draw.py b/examples/draw.py new file mode 100644 index 000000000..3861e10e3 --- /dev/null +++ b/examples/draw.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import os +from sympy import cos, sin + +from qadence import * +from qadence.draw import display, savefig +from qadence.draw.themes import BaseTheme + + +class CustomTheme(BaseTheme): + background_color = "white" + color = "black" + fontname = "Comic Sans MS" + fontsize = "30" + primitive_node = {"fillcolor": "green", "color": "black"} + variational_parametric_node = {"fillcolor": "blue", "color": "black"} + fixed_parametric_node = {"fillcolor": "red", "color": "black"} + feature_parametric_node = {"fillcolor": "yellow", "color": "black"} + hamevo_cluster = {"fillcolor": "pink", "color": "black"} + add_cluster = {"fillcolor": "white", "color": "black"} + scale_cluster = {"fillcolor": "white", "color": "black"} + + +x = Parameter("x") +y = Parameter("y", trainable=False) + +constants = kron(tag(kron(X(0), Y(1), H(2)), "a"), tag(kron(Z(5), Z(6)), "z")) +constants.tag = 
"const" + +fixed = kron(RX(0, 0.511111), RY(1, 0.8), RZ(2, 0.9), CRZ(3, 4, 2.2), PHASE(6, 1.1)) +fixed.tag = "fixed" + +feat = kron(RX(0, y), RY(1, sin(y)), RZ(2, cos(y)), CRZ(3, 4, y**2), PHASE(6, y)) +feat.tag = "feat" + +vari = kron(RX(0, x**2), RY(1, sin(x)), CZ(3, 2), MCRY([4, 5], 6, "x")) +vari.tag = "vari" + +hamevo = HamEvo(kron(*map(Z, range(constants.n_qubits))), 10) + +b = chain( + feature_map(constants.n_qubits, fm_type="tower"), + hea(constants.n_qubits, 1), + constants, + fixed, + hamevo, + feat, + HamEvo(kron(*map(Z, range(constants.n_qubits))), 10), + AnalogRX("x"), + AnalogRX("x", qubit_support=(3, 4, 5)), + wait("x"), + vari, + add(*map(X, range(constants.n_qubits))), + 2.1 * kron(*map(X, range(constants.n_qubits))), + SWAP(0, 1), + kron(SWAP(0, 1), SWAP(3, 4)), +) +# b = chain(feature_map(4, fm_type="tower"), hea(4,1, strategy=Strategy.SDAQC)) +# d = make_diagram(b) +# d.show() + +circuit = QuantumCircuit(b.n_qubits, b) +# you can use the custom theme like this +# display(circuit, theme=CustomTheme()) + + +if os.environ.get("CI") == "true": + savefig(circuit, "test.svg") +else: + display(circuit, theme="light") + +# FIXME: this is not working yet because total_magnetization blocks completely mess up the +# graph layout for some reason :( +# o = total_magnetization(b.n_qubits) +# m = QuantumModel(c, o) +# d = make_diagram(m) +# d.show() diff --git a/examples/models/qnn.py b/examples/models/qnn.py new file mode 100644 index 000000000..43f1c2e3a --- /dev/null +++ b/examples/models/qnn.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +try: + import matplotlib.pyplot as plt +except ImportError: + plt = None +import numpy as np +import torch +from torch.autograd import grad + +from qadence import QNN, QuantumCircuit, chebyshev_feature_map, hea, total_magnetization +from qadence.transpile import set_trainable + +torch.manual_seed(42) +np.random.seed(42) + +do_plotting = False + + +# Equation we want to learn +def f(x): + return 3 * x**2 + 2 * x 
- 1 + + +# calculation of constant terms +# -> d2ydx2 = 6 +# -> dydx = 6 * x + 2 +# dy[0] = 2 +# y[0] = -1 + + +# Using torch derivatives directly +def d2y(ufa, x): + y = ufa(x) + dydx = grad(y, x, torch.ones_like(y), create_graph=True, retain_graph=True)[0] + d2ydx2 = grad(dydx, x, torch.ones_like(dydx), create_graph=True, retain_graph=True)[0] + return d2ydx2 - 6.0 + + +def dy0(ufa, x): + y = ufa(x) + dydx = grad(y, x, torch.ones_like(y), create_graph=True)[0] + return dydx - 2.0 + + +n_qubits = 5 +batch_size = 100 +x = torch.linspace(-0.5, 0.5, batch_size).reshape(batch_size, 1).requires_grad_() +x0 = torch.zeros((1, 1), requires_grad=True) +x1 = torch.zeros((1, 1), requires_grad=True) + +feature_map = set_trainable(chebyshev_feature_map(n_qubits=5), False) +ansatz = set_trainable(hea(n_qubits=5, depth=5, periodic=True)) +circ = QuantumCircuit(5, feature_map, ansatz) +ufa = QNN(circ, observable=total_magnetization(n_qubits=5)) + +x = torch.linspace(-0.5, 0.5, 100).reshape(-1, 1) +y = ufa(x) + +if do_plotting: + xn = x.detach().numpy().reshape(-1) + yn = y.detach().numpy().reshape(-1) + yt = f(x) + plt.plot(xn, yt, label="Truth") + plt.plot(xn, yn, label="Pred.") + plt.legend() + plt.show() diff --git a/examples/models/quantum_model.py b/examples/models/quantum_model.py new file mode 100644 index 000000000..539b42d1f --- /dev/null +++ b/examples/models/quantum_model.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +import numpy as np +import sympy +import torch + +from qadence import ( + CNOT, + RX, + RZ, + Parameter, + QuantumCircuit, + QuantumModel, + chain, + total_magnetization, +) +from qadence.backend import BackendName +from qadence.backends.pytorch_wrapper import DiffMode + +torch.manual_seed(42) + + +def circuit(n_qubits): + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=False) + + fm = chain(RX(0, 3 * x), RZ(1, sympy.exp(y)), RX(0, np.pi / 2), RZ(1, "theta")) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + return 
QuantumCircuit(n_qubits, block) + + +if __name__ == "__main__": + n_qubits = 2 + batch_size = 5 + + observable = total_magnetization(n_qubits) + model = QuantumModel( + circuit(n_qubits), + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + print(list(model.parameters())) + nx = torch.rand(batch_size, requires_grad=True) + ny = torch.rand(batch_size, requires_grad=True) + values = {"x": nx, "y": ny} + + print(f"Expectation values: {model.expectation(values)}") + + # This works! + model.zero_grad() + loss = torch.mean(model.expectation(values)) + loss.backward() + + print("Gradients using autograd: \n") + print("Gradient in model: \n") + for key, param in model.named_parameters(): + print(f"{key}: {param.grad}") + + # This works too! + print("Gradient of inputs: \n") + print(torch.autograd.grad(torch.mean(model.expectation(values)), nx)) + print(torch.autograd.grad(torch.mean(model.expectation(values)), ny)) + + # Now using PSR + model = QuantumModel( + circuit(n_qubits), + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + model.zero_grad() + loss = torch.mean(model.expectation(values)) + loss.backward() + + print("Gradients using PSR: \n") + print("Gradient in model: \n") + for key, param in model.named_parameters(): + print(f"{key}: {param.grad}") + + # This works too! 
+ print("Gradient of inputs: \n") + print(torch.autograd.grad(torch.mean(model.expectation(values)), nx)) + print(torch.autograd.grad(torch.mean(model.expectation(values)), ny)) diff --git a/examples/quick_start.py b/examples/quick_start.py new file mode 100644 index 000000000..aa6659d9f --- /dev/null +++ b/examples/quick_start.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import torch + +# qadence has many submodules +from qadence.blocks import kron # block system +from qadence.circuit import QuantumCircuit # circuit to assemble quantum operations +from qadence.ml_tools import TrainConfig, train_with_grad # tools for ML simulations +from qadence.models import QuantumModel # quantum model for execution +from qadence.operations import RX, HamEvo, X, Y, Zero # quantum operations +from qadence.parameters import VariationalParameter # trainable parameters + +# all of the above can also be imported directly from the qadence namespace + +n_qubits = 4 +n_circ_params = n_qubits + +# define some variational parameters +circ_params = [VariationalParameter(f"theta{i}") for i in range(n_circ_params)] + +# block with single qubit rotations +rot_block = kron(RX(i, param) for i, param in enumerate(circ_params)) + +# block with Hamiltonian evolution +t_evo = 2.0 +generator = 0.25 * X(0) + 0.25 * X(1) + 0.5 * Y(2) + 0.5 * Y(3) +ent_block = HamEvo(generator, t_evo) + +# create an observable to measure with tunable coefficients +obs_params = [VariationalParameter(f"phi{i}") for i in range(n_qubits)] +obs = Zero() +for i in range(n_qubits): + obs += obs_params[i] * X(i) + +# create circuit and executable quantum model +circuit = QuantumCircuit(n_qubits, rot_block, ent_block) +model = QuantumModel(circuit, observable=obs, diff_mode="ad") + +samples = model.sample({}, n_shots=1000) +print(samples) # this returns a Counter instance + +# compute the expectation value of the observable +expval = model.expectation({}) +print(expval) + + +# define a loss function and train the 
model +# using qadence built-in ML tools +def loss_fn(model_: QuantumModel, _): + return model_.expectation({}).squeeze(), {} + + +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) +config = TrainConfig(max_iter=100, checkpoint_every=10, print_every=10) +train_with_grad(model, None, optimizer, config, loss_fn=loss_fn) diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 000000000..bb89cf5ab --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,151 @@ +site_name: Qadence +repo_url: "https://github.com/pasqal-io/qadence/" +site_url: "https://pasqal-io.github.io/qadence/" +repo_name: "qadence" + +nav: + + - Qadence: + - Qadence: index.md + - Getting Started: tutorials/getting_started.md + - Quantum Models: tutorials/quantummodels.md + - Parametric Programs: tutorials/parameters.md + - Quantum Registers: tutorials/register.md + - State Conventions: tutorials/state_conventions.md + - State Initialization: tutorials/state_init.md + - Arbitrary Hamiltonians: tutorials/hamiltonians.md + - Wavefunction Overlaps: tutorials/overlap.md + - Serialization: tutorials/serializ_and_prep.md + - Backends: tutorials/backends.md + - Tools for Classical and Quantum Machine Learning: tutorials/qml_tools.md + + - Digital-Analog Quantum Computing: + - digital_analog_qc/daqc-basics.md + - Digital-Analog Emulation: + - Basics: digital_analog_qc/analog-basics.md + - Solve a QUBO Problem: digital_analog_qc/analog-qubo.md + - Pulse-level Programming with Pulser: digital_analog_qc/pulser-basic.md + - DAQC Transform: + - CNOT with Interacting Qubits: digital_analog_qc/daqc-cnot.md + - Digital-analog QFT: digital_analog_qc/daqc-qft.md + + - Variational Quantum Algorithms: + - qml/index.md + - Quantum Circuit Learning: qml/qcl.md + - QAOA for Solving MaxCut: qml/qaoa.md + + - Advanced Tutorials: + - Quantum circuits differentiation: advanced_tutorials/differentiability.md + - Custom QuantumModels: advanced_tutorials/custom-models.md + - Ground-state VQE: advanced_tutorials/vqe.md + + - API: + 
- Block system: qadence/blocks.md + - Operations: qadence/operations.md + - Register: qadence/register.md + - QuantumCircuit: qadence/quantumcircuit.md + - Parameters: qadence/parameters.md + - State Preparation: qadence/states.md + - Constructors: qadence/constructors.md + - Transpilation: qadence/transpile.md + - Execution: qadence/execution.md + - Quantum Models: models.md + - Machine Learning Tools: qadence/ml_tools.md + - Serialization: qadence/serialization.md + - Types: qadence/types.md + + - Backends: + - Abstract Backend: backends/backend.md + - PyQTorch: backends/pyqtorch.md + - Amazon Braket: backends/braket.md + - Pulser: backends/pulser.md + - DifferentiableBackend: backends/differentiable.md + + - Development: + - Architecture and Sharp Bits: development/architecture.md + - Drawing: development/draw.md + - Contributing: development/contributing.md + +edit_uri: edit/main/docs/ + +theme: + name: material + features: + - content.code.annotate + - content.action.view + - content.action.edit + - navigation.tabs + - navigation.indexes + - navigation.sections + - content.code.copy + - content.code.annotate + + palette: + - media: "(prefers-color-scheme: light)" + scheme: default + primary: custom + accent: custom + toggle: + icon: material/weather-sunny + name: Switch to dark mode + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: custom + accent: custom + toggle: + icon: material/weather-night + name: Switch to light mode + +markdown_extensions: +- admonition # for notes +- footnotes +- pymdownx.arithmatex: # for mathjax + generic: true +- pymdownx.highlight: + anchor_linenums: true +- pymdownx.inlinehilite +- pymdownx.snippets +- pymdownx.details +- pymdownx.superfences: + custom_fences: + - name: python + class: python + validator: "!!python/name:markdown_exec.validator" + format: "!!python/name:markdown_exec.formatter" + +plugins: +- search +- section-index +- mkdocstrings: + default_handler: python + handlers: + python: + selection: + 
filters: + - "!^_" # exclude all members starting with _ + - "^__init__$" # but always include __init__ modules and methods + - "^__new__$" # but always include __new__ modules and methods + options: + show_root_toc_entry: false + heading_level: 3 + merge_init_into_class: true + docstring_section_style: spacy + +- markdown-exec + +extra: + version: + provider: mike + +# To get nice tabs +extra_css: +- css/mkdocstrings.css + +# For mathjax +extra_javascript: + - javascripts/mathjax.js + - https://polyfill.io/v3/polyfill.min.js?features=es6 + - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js + +watch: +- qadence diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..531b241b1 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,200 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "qadence" +description = "Pasqal interface for circuit-based quantum computing SDKs" +readme = "README.md" +authors = [ + { name = "Aleksander Wennersteen", email = "aleksander.wennersteen@pasqal.com" }, + { name = "Gert-Jan Both", email = "gert-jan.both@pasqal.com" }, + { name = "Niklas Heim", email = "niklas.heim@pasqal.com" }, + { name = "Mario Dagrada", email = "mario.dagrada@pasqal.com" }, + { name = "Vincent Elfving", email = "vincent.elfving@pasqal.com" }, + { name = "Dominik Seitz", email = "dominik.seitz@pasqal.com" }, + { name = "Roland Guichard", email = "roland.guichard@pasqal.com" }, + { name = "Joao P. 
Moutinho", email = "joao.moutinho@pasqal.com"}, + { name = "Vytautas Abramavicius", email = "vytautas.abramavicius@pasqal.com" }, +] +requires-python = ">=3.9,<3.11" +license = {text = "Apache 2.0"} +version = "0.9.1" +classifiers=[ + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "openfermion", + "torch", + "sympytorch>=0.1.2", + "rich", + "tensorboard>=2.12.0", + "deepdiff", + "jsonschema", + "nevergrad", + "scipy<1.11", + "pyqtorch==0.5.0", + "matplotlib" +] + +[tool.hatch.metadata] +allow-direct-references = true +allow-ambiguous-features = true + +[project.optional-dependencies] +pulser = ["pulser>=v0.12.0"] +braket = ["amazon-braket-sdk"] +visualization = [ + "graphviz", + # FIXME: will be needed once we support latex labels + # "latex2svg @ git+https://github.com/Moonbase59/latex2svg.git#egg=latex2svg", + # "scour", +] +all = [ + "pulser>=0.12.0", + "amazon-braket-sdk", + "graphviz", + # FIXME: will be needed once we support latex labels + # "latex2svg @ git+https://github.com/Moonbase59/latex2svg.git#egg=latex2svg", + # "scour", + "scipy<1.11" +] + +[tool.hatch.envs.default] +dependencies = [ + "flaky", + "hypothesis", + "pytest", + "pytest-cov", + "pytest-mypy", + "pytest-xdist", + "ipykernel", + "pre-commit", + "black", + "isort", + "ruff", +] +features = ["all"] + +[tool.hatch.envs.default.scripts] +test = "pytest --cov-report lcov --cov-config=pyproject.toml --cov=qadence --cov=tests --ignore=./tests/test_examples.py --ignore=./tests/test_notebooks.py {args}" +test-examples = "pytest ./tests/test_examples.py {args}" +test-notebooks = "pytest ./tests/test_notebooks.py {args}" +no-cov = "cov --no-cov {args}" +test-docs = "mkdocs build 
--clean --strict" +test-all = "pytest -n auto {args} && mkdocs build --clean --strict" + +[tool.pytest.ini_options] +markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", +] +testpaths = ["tests"] +addopts = """-vvv""" +xfail_strict = true +filterwarnings = [ + "ignore:Call to deprecated create function FieldDescriptor", + "ignore:Call to deprecated create function Descriptor", + "ignore:Call to deprecated create function EnumDescriptor", + "ignore:Call to deprecated create function EnumValueDescriptor", + "ignore:Call to deprecated create function FileDescriptor", + "ignore:Call to deprecated create function OneofDescriptor", + "ignore:distutils Version classes are deprecated.", + "ignore::DeprecationWarning" +] + + +[tool.hatch.envs.docs] +dependencies = [ + "mkdocs==1.5.2", + "mkdocs-material", + "mkdocstrings", + "mkdocstrings-python", + "mkdocs-section-index==0.3.6", + "mkdocs-exclude", + "markdown-exec", + "mike", +] +features = ["pulser", "braket", "visualization"] + +[tool.hatch.envs.docs.scripts] +build = "mkdocs build --clean --strict" +serve = "mkdocs serve --dev-addr localhost:8000" + +[[tool.hatch.envs.test.matrix]] +python = ["39", "310"] + +[tool.hatch.envs.tests] +features = ["all"] + +[tool.hatch.build.targets.sdist] +exclude = [ + "/.gitignore", + "/.gitlab-ci-yml", + "/.pre-commit-config.yml", + "/tests", + "/docs", + "/examples", +] + +[tool.hatch.build.targets.wheel] +packages = ["qadence"] + +[tool.coverage.run] +branch = true +parallel = true + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] + +[tool.ruff] +select = ["E", "F", "I", "Q"] +extend-ignore = ["F841","F403"] +line-length = 100 + +[tool.ruff.isort] +required-imports = ["from __future__ import annotations"] + +[tool.ruff.per-file-ignores] +"__init__.py" = ["F401"] +"operations.py" = ["E742"] # Avoid ambiguous class name warning for identity. 
+ +[tool.ruff.mccabe] +max-complexity = 15 + +[tool.ruff.flake8-quotes] +docstring-quotes = "double" + +[tool.black] +line-length = 100 +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist +)/ +''' + +[tool.mypy] +python_version = "3.10" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +no_implicit_optional = false +ignore_missing_imports = true diff --git a/qadence/__init__.py b/qadence/__init__.py new file mode 100644 index 000000000..c4f673fe6 --- /dev/null +++ b/qadence/__init__.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from importlib import import_module + +from torch import cdouble, set_default_dtype +from torch import float64 as torchfloat64 + +from .backend import * +from .backends import * +from .blocks import * +from .circuit import * +from .constructors import * +from .errors import * +from .execution import * +from .measurements import * +from .ml_tools import * +from .models import * +from .operations import * +from .overlap import * +from .parameters import * +from .register import * +from .serialization import * +from .states import * +from .transpile import * +from .types import * +from .utils import * + +DEFAULT_FLOAT_DTYPE = torchfloat64 +DEFAULT_COMPLEX_DTYPE = cdouble +set_default_dtype(DEFAULT_FLOAT_DTYPE) +""" +The imports above fetch the functions defined in the __all__ of each sub-module +to the qadence name space. Make sure each added submodule has the respective definition: + + - `__all__ = ["function0", "function1", ...]` + +Furthermore, add the submodule to the list below to automatically build +the __all__ of the qadence namespace. Make sure to keep alphabetical ordering. 
+""" + +list_of_submodules = [ + ".backends", + ".blocks", + ".circuit", + ".constructors", + ".errors", + ".execution", + ".measurements", + ".ml_tools", + ".models", + ".operations", + ".overlap", + ".parameters", + ".register", + ".serialization", + ".states", + ".transpile", + ".types", + ".utils", +] + +__all__ = [] +for submodule in list_of_submodules: + __all_submodule__ = getattr(import_module(submodule, package="qadence"), "__all__") + __all__ += __all_submodule__ diff --git a/qadence/backend.py b/qadence/backend.py new file mode 100644 index 000000000..29348840a --- /dev/null +++ b/qadence/backend.py @@ -0,0 +1,326 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from collections import Counter +from dataclasses import dataclass, fields +from typing import Any, Callable, Iterator, Tuple + +from openfermion import QubitOperator +from torch import Tensor +from torch.nn import Module + +from qadence.blocks import ( + AbstractBlock, + CompositeBlock, + ParametricBlock, + PrimitiveBlock, + ScaleBlock, + TimeEvolutionBlock, + embedding, +) +from qadence.blocks.analog import ConstantAnalogRotation, WaitBlock +from qadence.circuit import QuantumCircuit +from qadence.measurements import Measurements +from qadence.parameters import stringify +from qadence.types import BackendName, DiffMode, Endianness + + +@dataclass +class BackendConfiguration: + _use_gate_params: bool = True + use_sparse_observable: bool = False + use_gradient_checkpointing: bool = False + use_single_qubit_composition: bool = False + + def available_options(self) -> str: + """Return as a string the available fields with types of the configuration + + Returns: + str: a string with all the available fields, one per line + """ + conf_msg = "" + for field in fields(self): + if not field.name.startswith("_"): + conf_msg += ( + f"Name: {field.name} - Type: {field.type} - Default value: {field.default}\n" + ) + return conf_msg + + @classmethod + def from_dict(cls, values: 
dict) -> BackendConfiguration: + field_names = {field.name for field in fields(cls)} + init_data = {} + + for key, value in values.items(): + if key not in field_names: + raise ValueError(f"Unknown field in the configuration: '{key}'.") + else: + init_data[key] = value + + return cls(**init_data) + + def get_param_name(self, blk: AbstractBlock) -> Tuple[str, ...]: + """Return parameter names for the current backend. Depending on which backend is in use this + function returns either UUIDs or expressions of parameters.""" + param_ids: Tuple + # FIXME: better type hiearchy? + types = (TimeEvolutionBlock, ParametricBlock, ConstantAnalogRotation, WaitBlock) + if not isinstance(blk, types): + raise TypeError(f"Can not infer param name from {type(blk)}") + else: + if self._use_gate_params: + param_ids = tuple(blk.parameters.uuids()) + else: + param_ids = tuple(map(stringify, blk.parameters.expressions())) + return param_ids + + +@dataclass(frozen=True, eq=True) +class Backend(ABC): + """The abstract class that defines the interface for the backends + + Attributes: + name: backend unique string identifier + supports_ad: whether or not the backend has a native autograd + supports_bp: whether or not the backend has a native backprop + is_remote: whether computations are executed locally or remotely on this + backend, useful when using cloud platforms where credentials are + needed for example. + with_measurements: whether it supports counts or not + with_noise: whether to add realistic noise or not + """ + + name: BackendName + supports_ad: bool + support_bp: bool + is_remote: bool + with_measurements: bool + native_endianness: Endianness + + # FIXME: should this also go into the configuration? 
+ with_noise: bool + + # additional configuration specific for each backend + # some backends might not provide any additional configuration + # but they will still have an empty Configuration class + config: BackendConfiguration + + def __post_init__(self) -> None: + if isinstance(self.config, dict): + default_conf = self.default_configuration() + ConfCls = default_conf.__class__ + + try: + new_conf = ConfCls.from_dict(self.config) + + # need this since it is a frozen dataclass + # see reference documentation + # https://docs.python.org/3/library/dataclasses.html#frozen-instances + super().__setattr__("config", new_conf) + + except ValueError as e: + raise ValueError(f"Wrong configuration provided.\n{str(e)}") + + @abstractmethod + def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit: + """Converts an abstract `QuantumCircuit` to the native backend representation. + + Arguments: + circuit: A circuit, for example: `QuantumCircuit(2, X(0))` + + Returns: + A converted circuit `c`. You can access the original, arbstract circuit via `c.abstract` + and the converted (or backend *native*) circuit via `c.native`. + """ + raise NotImplementedError + + @abstractmethod + def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable: + """Converts an abstract observable (which is just an `AbstractBlock`) to the native backend + representation. + + Arguments: + observable: An observable. + n_qubits: Number of qubits the observable covers. This is typically `circuit.n_qubits`. + + Returns: + A converted observable `o`. You can access the original, arbstract observable via + `o.abstract` and the converted (or backend *native*) observable via `o.native`. + """ + raise NotImplementedError + + def convert( + self, circuit: QuantumCircuit, observable: list[AbstractBlock] | AbstractBlock | None = None + ) -> Converted: + """Convert an abstract circuit (and optionally and observable) to their native + representation. 
Additionally this function constructs an embedding function which maps from + user-facing parameters to device parameters (read more on parameter embedding + [here][qadence.blocks.embedding.embedding]). + """ + + def check_observable(obs_obj: Any) -> AbstractBlock: + if isinstance(obs_obj, QubitOperator): + from qadence.blocks.manipulate import from_openfermion + + assert len(obs_obj.terms) > 0, "Make sure to give a non-empty qubit hamiltonian" + + return from_openfermion(obs_obj) + + elif isinstance(obs_obj, (CompositeBlock, PrimitiveBlock, ScaleBlock)): + from qadence.blocks.utils import block_is_qubit_hamiltonian + + assert block_is_qubit_hamiltonian( + obs_obj + ), "Make sure the QubitHamiltonian consists only of Pauli operators X, Y, Z, I" + return obs_obj + raise TypeError( + "qubit_hamiltonian should be a Pauli-like AbstractBlock or a QubitOperator" + ) + + conv_circ = self.circuit(circuit) + circ_params, circ_embedding_fn = embedding( + conv_circ.abstract.block, self.config._use_gate_params + ) + params = circ_params + if observable is not None: + observable = observable if isinstance(observable, list) else [observable] + conv_obs = [] + obs_embedding_fn_list = [] + + for obs in observable: + obs = check_observable(obs) + c_obs = self.observable(obs, max(circuit.n_qubits, obs.n_qubits)) + obs_params, obs_embedding_fn = embedding( + c_obs.abstract, self.config._use_gate_params + ) + params.update(obs_params) + obs_embedding_fn_list.append(obs_embedding_fn) + conv_obs.append(c_obs) + + def embedding_fn_dict(a: dict, b: dict) -> dict: + embedding_dict = circ_embedding_fn(a, b) + for o in obs_embedding_fn_list: + embedding_dict.update(o(a, b)) + return embedding_dict + + return Converted(conv_circ, conv_obs, embedding_fn_dict, params) + + def embedding_fn(a: dict, b: dict) -> dict: + return circ_embedding_fn(a, b) + + return Converted(conv_circ, None, embedding_fn, params) + + @abstractmethod + def sample( + self, + circuit: ConvertedCircuit, + param_values: 
dict[str, Tensor] = {}, + n_shots: int = 1000, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + """Sample bit strings. + + Arguments: + circuit: A converted circuit as returned by `backend.circuit`. + param_values: _**Already embedded**_ parameters of the circuit. See + [`embedding`][qadence.blocks.embedding.embedding] for more info. + n_shots: Number of shots to sample. + state: Initial state. + endianness: Endianness of the resulting bitstrings. + """ + raise NotImplementedError + + @abstractmethod + def run( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Run a circuit and return the resulting wave function. + + Arguments: + circuit: A converted circuit as returned by `backend.circuit`. + param_values: _**Already embedded**_ parameters of the circuit. See + [`embedding`][qadence.blocks.embedding.embedding] for more info. + state: Initial state. + endianness: Endianness of the resulting samples. + + Returns: + A list of Counter objects where each key represents a bitstring + and its value the number of times it has been sampled from the given wave function. + """ + raise NotImplementedError + + @abstractmethod + def expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Compute the expectation value of the `circuit` with the given `observable`. + + Arguments: + circuit: A converted circuit as returned by `backend.circuit`. + param_values: _**Already embedded**_ parameters of the circuit. See + [`embedding`][qadence.blocks.embedding.embedding] for more info. + state: Initial state. + endianness: Endianness of the resulting bitstrings. 
+ """ + raise NotImplementedError + + @abstractmethod + def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any: + raise NotImplementedError + + @staticmethod + @abstractmethod + def _overlap(bras: Tensor, kets: Tensor) -> Tensor: + raise NotImplementedError + + @staticmethod + @abstractmethod + def default_configuration() -> BackendConfiguration: + raise NotImplementedError + + def default_diffmode(self) -> DiffMode: + if self.supports_ad: + return DiffMode.AD + else: + return DiffMode.GPSR + + +class ConvertedCircuit(Module): + def __init__(self, native: Any, abstract: QuantumCircuit, original: QuantumCircuit): + super().__init__() + self.native = native + self.abstract = abstract + self.original = original + + +class ConvertedObservable(Module): + def __init__(self, native: Any, abstract: AbstractBlock, original: AbstractBlock): + super().__init__() + self.native = native + self.abstract = abstract + self.original = original + + +@dataclass(frozen=True) +class Converted: + circuit: ConvertedCircuit + observable: list[ConvertedObservable] | ConvertedObservable | None + embedding_fn: Callable + params: dict[str, Tensor] + + def __iter__(self) -> Iterator: + yield self.circuit + yield self.observable + yield self.embedding_fn + yield self.params diff --git a/qadence/backends/__init__.py b/qadence/backends/__init__.py new file mode 100644 index 000000000..4d3ad8c77 --- /dev/null +++ b/qadence/backends/__init__.py @@ -0,0 +1,8 @@ +# flake8: noqa F401 +from __future__ import annotations + +from .api import backend_factory, config_factory +from .pytorch_wrapper import DifferentiableBackend, DiffMode + +# Modules to be automatically added to the qadence namespace +__all__ = ["backend_factory", "config_factory", "DifferentiableBackend", "DiffMode"] diff --git a/qadence/backends/api.py b/qadence/backends/api.py new file mode 100644 index 000000000..06993c26c --- /dev/null +++ b/qadence/backends/api.py @@ -0,0 +1,62 @@ +from 
__future__ import annotations + +from qadence.backend import Backend, BackendConfiguration +from qadence.backends.pytorch_wrapper import DifferentiableBackend, DiffMode +from qadence.extensions import available_backends, set_backend_config +from qadence.types import BackendName + +__all__ = ["backend_factory", "config_factory"] + + +def backend_factory( + backend: BackendName | str, + diff_mode: DiffMode | str | None = None, + configuration: BackendConfiguration | dict | None = None, +) -> Backend | DifferentiableBackend: + backend_inst: Backend | DifferentiableBackend + backend_name = BackendName(backend) + backends = available_backends() + + try: + BackendCls = backends[backend_name] + except (KeyError, ValueError): + raise NotImplementedError(f"The requested backend '{backend_name}' is not implemented.") + + default_config = BackendCls.default_configuration() + if configuration is None: + configuration = default_config + elif isinstance(configuration, dict): + configuration = config_factory(backend_name, configuration) + else: + # NOTE: types have to match exactly, hence we use `type` + if not isinstance(configuration, type(BackendCls.default_configuration())): + raise ValueError( + f"Given config class '{type(configuration)}' does not match the backend", + f" class: '{BackendCls}'. 
Expected: '{type(BackendCls.default_configuration())}.'", + ) + + # Create the backend + backend_inst = BackendCls( + config=configuration + if configuration is not None + else BackendCls.default_configuration() # type: ignore[attr-defined] + ) + + # Set backend configurations which depend on the differentiation mode + set_backend_config(backend_inst, diff_mode) + + if diff_mode is not None: + backend_inst = DifferentiableBackend(backend_inst, DiffMode(diff_mode)) + return backend_inst + + +def config_factory(name: BackendName | str, config: dict) -> BackendConfiguration: + backends = available_backends() + + try: + BackendCls = backends[BackendName(name)] + except KeyError: + raise NotImplementedError(f"The requested backend '{name}' is not implemented!") + + BackendConfigCls = type(BackendCls.default_configuration()) + return BackendConfigCls(**config) # type: ignore[no-any-return] diff --git a/qadence/backends/backends/__init__.py b/qadence/backends/backends/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/qadence/backends/braket/__init__.py b/qadence/backends/braket/__init__.py new file mode 100644 index 000000000..714abe652 --- /dev/null +++ b/qadence/backends/braket/__init__.py @@ -0,0 +1,4 @@ +from __future__ import annotations + +from .backend import Backend, Configuration +from .convert_ops import supported_gates diff --git a/qadence/backends/braket/backend.py b/qadence/backends/braket/backend.py new file mode 100644 index 000000000..5c7aa8687 --- /dev/null +++ b/qadence/backends/braket/backend.py @@ -0,0 +1,200 @@ +from __future__ import annotations + +from collections import Counter +from dataclasses import dataclass +from typing import Any + +import numpy as np +import torch +from braket.circuits import Circuit as BraketCircuit +from braket.devices import LocalSimulator +from torch import Tensor + +from qadence.backend import Backend as BackendInterface +from qadence.backend import BackendName, ConvertedCircuit, 
ConvertedObservable +from qadence.backends.utils import to_list_of_dicts +from qadence.blocks import AbstractBlock, block_to_tensor +from qadence.circuit import QuantumCircuit +from qadence.measurements import Measurements +from qadence.overlap import overlap_exact +from qadence.utils import Endianness + +from .config import Configuration +from .convert_ops import convert_block + + +def promote_parameters(parameters: dict[str, Tensor | float]) -> dict[str, float]: + float_params = {} + for name, value in parameters.items(): + try: + v = value if isinstance(value, float) else value.item() + float_params[name] = v + except ValueError: + raise ValueError("Currently batching is not supported with Braket digital") + return float_params + + +@dataclass(frozen=True, eq=True) +class Backend(BackendInterface): + # set standard interface parameters + name: BackendName = BackendName.BRAKET + supports_ad: bool = False + support_bp: bool = False + is_remote: bool = False + with_measurements: bool = True + with_noise: bool = False + native_endianness: Endianness = Endianness.BIG + config: Configuration = Configuration() + + # braket specifics + # TODO: include it in the configuration? + _device: LocalSimulator = LocalSimulator() + + def __post_init__(self) -> None: + if self.is_remote: + raise NotImplementedError("Braket backend does not support cloud execution yet") + + def circuit(self, circ: QuantumCircuit) -> ConvertedCircuit: + from qadence.transpile import digitalize, fill_identities, transpile + + # make sure that we don't have empty wires. braket does not like it. 
+ transpilations = [fill_identities, digitalize] + abstract_circ = transpile(*transpilations)(circ) # type: ignore[call-overload] + native = BraketCircuit(convert_block(abstract_circ.block)) + return ConvertedCircuit(native=native, abstract=abstract_circ, original=circ) + + def observable(self, obs: AbstractBlock, n_qubits: int = None) -> Any: + if n_qubits is None: + n_qubits = obs.n_qubits + native = block_to_tensor( + block=obs, + values={}, + qubit_support=tuple([i for i in range(n_qubits)]), + endianness=Endianness.BIG, + ).squeeze(0) + return ConvertedObservable(native=native, abstract=obs, original=obs) + + def run( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """ + Execute the circuit and return a wavefunction in form of a statevector. + + Arguments: + circuit: The circuit that is executed. + param_values: Parameters of the circuit (after calling the embedding + function on the user-facing parameters). + state: Initial state. + endianness: The endianness of the wave function. + """ + + if state is not None: + raise NotImplementedError + + if self.is_remote: + # handle here, or different backends? 
+ raise NotImplementedError + + # loop over all values in the batch + results = [] + for vals in to_list_of_dicts(param_values): + final_circuit = self.assign_parameters(circuit, vals) + + final_circuit.state_vector() # set simulation type + task = self._device.run(final_circuit, 0) + results.append(task.result().values[0]) + states = torch.tensor(np.array(results)) + + n_qubits = circuit.abstract.n_qubits + if endianness != self.native_endianness and n_qubits > 1: + from qadence.transpile import invert_endianness + + states = invert_endianness(states) + return states + + def sample( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + n_shots: int = 1, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + """Execute the circuit and return samples of the resulting wavefunction.""" + if state is not None: + raise NotImplementedError("Braket cannot handle a custom initial state.") + + if n_shots < 1: + raise ValueError("You can only call sample with n_shots>0.") + + if self.is_remote: + # handle here, or different backends? 
+ raise NotImplementedError + + # loop over all values in the batch + samples = [] + for vals in to_list_of_dicts(param_values): + final_circuit = self.assign_parameters(circuit, vals) + task = self._device.run(final_circuit, n_shots) + samples.append(task.result().measurement_counts) + if endianness != self.native_endianness: + from qadence.transpile import invert_endianness + + samples = invert_endianness(samples) + return samples + + def expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + # Do not flip endianness here because then we would have to reverse the observable + wfs = self.run(circuit, param_values, state=state, endianness=Endianness.BIG) + + # TODO: Handle batching + res = [] + observable = observable if isinstance(observable, list) else [observable] + for wf in wfs: + res.append([torch.vdot(wf, obs.native @ wf).real for obs in observable]) + + return torch.tensor(res) + + def assign_parameters( + self, circuit: ConvertedCircuit, param_values: dict[str, Tensor | float] + ) -> BraketCircuit: + """Assign numerical values to the circuit parameters""" + if param_values is None: + return circuit.native() + + params_copy = param_values.copy() + pnames = [p.name for p in circuit.native.parameters] + + # account for fixed parameters + for name in param_values.keys(): + if name not in pnames: + params_copy.pop(name) + + # make sure that all the parameters are single floats + # otherwise it won't be accepted by Braket + native_params = promote_parameters(params_copy) + + # assign the parameters to the circuit + assigned_circuit = circuit.native(**native_params) + + return assigned_circuit + + @staticmethod + def _overlap(bras: Tensor, kets: Tensor) -> Tensor: + return overlap_exact(bras, kets) + + @staticmethod + def 
default_configuration() -> Configuration: + return Configuration() diff --git a/qadence/backends/braket/config.py b/qadence/backends/braket/config.py new file mode 100644 index 000000000..9f83f8cd1 --- /dev/null +++ b/qadence/backends/braket/config.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from dataclasses import dataclass, field + +from qadence.backend import BackendConfiguration + + +@dataclass +class Configuration(BackendConfiguration): + # FIXME: currently not used + # credentials for connecting to the cloud + # and executing on real QPUs + cloud_credentials: dict = field(default_factory=dict) + # Braket requires gate-level parameters + use_gate_params = True diff --git a/qadence/backends/braket/convert_ops.py b/qadence/backends/braket/convert_ops.py new file mode 100644 index 000000000..33087494a --- /dev/null +++ b/qadence/backends/braket/convert_ops.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from itertools import chain as flatten +from typing import Callable, Dict, List + +from braket.circuits.gates import CZ, CNot, CPhaseShift, H, I, Rx, Ry, Rz, S, Swap, T, X, Y, Z +from braket.circuits.instruction import Instruction +from braket.parametric import FreeParameter + +from qadence.blocks import AbstractBlock, CompositeBlock, PrimitiveBlock +from qadence.errors import NotSupportedError +from qadence.operations import OpName +from qadence.parameters import evaluate + +single_qubit: Dict[str, Callable] = { + OpName.I: I.i, + OpName.H: H.h, + OpName.X: X.x, + OpName.Y: Y.y, + OpName.Z: Z.z, + OpName.S: S.s, + OpName.T: T.t, +} +single_qubit_parameterized: Dict[str, Callable] = { + OpName.RX: Rx.rx, + OpName.RY: Ry.ry, + OpName.RZ: Rz.rz, +} +two_qubit: Dict[str, Callable] = {OpName.CNOT: CNot.cnot, OpName.SWAP: Swap.swap, OpName.CZ: CZ.cz} +two_qubit_parametrized: Dict[str, Callable] = { + OpName.CPHASE: CPhaseShift.cphaseshift, +} + +ops_map = { + **single_qubit, + **single_qubit_parameterized, + **two_qubit, + 
**two_qubit_parametrized, +} + +supported_gates = list(ops_map.keys()) + + +def BraketOperation(block: PrimitiveBlock) -> Instruction: + operation = block.name + + if operation in single_qubit: + return single_qubit[operation](target=block.qubit_support) + + elif operation in single_qubit_parameterized: + ((uuid, expr),) = block.parameters.items() # type: ignore [attr-defined] + if expr.is_number: + return single_qubit_parameterized[operation]( + target=block.qubit_support, angle=evaluate(expr) # type: ignore + ) + else: + return single_qubit_parameterized[operation]( + target=block.qubit_support, + angle=FreeParameter(uuid), # type: ignore + ) + + elif operation in two_qubit: + return two_qubit[operation](block.qubit_support[0], block.qubit_support[1]) + + elif operation in two_qubit_parametrized: + (expr,) = block.parameters.expressions() # type: ignore [attr-defined] + angle_value = evaluate(expr) + return two_qubit_parametrized[operation]( + control=block.qubit_support[0], + target=block.qubit_support[1], + angle=angle_value, + ) + + else: + raise NotSupportedError( + "Operation type {} is not supported for Braket backend.".format(type(block)) + ) + + +def convert_block(block: AbstractBlock) -> List[Instruction]: + if isinstance(block, PrimitiveBlock): + ops = [BraketOperation(block=block)] + elif isinstance(block, CompositeBlock): + ops = list(flatten(convert_block(b) for b in block.blocks)) + else: + raise NotSupportedError( + "Operation type {} is not supported for Braket backend.".format(type(block)) + ) + return ops diff --git a/qadence/backends/gpsr.py b/qadence/backends/gpsr.py new file mode 100644 index 000000000..6029c963a --- /dev/null +++ b/qadence/backends/gpsr.py @@ -0,0 +1,129 @@ +from __future__ import annotations + +from functools import partial +from typing import Callable + +import torch +from torch import Tensor + +from qadence.utils import _round_complex + + +def general_psr(spectrum: Tensor, n_eqs: int | None = None, shift_prefac: float = 
0.5) -> Callable: + diffs = _round_complex(spectrum - spectrum.reshape(-1, 1)) + sorted_unique_spectral_gaps = torch.unique(torch.abs(torch.tril(diffs))) + + # We have to filter out zeros + sorted_unique_spectral_gaps = sorted_unique_spectral_gaps[sorted_unique_spectral_gaps > 0] + n_eqs = len(sorted_unique_spectral_gaps) + sorted_unique_spectral_gaps = torch.tensor(list(sorted_unique_spectral_gaps)) + + if n_eqs == 1: + return partial(single_gap_psr, spectral_gap=sorted_unique_spectral_gaps.item()) + else: + return partial( + multi_gap_psr, + spectral_gaps=sorted_unique_spectral_gaps, + shift_prefac=shift_prefac, + ) + + +def single_gap_psr( + expectation_fn: Callable[[dict[str, Tensor]], Tensor], + param_dict: dict[str, Tensor], + param_name: str, + spectral_gap: Tensor = torch.tensor([2], dtype=torch.get_default_dtype()), + shift: Tensor = torch.tensor([torch.pi / 2], dtype=torch.get_default_dtype()), +) -> Tensor: + """Implements single qubit PSR rule. + + Args: + expectation_fn (Callable[[dict[str, Tensor]], Tensor]): backend-dependent function + to calculate expectation value + + param_dict (dict[str, Tensor]): dict storing parameters of parameterized blocks + param_name (str): name of parameter with respect to that differentiation is performed + + Returns: + Tensor: tensor containing derivative values + """ + + # + pi/2 shift + shifted_params = param_dict.copy() + shifted_params[param_name] = shifted_params[param_name] + shift + f_plus = expectation_fn(shifted_params) + + # - pi/2 shift + shifted_params = param_dict.copy() + shifted_params[param_name] = shifted_params[param_name] - shift + f_min = expectation_fn(shifted_params) + + return spectral_gap * (f_plus - f_min) / (4 * torch.sin(spectral_gap * shift / 2)) + + +def multi_gap_psr( + expectation_fn: Callable[[dict[str, Tensor]], Tensor], + param_dict: dict[str, Tensor], + param_name: str, + spectral_gaps: Tensor, + shift_prefac: float = 0.5, +) -> Tensor: + """Implements multi-gap multi-qubit GPSR rule. 
+ + Args: + expectation_fn (Callable[[dict[str, Tensor]], Tensor]): backend-dependent function + to calculate expectation value + + param_dict (dict[str, Tensor]): dict storing parameters values of parameterized blocks + param_name (str): name of parameter with respect to that differentiation is performed + spectral_gaps (Tensor): tensor containing spectral gap values + shift_prefac (float): prefactor governing the magnitude of parameter shift values - + select smaller value if spectral gaps are large + + Returns: + Tensor: tensor containing derivative values + """ + n_eqs = len(spectral_gaps) + batch_size = max(t.size(0) for t in param_dict.values()) + + # get shift values + shifts = shift_prefac * torch.linspace( + torch.pi / 2 - torch.pi / 5, torch.pi / 2 + torch.pi / 5, n_eqs + ) + + # calculate F vector and M matrix + # (see: https://arxiv.org/pdf/2108.01218.pdf on p. 4 for definitions) + F = [] + M = torch.empty((n_eqs, n_eqs)) + n_obs = 1 + for i in range(n_eqs): + # + shift + shifted_params = param_dict.copy() + shifted_params[param_name] = shifted_params[param_name] + shifts[i] + f_plus = expectation_fn(shifted_params) + + # - shift + shifted_params = param_dict.copy() + shifted_params[param_name] = shifted_params[param_name] - shifts[i] + f_minus = expectation_fn(shifted_params) + + F.append((f_plus - f_minus)) + + # calculate M matrix + for j in range(n_eqs): + M[i, j] = 4 * torch.sin(shifts[i] * spectral_gaps[j] / 2) + + # get number of observables from expectation value tensor + if f_plus.numel() > 1: + n_obs = F[0].shape[1] + + # reshape F vector + F = torch.stack(F).reshape(n_eqs, -1) + + # calculate R vector + R = torch.linalg.solve(M, F) + + # calculate df/dx + dfdx = torch.sum(spectral_gaps[:, None] * R, dim=0).reshape(batch_size, n_obs) + + return dfdx diff --git a/qadence/backends/pulser/__init__.py b/qadence/backends/pulser/__init__.py new file mode 100644 index 000000000..f4faa0356 --- /dev/null +++ b/qadence/backends/pulser/__init__.py @@ 
-0,0 +1,5 @@ +from __future__ import annotations + +from .backend import Backend, Configuration +from .devices import Device +from .pulses import supported_gates diff --git a/qadence/backends/pulser/backend.py b/qadence/backends/pulser/backend.py new file mode 100644 index 000000000..af94c6c26 --- /dev/null +++ b/qadence/backends/pulser/backend.py @@ -0,0 +1,242 @@ +from __future__ import annotations + +from collections import Counter +from dataclasses import dataclass +from typing import Any + +import numpy as np +import qutip +import torch +from pulser import Register as PulserRegister +from pulser import Sequence +from pulser.pulse import Pulse +from pulser_simulation.simresults import SimulationResults +from pulser_simulation.simulation import QutipEmulator +from torch import Tensor + +from qadence.backend import Backend as BackendInterface +from qadence.backend import BackendName, ConvertedCircuit, ConvertedObservable +from qadence.backends.utils import to_list_of_dicts +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit +from qadence.measurements import Measurements +from qadence.overlap import overlap_exact +from qadence.register import Register +from qadence.utils import Endianness + +from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL +from .config import Configuration +from .convert_ops import convert_observable +from .devices import Device, IdealDevice, RealisticDevice +from .pulses import add_pulses + +WEAK_COUPLING_CONST = 1.2 + +DEFAULT_SPACING = 8.0 # µm (standard value) + + +def create_register(register: Register, spacing: float = DEFAULT_SPACING) -> PulserRegister: + """Create Pulser register instance. 
+ + Args: + register (Register): graph representing a register with accompanying coordinate data + spacing (float): distance between qubits in micrometers + + Returns: + Register: Pulser register + """ + + # create register from coordinates + coords = np.array(list(register.coords.values())) + return PulserRegister.from_coordinates(coords * spacing) + + +def make_sequence(circ: QuantumCircuit, config: Configuration) -> Sequence: + if config.device_type == Device.IDEALIZED: + device = IdealDevice + elif config.device_type == Device.REALISTIC: + device = RealisticDevice + else: + raise ValueError("Specified device is not supported.") + + max_amp = device.channels["rydberg_global"].max_amp + min_duration = device.channels["rydberg_global"].min_duration + + if config.spacing is not None: + spacing = config.spacing + elif max_amp is not None: + # Ideal spacing for entanglement gate + spacing = WEAK_COUPLING_CONST * device.rydberg_blockade_radius(max_amp) # type: ignore + else: + spacing = DEFAULT_SPACING + + pulser_register = create_register(circ.register, spacing) + + sequence = Sequence(pulser_register, device) + sequence.declare_channel(GLOBAL_CHANNEL, "rydberg_global") + sequence.declare_channel(LOCAL_CHANNEL, "rydberg_local", initial_target=0) + + # add a minimum duration pulse omega=0 pulse at the beginning for simulation convergence reasons + # since Pulser's QutipEmulator doesn't allow simulation of sequences with total duration < 4ns + zero_pulse = Pulse.ConstantPulse( + duration=max(sequence.device.channels["rydberg_global"].min_duration, 4), + amplitude=0.0, + detuning=0.0, + phase=0.0, + ) + sequence.add(zero_pulse, GLOBAL_CHANNEL, "wait-for-all") + + add_pulses(sequence, circ.block, config, circ.register, spacing) + sequence.measure() + + return sequence + + +# TODO: make it parallelized +# TODO: add execution on the cloud platform +def simulate_sequence( + sequence: Sequence, config: Configuration, state: Tensor +) -> SimulationResults: + simulation = 
QutipEmulator.from_sequence( + sequence, + sampling_rate=config.sampling_rate, + config=config.sim_config, + with_modulation=config.with_modulation, + ) + if state is not None: + simulation.set_initial_state(qutip.Qobj(state.cpu().numpy())) + + return simulation.run(nsteps=config.n_steps_solv, method=config.method_solv) + + +@dataclass(frozen=True, eq=True) +class Backend(BackendInterface): + """The Pulser backend""" + + name: BackendName = BackendName.PULSER + supports_ad: bool = False + support_bp: bool = False + is_remote: bool = False + with_measurements: bool = True + with_noise: bool = False + native_endianness: Endianness = Endianness.BIG + config: Configuration = Configuration() + + def circuit(self, circ: QuantumCircuit) -> Sequence: + native = make_sequence(circ, self.config) + + return ConvertedCircuit(native=native, abstract=circ, original=circ) + + def observable(self, observable: AbstractBlock, n_qubits: int = None) -> Tensor: + from qadence.transpile import flatten, scale_primitive_blocks_only, transpile + + # make sure only leaves, i.e. 
primitive blocks are scaled + block = transpile(flatten, scale_primitive_blocks_only)(observable) + + (native,) = convert_observable(block, n_qubits=n_qubits, config=self.config) + return ConvertedObservable(native=native, abstract=block, original=observable) + + def assign_parameters( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor], + ) -> Any: + if param_values == {} and circuit.native.is_parametrized(): + missing = list(circuit.native.declared_variables.keys()) + raise ValueError(f"Please, provide values for the following parameters: {missing}") + + if param_values == {}: + return circuit.native + + numpy_param_values = { + k: v.detach().cpu().numpy() + for (k, v) in param_values.items() + if k in circuit.native.declared_variables + } + + return circuit.native.build(**numpy_param_values) + + def run( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + vals = to_list_of_dicts(param_values) + + batched_wf = np.zeros((len(vals), 2**circuit.abstract.n_qubits), dtype=np.complex128) + + for i, param_values_el in enumerate(vals): + sequence = self.assign_parameters(circuit, param_values_el) + sim_result = simulate_sequence(sequence, self.config, state) + wf = ( + sim_result.get_final_state(ignore_global_phase=False, normalize=True) + .full() + .flatten() + ) + + # We flip the wavefunction coming out of pulser, + # essentially changing logic 0 with logic 1 in the basis states. 
+ batched_wf[i] = np.flip(wf) + + batched_wf_torch = torch.from_numpy(batched_wf) + + if endianness != self.native_endianness: + from qadence.transpile import invert_endianness + + batched_wf_torch = invert_endianness(batched_wf_torch) + + return batched_wf_torch + + def sample( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + n_shots: int = 1, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + if n_shots < 1: + raise ValueError("You can only call sample with n_shots>0.") + + vals = to_list_of_dicts(param_values) + + samples = [] + for param_values_el in vals: + sequence = self.assign_parameters(circuit, param_values_el) + sim_result = simulate_sequence(sequence, self.config, state) + sample = sim_result.sample_final_state(n_shots) + samples.append(sample) + if endianness != self.native_endianness: + from qadence.transpile import invert_endianness + + samples = invert_endianness(samples) + return samples + + def expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + state = self.run(circuit, param_values=param_values, state=state, endianness=endianness) + + observables = observable if isinstance(observable, list) else [observable] + support = sorted(list(circuit.abstract.register.support)) + res_list = [obs.native(state, param_values, qubit_support=support) for obs in observables] + + res = torch.transpose(torch.stack(res_list), 0, 1) + res = res if len(res.shape) > 0 else res.reshape(1) + return res.real + + @staticmethod + def _overlap(bras: Tensor, kets: Tensor) -> Tensor: + return overlap_exact(bras, kets) + + @staticmethod + def default_configuration() -> Configuration: + return Configuration() diff --git a/qadence/backends/pulser/channels.py 
b/qadence/backends/pulser/channels.py new file mode 100644 index 000000000..8d576e42f --- /dev/null +++ b/qadence/backends/pulser/channels.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +from dataclasses import dataclass + +from pulser.channels.channels import Rydberg + +GLOBAL_CHANNEL = "Global" +LOCAL_CHANNEL = "Local" + + +@dataclass(frozen=True) +class CustomRydberg(Rydberg): + name: str = "Rydberg" + + duration_steps: int = 1 # ns + amplitude_steps: float = 0.01 # rad/µs diff --git a/qadence/backends/pulser/config.py b/qadence/backends/pulser/config.py new file mode 100644 index 000000000..d247f9125 --- /dev/null +++ b/qadence/backends/pulser/config.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Optional + +from pulser_simulation.simconfig import SimConfig + +from qadence.backend import BackendConfiguration +from qadence.blocks.analog import Interaction + +from .devices import Device + + +@dataclass +class Configuration(BackendConfiguration): + # device type + device_type: Device = Device.IDEALIZED + + # atomic spacing + spacing: Optional[float] = None + + # sampling rate to be used for local simulations + sampling_rate: float = 1.0 + + # solver method to pass to the Qutip solver + method_solv: str = "adams" + + # number of solver steps to pass to the Qutip solver + n_steps_solv: float = 1e8 + + # simulation configuration with optional noise options + sim_config: Optional[SimConfig] = None + + # add modulation to the local execution + with_modulation: bool = False + + # Use gate-level parameters + use_gate_params = True + + # pulse amplitude on local channel + amplitude_local: Optional[float] = None + + # pulse amplitude on global channel + amplitude_global: Optional[float] = None + + # detuning value + detuning: Optional[float] = None + + # interaction type + interaction: Interaction = Interaction.NN + + def __post_init__(self) -> None: + if self.sim_config is not None and not 
isinstance(self.sim_config, SimConfig): + raise TypeError("Wrong 'sim_config' attribute type, pass a valid SimConfig object!") diff --git a/qadence/backends/pulser/convert_ops.py b/qadence/backends/pulser/convert_ops.py new file mode 100644 index 000000000..1b254e795 --- /dev/null +++ b/qadence/backends/pulser/convert_ops.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from typing import Sequence + +import torch +from torch.nn import Module + +from qadence.blocks import ( + AbstractBlock, +) +from qadence.blocks.block_to_tensor import ( + block_to_tensor, +) +from qadence.utils import Endianness + +from .config import Configuration + + +def convert_observable( + block: AbstractBlock, n_qubits: int | None, config: Configuration = None +) -> Sequence[Module]: + return [PulserObservable(block, n_qubits)] + + +class PulserObservable(Module): + def __init__(self, block: AbstractBlock, n_qubits: int | None): + super().__init__() + self.block = block + self.n_qubits = n_qubits + + def forward( + self, + state: torch.Tensor, + values: dict[str, torch.Tensor] | list = {}, + qubit_support: tuple | None = None, + endianness: Endianness = Endianness.BIG, + ) -> torch.Tensor: + # FIXME: cache this, it is very inefficient for non-parametric observables + block_mat = block_to_tensor( + self.block, values, qubit_support=qubit_support, endianness=endianness # type: ignore [arg-type] # noqa + ).squeeze(0) + return torch.sum(torch.matmul(state, block_mat) * state.conj(), dim=1) diff --git a/qadence/backends/pulser/devices.py b/qadence/backends/pulser/devices.py new file mode 100644 index 000000000..7e08b0eb3 --- /dev/null +++ b/qadence/backends/pulser/devices.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +from numpy import pi +from pulser.channels.channels import Rydberg +from pulser.channels.eom import RydbergBeam, RydbergEOM +from pulser.devices._device_datacls import Device as PulserDevice +from pulser.devices._device_datacls import VirtualDevice + +from 
qadence.types import StrEnum + +# Idealized virtual device +IdealDevice = VirtualDevice( + name="IdealizedDevice", + dimensions=2, + rydberg_level=60, + max_atom_num=100, + max_radial_distance=100, + min_atom_distance=0, + channel_objects=( + Rydberg.Global(max_abs_detuning=2 * pi * 4, max_amp=2 * pi * 3), + Rydberg.Local(max_targets=1000, max_abs_detuning=2 * pi * 4, max_amp=2 * pi * 3), + ), +) + + +# device with realistic specs with local channels and custom bandwith. +RealisticDevice = PulserDevice( + name="RealisticDevice", + dimensions=2, + rydberg_level=60, + max_atom_num=100, + max_radial_distance=60, + min_atom_distance=5, + channel_objects=( + Rydberg.Global( + max_abs_detuning=2 * pi * 4, + max_amp=2 * pi * 1.5, + clock_period=4, + min_duration=16, + max_duration=4000, + mod_bandwidth=16, + eom_config=RydbergEOM( + limiting_beam=RydbergBeam.RED, + max_limiting_amp=40 * 2 * pi, + intermediate_detuning=700 * 2 * pi, + mod_bandwidth=24, + controlled_beams=(RydbergBeam.BLUE,), + ), + ), + Rydberg.Local( + max_targets=20, + max_abs_detuning=2 * pi * 4, + max_amp=2 * pi * 3, + clock_period=4, + min_duration=16, + max_duration=2**26, + mod_bandwidth=16, + eom_config=RydbergEOM( + limiting_beam=RydbergBeam.RED, + max_limiting_amp=40 * 2 * pi, + intermediate_detuning=700 * 2 * pi, + mod_bandwidth=24, + controlled_beams=(RydbergBeam.BLUE,), + ), + ), + ), +) + + +class Device(StrEnum): + """Supported types of devices for Pulser backend""" + + IDEALIZED = IdealDevice + "idealized device, least realistic" + + REALISTIC = RealisticDevice + "device with realistic specs" diff --git a/qadence/backends/pulser/pulses.py b/qadence/backends/pulser/pulses.py new file mode 100644 index 000000000..877346bcc --- /dev/null +++ b/qadence/backends/pulser/pulses.py @@ -0,0 +1,216 @@ +from __future__ import annotations + +from functools import partial +from typing import Union + +import numpy as np +from pulser.channels.base_channel import Channel +from pulser.parametrized.variable 
import Variable, VariableItem +from pulser.pulse import Pulse +from pulser.sequence.sequence import Sequence +from pulser.waveforms import CompositeWaveform, ConstantWaveform, RampWaveform + +from qadence import Register +from qadence.blocks import AbstractBlock, CompositeBlock +from qadence.blocks.analog import ( + AnalogBlock, + AnalogComposite, + ConstantAnalogRotation, + Interaction, + WaitBlock, +) +from qadence.operations import RX, RY, AnalogEntanglement, OpName +from qadence.parameters import evaluate + +from .channels import GLOBAL_CHANNEL, LOCAL_CHANNEL +from .config import Configuration +from .waveforms import SquareWaveform + +TVar = Union[Variable, VariableItem] + +supported_gates = [ + OpName.ZERO, + OpName.RX, + OpName.RY, + OpName.ANALOGENTANG, + OpName.ANALOGRX, + OpName.ANALOGRY, + OpName.ANALOGRZ, + OpName.ANALOGSWAP, + OpName.WAIT, +] + + +def add_pulses( + sequence: Sequence, + block: AbstractBlock, + config: Configuration, + qc_register: Register, + spacing: float, +) -> None: + # we need this because of the case with a single type of block in a KronBlock + # TODO: document properly + + n_qubits = len(sequence.register.qubits) + + # define qubit support + qubit_support = block.qubit_support + if not isinstance(qubit_support[0], int): + qubit_support = tuple(range(n_qubits)) + + if isinstance(block, AnalogBlock) and config.interaction != Interaction.NN: + raise ValueError(f"Pulser does not support other interactions than '{Interaction.NN}'") + + local_channel = sequence.device.channels["rydberg_local"] + global_channel = sequence.device.channels["rydberg_global"] + + rx = partial(digital_rot_pulse, channel=local_channel, phase=0, config=config) + ry = partial(digital_rot_pulse, channel=local_channel, phase=np.pi / 2, config=config) + + # TODO: lets move those to `@singledipatch`ed functions + if isinstance(block, WaitBlock): + # wait if its a global wait + if block.qubit_support.is_global: + (uuid, duration) = 
block.parameters.uuid_param("duration") + t = evaluate(duration) if duration.is_number else sequence.declare_variable(uuid) + pulse = Pulse.ConstantPulse(duration=t, amplitude=0, detuning=0, phase=0) + sequence.add(pulse, GLOBAL_CHANNEL, "wait-for-all") + + # do nothing if its a non-global wait, because that means we are doing a rotation + # on other qubits + else: + support = set(block.qubit_support) + if not support.issubset(sequence.register.qubits): + raise ValueError("Trying to wait on qubits outside of support.") + + elif isinstance(block, ConstantAnalogRotation): + ps = block.parameters + (a_uuid, alpha) = ps.uuid_param("alpha") + (w_uuid, omega) = ps.uuid_param("omega") + (p_uuid, phase) = ps.uuid_param("phase") + (d_uuid, detuning) = ps.uuid_param("delta") + + a = evaluate(alpha) if alpha.is_number else sequence.declare_variable(a_uuid) + w = evaluate(omega) if omega.is_number else sequence.declare_variable(w_uuid) + p = evaluate(phase) if phase.is_number else sequence.declare_variable(p_uuid) + d = evaluate(detuning) if detuning.is_number else sequence.declare_variable(d_uuid) + + # calculate generator eigenvalues + block.eigenvalues_generator = block.compute_eigenvalues_generator( + qc_register, block, spacing + ) + + if block.qubit_support.is_global: + pulse = analog_rot_pulse(a, w, p, d, global_channel, config) + sequence.add(pulse, GLOBAL_CHANNEL, protocol="wait-for-all") + else: + pulse = analog_rot_pulse(a, w, p, d, local_channel, config) + sequence.target(qubit_support, LOCAL_CHANNEL) + sequence.add(pulse, LOCAL_CHANNEL, protocol="wait-for-all") + + elif isinstance(block, AnalogEntanglement): + (uuid, duration) = block.parameters.uuid_param("duration") + t = evaluate(duration) if duration.is_number else sequence.declare_variable(uuid) + sequence.add( + entangle_pulse(t, global_channel, config), GLOBAL_CHANNEL, protocol="wait-for-all" + ) + + elif isinstance(block, (RX, RY)): + (uuid, p) = block.parameters.uuid_param("parameter") + angle = 
evaluate(p) if p.is_number else sequence.declare_variable(uuid) + pulse = rx(angle) if isinstance(block, RX) else ry(angle) + sequence.target(qubit_support, LOCAL_CHANNEL) + sequence.add(pulse, LOCAL_CHANNEL, protocol="wait-for-all") + + elif isinstance(block, CompositeBlock) or isinstance(block, AnalogComposite): + for block in block.blocks: + add_pulses(sequence, block, config, qc_register, spacing) + + else: + msg = f"The pulser backend currently does not support blocks of type: {type(block)}" + raise NotImplementedError(msg) + + +def analog_rot_pulse( + alpha: TVar | float, + omega: TVar | float, + phase: TVar | float, + detuning: TVar | float, + channel: Channel, + config: Configuration | None = None, +) -> Pulse: + # omega in rad/us; detuning in rad/us + if config is not None: + if channel.addressing == "Global": + max_amp = config.amplitude_global if config.amplitude_global is not None else omega + elif channel.addressing == "Local": + max_amp = config.amplitude_local if config.amplitude_local is not None else omega + max_det = config.detuning if config.detuning is not None else detuning + else: + max_amp = omega + max_det = detuning + + # get pulse duration in ns + duration = 1000 * abs(alpha) / np.sqrt(omega**2 + detuning**2) + + # create amplitude waveform + amp_wf = SquareWaveform.from_duration( + duration=duration, # type: ignore + max_amp=max_amp, # type: ignore[arg-type] + duration_steps=channel.clock_period, # type: ignore[attr-defined] + min_duration=channel.min_duration, + ) + + # create detuning waveform + det_wf = SquareWaveform.from_duration( + duration=duration, # type: ignore + max_amp=max_det, # type: ignore[arg-type] + duration_steps=channel.clock_period, # type: ignore[attr-defined] + min_duration=channel.min_duration, + ) + + return Pulse(amplitude=amp_wf, detuning=det_wf, phase=abs(phase)) + + +def entangle_pulse( + duration: TVar | float, channel: Channel, config: Configuration | None = None +) -> Pulse: + if config is None: + max_amp = 
channel.max_amp + else: + max_amp = ( + config.amplitude_global if config.amplitude_global is not None else channel.max_amp + ) + + clock = channel.clock_period + delay_wf = ConstantWaveform(clock * np.ceil(duration / clock), 0) # type: ignore + half_pi_wf = SquareWaveform.from_area( + area=np.pi / 2, + max_amp=max_amp, # type: ignore[arg-type] + duration_steps=clock, # type: ignore[attr-defined] + min_duration=channel.min_duration, + ) + + detuning_wf = RampWaveform(duration=half_pi_wf.duration, start=0, stop=np.pi) + amplitude = CompositeWaveform(half_pi_wf, delay_wf) + detuning = CompositeWaveform(detuning_wf, delay_wf) + return Pulse(amplitude=amplitude, detuning=detuning, phase=np.pi / 2) + + +def digital_rot_pulse( + angle: TVar | float, phase: float, channel: Channel, config: Configuration | None = None +) -> Pulse: + if config is None: + max_amp = channel.max_amp + else: + max_amp = config.amplitude_local if config.amplitude_local is not None else channel.max_amp + + # TODO: Implement reverse rotation for angles bigger than π + amplitude_wf = SquareWaveform.from_area( + area=abs(angle), # type: ignore + max_amp=max_amp, # type: ignore[arg-type] + duration_steps=channel.clock_period, # type: ignore[attr-defined] + min_duration=channel.min_duration, + ) + + return Pulse.ConstantDetuning(amplitude=amplitude_wf, detuning=0, phase=phase) diff --git a/qadence/backends/pulser/waveforms.py b/qadence/backends/pulser/waveforms.py new file mode 100644 index 000000000..aac0a6538 --- /dev/null +++ b/qadence/backends/pulser/waveforms.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +import numpy as np +from pulser.parametrized.decorators import parametrize +from pulser.waveforms import ConstantWaveform + +# determined by hardware team as a safe resolution +MAX_AMPLITUDE_SCALING = 0.1 +EPS = 1e-9 + + +class SquareWaveform(ConstantWaveform): + def __init__(self, duration: int, value: float): + super().__init__(duration, value) + + @classmethod + @parametrize + def 
from_area( + cls, + area: float, + max_amp: float, + duration_steps: int = 1, + min_duration: int = 1, + ) -> SquareWaveform: + amp_steps = MAX_AMPLITUDE_SCALING * max_amp + + duration = max( + duration_steps * np.round(area / (duration_steps * max_amp) * 1e3), + min_duration, + ) + amplitude = min( + amp_steps * np.ceil(area / (amp_steps * duration) * 1e3), + max_amp, + ) + delta = np.abs(1e-3 * duration * amplitude - area) + + new_duration = duration + duration_steps + new_amplitude = max( + amp_steps * np.ceil(area / (amp_steps * new_duration) * 1e3), + max_amp, + ) + new_delta = np.abs(1e-3 * new_duration * new_amplitude - area) + + while new_delta < delta: + duration = new_duration + amplitude = new_amplitude + delta = new_delta + + new_duration = duration + duration_steps + new_amplitude = max( + amp_steps * np.ceil(area / (amp_steps * new_duration) * 1e3), + max_amp, + ) + new_delta = np.abs(1e-3 * new_duration * new_amplitude - area) + + return cls(duration, amplitude) + + @classmethod + @parametrize + def from_duration( + cls, + duration: int, + max_amp: float, + duration_steps: int = 1, + min_duration: int = 1, + ) -> SquareWaveform: + amp_steps = MAX_AMPLITUDE_SCALING * max_amp + + duration = max( + duration_steps * np.round(duration / duration_steps), + min_duration, + ) + amplitude = min( + amp_steps * np.ceil(max_amp / (amp_steps + EPS) * 1e3), + max_amp, + ) + + return cls(duration, amplitude) diff --git a/qadence/backends/pyqtorch/__init__.py b/qadence/backends/pyqtorch/__init__.py new file mode 100644 index 000000000..59cb7d4dd --- /dev/null +++ b/qadence/backends/pyqtorch/__init__.py @@ -0,0 +1,5 @@ +from __future__ import annotations + +from .backend import Backend +from .config import Configuration +from .convert_ops import supported_gates diff --git a/qadence/backends/pyqtorch/backend.py b/qadence/backends/pyqtorch/backend.py new file mode 100644 index 000000000..e2d8100bc --- /dev/null +++ b/qadence/backends/pyqtorch/backend.py @@ -0,0 +1,239 
@@ +from __future__ import annotations + +from collections import Counter +from dataclasses import dataclass +from math import prod +from typing import Any + +import pyqtorch.modules as pyq +import torch +from torch import Tensor + +from qadence.backend import Backend as BackendInterface +from qadence.backend import BackendName, ConvertedCircuit, ConvertedObservable +from qadence.backends.utils import to_list_of_dicts +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit +from qadence.measurements import Measurements +from qadence.overlap import overlap_exact +from qadence.states import zero_state +from qadence.transpile import ( + add_interaction, + blockfn_to_circfn, + chain_single_qubit_ops, + flatten, + scale_primitive_blocks_only, + transpile, +) +from qadence.utils import Endianness, int_to_basis + +from .config import Configuration +from .convert_ops import convert_block, convert_observable + + +@dataclass(frozen=True, eq=True) +class Backend(BackendInterface): + """PyQTorch backend.""" + + # set standard interface parameters + name: BackendName = BackendName.PYQTORCH + supports_ad: bool = True + support_bp: bool = True + is_remote: bool = False + with_measurements: bool = True + with_noise: bool = False + native_endianness: Endianness = Endianness.BIG + config: Configuration = Configuration() + + def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit: + transpilations = [ + lambda circ: add_interaction(circ, interaction=self.config.interaction), + lambda circ: blockfn_to_circfn(chain_single_qubit_ops)(circ) + if self.config.use_single_qubit_composition + else blockfn_to_circfn(flatten)(circ), + blockfn_to_circfn(scale_primitive_blocks_only), + ] + + abstract = transpile(*transpilations)(circuit) # type: ignore[call-overload] + ops = convert_block(abstract.block, n_qubits=circuit.n_qubits, config=self.config) + native = pyq.QuantumCircuit(abstract.n_qubits, ops) + return ConvertedCircuit(native=native, 
abstract=abstract, original=circuit) + + def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable: + # make sure only leaves, i.e. primitive blocks are scaled + transpilations = [ + lambda block: chain_single_qubit_ops(block) + if self.config.use_single_qubit_composition + else flatten(block), + scale_primitive_blocks_only, + ] + block = transpile(*transpilations)(observable) # type: ignore[call-overload] + + (native,) = convert_observable(block, n_qubits=n_qubits, config=self.config) + return ConvertedObservable(native=native, abstract=block, original=observable) + + def run( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + pyqify_state: bool = True, + unpyqify_state: bool = True, + ) -> Tensor: + n_qubits = circuit.abstract.n_qubits + + if state is not None: + if pyqify_state: + if (state.ndim != 2) or (state.size(1) != 2**n_qubits): + raise ValueError( + "The initial state must be composed of tensors of size " + f"(batch_size, 2**n_qubits). Found: {state.size() = }." + ) + + # PyQ expects a column vector for the initial state + # where each element is of dim=2. + state = state.T.reshape([2] * n_qubits + [state.size(0)]) + else: + if prod(state.size()[:-1]) != 2**n_qubits: + raise ValueError( + "A pyqified initial state must be composed of tensors of size " + f"(2, 2, ..., batch_size). Found: {state.size() = }." 
+ ) + else: + # infer batch_size without state + if len(param_values) == 0: + batch_size = 1 + else: + batch_size = max([len(tensor) for tensor in param_values.values()]) + state = circuit.native.init_state(batch_size=batch_size) + state = circuit.native(state, param_values) + + # make sure that the batch dimension is the first one, as standard + # for PyTorch, and not the last one as done in PyQ + if unpyqify_state: + state = torch.flatten(state, start_dim=0, end_dim=-2).t() + + if endianness != self.native_endianness: + from qadence.transpile import invert_endianness + + state = invert_endianness(state) + return state + + def _batched_expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + state = self.run( + circuit, + param_values=param_values, + state=state, + endianness=endianness, + pyqify_state=True, + # we are calling the native observable directly, so we want to use pyq shapes + unpyqify_state=False, + ) + observable = observable if isinstance(observable, list) else [observable] + _expectation = torch.hstack( + [obs.native(state, param_values).reshape(-1, 1) for obs in observable] + ) + return _expectation + + def _looped_expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + state = zero_state(circuit.abstract.n_qubits, batch_size=1) if state is None else state + if state.size(0) != 1: + raise ValueError( + "Looping expectation does not make sense with batched initial state. 
" + "Define your initial state with `batch_size=1`" + ) + + list_expvals = [] + observables = observable if isinstance(observable, list) else [observable] + for vals in to_list_of_dicts(param_values): + wf = self.run(circuit, vals, state, endianness, pyqify_state=True, unpyqify_state=False) + exs = torch.cat([obs.native(wf, vals) for obs in observables], 0) + list_expvals.append(exs) + + batch_expvals = torch.vstack(list_expvals) + return batch_expvals if len(batch_expvals.shape) > 0 else batch_expvals.reshape(1) + + def expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + fn = self._looped_expectation if self.config.loop_expectation else self._batched_expectation + return fn( + circuit=circuit, + observable=observable, + param_values=param_values, + state=state, + protocol=protocol, + endianness=endianness, + ) + + def sample( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor] = {}, + n_shots: int = 1, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + if n_shots < 1: + raise ValueError("You can only call sample with n_shots>0.") + + def _sample(_probs: Tensor, n_shots: int, endianness: Endianness, n_qubits: int) -> Counter: + return Counter( + { + int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item() + for k, count in enumerate( + torch.bincount( + torch.multinomial(input=_probs, num_samples=n_shots, replacement=True) + ) + ) + if count > 0 + } + ) + + wf = self.run(circuit=circuit, param_values=param_values, state=state) + probs = torch.abs(torch.pow(wf, 2)) + return list( + map( + lambda _probs: _sample( + _probs=_probs, + n_shots=n_shots, + endianness=endianness, + n_qubits=circuit.abstract.n_qubits, + ), + probs, + ) + ) + + def 
assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any: + raise NotImplementedError + + @staticmethod + def _overlap(bras: Tensor, kets: Tensor) -> Tensor: + return overlap_exact(bras, kets) + + @staticmethod + def default_configuration() -> Configuration: + return Configuration() diff --git a/qadence/backends/pyqtorch/config.py b/qadence/backends/pyqtorch/config.py new file mode 100644 index 000000000..df9e95a7d --- /dev/null +++ b/qadence/backends/pyqtorch/config.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Callable + +from qadence.backend import BackendConfiguration +from qadence.types import AlgoHEvo, Interaction + + +@dataclass +class Configuration(BackendConfiguration): + # FIXME: currently not used + # determine which kind of Hamiltonian evolution + # algorithm to use + algo_hevo: AlgoHEvo = AlgoHEvo.EXP + + # number of steps for the Hamiltonian evolution + n_steps_hevo: int = 100 + + use_gradient_checkpointing: bool = False + """Use gradient checkpointing. 
Recommended for higher-order optimization tasks.""" + + use_single_qubit_composition: bool = False + """Composes chains of single qubit gates into a single matmul if possible.""" + + interaction: Callable | Interaction | str = Interaction.NN + """Digital-analog emulation interaction that is used for `AnalogBlock`s.""" + + loop_expectation: bool = False + """When computing batches of expectation values, only allocate one wavefunction and loop over + the batch of parameters to only allocate a single wavefunction at any given time.""" diff --git a/qadence/backends/pyqtorch/convert_ops.py b/qadence/backends/pyqtorch/convert_ops.py new file mode 100644 index 000000000..437f557f7 --- /dev/null +++ b/qadence/backends/pyqtorch/convert_ops.py @@ -0,0 +1,435 @@ +from __future__ import annotations + +from functools import reduce +from itertools import chain as flatten +from operator import add +from typing import Callable, Sequence + +import pyqtorch.modules as pyq +import sympy +import torch +from pyqtorch.core.utils import _apply_batch_gate +from torch.nn import Module +from torch.utils.checkpoint import checkpoint + +from qadence.blocks import ( + AbstractBlock, + AddBlock, + ChainBlock, + CompositeBlock, + MatrixBlock, + ParametricBlock, + PrimitiveBlock, + ScaleBlock, + TimeEvolutionBlock, +) +from qadence.blocks.block_to_tensor import ( + _block_to_tensor_embedded, + block_to_diagonal, + block_to_tensor, +) +from qadence.operations import OpName, U + +from .config import Configuration + +# Tdagger is not supported currently +supported_gates = list(set(OpName.list()) - set([OpName.TDAGGER])) +"""The set of supported gates. 
Tdagger is currently not supported.""" + + +def is_single_qubit_chain(block: AbstractBlock) -> bool: + return ( + isinstance(block, (ChainBlock)) + and block.n_supports == 1 + and all([isinstance(b, (ParametricBlock, PrimitiveBlock)) for b in block]) + and not any([isinstance(b, (ScaleBlock, U)) for b in block]) + ) + + +def convert_observable( + block: AbstractBlock, n_qubits: int, config: Configuration = None +) -> Sequence[Module]: + return [PyQObservable(block, n_qubits, config)] + + +def convert_block( + block: AbstractBlock, n_qubits: int = None, config: Configuration = None +) -> Sequence[Module]: + if n_qubits is None: + n_qubits = max(block.qubit_support) + 1 + + if config is None: + config = Configuration() + + if isinstance(block, ScaleBlock): + return [ScalePyQOperation(n_qubits, block, config)] + + elif isinstance(block, AddBlock): + ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks))) + return [AddPyQOperation(block.qubit_support, n_qubits, ops, config)] + + elif isinstance(block, ParametricBlock): + if isinstance(block, TimeEvolutionBlock): + op = HEvoPyQOperation( + qubits=block.qubit_support, + n_qubits=n_qubits, + # TODO: use the hevo_algo configuration here to switch between different algorithms + # for executing the Hamiltonian evolution + operation=pyq.HamiltonianEvolution( + block.qubit_support, + n_qubits, + n_steps=config.n_steps_hevo, + ), + block=block, + config=config, + ) + else: + op = ParametricPyQOperation(n_qubits, block, config) + return [op] + elif isinstance(block, MatrixBlock): + return [PyQMatrixBlock(block, n_qubits, config)] + elif isinstance(block, PrimitiveBlock): + return [PyQOperation(n_qubits, block)] + elif isinstance(block, CompositeBlock): + ops = list(flatten(*(convert_block(b, n_qubits, config) for b in block.blocks))) + if is_single_qubit_chain(block) and config.use_single_qubit_composition: + return [PyQComposedBlock(ops, block.qubit_support, n_qubits, config)] + else: + # NOTE: without 
wrapping in a pyq.QuantumCircuit here the kron/chain + # blocks won't be properly nested which leads to incorrect results from + # the `AddBlock`s. For example: + # add(chain(Z(0), Z(1))) has to result in the following (pseudo-code) + # AddPyQOperation(pyq.QuantumCircuit(Z, Z)) + # as opposed to + # AddPyQOperation(Z, Z) + # which would be wrong. + return [pyq.QuantumCircuit(n_qubits, ops)] + + else: + msg = ( + f"Non supported operation of type {type(block)}. " + "In case you are trying to run an `AnalogBlock`, try converting it " + "with `add_interaction` first." + ) + raise NotImplementedError(msg) + + +class PyQOperation(Module): + def __init__(self, n_qubits: int, block: AbstractBlock): + super().__init__() + name = block.name[1:] if block.name.startswith("MC") else block.name + Op = getattr(pyq, name) + self.operation = Op(block.qubit_support, n_qubits) + + # primitive blocks do not require any parameter value, hence the + # second empty argument added here + def forward(self, state: torch.Tensor, _: dict[str, torch.Tensor] = None) -> torch.Tensor: + return self.apply(self.matrices(), state) + + def matrices(self, _: dict[str, torch.Tensor] = None) -> torch.Tensor: + return self.operation.matrix + + def apply(self, matrices: torch.Tensor, state: torch.Tensor) -> torch.Tensor: + return self.operation.apply(matrices, state) + + +class ParametricPyQOperation(Module): + def __init__(self, n_qubits: int, block: ParametricBlock, config: Configuration): + super().__init__() + name = block.name[1:] if block.name.startswith("MC") else block.name + Op = getattr(pyq, name) + self.operation = Op(block.qubit_support, n_qubits) + self.param_names = config.get_param_name(block) + num_params = len(self.param_names) + if num_params == 1: + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self.apply(self.matrices(values), state) + + else: + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + op_params = 
{key: values[key] for key in self.param_names} + max_batch_size = max(p.size() for p in values.values()) + new_values = { + k: (v if v.size() == max_batch_size else v.repeat(max_batch_size, 1, 1)) + for k, v in op_params.items() + } + return self.apply(self.matrices(new_values), state) + + if config.use_gradient_checkpointing: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return checkpoint(_fwd, state, values, use_reentrant=False) + + else: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return _fwd(state, values) + + self._forward = _forward + + def matrices(self, values: dict[str, torch.Tensor]) -> torch.Tensor: + thetas = torch.vstack([values[name] for name in self.param_names]) + return self.operation.matrices(thetas) + + def apply(self, matrices: torch.Tensor, state: torch.Tensor) -> torch.Tensor: + return self.operation.apply(matrices, state) + + def forward(self, state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self._forward(state, values) + + +class PyQMatrixBlock(Module): + def __init__(self, block: MatrixBlock, n_qubits: int, config: Configuration = None): + super().__init__() + self.n_qubits = n_qubits + self.qubits = block.qubit_support + self.register_buffer("mat", block.matrix.unsqueeze(2)) + + def forward(self, state: torch.Tensor, _: dict[str, torch.Tensor] = None) -> torch.Tensor: + return self.apply(self.mat, state) + + def apply(self, matrices: torch.Tensor, state: torch.Tensor) -> torch.Tensor: + batch_size = state.size(-1) + return _apply_batch_gate(state, matrices, self.qubits, self.n_qubits, batch_size) + + +class PyQComposedBlock(Module): + def __init__( + self, + ops: list[Module], + qubits: list[int] | tuple, + n_qubits: int, + config: Configuration = None, + ): + """Compose a chain of single qubit operations on the same qubit into a single + call to _apply_batch_gate.""" + super().__init__() + self.operations = ops + 
self.qubits = qubits + self.n_qubits = n_qubits + + def forward( + self, state: torch.Tensor, values: dict[str, torch.Tensor] | None = None + ) -> torch.Tensor: + batch_size = state.size(-1) + return self.apply(self.matrices(values, batch_size), state) + + def apply(self, matrices: torch.Tensor, state: torch.Tensor) -> torch.Tensor: + batch_size = state.size(-1) + return _apply_batch_gate(state, matrices, self.qubits, self.n_qubits, batch_size) + + def matrices(self, values: dict[str, torch.Tensor] | None, batch_size: int) -> torch.Tensor: + perm = (2, 0, 1) # We permute the dims since torch.bmm expects the batch_dim at 0. + + def _expand_mat(m: torch.Tensor) -> torch.Tensor: + if len(m.size()) == 2: + m = m.unsqueeze(2).repeat( + 1, 1, batch_size + ) # Primitive gates are 2D, so we expand them. + elif m.shape != (2, 2, batch_size): + m = m.repeat(1, 1, batch_size) # In case a tensor is 3D doesnt have batch_size. + return torch.permute(m, perm) # This returns shape (batch_size, 2, 2) + + # We reverse the list of tensors here since matmul is not commutative. + return torch.permute( + reduce( + torch.bmm, (_expand_mat(op.matrices(values)) for op in reversed(self.operations)) + ), + tuple( + torch.argsort(torch.tensor(perm)) + ), # We need to undo the permute since PyQ expects (2, 2, batch_size). 
+ ) + + +class PyQObservable(Module): + def __init__(self, block: AbstractBlock, n_qubits: int, config: Configuration = None): + super().__init__() + if config is None: + config = Configuration() + self.n_qubits = n_qubits + if block._is_diag_pauli and not block.is_parametric: + diag = block_to_diagonal(block, tuple(range(n_qubits))) + self.register_buffer("diag", diag) + + def sparse_operation( + state: torch.Tensor, values: dict[str, torch.Tensor] = None + ) -> torch.Tensor: + state = state.reshape(2**self.n_qubits, state.size(-1)) + return (diag * state.T).T + + self.operation = sparse_operation + else: + self.operation = pyq.QuantumCircuit( + n_qubits, + convert_block(block, n_qubits, config), + ) + + if config.use_gradient_checkpointing: + + def _forward( + state: torch.Tensor, values: dict[str, torch.Tensor] = None + ) -> torch.Tensor: + new_state = checkpoint(self.operation, state, values, use_reentrant=False) + return pyq.overlap(state, new_state) + + else: + + def _forward( + state: torch.Tensor, values: dict[str, torch.Tensor] = None + ) -> torch.Tensor: + return pyq.overlap(state, self.operation(state, values)) + + self._forward = _forward + + def forward(self, state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self._forward(state, values) + + +class HEvoPyQOperation(Module): + def __init__( + self, + qubits: Sequence, + n_qubits: int, + operation: Callable, + block: TimeEvolutionBlock, + config: Configuration, + ): + super().__init__() + self.qubits = qubits + self.n_qubits = n_qubits + self.operation = operation + self.param_names = config.get_param_name(block) + self._has_parametric_generator: bool + self.block = block + + if isinstance(block.generator, AbstractBlock) and not block.generator.is_parametric: + hmat = block_to_tensor( + block.generator, + qubit_support=tuple(self.qubits), + use_full_support=False, + ) + hmat = hmat.permute(1, 2, 0) + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> 
torch.Tensor: + tevo = values[self.param_names[0]] + return self.operation(hmat, tevo, state) + + elif isinstance(block.generator, torch.Tensor): + m = block.generator.to(dtype=torch.cdouble) + hmat = block_to_tensor( + MatrixBlock(m, qubit_support=block.qubit_support), + qubit_support=tuple(self.qubits), + use_full_support=False, + ) + hmat = hmat.permute(1, 2, 0) + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + tevo = values[self.param_names[0]] + return self.operation(hmat, tevo, state) + + elif isinstance(block.generator, sympy.Basic): + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + tevo = values[self.param_names[0]] + hmat = values[self.param_names[1]] + hmat = hmat.squeeze(3) # FIXME: why is this necessary? + hmat = hmat.permute(1, 2, 0) + return self.operation(hmat, tevo, state) + + else: + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + hmat = _block_to_tensor_embedded( + block.generator, # type: ignore[arg-type] + values=values, + qubit_support=tuple(self.qubits), + use_full_support=False, + ) + hmat = hmat.permute(1, 2, 0) + tevo = values[self.param_names[0]] + return self.operation(hmat, tevo, state) + + if config.use_gradient_checkpointing: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return checkpoint(_fwd, state, values, use_reentrant=False) + + else: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return _fwd(state, values) + + self._forward = _forward + + def forward(self, state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self._forward(state, values) + + +class AddPyQOperation(Module): + def __init__( + self, qubits: Sequence, n_qubits: int, operations: list[Module], config: Configuration + ): + super().__init__() + self.operations = operations + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> 
torch.Tensor: + return reduce(add, (op(state, values) for op in self.operations)) + + if config.use_gradient_checkpointing: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return checkpoint(_fwd, state, values, use_reentrant=False) + + else: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return _fwd(state, values) + + self._forward = _forward + + def forward(self, state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self._forward(state, values) + + +class ScalePyQOperation(Module): + """ + Computes: + + M = matrix(op, theta) + scale * matmul(M, state) + """ + + def __init__(self, n_qubits: int, block: ScaleBlock, config: Configuration): + super().__init__() + (self.param_name,) = config.get_param_name(block) + if not isinstance(block.block, PrimitiveBlock): + raise NotImplementedError( + "The pyqtorch backend can currently only scale `PrimitiveBlock` types.\ + Please use the following transpile function on your circuit first:\ + from qadence.transpile import scale_primitive_blocks_only" + ) + self.operation = convert_block(block.block, n_qubits, config)[0] + + def _fwd(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return values[self.param_name] * self.operation(state, values) + + if config.use_gradient_checkpointing: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return checkpoint(_fwd, state, values, use_reentrant=False) + + else: + + def _forward(state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return _fwd(state, values) + + self._forward = _forward + + def matrices(self, values: dict[str, torch.Tensor]) -> torch.Tensor: + thetas = values[self.param_name] + return (thetas * self.operation.matrices()).unsqueeze(2) + + def forward(self, state: torch.Tensor, values: dict[str, torch.Tensor]) -> torch.Tensor: + return self._forward(state, values) diff --git 
a/qadence/backends/pytorch_wrapper.py b/qadence/backends/pytorch_wrapper.py new file mode 100644 index 000000000..4c0fca561 --- /dev/null +++ b/qadence/backends/pytorch_wrapper.py @@ -0,0 +1,328 @@ +from __future__ import annotations + +from collections import Counter, OrderedDict +from dataclasses import dataclass +from functools import partial +from typing import Any, Callable, Sequence + +import torch +from torch import Tensor, nn +from torch.autograd import Function + +from qadence.backend import Backend as QuantumBackend +from qadence.backend import Converted, ConvertedCircuit, ConvertedObservable +from qadence.backends.utils import param_dict +from qadence.blocks import AbstractBlock, PrimitiveBlock +from qadence.blocks.utils import uuid_to_block, uuid_to_eigen +from qadence.circuit import QuantumCircuit +from qadence.extensions import get_gpsr_fns +from qadence.measurements import Measurements +from qadence.ml_tools import promote_to_tensor +from qadence.types import DiffMode, Endianness + + +class PSRExpectation(Function): + """Overloads the PyTorch AD system to perform parameter shift rule on quantum circuits.""" + + @staticmethod + def forward( + ctx: Any, + expectation_fn: Callable[[dict[str, Tensor]], Tensor], + param_psrs: Sequence[Callable], + param_keys: Sequence[str], + *param_values: Tensor, + ) -> Tensor: + for param in param_values: + param.detach() + + ctx.expectation_fn = expectation_fn + ctx.param_psrs = param_psrs + ctx.param_keys = param_keys + ctx.save_for_backward(*param_values) + + expectation_values = expectation_fn(param_values=param_dict(param_keys, param_values)) # type: ignore[call-arg] # noqa: E501 + # Stack batches of expectations if so. 
+ if isinstance(expectation_values, list): + return torch.stack(expectation_values) + else: + return expectation_values + + @staticmethod + def backward(ctx: Any, grad_out: Tensor) -> tuple: + params = param_dict(ctx.param_keys, ctx.saved_tensors) + + def expectation_fn(params: dict[str, Tensor]) -> Tensor: + return PSRExpectation.apply( + ctx.expectation_fn, + ctx.param_psrs, + params.keys(), + *params.values(), + ) + + def vjp(psr: Callable, name: str) -> Tensor: + """ + !!! warn + Sums over gradients corresponding to different observables. + """ + return (grad_out * psr(expectation_fn, params, name)).sum(dim=1) + + grads = [ + vjp(psr, name) if needs_grad else None + for psr, name, needs_grad in zip( + ctx.param_psrs, ctx.param_keys, ctx.needs_input_grad[3:] + ) + ] + return (None, None, None, *grads) + + +@dataclass +class DifferentiableExpectation: + """A handler for differentiating expectation estimation using various engines.""" + + backend: QuantumBackend + circuit: ConvertedCircuit + observable: list[ConvertedObservable] | ConvertedObservable + param_values: dict[str, Tensor] + state: Tensor | None = None + protocol: Measurements | None = None + endianness: Endianness = Endianness.BIG + + def ad(self) -> Tensor: + self.observable = ( + self.observable if isinstance(self.observable, list) else [self.observable] + ) + if self.protocol: + expectation_fn = self.protocol.get_measurement_fn() + expectations = expectation_fn( + circuit=self.circuit.original, + observables=[obs.original for obs in self.observable], + param_values=self.param_values, + options=self.protocol.options, + state=self.state, + endianness=self.endianness, + ) + else: + expectations = self.backend.expectation( + circuit=self.circuit, + observable=self.observable, + param_values=self.param_values, + state=self.state, + endianness=self.endianness, + ) + return promote_to_tensor( + expectations if isinstance(expectations, Tensor) else torch.tensor(expectations) + ) + + def psr(self, psr_fn: 
Callable, **psr_args: int | float | None) -> Tensor: + # wrapper which unpacks the parameters + # as pytorch grads can only calculated w.r.t tensors + # so we unpack the params, feed in the names separately + # as apply doesnt take keyword arguments + # We also fold in the observable into the backend which makes + # life easier in the custom autodiff. + self.observable = ( + self.observable if isinstance(self.observable, list) else [self.observable] + ) + + if self.protocol is not None: + expectation_fn = partial( + self.protocol.get_measurement_fn(), + circuit=self.circuit.original, + observables=[obs.original for obs in self.observable], + options=self.protocol.options, + state=self.state, + endianness=self.endianness, + ) + else: + expectation_fn = partial( + self.backend.expectation, + circuit=self.circuit, + observable=self.observable, + state=self.state, + endianness=self.endianness, + ) + # PSR only applies to parametric circuits. + if isinstance(self.observable, ConvertedObservable): + self.observable = [self.observable] + param_to_psr = self.construct_rules( + self.circuit.abstract, [o.abstract for o in self.observable], psr_fn, **psr_args + ) + + # Select the subset of all parameters for which PSR apply + # which are from the circuit only. + self.param_values = {k: self.param_values[k] for k in param_to_psr.keys()} + + return PSRExpectation.apply(expectation_fn, param_to_psr.values(), self.param_values.keys(), *self.param_values.values()) # type: ignore # noqa: E501 + + # Make PSR construction a static method to avoid unhashability issues. + @staticmethod + def construct_rules( + circuit: QuantumCircuit, + observable: list[AbstractBlock], + psr_fn: Callable, + **psr_args: int | float | None, + ) -> dict[str, Callable]: + """Create a mapping between parameters and PSR functions.""" + + uuid_to_eigs = uuid_to_eigen(circuit.block) + # We currently rely on implicit ordering to match the PSR to the parameter, + # because we want to cache PSRs. 
+ + param_to_psr = OrderedDict() + for param_id, eigenvalues in uuid_to_eigs.items(): + if eigenvalues is None: + raise ValueError( + f"Eigenvalues are not defined for param_id {param_id}\n" + # f"of type {type(block)}.\n" + "PSR cannot be defined in that case." + ) + + param_to_psr[param_id] = psr_fn(eigenvalues, **psr_args) + for obs in observable: + for param_id, _ in uuid_to_eigen(obs).items(): + # We need the embedded fixed params of the observable in the param_values dict + # to be able to call expectation. Since torch backward requires + # a list of param_ids and values of equal length, we need to pass them to PSR too. + # Since they are constants their gradients are 0. + param_to_psr[param_id] = lambda x: torch.tensor([0.0], requires_grad=False) + return param_to_psr + + +class DifferentiableBackend(nn.Module): + """A class to abstract the operations done by the autodiff engine + + Arguments: + backend: An instance of the QuantumBackend type perform execution. + diff_mode: A differentiable mode supported by the differentiation engine. + **psr_args: Arguments that will be passed on to `DifferentiableExpectation`. 
+ """ + + def __init__( + self, + backend: QuantumBackend, + diff_mode: DiffMode = DiffMode.AD, + **psr_args: int | float | None, + ) -> None: + super().__init__() + + self.backend = backend + self.diff_mode = diff_mode + self.psr_args = psr_args + # TODO: Add differentiable overlap calculation + self._overlap: Callable = None # type: ignore [assignment] + + def run( + self, + circuit: ConvertedCircuit, + param_values: dict = {}, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Run on the underlying backend.""" + return self.backend.run( + circuit=circuit, param_values=param_values, state=state, endianness=endianness + ) + + def expectation( + self, + circuit: ConvertedCircuit, + observable: list[ConvertedObservable] | ConvertedObservable, + param_values: dict[str, Tensor] = {}, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Compute the expectation value of a given observable. + + Arguments: + circuit: A backend native quantum circuit to be executed. + observable: A backend native observable to compute the expectation value from. + param_values: A dict of values for symbolic substitution. + state: An initial state. + protocol: A shot-based measurement protocol. + endianness: Endianness of the state. + + Returns: + A tensor of expectation values. 
+ """ + observable = observable if isinstance(observable, list) else [observable] + differentiable_expectation = DifferentiableExpectation( + backend=self.backend, + circuit=circuit, + observable=observable, + param_values=param_values, + state=state, + protocol=protocol, + endianness=endianness, + ) + + if self.diff_mode == DiffMode.AD: + expectation = differentiable_expectation.ad + else: + try: + fns = get_gpsr_fns() + psr_fn = fns[self.diff_mode] + except KeyError: + raise ValueError(f"{self.diff_mode} differentiation mode is not supported") + expectation = partial(differentiable_expectation.psr, psr_fn=psr_fn, **self.psr_args) + return expectation() + + def sample( + self, + circuit: ConvertedCircuit, + param_values: dict[str, Tensor], + state: Tensor | None = None, + n_shots: int = 1, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + """Sample bitstring from the registered circuit. + + Arguments: + circuit: A backend native quantum circuit to be executed. + param_values: The values of the parameters after embedding + n_shots: The number of shots. Defaults to 1. + + Returns: + An iterable with all the sampled bitstrings + """ + with torch.no_grad(): + return self.backend.sample( + circuit=circuit, + param_values=param_values, + state=state, + n_shots=n_shots, + endianness=endianness, + ) + + def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit: + parametrized_blocks = list(uuid_to_block(circuit.block).values()) + non_prim_blocks = filter(lambda b: not isinstance(b, PrimitiveBlock), parametrized_blocks) + if len(list(non_prim_blocks)) > 0: + raise ValueError( + "The circuit contains non-primitive blocks that are currently not supported by the " + "PSR differentiable mode." 
+ ) + return self.backend.circuit(circuit) + + def observable(self, observable: AbstractBlock, n_qubits: int) -> ConvertedObservable: + if observable is not None and observable.is_parametric: + raise ValueError("PSR cannot be applied to a parametric observable.") + return self.backend.observable(observable, n_qubits) + + def convert( + self, + circuit: QuantumCircuit, + observable: list[AbstractBlock] | AbstractBlock | None = None, + ) -> Converted: + if self.diff_mode != DiffMode.AD and observable is not None: + if isinstance(observable, list): + for obs in observable: + if obs.is_parametric: + raise ValueError("PSR cannot be applied to a parametric observable.") + else: + if observable.is_parametric: + raise ValueError("PSR cannot be applied to a parametric observable.") + return self.backend.convert(circuit, observable) + + def assign_parameters(self, circuit: ConvertedCircuit, param_values: dict[str, Tensor]) -> Any: + return self.backend.assign_parameters(circuit, param_values) diff --git a/qadence/backends/utils.py b/qadence/backends/utils.py new file mode 100644 index 000000000..8275b7371 --- /dev/null +++ b/qadence/backends/utils.py @@ -0,0 +1,92 @@ +from __future__ import annotations + +from collections import Counter +from typing import Sequence + +import numpy as np +import torch +from torch import Tensor + +from qadence.utils import Endianness, int_to_basis + +# Dict of NumPy dtype -> torch dtype (when the correspondence exists) +numpy_to_torch_dtype_dict = { + np.bool_: torch.bool, + np.uint8: torch.uint8, + np.int8: torch.int8, + np.int16: torch.int16, + np.int32: torch.int32, + np.int64: torch.int64, + np.float16: torch.float16, + np.float32: torch.float32, + np.float64: torch.float64, + np.complex64: torch.complex64, + np.complex128: torch.complex128, + int: torch.int64, + float: torch.float64, + complex: torch.complex128, +} + + +def param_dict(keys: Sequence[str], values: Sequence[Tensor]) -> dict[str, Tensor]: + return {key: val for key, val in 
zip(keys, values)} + + +def numpy_to_tensor( + x: np.ndarray, + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.complex128, + requires_grad: bool = False, +) -> Tensor: + """This only copies the numpy array if device or dtype are different than the ones of x.""" + return torch.as_tensor(x, dtype=dtype, device=device).requires_grad_(requires_grad) + + +def promote_to_tensor( + x: Tensor | np.ndarray | float, + dtype: torch.dtype = torch.complex128, + requires_grad: bool = True, +) -> Tensor: + """Convert the given type inco a torch.Tensor""" + if isinstance(x, float): + return torch.tensor([[x]], dtype=dtype, requires_grad=requires_grad) + elif isinstance(x, np.ndarray): + return numpy_to_tensor( + x, dtype=numpy_to_torch_dtype_dict.get(x.dtype), requires_grad=requires_grad + ) + elif isinstance(x, Tensor): + return x.requires_grad_(requires_grad) + else: + raise ValueError(f"Don't know how to promote {type(x)} to Tensor") + + +# FIXME: Not being used, maybe remove in v1.0.0 +def count_bitstrings(sample: Tensor, endianness: Endianness = Endianness.BIG) -> Counter: + # Convert to a tensor of integers. + n_qubits = sample.size()[1] + base = torch.ones(n_qubits, dtype=torch.int64) * 2 + powers_of_2 = torch.pow(base, reversed(torch.arange(n_qubits))) + int_tensor = torch.matmul(sample, powers_of_2) + # Count occurences of integers. + count_int = torch.bincount(int_tensor) + # Return a Counter for non-empty bitstring counts. 
+ return Counter( + { + int_to_basis(k=k, n_qubits=n_qubits, endianness=endianness): count.item() + for k, count in enumerate(count_int) + if count > 0 + } + ) + + +def to_list_of_dicts(param_values: dict[str, Tensor]) -> list[dict[str, float]]: + if not param_values: + return [param_values] + + max_batch_size = max(p.size()[0] for p in param_values.values()) + batched_values = { + k: (v if v.size()[0] == max_batch_size else v.repeat(max_batch_size, 1)) + for k, v in param_values.items() + } + + return [{k: v[i] for k, v in batched_values.items()} for i in range(max_batch_size)] diff --git a/qadence/blocks/__init__.py b/qadence/blocks/__init__.py new file mode 100644 index 000000000..b9546c5d1 --- /dev/null +++ b/qadence/blocks/__init__.py @@ -0,0 +1,37 @@ +# flake8: noqa +import warnings +from typing import Any + +from qadence.register import Register +from qadence.blocks.analog import AnalogBlock, Interaction + +from .abstract import AbstractBlock +from .primitive import ( + ParametricBlock, + PrimitiveBlock, + TimeEvolutionBlock, + ScaleBlock, + ParametricControlBlock, + ControlBlock, +) +from .composite import AddBlock, ChainBlock, CompositeBlock, KronBlock, PutBlock +from .matrix import MatrixBlock +from .manipulate import from_openfermion, to_openfermion +from .utils import ( + add, + chain, + kron, + tag, + put, + block_is_commuting_hamiltonian, + block_is_qubit_hamiltonian, + parameters, + primitive_blocks, + get_pauli_blocks, + has_duplicate_vparams, +) +from .block_to_tensor import block_to_tensor +from .embedding import embedding + +# Modules to be automatically added to the qadence namespace +__all__ = ["add", "chain", "kron", "tag", "block_to_tensor"] diff --git a/qadence/blocks/abstract.py b/qadence/blocks/abstract.py new file mode 100644 index 000000000..9aaaf7713 --- /dev/null +++ b/qadence/blocks/abstract.py @@ -0,0 +1,331 @@ +from __future__ import annotations + +import json +from abc import ABC, abstractmethod, abstractproperty +from dataclasses 
import dataclass +from functools import cached_property +from pathlib import Path +from typing import ClassVar, Iterable, Tuple, Union, get_args + +import sympy +import torch +from rich.console import Console, RenderableType +from rich.tree import Tree + +from qadence.parameters import Parameter +from qadence.types import TNumber + + +@dataclass(eq=False) # Avoid unhashability errors due to mutable attributes. +class AbstractBlock(ABC): + """Base class for both primitive and composite blocks + + Attributes: + name (str): A human-readable name attached to the block type. Notice, this is + the same for all the class instances so it cannot be used for identifying + different blocks + qubit_support (tuple[int, ...]): The qubit support of the block expressed as + a tuple of integers + tag (str | None): A tag identifying a particular instance of the block which can + be used for identification and pretty printing + eigenvalues (list[float] | None): The eigenvalues of the matrix representing the block. + This is used mainly for primitive blocks and it's needed for generalized parameter + shift rule computations. Currently unused. + """ + + name: ClassVar[str] = "AbstractBlock" + tag: str | None = None + + @abstractproperty + def qubit_support(self) -> Tuple[int, ...]: + """The indices of the qubit(s) the block is acting on. + Qadence uses the ordering [0..,N-1] for qubits.""" + pass + + @abstractproperty + def n_qubits(self) -> int: + """The number of qubits in the whole system. 
A block acting on qubit N has at least n_qubits >= N + 1.
self.parameters.parameter + return ScaleBlock(self.block, p * other) + else: + return ScaleBlock(self, other if isinstance(other, Parameter) else Parameter(other)) + + def __truediv__(self, other: Union[TNumber, sympy.Basic]) -> AbstractBlock: + if not isinstance(other, (get_args(TNumber), sympy.Basic)): + raise TypeError("Cannot divide block by another block.") + ix = 1 / other + return self * ix + + def __add__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.utils import add + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only add a block to another block. Got {type(other)}.") + return add(self, other) + + def __radd__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.utils import add + + if isinstance(other, int) and other == 0: + return self + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only add a block to another block. Got {type(other)}.") + return add(other, self) + + def __iadd__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.composite import AddBlock + from qadence.blocks.utils import add + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only add a block to another block. Got {type(other)}.") + + # We make sure to unroll any AddBlocks, because for iadd we + # assume the user expected in-place addition + return add( + *self.blocks if isinstance(self, AddBlock) else (self,), + *other.blocks if isinstance(other, AddBlock) else (other,), + ) + + def __sub__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.primitive import ScaleBlock + from qadence.blocks.utils import add + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only subtract a block from another block. 
Got {type(other)}.") + if isinstance(other, ScaleBlock): + scale = other.parameters.parameter + b = ScaleBlock(other.block, -scale) + else: + b = ScaleBlock(other, Parameter(-1.0)) + return add(self, b) + + def __isub__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.composite import AddBlock + from qadence.blocks.utils import add + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only add a block to another block. Got {type(other)}.") + + # We make sure to unroll (and minus) any AddBlocks: for isub we assume the + # user expected in-place subtraction + return add( + *self.blocks if isinstance(self, AddBlock) else (self,), + *(-block for block in other.blocks) if isinstance(other, AddBlock) else (-other,), + ) + + def __pow__(self, power: int) -> AbstractBlock: + from qadence.blocks.utils import chain + + return chain(self for _ in range(power)) + + def __neg__(self) -> AbstractBlock: + return self.__mul__(-1.0) + + def __pos__(self) -> AbstractBlock: + return self + + def __matmul__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.utils import kron + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only kron a block to another block. Got {type(other)}.") + return kron(self, other) + + def __imatmul__(self, other: AbstractBlock) -> AbstractBlock: + from qadence.blocks.composite import KronBlock + from qadence.blocks.utils import kron + + if not isinstance(other, AbstractBlock): + raise TypeError(f"Can only kron a block with another block. 
Got {type(other)}.") + + # We make sure to unroll any KronBlocks, because for ixor we assume the user + # expected in-place kron + return kron( + *self.blocks if isinstance(self, KronBlock) else (self,), + *other.blocks if isinstance(other, KronBlock) else (other,), + ) + + def __iter__(self) -> Iterable: + yield self + + def __len__(self) -> int: + return 1 + + @property + def _block_title(self) -> str: + bits = ",".join(str(i) for i in self.qubit_support) + s = f"{type(self).__name__}({bits})" + + if self.tag is not None: + s += rf" \[tag: {self.tag}]" + return s + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + return Tree(self._block_title) + else: + tree.add(self._block_title) + return tree + + def __repr__(self) -> str: + console = Console() + with console.capture() as cap: + console.print(self.__rich_tree__()) + return cap.get().strip() # type: ignore [no-any-return] + + @abstractproperty + def depth(self) -> int: + pass + + @abstractmethod + def __ascii__(self, console: Console) -> RenderableType: + pass + + @abstractmethod + def _to_dict(self) -> dict: + pass + + @classmethod + @abstractmethod + def _from_dict(cls, d: dict) -> AbstractBlock: + pass + + def _to_json(self) -> str: + return json.dumps(self._to_dict()) + + @classmethod + def _from_json(cls, path: str | Path) -> AbstractBlock: + d: dict = {} + if isinstance(path, str): + path = Path(path) + try: + with open(path, "r") as file: + d = json.load(file) + + except Exception as e: + print(f"Unable to load block due to {e}") + + return AbstractBlock._from_dict(d) + + def _to_file(self, path: str | Path = Path("")) -> None: + if isinstance(path, str): + path = Path(path) + try: + with open(path, "w") as file: + file.write(self._to_json()) + except Exception as e: + print(f"Unable to write {type(self)} to disk due to {e}") + + def __hash__(self) -> int: + return hash(self._to_json()) + + def dagger(self) -> AbstractBlock: + raise NotImplementedError( + f"Hermitian adjoint of 
the Block '{type(self)}' is not implemented yet!" + ) + + @property + def is_parametric(self) -> bool: + from qadence.blocks.utils import parameters + + params: list[sympy.Basic] = parameters(self) + return any(isinstance(p, Parameter) for p in params) + + def tensor(self, values: dict[str, TNumber | torch.Tensor] = {}) -> torch.Tensor: + from .block_to_tensor import block_to_tensor + + return block_to_tensor(self, values) + + @property + def _is_diag_pauli(self) -> bool: + from qadence.blocks import CompositeBlock, PrimitiveBlock, ScaleBlock + from qadence.blocks.utils import block_is_qubit_hamiltonian + + if not block_is_qubit_hamiltonian(self): + return False + + elif isinstance(self, CompositeBlock): + return all([b._is_diag_pauli for b in self.blocks]) + + elif isinstance(self, ScaleBlock): + return self.block._is_diag_pauli + elif isinstance(self, PrimitiveBlock): + return self.name in ["Z", "I"] + return False + + @property + def is_identity(self) -> bool: + """Identity predicate for blocks.""" + from qadence.blocks import CompositeBlock, PrimitiveBlock, ScaleBlock + + if isinstance(self, CompositeBlock): + return all([b.is_identity for b in self.blocks]) + elif isinstance(self, ScaleBlock): + return self.block.is_identity + elif isinstance(self, PrimitiveBlock): + return self.name == "I" + return False diff --git a/qadence/blocks/analog.py b/qadence/blocks/analog.py new file mode 100644 index 000000000..e65c5dafe --- /dev/null +++ b/qadence/blocks/analog.py @@ -0,0 +1,315 @@ +from __future__ import annotations + +from abc import abstractproperty +from dataclasses import dataclass +from typing import Tuple + +import numpy as np +import torch +from rich.console import Console, RenderableType +from rich.tree import Tree +from sympy import Basic + +from qadence.blocks.primitive import AbstractBlock +from qadence.parameters import Parameter, ParamMap, evaluate +from qadence.qubit_support import QubitSupport +from qadence.register import Register +from 
qadence.types import Interaction + + +@dataclass(eq=False, repr=False) +class AnalogBlock(AbstractBlock): + @abstractproperty # type: ignore[misc, override] + def qubit_support(self) -> QubitSupport: + pass + + @abstractproperty + def duration(self) -> Parameter: + pass + + def __ascii__(self, console: Console) -> RenderableType: + raise NotImplementedError + + def __eq__(self, other: object) -> bool: + raise NotImplementedError + + @classmethod + def _from_dict(cls, d: dict) -> AnalogBlock: + raise NotImplementedError + + def _to_dict(self) -> dict: + raise NotImplementedError + + @property + def depth(self) -> int: + raise NotImplementedError + + @property + def n_qubits(self) -> int: + if self.qubit_support.is_global: + raise ValueError("Cannot compute number of qubits of a block with global support.") + return max(self.qubit_support) + 1 # type: ignore[no-any-return] + + @property + def n_supports(self) -> int: + if self.qubit_support.is_global: + raise ValueError("Cannot compute number of qubits of a block with global support.") + return len(self.qubit_support) # type: ignore[no-any-return] + + @property + def eigenvalues_generator(self) -> torch.Tensor: + msg = ( + "Eigenvalues of analog blocks can be computed via " + "`add_interaction(register, block).eigenvalues`" + ) + raise NotImplementedError(msg) + + @property + def eigenvalues(self) -> torch.Tensor: + msg = ( + "Eigenvalues of analog blocks can be computed via " + "`add_interaction(register, block).eigenvalues`" + ) + raise NotImplementedError(msg) + + @property + def _block_title(self) -> str: + t = self.duration + q = self.qubit_support + s = f"{type(self).__name__}(t={evaluate(t)}, support={q})" + + if self.tag is not None: + s += rf" \[tag: {self.tag}]" + return s + + def compute_eigenvalues_generator( + self, register: Register, block: AbstractBlock, spacing: float + ) -> torch.Tensor: + from qadence import add_interaction + + return add_interaction(register, block, 
spacing=spacing).eigenvalues_generator + + +@dataclass(eq=False, repr=False) +class WaitBlock(AnalogBlock): + """ + Waits. In real interacting quantum devices, it means letting the system evolve freely according + to the time-dependent Schrodinger equation. With emulators, this block is translated to an + appropriate interaction Hamiltonian, for example, an Ising interation + + Hᵢₙₜ = ∑ᵢⱼ C₆/rᵢⱼ⁶ nᵢnⱼ + + or an XY-interaction + + Hᵢₙₜ = ∑ᵢⱼ C₃/rⱼⱼ³ (XᵢXⱼ + ZᵢZⱼ) + + with `nᵢ = (1-Zᵢ)/2`. + + To construct this block, use the [`wait`][qadence.operations.wait] function. + + Can be used with [`add_interaction`][qadence.transpile.emulate.add_interaction]. + """ + + _eigenvalues_generator: torch.Tensor | None = None + + parameters: ParamMap = ParamMap(duration=1000.0) # ns + qubit_support: QubitSupport = QubitSupport("global") + + @property + def eigenvalues_generator(self) -> torch.Tensor | None: + return self._eigenvalues_generator + + @eigenvalues_generator.setter + def eigenvalues_generator(self, value: torch.Tensor) -> None: + if not isinstance(value, torch.Tensor): + value = torch.tensor(value) + self._eigenvalues_generator = value + + @property + def duration(self) -> Basic: + return self.parameters.duration + + +@dataclass(eq=False, repr=False) +class ConstantAnalogRotation(AnalogBlock): + """Implements a constant analog rotation with interaction dictated by the chosen Hamiltonian + + H = ∑ᵢ(hΩ/2 sin(φ)*Xᵢ - cos(φ)*Yᵢ - hδnᵢ) + Hᵢₙₜ. + + To construct this block you can use of the following convenience wrappers: + - The general rotation operation [`AnalogRot`][qadence.operations.AnalogRot] + - Shorthands for rotatins around an axis: + [`AnalogRX`][qadence.operations.AnalogRX], + [`AnalogRY`][qadence.operations.AnalogRY], + [`AnalogRZ`][qadence.operations.AnalogRZ] + + Can be used with [`add_interaction`][qadence.transpile.emulate.add_interaction]. 
+ WARNING: do not use `ConstantAnalogRotation` with `alpha` as differentiable parameter - use + the convenience wrappers mentioned above. + """ + + _eigenvalues_generator: torch.Tensor | None = None + + parameters: ParamMap = ParamMap( + alpha=0.0, # rad + duration=1000.0, # ns + omega=0.0, # rad/μs + delta=0.0, # rad/μs + phase=0.0, # rad + ) + qubit_support: QubitSupport = QubitSupport("global") + + @property + def _block_title(self) -> str: + a = self.parameters.alpha + t = self.parameters.duration + q = self.qubit_support + o = self.parameters.omega + d = self.parameters.delta + p = self.parameters.phase + s = f"{type(self).__name__}(α={a}, t={t}, support={q}, Ω={o}, δ={d}, φ={p})" + + if self.tag is not None: + s += rf" \[tag: {self.tag}]" + return s + + @property + def eigenvalues_generator(self) -> torch.Tensor: + if self._eigenvalues_generator is None: + raise ValueError( + "Set ConstantAnalogRotation eigenvalues with compute_eigenvalues_generator method." + ) + return self._eigenvalues_generator + + @eigenvalues_generator.setter + def eigenvalues_generator(self, value: torch.Tensor) -> None: + if not isinstance(value, torch.Tensor): + value = torch.tensor(value) + self._eigenvalues_generator = value + + @property + def duration(self) -> Basic: + return self.parameters.duration + + +#################################################################################################### + + +# new, more strict versions of chain/kron blocks to make sure there are no gaps in composed blocks + + +@dataclass(eq=False, repr=False, init=False) +class AnalogComposite(AnalogBlock): + blocks: Tuple[AnalogBlock, ...] 
= () + + def __init__(self, blocks: Tuple[AnalogBlock, ...]): + self.blocks = blocks + # FIXME: add additional Wait block if we have parameterized durations + + @property # type: ignore[misc, override] + def qubit_support(self) -> QubitSupport: + return sum([b.qubit_support for b in self.blocks], start=QubitSupport()) + + @abstractproperty + def duration(self) -> Parameter: + pass + + @property + def _block_title(self) -> str: + t = self.duration + q = self.qubit_support + s = f"{type(self).__name__}(t={t}, support={q})" + + if self.tag is not None: + s += rf" \[tag: {self.tag}]" + return s + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + tree = Tree(self._block_title) + else: + tree = tree.add(self._block_title) + for block in self.blocks: + block.__rich_tree__(tree) + return tree + + +@dataclass(eq=False, repr=False, init=False) +class AnalogChain(AnalogComposite): + def __init__(self, blocks: Tuple[AnalogBlock, ...]): + """A chain of analog blocks. Needed because analog blocks require + stricter validation than the general `ChainBlock`. + + `AnalogChain`s can only be constructed from `AnalogKron` blocks or + _**globally supported**_, primitive, analog blocks (like `WaitBlock`s and + `ConstantAnalogRotation`s). + + Automatically constructed by the [`chain`][qadence.blocks.utils.chain] + function if only analog blocks are given. 
+ + Example: + ```python exec="on" source="material-block" result="json" + from qadence import X, chain, wait + + b = chain(wait(200), wait(200)) + print(type(b)) # this is an `AnalogChain` + + b = chain(X(0), wait(200)) + print(type(b)) # this is a general `ChainBlock` + ``` + """ + for b in blocks: + if not (isinstance(b, AnalogKron) or b.qubit_support.is_global): + raise ValueError("Only KronBlocks or global blocks can be chain'ed.") + self.blocks = blocks + + @property + def duration(self) -> Parameter: + return Parameter(sum(evaluate(b.duration) for b in self.blocks)) + + +@dataclass(eq=False, repr=False, init=False) +class AnalogKron(AnalogComposite): + def __init__(self, blocks: Tuple[AnalogBlock, ...], interaction: Interaction = Interaction.NN): + """Stack analog blocks vertically (i.e. in time). Needed because analog require + stricter validation than the general `KronBlock`. + + `AnalogKron`s can only be constructed from _**non-global**_, analog blocks + with the _**same duration**_. 
+ """ + if len(blocks) == 0: + raise NotImplementedError("Empty KronBlocks not supported") + + self.blocks = blocks + self.interaction = interaction + + qubit_support = QubitSupport() + duration = blocks[0].duration + for b in blocks: + if not isinstance(b, AnalogBlock): + raise ValueError("Can only kron `AnalgoBlock`s with other `AnalgoBlock`s.") + + if b.qubit_support == QubitSupport("global"): + raise ValueError("Blocks with global support cannot be kron'ed.") + + if not qubit_support.is_disjoint(b.qubit_support): + raise ValueError("Make sure blocks act on distinct qubits!") + + if not np.isclose(evaluate(duration), evaluate(b.duration)): + raise ValueError("Kron'ed blocks have to have same duration.") + + qubit_support += b.qubit_support + + self.blocks = blocks + + @property + def duration(self) -> Parameter: + return self.blocks[0].duration + + +def chain(*args: AnalogBlock) -> AnalogChain: + return AnalogChain(blocks=args) + + +def kron(*args: AnalogBlock) -> AnalogKron: + return AnalogKron(blocks=args) diff --git a/qadence/blocks/block_to_tensor.py b/qadence/blocks/block_to_tensor.py new file mode 100644 index 000000000..2c98d96ba --- /dev/null +++ b/qadence/blocks/block_to_tensor.py @@ -0,0 +1,465 @@ +from __future__ import annotations + +from uuid import UUID + +import torch + +from qadence.blocks import ( + AbstractBlock, + AddBlock, + ChainBlock, + ControlBlock, + KronBlock, + ParametricBlock, + ParametricControlBlock, + PrimitiveBlock, + ScaleBlock, +) +from qadence.blocks.utils import chain, kron, uuid_to_expression +from qadence.parameters import evaluate, stringify +from qadence.types import Endianness, TensorType, TNumber + +J = torch.tensor(1j) + +ZEROMAT = torch.zeros((2, 2), dtype=torch.cdouble).unsqueeze(0) +IMAT = torch.eye(2, dtype=torch.cdouble).unsqueeze(0) +XMAT = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble).unsqueeze(0) +YMAT = torch.tensor([[0, -1j], [1j, 0]], dtype=torch.cdouble).unsqueeze(0) +ZMAT = torch.tensor([[1, 0], [0, 
-1]], dtype=torch.cdouble).unsqueeze(0) +NMAT = torch.tensor([[0, 0], [0, 1]], dtype=torch.cdouble).unsqueeze(0) +SMAT = torch.tensor([[1, 0], [0, 1j]], dtype=torch.cdouble).unsqueeze(0) +SDAGMAT = torch.tensor([[1, 0], [0, -1j]], dtype=torch.cdouble).unsqueeze(0) +TMAT = torch.tensor([[1, 0], [0, torch.exp(J * torch.pi / 4)]], dtype=torch.cdouble).unsqueeze(0) +TDAGMAT = torch.tensor([[1, 0], [0, torch.exp(J * torch.pi / 4)]], dtype=torch.cdouble).unsqueeze(0) +HMAT = ( + 1 + / torch.sqrt(torch.tensor(2)) + * torch.tensor([[1, 1], [1, -1]], dtype=torch.cdouble).unsqueeze(0) +) + + +OPERATIONS_DICT = { + "Zero": ZEROMAT, + "I": IMAT, + "X": XMAT, + "Y": YMAT, + "Z": ZMAT, + "S": SMAT, + "SDagger": SDAGMAT, + "T": TMAT, + "TDagger": TDAGMAT, + "H": HMAT, + "N": NMAT, +} + + +def _fill_identities( + block_mat: torch.Tensor, + qubit_support: tuple, + full_qubit_support: tuple | list, + diag_only: bool = False, + endianness: Endianness = Endianness.BIG, +) -> torch.Tensor: + """Returns a Kronecker product of matrix defined on a subset of qubits with identities + acting on the unused qubits. 
+ + Args: + block_mat (torch.Tensor): matrix of an arbitrary gate + qubit_support (tuple): qubit support of `block_mat` + full_qubit_support (tuple): full qubit support of the circuit + diag_only (bool): Use diagonals only + + Returns: + torch.Tensor: augmented matrix with dimensions (2**nqubits, 2**nqubits) + or a tensor (2**n_qubits) if diag_only + """ + qubit_support = tuple(sorted(qubit_support)) + mat = IMAT if qubit_support[0] != full_qubit_support[0] else block_mat + if diag_only: + mat = torch.diag(mat.squeeze(0)) + for i in full_qubit_support[1:]: + if i == qubit_support[0]: + other = torch.diag(block_mat.squeeze(0)) if diag_only else block_mat + if endianness == Endianness.LITTLE: + mat = torch.kron(other, mat) + else: + mat = torch.kron(mat, other) + elif i not in qubit_support: + other = torch.diag(IMAT.squeeze(0)) if diag_only else IMAT + if endianness == Endianness.LITTLE: + mat = torch.kron(other.contiguous(), mat.contiguous()) + else: + mat = torch.kron(mat.contiguous(), other.contiguous()) + + return mat + + +def _rot_matrices(theta: torch.Tensor, generator: torch.Tensor) -> torch.Tensor: + """ + Args: + theta(torch.Tensor): input parameter + generator(torch.Tensor): the tensor of the generator + Returns: + torch.Tensor: a batch of gates after applying theta + """ + batch_size = theta.size(0) + + cos_t = torch.cos(theta / 2).unsqueeze(1).unsqueeze(2) + cos_t = cos_t.repeat((1, 2, 2)) + sin_t = torch.sin(theta / 2).unsqueeze(1).unsqueeze(2) + sin_t = sin_t.repeat((1, 2, 2)) + + batch_imat = IMAT.repeat(batch_size, 1, 1) + batch_generator = generator.repeat(batch_size, 1, 1) + + return cos_t * batch_imat - 1j * sin_t * batch_generator + + +def _u_matrix(theta: tuple[torch.Tensor, ...]) -> torch.Tensor: + """ + Args: + theta(tuple[torch.Tensor]): tuple of torch Tensor with 3 elements + per each parameter of the arbitrary rotation + Returns: + torch.Tensor: matrix corresponding to the U gate after applying theta + """ + z_phi = _rot_matrices(theta[0], 
OPERATIONS_DICT["Z"]) + y_theta = _rot_matrices(theta[1], OPERATIONS_DICT["Y"]) + z_omega = _rot_matrices(theta[2], OPERATIONS_DICT["Z"]) + + res = torch.matmul(y_theta, z_phi) + res = torch.matmul(z_omega, res) + return res + + +def _phase_matrix(theta: torch.Tensor | TNumber) -> torch.Tensor: + """ + Args: + theta(torch.Tensor): input parameter + Returns: + torch.Tensor: a batch of gates after applying theta + """ + exp_t = torch.exp(1j * theta).unsqueeze(1).unsqueeze(2) + exp_t = exp_t.repeat((1, 2, 2)) + return 0.5 * (IMAT + ZMAT) + exp_t * 0.5 * (IMAT - ZMAT) + + +def _parametric_matrix(gate: ParametricBlock, values: dict[str, torch.Tensor]) -> torch.Tensor: + from qadence.operations import PHASE, RX, RY, RZ, U + + theta = _gate_parameters(gate, values) + if isinstance(gate, (RX, RY, RZ)): + pmat = _rot_matrices( + theta[0], OPERATIONS_DICT[gate.generator.name] # type:ignore[union-attr] + ) + elif isinstance(gate, U): + pmat = _u_matrix(theta) + elif isinstance(gate, PHASE): + pmat = _phase_matrix(theta[0]) + return pmat + + +def _controlled_block_with_params( + block: ParametricControlBlock | ControlBlock, +) -> tuple[AbstractBlock, dict[str, torch.Tensor]]: + """Redefines parameterized/non-parameterized controlled block. 
+ + Args: + block (ParametricControlBlock): original controlled rotation block + + Returns: + AbstractBlock: redefined controlled rotation block + dict with new parameters which are added + """ + from qadence.operations import I, Z + + # redefine controlled rotation block in a way suitable for matrix evaluation + control = block.qubit_support[:-1] + target = block.qubit_support[-1] + p1 = kron(0.5 * I(qubit) + (-0.5) * Z(qubit) for qubit in control) + p0 = I(control[0]) - p1 + c_block = kron(p0, I(target)) + kron(p1, block.blocks[0]) + + uuid_expr = uuid_to_expression(c_block) + newparams = { + stringify(expr): evaluate(expr, {}, as_torch=True) + for uuid, expr in uuid_expr.items() + if expr.is_number + } + + return c_block, newparams + + +def _swap_block(block: AbstractBlock) -> AbstractBlock: + """Redefines SWAP block. + + Args: + block (AbstractBlock): original SWAP block + + Returns: + AbstractBlock: redefined SWAP block + """ + from qadence.operations import CNOT + + # redefine controlled rotation block in a way suitable for matrix evaluation + control = block.qubit_support[0] + target = block.qubit_support[1] + swap_block = chain(CNOT(control, target), CNOT(target, control), CNOT(control, target)) + + return swap_block + + +def _cswap_block(block: AbstractBlock) -> torch.Tensor: + from qadence.operations import Toffoli + + control = block.qubit_support[0] + target1 = block.qubit_support[1] + target2 = block.qubit_support[2] + + cswap_block = chain( + Toffoli((control, target2), target1), + Toffoli((control, target1), target2), + Toffoli((control, target2), target1), + ) + + return cswap_block + + +def _extract_param_names_or_uuids(b: AbstractBlock, uuids: bool = False) -> tuple[str, ...]: + if isinstance(b, ParametricBlock): + return ( + tuple(b.parameters.uuids()) + if uuids + else tuple(map(stringify, b.parameters.expressions())) + ) + else: + return () + + +def is_valid_uuid(value: str) -> bool: + try: + UUID(value) + return True + except ValueError: + 
return False + + +def _gate_parameters(b: AbstractBlock, values: dict[str, torch.Tensor]) -> tuple[torch.Tensor, ...]: + uuids = is_valid_uuid(list(values.keys())[0]) + ks = _extract_param_names_or_uuids(b, uuids=uuids) + return tuple(values[k] for k in ks) + + +def block_to_diagonal( + block: AbstractBlock, + qubit_support: tuple | list | None = None, + use_full_support: bool = True, + endianness: Endianness = Endianness.BIG, +) -> torch.Tensor: + if block.is_parametric: + raise TypeError("Sparse observables cant be parametric.") + if not block._is_diag_pauli: + raise TypeError("Sparse observables can only be used on paulis which are diagonal.") + if qubit_support is None: + if use_full_support: + qubit_support = tuple(range(0, block.n_qubits)) + else: + qubit_support = block.qubit_support + nqubits = len(qubit_support) # type: ignore [arg-type] + if isinstance(block, (ChainBlock, KronBlock)): + v = torch.ones(2**nqubits, dtype=torch.cdouble) + for b in block.blocks: + v *= block_to_diagonal(b, qubit_support) + if isinstance(block, AddBlock): + t = torch.zeros(2**nqubits, dtype=torch.cdouble) + for b in block.blocks: + t += block_to_diagonal(b, qubit_support) + v = t + elif isinstance(block, ScaleBlock): + _s = evaluate(block.scale, {}, as_torch=True) # type: ignore[attr-defined] + _s = _s.detach() # type: ignore[union-attr] + v = _s * block_to_diagonal(block.block, qubit_support) + + elif isinstance(block, PrimitiveBlock): + v = _fill_identities( + OPERATIONS_DICT[block.name], + block.qubit_support, + qubit_support, # type: ignore [arg-type] + diag_only=True, + endianness=endianness, + ) + return v + + +# version that will accept user params +def block_to_tensor( + block: AbstractBlock, + values: dict[str, TNumber | torch.Tensor] = {}, + qubit_support: tuple | None = None, + use_full_support: bool = True, + tensor_type: TensorType = TensorType.DENSE, + endianness: Endianness = Endianness.BIG, +) -> torch.Tensor: + """ + Convert a block into a torch tensor. 

    Arguments:
        block (AbstractBlock): The block to convert.
        values (dict): An optional dict with values for parameters.
        qubit_support (tuple): The qubit_support of the block.
        use_full_support (bool): True infers the total number of qubits.
        tensor_type (TensorType): the target tensor type.

    Returns:
        A torch.Tensor.

    Examples:
        ```python exec="on" source="material-block" result="json"
        from qadence import hea, hamiltonian_factory, Z, block_to_tensor

        block = hea(2,2)
        print(block_to_tensor(block))

        # In case you have a diagonal observable, you can use
        obs = hamiltonian_factory(2, detuning = Z)
        print(block_to_tensor(obs, tensor_type="SparseDiagonal"))
        ```
    """

    # FIXME: default use_full_support to False. In general, it would
    # be more efficient to do that, and make sure that computations such
    # as observables only do the matmul of the size of the qubit support.

    if tensor_type == TensorType.DENSE:
        from qadence.blocks import embedding

        # Embed user-facing parameters into expression-level values and
        # delegate to the embedded-parameter implementation below.
        (ps, embed) = embedding(block)
        return _block_to_tensor_embedded(
            block, embed(ps, values), qubit_support, use_full_support, endianness=endianness
        )

    elif tensor_type == TensorType.SPARSEDIAGONAL:
        # Build a sparse COO tensor that holds only the diagonal entries.
        # NOTE: `values` is rebound here and shadows the parameter of the
        # same name (the parameter is no longer needed at this point).
        t = block_to_diagonal(block, endianness=endianness)
        indices, values, size = torch.nonzero(t), t[t != 0], len(t)
        indices = torch.stack((indices.flatten(), indices.flatten()))
        return torch.sparse_coo_tensor(indices, values, (size, size))


# version that accepts embedded params
def _block_to_tensor_embedded(
    block: AbstractBlock,
    values: dict[str, TNumber | torch.Tensor] = {},
    qubit_support: tuple | None = None,
    use_full_support: bool = True,
    endianness: Endianness = Endianness.BIG,
) -> torch.Tensor:
    """Recursively convert a block whose parameters are already embedded
    (numeric tensors, no free symbols) into a dense tensor.

    NOTE(review): the mutable default ``values={}`` is never mutated in this
    function, but a ``None`` default would be the safer idiom — TODO confirm.
    """
    from qadence.blocks import MatrixBlock
    from qadence.operations import CSWAP, SWAP, HamEvo

    # get number of qubits
    if qubit_support is None:
        if use_full_support:
            qubit_support = tuple(range(0, block.n_qubits))
        else:
            qubit_support = block.qubit_support
    nqubits = len(qubit_support)

    if isinstance(block, (ChainBlock, KronBlock)):
        # create identity matrix of appropriate dimensions
        mat = IMAT.clone()
        for i in range(nqubits - 1):
            mat = torch.kron(mat, IMAT)

        # perform matrix multiplications
        # (left-multiplying each block accumulates B_n ... B_2 B_1,
        # i.e. circuit order: the first block acts first)
        for b in block.blocks:
            other = _block_to_tensor_embedded(b, values, qubit_support, endianness=endianness)
            mat = torch.matmul(other, mat)

    elif isinstance(block, AddBlock):
        # create zero matrix of appropriate dimensions
        mat = ZEROMAT.clone()
        for _ in range(nqubits - 1):
            mat = torch.kron(mat, ZEROMAT)

        # perform matrix summation
        for b in block.blocks:
            mat = mat + _block_to_tensor_embedded(b, values, qubit_support, endianness=endianness)

    elif isinstance(block, HamEvo):
        if block.qubit_support:
            if isinstance(block.generator, AbstractBlock):
                # get matrix representation of generator
                # (recursion uses the full qubit_support, so gen_mat already
                # has the right dimension — no identity filling needed here)
                gen_mat = _block_to_tensor_embedded(
                    block.generator, values, qubit_support, endianness=endianness
                )

                # calculate evolution matrix
                (p,) = _gate_parameters(block, values)
                prefac = -J * p
                mat = torch.linalg.matrix_exp(prefac * gen_mat)
            elif isinstance(block.generator, torch.Tensor):
                gen_mat = block.generator

                # calculate evolution matrix
                # NOTE(review): two gate parameters are unpacked here —
                # presumably (time, generator); confirm against HamEvo.
                (p, _) = _gate_parameters(block, values)
                prefac = -J * p
                mat = torch.linalg.matrix_exp(prefac * gen_mat)

                # add missing identities on unused qubits
                mat = _fill_identities(
                    mat, block.qubit_support, qubit_support, endianness=endianness
                )
            else:
                raise TypeError(
                    f"Generator of type {type(block.generator)} not supported in HamEvo."
                )
        else:
            raise ValueError("qubit_support is not defined for HamEvo block.")

        # ensure a leading batch dimension on the result
        mat = mat.unsqueeze(0) if len(mat.size()) == 2 else mat

    elif isinstance(block, CSWAP):
        cswap_block = _cswap_block(block)
        mat = _block_to_tensor_embedded(cswap_block, values, qubit_support, endianness=endianness)

    elif isinstance(block, (ControlBlock, ParametricControlBlock)):
        c_block, newparams = _controlled_block_with_params(block)
        newparams.update(values)
        mat = _block_to_tensor_embedded(c_block, newparams, qubit_support, endianness=endianness)

    elif isinstance(block, ScaleBlock):
        (scale,) = _gate_parameters(block, values)
        mat = scale * _block_to_tensor_embedded(
            block.block, values, qubit_support, endianness=endianness
        )

    elif isinstance(block, ParametricBlock):
        block_mat = _parametric_matrix(block, values)

        # add missing identities on unused qubits
        mat = _fill_identities(block_mat, block.qubit_support, qubit_support, endianness=endianness)

    elif isinstance(block, MatrixBlock):
        mat = block.matrix.unsqueeze(0)
        # FIXME: properly handle identity filling in matrix blocks
        # mat = _fill_identities(
        #    block.matrix.unsqueeze(0),
        #    block.qubit_support,
        #    qubit_support,
        #    endianness=endianness,
        # )

    elif isinstance(block, SWAP):
        swap_block = _swap_block(block)
        mat = _block_to_tensor_embedded(swap_block, values, qubit_support, endianness=endianness)

    elif block.name in OPERATIONS_DICT.keys():
        block_mat = OPERATIONS_DICT[block.name]

        # add missing identities on unused qubits
        mat = _fill_identities(block_mat, block.qubit_support, qubit_support, endianness=endianness)

    else:
        raise TypeError(f"Conversion for block type {type(block)} not supported.")

    return mat
diff --git a/qadence/blocks/composite.py b/qadence/blocks/composite.py
new file mode 100644
index 000000000..2c8156c7b
--- /dev/null
+++ b/qadence/blocks/composite.py
@@ -0,0 +1,261 @@
from __future__ import annotations

from typing import Tuple
+import torch +from rich.columns import Columns +from rich.console import Console, Group, RenderableType +from rich.padding import Padding +from rich.panel import Panel +from rich.tree import Tree + +from qadence.parameters import Parameter +from qadence.qubit_support import QubitSupport, QubitSupportType + +from .abstract import AbstractBlock +from .primitive import ParametricBlock + + +class CompositeBlock(AbstractBlock): + """Block which composes multiple blocks into one larger block (which can again be composed). + Composite blocks are constructed via [`chain`][qadence.blocks.utils.chain], + [`kron`][qadence.blocks.utils.kron], and [`add`][qadence.blocks.utils.add]. + """ + + name = "CompositeBlock" + blocks: Tuple[AbstractBlock, ...] + + @property + def qubit_support(self) -> Tuple[int, ...]: + from qadence.blocks.analog import AnalogBlock + + anablocks = filter(lambda b: isinstance(b, AnalogBlock), self.blocks) + digiblocks = filter(lambda b: not isinstance(b, AnalogBlock), self.blocks) + digital = sum([b.qubit_support for b in digiblocks], start=QubitSupport()) + analog = sum([b.qubit_support for b in anablocks], start=QubitSupport()) + return digital + analog + + @property + def eigenvalues_generator(self) -> torch.Tensor: + return torch.empty(0) + + @property + def n_qubits(self) -> int: + if self.qubit_support: + return max(self.qubit_support) + 1 + else: + return 0 + + @property + def n_supports(self) -> int: + return len(self.qubit_support) + + @property + def depth(self) -> int: + return 1 + max([b.depth for b in self.blocks]) + + def __iter__(self) -> CompositeBlock: + self._iterator = iter(self.blocks) + return self + + def __next__(self) -> AbstractBlock: + return next(self._iterator) + + def __getitem__(self, item: int) -> AbstractBlock: + return self.blocks[item] + + def __len__(self) -> int: + return len(self.blocks) + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + tree = Tree(self._block_title) + else: + tree = 
tree.add(self._block_title) + for block in self.blocks: + block.__rich_tree__(tree) + return tree + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, type(self)): + if len(self.blocks) != len(other.blocks): + return False + return self.tag == other.tag and all( + [b0 == b1 for (b0, b1) in zip(self.blocks, other.blocks)] + ) + return False + + def __contains__(self, other: object) -> bool: + # Check containment by instance. + if isinstance(other, AbstractBlock): + for b in self.blocks: + if isinstance(b, CompositeBlock) and other in b: + return True + elif b == other: + return True + elif isinstance(other, Parameter): + for b in self.blocks: + if isinstance(b, ParametricBlock) or isinstance(b, CompositeBlock): + if other in b: + return True + # Check containment by type. + elif isinstance(other, type): + for b in self.blocks: + if isinstance(b, CompositeBlock) and other in b: + return True + elif type(b) == other: + return True + else: + raise TypeError( + f"Can not check for containment between {type(self)} and {type(other)}." 
+ ) + return False + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "qubit_support": self.qubit_support, + "tag": self.tag, + "blocks": [b._to_dict() for b in self.blocks], + } + + @classmethod + def _from_dict(cls, d: dict) -> CompositeBlock: + from qadence import blocks as qadenceblocks + from qadence import operations + from qadence.blocks.utils import _construct, tag + + blocks = [ + getattr(operations, b["type"])._from_dict(b) + if hasattr(operations, b["type"]) + else getattr(qadenceblocks, b["type"])._from_dict(b) + for b in d["blocks"] + ] + block = _construct(cls, blocks) # type: ignore[arg-type] + if d["tag"] is not None: + tag(block, d["tag"]) + return block + + def dagger(self) -> CompositeBlock: # type: ignore[override] + reversed_blocks = tuple(block.dagger() for block in reversed(self.blocks)) + return self.__class__(reversed_blocks) # type: ignore[arg-type] + + def __hash__(self) -> int: + return hash(self._to_json()) + + +class PutBlock(CompositeBlock): + name = "put" + + def __init__(self, block: AbstractBlock, support: tuple): + # np = max(support) + 1 - min(support) + # nb = block.nqubits + # assert np == nb, f"You are trying to put a block with {nb} qubits on {np} qubits." + self.blocks = (block,) + self._qubit_support = support + super().__init__() + + @property + def qubit_support(self) -> Tuple[int, ...]: + return self._qubit_support + + @property + def n_qubits(self) -> int: + return max(self.qubit_support) + 1 - min(self.qubit_support) + + @property + def _block_title(self) -> str: + support = ",".join(str(i) for i in self.qubit_support) + return f"put on ({support})" + + def __ascii__(self, console: Console) -> RenderableType: + return self.blocks[0].__ascii__(console) + + def dagger(self) -> PutBlock: + return PutBlock(self.blocks[0].dagger(), self.qubit_support) + + +class ChainBlock(CompositeBlock): + """Chains blocks sequentially. 
Constructed via [`chain`][qadence.blocks.utils.chain]""" + + name = "chain" + + def __init__(self, blocks: Tuple[AbstractBlock, ...]): + self.blocks = blocks + + def __ascii__(self, console: Console) -> RenderableType: + # FIXME: deal with other paddings than 1 + padding = 1 + border_width = 1 + + # FIXME: deal with primitive block heights other than 3 + h = 3 + + def pad(b: AbstractBlock) -> Padding: + top = (min(b.qubit_support) - min(self.qubit_support)) * h + return Padding(b.__ascii__(console), (top, 0, 0, 0)) + + cols = [pad(b) for b in self.blocks] + w = sum([console.measure(c).minimum + padding for c in cols]) + w += padding + 2 * border_width + + return Panel(Columns(cols), title=self.tag, width=w) + + +class KronBlock(CompositeBlock): + """Stacks blocks horizontally. Constructed via [`kron`][qadence.blocks.utils.kron].""" + + name = "kron" + + def __init__(self, blocks: Tuple[AbstractBlock, ...]): + if len(blocks) == 0: + raise NotImplementedError("Empty KronBlocks not supported") + + qubit_support = QubitSupport() + for b in blocks: + assert ( + QubitSupportType.GLOBAL, + ) != b.qubit_support, "Blocks with global support cannot be kron'ed." + assert qubit_support.is_disjoint( + b.qubit_support + ), "Make sure blocks act on distinct qubits!" + qubit_support += b.qubit_support + + self.blocks = blocks + + def __ascii__(self, console: Console) -> RenderableType: + ps = [b.__ascii__(console) for b in self.blocks] + return Panel(Group(*ps), title=self.tag, expand=False) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, KronBlock): + if len(self.blocks) != len(other.blocks): + return False + return self.tag == other.tag and all([b in other for b in self.blocks]) + return False + + +class AddBlock(CompositeBlock): + """Adds blocks. 
Constructed via [`add`][qadence.blocks.utils.add].""" + + name = "add" + + def __init__(self, blocks: Tuple[AbstractBlock, ...]): + self.blocks = blocks + + def __ascii__(self, console: Console) -> RenderableType: + ps = [b.__ascii__(console) for b in self.blocks] + return Panel(Group(*ps), title=self.tag, expand=False) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, AddBlock): + if len(self.blocks) != len(other.blocks): + return False + return self.tag == other.tag and all([b in other for b in self.blocks]) + return False diff --git a/qadence/blocks/embedding.py b/qadence/blocks/embedding.py new file mode 100644 index 000000000..077ccb20e --- /dev/null +++ b/qadence/blocks/embedding.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +from typing import Callable, Iterable, List + +import numpy as np +import sympy +import sympytorch # type: ignore [import] +import torch +from torch import Tensor + +from qadence.blocks import ( + AbstractBlock, +) +from qadence.blocks.utils import ( + expressions, + parameters, + uuid_to_expression, +) +from qadence.parameters import evaluate, stringify, torchify + +StrTensorDict = dict[str, Tensor] + + +def unique(x: Iterable) -> List: + return list(set(x)) + + +def embedding( + block: AbstractBlock, to_gate_params: bool = False +) -> tuple[StrTensorDict, Callable[[StrTensorDict, StrTensorDict], StrTensorDict],]: + """Construct embedding function which maps user-facing parameters to either *expression-level* + parameters or *gate-level* parameters. 
The constructed embedding function has the signature:

        embedding_fn(params: StrTensorDict, inputs: StrTensorDict) -> StrTensorDict:

    which means that it maps the *variational* parameter dict `params` and the *feature* parameter
    dict `inputs` to one new parameter dict `embedded_dict` which holds all parameters that are
    needed to execute a circuit on a given backend. There are two different *modes* for this
    mapping:

    - *Expression-level* parameters: For AD-based optimization. For every unique expression we end
      up with one entry in the embedded dict:
      `len(embedded_dict) == len(unique_parameter_expressions)`.
    - *Gate-level* parameters: For PSR-based optimization or real devices. One parameter for each
      gate parameter, regardless if they are based on the same expression. `len(embedded_dict) ==
      len(parametric_gates)`. This is needed because PSR requires to shift the angles of **every**
      gate where the same parameter appears.

    Arguments:
        block: parametrized block into which we want to embed parameters.
        to_gate_params: A boolean flag whether to generate gate-level parameters or
            expression-level parameters.

    Returns:
        A tuple with variational parameter dict and the embedding function.
    """

    # Split unique expressions/symbols into scalar ones and constant matrices
    # (sympy.Array), which are handled separately below.
    unique_expressions = unique(expressions(block))
    unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
    unique_const_matrices = [e for e in unique_expressions if isinstance(e, sympy.Array)]
    unique_expressions = [e for e in unique_expressions if not isinstance(e, sympy.Array)]

    # NOTE
    # there are 3 kinds of parameters in qadence
    # - non-trainable which are considered as inputs for classical data
    # - trainable which are the variational parameters to be optimized
    # - fixed: which are non-trainable parameters with fixed value (e.g. pi/2)
    #
    # both non-trainable and trainable parameters can have the same element applied
    # to different operations in the quantum circuit, e.g. assigning the same parameter
    # to multiple gates.
    non_numeric_symbols = [p for p in unique_symbols if not p.is_number]
    trainable_symbols = [p for p in non_numeric_symbols if p.trainable]
    constant_expressions = [expr for expr in unique_expressions if expr.is_number]
    # we don't need to care about constant symbols if they are contained in a symbolic expression
    # we only care about gate params which are ONLY a constant

    # torchified callables, one per unique non-numeric expression
    embeddings: dict[sympy.Expr, sympytorch.SymPyModule] = {
        expr: torchify(expr) for expr in unique_expressions if not expr.is_number
    }

    uuid_to_expr = uuid_to_expression(block)

    def embedding_fn(params: StrTensorDict, inputs: StrTensorDict) -> StrTensorDict:
        # Evaluate every unique expression with symbol values taken from
        # `inputs` (features) first, then `params` (variational).
        embedded_params: dict[sympy.Expr, Tensor] = {}
        for expr, fn in embeddings.items():
            angle: Tensor
            values = {}
            for symbol in expr.free_symbols:
                if symbol.name in inputs:
                    value = inputs[symbol.name]
                elif symbol.name in params:
                    value = params[symbol.name]
                else:
                    msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
                    raise KeyError(
                        f"{msg_trainable} parameter '{symbol.name}' not found in the "
                        f"inputs list: {list(inputs.keys())} nor the "
                        f"params list: {list(params.keys())}."
+ ) + values[symbol.name] = value + angle = fn(**values) + # do not reshape parameters which are multi-dimensional + # tensors, such as for example generator matrices + if not len(angle.squeeze().shape) > 1: + angle = angle.reshape(-1) + embedded_params[expr] = angle + + for e in constant_expressions + unique_const_matrices: + embedded_params[e] = params[stringify(e)] + + if to_gate_params: + gate_lvl_params: StrTensorDict = {} + for uuid, e in uuid_to_expr.items(): + gate_lvl_params[uuid] = embedded_params[e] + return gate_lvl_params + else: + return {stringify(k): v for k, v in embedded_params.items()} + + params: StrTensorDict + params = {p.name: torch.tensor([p.value], requires_grad=True) for p in trainable_symbols} + params.update( + { + stringify(expr): torch.tensor([evaluate(expr)], requires_grad=False) + for expr in constant_expressions + } + ) + params.update( + { + stringify(expr): torch.tensor( + np.array(expr.tolist(), dtype=np.cdouble), requires_grad=False + ) + for expr in unique_const_matrices + } + ) + return params, embedding_fn diff --git a/qadence/blocks/manipulate.py b/qadence/blocks/manipulate.py new file mode 100644 index 000000000..cf73b158d --- /dev/null +++ b/qadence/blocks/manipulate.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +from functools import reduce, singledispatch + +from openfermion import QubitOperator +from openfermion.utils import count_qubits + +from qadence import operations +from qadence.blocks import AbstractBlock, AddBlock, CompositeBlock, PrimitiveBlock, ScaleBlock +from qadence.blocks.utils import add, kron +from qadence.parameters import evaluate + + +@singledispatch +def to_openfermion(block: AbstractBlock) -> QubitOperator: + raise ValueError(f"Unable to convert type {type(block)} to QubitOperator.") + + +@to_openfermion.register +def _(block: PrimitiveBlock) -> QubitOperator: + pauli, qubit = block.name, block.qubit_support[0] + return QubitOperator(f"{pauli}{qubit}") + + +@to_openfermion.register +def 
_(block: operations.I) -> QubitOperator: + return QubitOperator("") + + +@to_openfermion.register +def _(block: CompositeBlock) -> QubitOperator: + return reduce(lambda x, y: x * y, [to_openfermion(b) for b in block.blocks]) + + +@to_openfermion.register +def _(block: AddBlock) -> QubitOperator: + return reduce(lambda x, y: x + y, [to_openfermion(b) for b in block.blocks]) + + +@to_openfermion.register +def _(block: ScaleBlock) -> QubitOperator: + op = to_openfermion(block.block) + return op * evaluate(block.parameters.parameter) + + +@to_openfermion.register +def _(block: AbstractBlock) -> QubitOperator: + return to_openfermion(block) + + +def from_openfermion(op: QubitOperator) -> AbstractBlock: + n_qubits = count_qubits(op) + + def _convert_gate(gate: tuple[int, str]) -> PrimitiveBlock: + (i, pauli) = gate + return getattr(operations, pauli)(i) # type: ignore [no-any-return] + + @singledispatch + def _convert(op: QubitOperator) -> AbstractBlock: + if isinstance(op, QubitOperator): + return _convert(op.terms) + else: + raise ValueError(f"Can only conver QubitOperators. 
Found {type(op)}.") + + @_convert.register + def _(op: dict) -> AbstractBlock: + bs = [_convert(term) * coef for term, coef in op.items()] + return add(*bs) if len(bs) > 1 else bs[0] # type: ignore [no-any-return] + + @_convert.register + def _(op: tuple) -> AbstractBlock: + if len(op) == 0: + return operations.I(n_qubits - 1) + bs = [_convert_gate(gate) for gate in op] + return kron(*bs) if len(bs) > 1 else bs[0] # type: ignore [no-any-return] + + return _convert(op) diff --git a/qadence/blocks/matrix.py b/qadence/blocks/matrix.py new file mode 100644 index 000000000..e2f8b9763 --- /dev/null +++ b/qadence/blocks/matrix.py @@ -0,0 +1,119 @@ +from __future__ import annotations + +from functools import cached_property + +import numpy as np +import torch +from torch.linalg import eigvals + +from qadence.blocks import PrimitiveBlock +from qadence.logger import get_logger + +logger = get_logger(__name__) + + +class MatrixBlock(PrimitiveBlock): + """ + Generates a MatrixBlock from a given matrix. + + Arguments: + matrix (torch.Tensor | np.ndarray): The matrix from which to create the MatrixBlock. + qubit_support (tuple[int]): The qubit_support of the block. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + import torch + + from qadence import QuantumCircuit + from qadence.backend import BackendName + from qadence.backends.api import DiffMode + from qadence.blocks.matrix import MatrixBlock + from qadence.models import QuantumModel + from qadence.operations import X, Z + from qadence.states import random_state + + n_qubits = 1 + XMAT = torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble) + state = random_state(n_qubits) + matblock = MatrixBlock(XMAT, (0,)) + + qm_mat = QuantumModel( + circuit=QuantumCircuit(n_qubits, matblock), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + qm = QuantumModel( + circuit=QuantumCircuit(n_qubits, X(0)), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + wf_mat = qm_mat.run({}, state) + exp_mat = qm_mat.expectation({}) + wf = qm.run({}, state) + exp = qm.expectation({}) + + assert torch.all(torch.isclose(wf_mat, wf)) and torch.isclose(exp, exp_mat) + ``` + """ + + name = "MatrixBlock" + matrix: torch.Tensor + + def __init__(self, matrix: torch.Tensor | np.ndarray, qubit_support: tuple[int, ...]) -> None: + if isinstance(matrix, np.ndarray): + matrix = torch.tensor(matrix) + if matrix.ndim == 3 and matrix.size(0) == 1: + matrix = matrix.squeeze(0) + if not matrix.ndim == 2: + raise TypeError("Please provide a 2D matrix.") + if not self.is_square(matrix): + raise TypeError("Please provide a square matrix.") + if not self.is_hermitian(matrix): + logger.warning("Provided matrix is not hermitian.") + if not self.is_unitary(matrix): + logger.warning("Provided matrix is not unitary.") + self.matrix = matrix.clone() + super().__init__(qubit_support) + + @cached_property + def eigenvalues_generator(self) -> torch.Tensor: + return torch.log(self.eigenvalues) * 1j + + @property + def eigenvalues(self) -> torch.Tensor: + ev = eigvals(self.matrix) + _, indices = torch.sort(ev.real) + return ev[indices] + + 
@property + def n_qubits(self) -> int: + return np.log2(self.matrix.size()[0]) # type:ignore[no-any-return] + + @staticmethod + def is_square(m: torch.Tensor) -> bool: + return m.shape[0] == m.shape[1] # type:ignore[no-any-return] + + @staticmethod + def is_hermitian(m: torch.Tensor) -> bool: + return MatrixBlock.is_square(m) and torch.allclose( + m.t().conj(), m + ) # type:ignore[no-any-return] + + @staticmethod + def is_unitary(m: torch.Tensor) -> bool: + if not MatrixBlock.is_square(m): + return False + prod = torch.mm(m, m.t().conj()) + i = torch.eye(m.shape[0], dtype=torch.complex128) + return torch.allclose(prod, i) # type:ignore[no-any-return] + + def expand_to(self, n_qubits: int = 1) -> torch.Tensor: + from qadence.blocks.block_to_tensor import _fill_identities + + if n_qubits > 1: + return _fill_identities( + self.matrix, self.qubit_support, tuple([i for i in range(n_qubits)]) + ) + return self.matrix diff --git a/qadence/blocks/primitive.py b/qadence/blocks/primitive.py new file mode 100644 index 000000000..f1ed4e2f6 --- /dev/null +++ b/qadence/blocks/primitive.py @@ -0,0 +1,439 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import Any, Iterable, Tuple + +import sympy +import torch +from rich.console import Console, RenderableType +from rich.panel import Panel +from rich.tree import Tree + +from qadence.blocks.abstract import AbstractBlock +from qadence.parameters import ( + Parameter, + ParamMap, + evaluate, + extract_original_param_entry, + stringify, +) +from qadence.types import TParameter +from qadence.utils import format_parameter + + +class PrimitiveBlock(AbstractBlock): + """ + Primitive blocks represent elementary unitary operations such as single/multi-qubit gates or + Hamiltonian evolution. See [`qadence.operations`](/qadence/operations.md) for a full list of + primitive blocks. 
+ """ + + name = "PrimitiveBlock" + + def __init__(self, qubit_support: tuple[int, ...]): + self._qubit_support = qubit_support + + @property + def qubit_support(self) -> Tuple[int, ...]: + return self._qubit_support + + def digital_decomposition(self) -> AbstractBlock: + """Decomposition into purely digital gates + + This method returns a decomposition of the Block in a + combination of purely digital single-qubit and two-qubit + 'gates', by manual/custom knowledge of how this can be done efficiently. + :return: + """ + return self + + def __len__(self) -> int: + return 1 + + def __iter__(self) -> Iterable: + yield self + + @property + def depth(self) -> int: + return 1 + + def __ascii__(self, console: Console) -> RenderableType: + return Panel(self._block_title, expand=False) + + def __xor__(self, other: int | AbstractBlock) -> AbstractBlock: + if isinstance(other, int): + from qadence.transpile import repeat + + B = type(self) + (start,) = self.qubit_support + return repeat(B, range(start, start + other)) + else: + raise TypeError(f"PrimitiveBlocks cannot use ^ on type {type(other)}") + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, type(self)): + return self.qubit_support == other.qubit_support + return False + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "qubit_support": self.qubit_support, + "tag": self.tag, + } + + @classmethod + def _from_dict(cls, d: dict) -> PrimitiveBlock: + return cls(*d["qubit_support"]) + + def __hash__(self) -> int: + return hash(self._to_json()) + + @property + def n_qubits(self) -> int: + return max(self.qubit_support) + 1 + + @property + def n_supports(self) -> int: + return len(self.qubit_support) + + +class ParametricBlock(PrimitiveBlock): + """Parameterized primitive blocks""" + + name = "ParametricBlock" + + # a tuple of Parameter's specifies which parameters go into this 
block + parameters: ParamMap + + # any unitary can be written as exp(iH). + # For a parametric block this is particularly interesting and + # is known for most basic 'gates' or analog pulses. + generator: AbstractBlock | Parameter | TParameter | None = None + + @property + def _block_title(self) -> str: + s = super()._block_title + params_str = [] + for p in self.parameters.expressions(): + if p.is_number: + val = evaluate(p) + if isinstance(val, float): + val = round(val, 2) + params_str.append(val) + else: + params_str.append(stringify(p)) + + return s + rf" \[params: {params_str}]" + + @property + def trainable(self) -> bool: + for expr in self.parameters.expressions(): + if expr.is_number: + return False + else: + return any(not p.trainable for p in expr.free_symbols) + return True + + @abstractmethod + def num_parameters(cls) -> int: + """The number of parameters required by the block + + This is a class property since the number of parameters is defined + automatically before instantiating the operation. 
Also, this could + correspond to a larger number of actual user-facing parameters + since any parameter expression is allowed + + Examples: + - RX operation has 1 parameter + - U operation has 3 parameters + - HamEvo has 2 parameters (generator and time evolution) + """ + pass + + def __xor__(self, other: int | AbstractBlock) -> AbstractBlock: + if isinstance(other, AbstractBlock): + return super().__xor__(other) + elif isinstance(other, int): + from qadence.transpile import repeat + + B = type(self) + (start,) = self.qubit_support + (param,) = self.parameters.expressions() + return repeat(B, range(start, start + other), stringify(param)) + else: + raise ValueError(f"PrimitiveBlocks cannot use ^ on type {type(other)}") + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, type(self)): + return ( + self.qubit_support == other.qubit_support + and self.parameters.parameter == other.parameters.parameter + ) + return False + + def __contains__(self, other: object) -> bool: + if not isinstance(other, Parameter): + raise TypeError(f"Cant check if {type(other)} in {type(self)}") + for p in self.parameters.expressions(): + if other in p.free_symbols: + return True + return False + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "qubit_support": self.qubit_support, + "tag": self.tag, + "parameters": self.parameters._to_dict(), + } + + @classmethod + def _from_dict(cls, d: dict) -> ParametricBlock: + params = ParamMap._from_dict(d["parameters"]) + target = d["qubit_support"][0] + return cls(target, params) # type: ignore[call-arg] + + def dagger(self) -> ParametricBlock: # type: ignore[override] + exprs = self.parameters.expressions() + args = tuple(-extract_original_param_entry(param) for param in exprs) + args = args if -1 in self.qubit_support else (*self.qubit_support, *args) + return self.__class__(*args) # type: 
ignore[arg-type] + + +class ScaleBlock(ParametricBlock): + """Scale blocks are created when multiplying a block by a number or parameter. + + Example: + ```python exec="on" source="material-block" result="json" + from qadence import X + + print(X(0) * 2) + ``` + """ + + name = "ScaleBlock" + + block: AbstractBlock + + def __init__(self, block: AbstractBlock, parameter: Any): + self.block = block + # TODO: more meaningful name like `scale`? + self.parameters = ( + parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter) + ) + super().__init__(block.qubit_support) + + def __pow__(self, power: int) -> AbstractBlock: + from qadence.blocks.utils import chain + + expr = self.parameters.parameter + return ScaleBlock(chain(self.block for _ in range(power)), expr**power) + + @property + def qubit_support(self) -> Tuple[int, ...]: + return self.block.qubit_support + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> torch.Tensor: + return self.block.eigenvalues_generator + + @property + def eigenvalues(self) -> torch.Tensor: + return self.block.eigenvalues + + @property + def _block_title(self) -> str: + (scale,) = self.parameters.expressions() + s = rf"\[mul: {format_parameter(scale)}] " + return s + + @property + def n_qubits(self) -> int: + return self.block.n_qubits + + @property + def scale(self) -> sympy.Expr: + (scale,) = self.parameters.expressions() + return scale + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + tree = Tree(self._block_title) + else: + tree = tree.add(self._block_title) + self.block.__rich_tree__(tree) + return tree + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + elif isinstance(other, ScaleBlock): + return ( + self.block == other.block + and self.parameters.parameter == other.parameters.parameter + ) + return False + + def 
__contains__(self, other: object) -> bool: + from qadence.blocks.composite import CompositeBlock + + if isinstance(other, AbstractBlock): + if isinstance(self.block, CompositeBlock) and other in self.block: + return True + else: + return self.block == other + + if isinstance(other, Parameter): + if isinstance(self.block, ParametricBlock) or isinstance(self.block, CompositeBlock): + return other in self.block + return False + else: + raise TypeError( + f"Can not check for containment between {type(self)} and {type(other)}." + ) + + def dagger(self) -> ScaleBlock: + return self.__class__( + self.block, Parameter(-extract_original_param_entry(self.parameters.parameter)) + ) + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "tag": self.tag, + "parameters": self.parameters._to_dict(), + "block": self.block._to_dict(), + } + + @classmethod + def _from_dict(cls, d: dict) -> ScaleBlock: + from qadence import blocks as qadenceblocks + from qadence import operations + + expr = ParamMap._from_dict(d["parameters"]) + block: AbstractBlock + if hasattr(operations, d["block"]["type"]): + block = getattr(operations, d["block"]["type"])._from_dict(d["block"]) + + else: + block = getattr(qadenceblocks, d["block"]["type"])._from_dict(d["block"]) + return cls(block, expr) # type: ignore[arg-type] + + +class TimeEvolutionBlock(ParametricBlock): + """Simple time evolution block with time-independent Hamiltonian + + This class is just a convenience class which is used to label + blocks which contains simple time evolution with time-independent + Hamiltonian operators + """ + + name = "TimeEvolutionBlock" + + @property + def has_parametrized_generator(self) -> bool: + return not isinstance(self.generator, AbstractBlock) + + +class ControlBlock(PrimitiveBlock): + """The abstract ControlBlock""" + + name = "Control" + + def __init__(self, control: tuple[int, ...], target_block: PrimitiveBlock) -> None: + self.blocks = (target_block,) + + # using tuple expansion 
because some control operations could + # have multiple targets, e.g. CSWAP + super().__init__((*control, *target_block.qubit_support)) # target_block.qubit_support[0])) + + @property + def _block_title(self) -> str: + c, t = self.qubit_support + s = f"{self.name}({c},{t})" + return s if self.tag is None else (s + rf" \[tag: {self.tag}]") + + def __ascii__(self, console: Console) -> RenderableType: + raise NotImplementedError + + @property + def n_qubits(self) -> int: + return len(self.qubit_support) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, type(self)): + return self.qubit_support == other.qubit_support and self.blocks[0] == other.blocks[0] + return False + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "qubit_support": self.qubit_support, + "tag": self.tag, + "blocks": [b._to_dict() for b in self.blocks], + } + + @classmethod + def _from_dict(cls, d: dict) -> ControlBlock: + control = d["qubit_support"][0] + target = d["qubit_support"][1] + return cls(control, target) + + +class ParametricControlBlock(ParametricBlock): + """The abstract parametrized ControlBlock""" + + name = "ParameterizedControl" + + def __init__(self, control: tuple[int, ...], target_block: ParametricBlock) -> None: + self.blocks = (target_block,) + self.parameters = target_block.parameters + super().__init__((*control, target_block.qubit_support[0])) + + @property + def eigenvalues_generator(self) -> torch.Tensor: + return torch.empty(0) + + def __ascii__(self, console: Console) -> RenderableType: + raise NotImplementedError + + @property + def n_qubits(self) -> int: + return len(self.qubit_support) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, AbstractBlock): + raise TypeError(f"Cant compare {type(self)} to {type(other)}") + if isinstance(other, type(self)): + return self.qubit_support == other.qubit_support 
and self.blocks[0] == other.blocks[0] + return False + + def _to_dict(self) -> dict: + return { + "type": type(self).__name__, + "qubit_support": self.qubit_support, + "tag": self.tag, + "blocks": [b._to_dict() for b in self.blocks], + } + + @classmethod + def _from_dict(cls, d: dict) -> ParametricControlBlock: + from qadence.serialization import deserialize + + control = d["qubit_support"][0] + target = d["qubit_support"][1] + targetblock = d["blocks"][0] + expr = deserialize(targetblock["parameters"]) + block = cls(control, target, expr) # type: ignore[call-arg] + return block diff --git a/qadence/blocks/utils.py b/qadence/blocks/utils.py new file mode 100644 index 000000000..216cdd401 --- /dev/null +++ b/qadence/blocks/utils.py @@ -0,0 +1,498 @@ +from __future__ import annotations + +import typing +from enum import Enum +from itertools import chain as _flatten +from typing import Generator, List, Type, TypeVar, Union, get_args + +from sympy import Basic, Expr +from torch import Tensor + +from qadence.blocks import ( + AbstractBlock, + AddBlock, + ChainBlock, + CompositeBlock, + KronBlock, + ParametricBlock, + PrimitiveBlock, + PutBlock, + ScaleBlock, + TimeEvolutionBlock, +) +from qadence.blocks.analog import AnalogBlock, AnalogComposite, ConstantAnalogRotation, WaitBlock +from qadence.blocks.analog import chain as analog_chain +from qadence.blocks.analog import kron as analog_kron +from qadence.errors import NotPauliBlockError +from qadence.logger import get_logger +from qadence.parameters import Parameter + +logger = get_logger(__name__) + + +TPrimitiveBlock = TypeVar("TPrimitiveBlock", bound=PrimitiveBlock) +TCompositeBlock = TypeVar("TCompositeBlock", bound=CompositeBlock) + + +def _construct( + Block: Type[TCompositeBlock], + args: tuple[Union[AbstractBlock, Generator, List[AbstractBlock]], ...], +) -> TCompositeBlock: + if len(args) == 1 and isinstance(args[0], Generator): + args = tuple(args[0]) + return Block([b for b in args]) # type: ignore [arg-type] 
def chain(*args: Union[AbstractBlock, Generator, List[AbstractBlock]]) -> ChainBlock:
    """Chain blocks sequentially. On digital backends this can be interpreted
    loosely as a matrix multiplication of blocks. In the analog case it chains
    blocks in time.

    Arguments:
        *args: Blocks to chain. Can also be a generator.

    Returns:
        ChainBlock

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence import X, Y, chain

    b = chain(X(0), Y(0))

    # or use a generator
    b = chain(X(i) for i in range(3))
    print(b)
    ```
    """
    # ugly hack to use `AnalogChain` if we are dealing only with analog blocks
    if len(args) and all(
        isinstance(a, AnalogBlock) or isinstance(a, AnalogComposite) for a in args
    ):
        return analog_chain(*args)  # type: ignore[return-value,arg-type]
    return _construct(ChainBlock, args)


def kron(*args: Union[AbstractBlock, Generator]) -> KronBlock:
    """Stack blocks vertically. On digital backends this can be interpreted
    loosely as a Kronecker product of blocks. In the analog case it executes
    blocks parallel in time.

    Arguments:
        *args: Blocks to kron. Can also be a generator.

    Returns:
        KronBlock

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence import X, Y, kron

    b = kron(X(0), Y(1))

    # or use a generator
    b = kron(X(i) for i in range(3))
    print(b)
    ```
    """
    # ugly hack to use `AnalogKron` if we are dealing only with analog blocks
    if len(args) and all(
        isinstance(a, AnalogBlock) or isinstance(a, AnalogComposite) for a in args
    ):
        return analog_kron(*args)  # type: ignore[return-value,arg-type]
    return _construct(KronBlock, args)


def add(*args: Union[AbstractBlock, Generator]) -> AddBlock:
    """Sums blocks.

    Arguments:
        *args: Blocks to add. Can also be a generator.

    Returns:
        AddBlock

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence import X, Y, add

    b = add(X(0), Y(0))

    # or use a generator
    b = add(X(i) for i in range(3))
    print(b)
    ```
    """
    return _construct(AddBlock, args)


def tag(block: AbstractBlock, tag: str) -> AbstractBlock:
    """Attach a human-readable tag to `block` and return the same block."""
    block.tag = tag
    return block


def put(block: AbstractBlock, min_qubit: int, max_qubit: int) -> PutBlock:
    """Place `block` on the contiguous qubit range [min_qubit, max_qubit].

    The block's support is first shifted so it starts at qubit 0, then the
    shifted block is wrapped in a `PutBlock` on the requested range.
    """
    from qadence.transpile import reassign

    support = tuple(range(min(block.qubit_support), max(block.qubit_support) + 1))
    shifted_block = reassign(block, {i: i - min(support) for i in support})
    return PutBlock(shifted_block, tuple(range(min_qubit, max_qubit + 1)))


def primitive_blocks(block: AbstractBlock) -> List[PrimitiveBlock]:
    """Extract the primitive blocks from a `CompositeBlock`.
    In the case of an `AddBlock`, the `AddBlock` is considered primitive.

    Args:
        block: The block to extract primitives from.
    Returns:
        List[PrimitiveBlock]
    """

    if isinstance(block, ScaleBlock):
        return primitive_blocks(block.block)

    elif isinstance(block, PrimitiveBlock):
        return [block]

    elif isinstance(block, CompositeBlock):
        return list(_flatten(*(primitive_blocks(b) for b in block.blocks)))

    else:
        raise NotImplementedError(f"Non-supported operation of type {type(block)}")


def get_pauli_blocks(block: AbstractBlock, raises: bool = False) -> List[PrimitiveBlock]:
    """Extract Pauli operations from an arbitrary input block

    Args:
        block (AbstractBlock): The input block to extract Pauli operations from
        raises (bool, optional): Raise an exception if the block contains something
            else than Pauli blocks.

    Returns:
        List[PrimitiveBlock]: List of Pauli operations
    """
    from qadence import operations

    paulis = []
    for b in primitive_blocks(block):
        if isinstance(b, get_args(operations.TPauliBlock)):
            paulis.append(b)
        else:
            if raises:
                raise NotPauliBlockError(f"{b.name} is not a Pauli operation")
            continue

    return paulis


def parameters(block: AbstractBlock) -> list[Parameter | Basic]:
    """Extract the Parameters of a block.

    Constant expressions (no free symbols) are returned as-is; otherwise
    each free symbol of the expression is collected individually.
    """
    params = []
    exprs = uuid_to_expression(block).values()
    for expr in exprs:
        symbols = list(expr.free_symbols)
        if len(symbols) == 0:
            # assert expr.is_number or isinstance(expr, sympy.Matrix)
            params.append(expr)
        else:
            for s in symbols:
                params.append(s)
    return params


def uuid_to_block(
    block: AbstractBlock, d: dict[str, Expr] | None = None
) -> dict[str, ParametricBlock]:
    """Map every gate-level parameter uuid in `block` to the block owning it.

    Args:
        block: Block to traverse recursively.
        d: Accumulator used during recursion; callers normally omit it.

    Returns:
        dict[str, ParametricBlock]: uuid -> owning (parametric) block.
    """
    from qadence import operations

    d = {} if d is None else d

    if isinstance(block, ScaleBlock):
        (uuid,) = block.parameters.uuids()
        d[uuid] = block
        uuid_to_block(block.block, d)

    # special analog cases should go away soon
    elif isinstance(block, (WaitBlock, ConstantAnalogRotation, operations.AnalogEntanglement)):
        for uuid in block.parameters.uuids():
            d[uuid] = block

    elif isinstance(block, CompositeBlock) or isinstance(block, AnalogComposite):
        for b in block.blocks:
            d = uuid_to_block(b, d)

    elif isinstance(block, ParametricBlock):
        # a parametric generator (e.g. HamEvo over a parametric block) can
        # itself carry uuids, so recurse into it first
        if isinstance(block, TimeEvolutionBlock) and isinstance(block.generator, AbstractBlock):
            d = uuid_to_block(block.generator, d)
        for uuid in block.parameters.uuids():
            d[uuid] = block

    elif isinstance(block, PrimitiveBlock):
        # non-parametric leaves contribute nothing
        return d

    else:
        raise NotImplementedError(f"'uuid_to_block' is not implemented for block: {type(block)}")

    return d


def uuid_to_expression(block: AbstractBlock) -> dict[str, Basic]:
    """Map every gate-level parameter uuid in `block` to its sympy expression."""
    return {k: v.parameters._uuid_dict[k] for k, v in uuid_to_block(block).items()}


def expression_to_uuids(block: AbstractBlock) -> dict[Expr, list[str]]:
    """Creates a mapping between unique expressions and gate-level param_ids
    of blocks using that expression."""

    uuid_to_expr = uuid_to_expression(block)
    expr_to_uuid: dict[Expr, list[str]] = {}
    for uuid, expr in uuid_to_expr.items():
        expr_to_uuid.setdefault(expr, []).append(uuid)

    return expr_to_uuid


def uuid_to_eigen(block: AbstractBlock) -> dict[str, Tensor]:
    """Creates a mapping between a parametric block's param_id and its eigenvalues.
    This method is needed for constructing the PSR rules for a given block.

    !!! warning
        Will ignore eigenvalues of AnalogBlocks that are not yet computed.
    """

    result = {}
    for uuid, b in uuid_to_block(block).items():
        if b.eigenvalues_generator is not None:
            if b.eigenvalues_generator.numel() > 0:
                result[uuid] = b.eigenvalues_generator

                # leave only angle parameter uuid with eigenvals for
                # ConstantAnalogRotation block
                if isinstance(block, ConstantAnalogRotation):
                    break

    return result


def expressions(block: AbstractBlock) -> list[Basic]:
    """Extract the expressions sitting in the 'parameters' field of a ParametricBlock.

    Each element of 'parameters' is a sympy expression which can be a constant,
    a single parameter or an expression consisting of both symbols and constants."""
    return list(set(uuid_to_expression(block).values()))


def block_is_qubit_hamiltonian(block: AbstractBlock) -> bool:
    """True if `block` is composed solely of Pauli operations."""
    try:
        _ = get_pauli_blocks(block, raises=True)
        return True
    except NotPauliBlockError:
        return False


def _support_primitive_block(
    block: PrimitiveBlock, support: dict[int, set[str]]
) -> dict[int, set[str]]:
    """Record which Pauli names act on the block's (single) support qubit."""
    pauli = block.name.value if isinstance(block.name, Enum) else block.name
    index = block.qubit_support[0]
    if index in support.keys():
        support[index].add(pauli)
    else:
        # bugfix: `set(pauli)` would split a multi-character gate name into
        # a set of its characters; use a one-element set literal instead
        support[index] = {pauli}

    return support


def _check_commutation(block: AbstractBlock, support: dict[int, set[str]] | None = None) -> dict:
    """Collect, per qubit, the set of Pauli names acting on it (recursively)."""
    # avoid circular import

    if support is None:
        support = {}

    if isinstance(block, AddBlock) or isinstance(block, KronBlock):
        for subblock in block.blocks:
            support = dict(_check_commutation(subblock, support=support))

    elif isinstance(block, ScaleBlock):
        support = dict(_check_commutation(block.block, support=support))

    elif isinstance(block, PrimitiveBlock):
        support = dict(_support_primitive_block(block, support))

    else:
        raise TypeError("Original block was not a Pauli-based QubitHamiltonian!")

    return support


def block_is_commuting_hamiltonian(block: AbstractBlock) -> bool:
    """Check whether a Pauli block is composed by commuting set of operators

    Args:
        block (AbstractBlock): The Pauli block

    Returns:
        bool: flag which tells whether all the elements in the
            Pauli block are commuting or not
    """
    assert block_is_qubit_hamiltonian(block), "Only working for Pauli blocks"
    # terms commute iff no qubit sees more than one distinct Pauli
    support = _check_commutation(block)
    for v in support.values():
        if len(v) > 1:
            return False
    return True


def get_block_by_uuid(block: AbstractBlock, uuid: str) -> ParametricBlock:
    """Return the parametric block owning the given gate-level parameter uuid."""
    return uuid_to_block(block)[uuid]


def get_blocks_by_expression(
    block: AbstractBlock, expr: Union[Parameter, Expr]
) -> list[AbstractBlock]:
    """Return all blocks whose parameters use the given expression."""
    expr_to_uuids = expression_to_uuids(block)
    uuid_to_blk = uuid_to_block(block)
    return [uuid_to_blk[uuid] for uuid in expr_to_uuids[expr]]


def has_duplicate_vparams(block: AbstractBlock) -> bool:
    """Check if the given block has duplicated variational parameters

    Args:
        block (AbstractBlock): The block to check

    Returns:
        bool: A boolean indicating whether the block has
            duplicated parameters or not
    """
    params = parameters(block)
    non_number = [p for p in params if not p.is_number]
    trainables = [p for p in non_number if p.trainable]
    return len(set(trainables)) != len(trainables)


@typing.no_type_check
def unroll_block_with_scaling(
    block: AbstractBlock, block_list: list[AbstractBlock] | None = None
) -> list[tuple[AbstractBlock, Basic]]:
    """Extract a set of terms in the given block with corresponding scales

    This function takes an input block and extracts a list of operations
    with corresponding scaling factors.

    For example, consider the following block:
    b = 2. * Z(0) * Z(1) + 3. * (kron(X(0), X(1)) + kron(Y(0), Y(1)))

    This function will return the following list of tuples:
    res = [
        (Z(0) * Z(1), 2.),
        (kron(X(0), X(1)), 3.),
        (kron(Y(0), Y(1)), 3.),
    ]

    Args:
        block (AbstractBlock): the given block
        block_list (list[AbstractBlock], optional): A list of blocks to which append the
            found terms. If None, an empty list is returned.

    Raises:
        TypeError: If the given block does not respect the expected format

    Returns:
        list[tuple[AbstractBlock, Basic]]: list of (block, scale) pairs
    """

    # Avoid circular imports.

    def _add_block(
        add_block: AddBlock, blist: list[tuple[AbstractBlock, float]]
    ) -> list[tuple[AbstractBlock, float]]:
        for b in add_block.blocks:
            blist = unroll_block_with_scaling(b, block_list=blist)
        return blist

    if block_list is None:
        block_list = []

    if isinstance(block, ScaleBlock):
        scaled_block = block.block
        scale: Expr = block.scale

        if not isinstance(scaled_block, AddBlock):
            block_list.append((block.block, scale))

        else:
            # distribute the outer scale over every term of the inner sum
            idx = len(block_list)
            block_list = _add_block(scaled_block, block_list)
            # param_id = block.param_id

            for i in range(idx, len(block_list)):
                (b, mul) = block_list[i]
                fact = scale * mul
                # make sure it gets picked up correctly in the parameters dictionary
                # if hasattr(b, 'param_id'):
                #     b.param_id = param_id

                if not mul.is_number:
                    logger.warning(
                        """
Nested add block with multiple variational parameters. This might cause undefined behavior.
Consider rewriting your block as a single AddBlock instance.

For example, if you want to define a parametric observable with multiple variational
parameters, you should make sure that each parametric operation in the
AddBlock is defined separately. To make it more clear, you should write your
block in the following way:

> theta1 = VariationalParameter("theta1")
> theta2 = VariationalParameter("theta2")
>
> generator = theta1 * kron(X(0), X(1)) + theta1 * theta2 * kron(Z(2), Z(3))

and NOT this way:

> theta1 = VariationalParameter("theta1")
> theta2 = VariationalParameter("theta2")
>
> generator = theta1 * (kron(X(0), X(1)) + theta2 * kron(Z(2), Z(3)))
"""
                    )

                block_list[i] = (b, fact)

        return block_list

    elif (
        isinstance(block, KronBlock)
        or isinstance(block, ChainBlock)
        or isinstance(block, PrimitiveBlock)
    ):
        block_list.append((block, Parameter(1.0)))
        return block_list

    elif isinstance(block, AddBlock):
        return _add_block(block, block_list)

    else:
        raise TypeError(
            "Input block has an invalid type! It "
            "should be either a ScaleBlock or one of Add, Chain "
            f"and Kron blocks. Got {type(block)}."
        )


def assert_same_block(b1: AbstractBlock, b2: AbstractBlock) -> None:
    """Assert two blocks match in type, name, support and (if parametric) parameters."""
    assert type(b1) == type(
        b2
    ), f"Block {b1} is not the same type ({type(b1)}) as Block {b2} ({type(b2)})"
    assert b1.name == b2.name, f"Block {b1} and {b2} don't have the same names!"
    assert (
        b1.qubit_support == b2.qubit_support
    ), f"Block {b1} and {b2} don't have the same qubit support!"
    if isinstance(b1, ParametricBlock) and isinstance(
        b2, ParametricBlock
    ):  # if the block is parametric, we can check some additional things
        assert len(b1.parameters.items()) == len(
            b2.parameters.items()
        ), f"Blocks {b1} and {b2} have differing numbers of parameters."
        for p1, p2 in zip(b1.parameters.expressions(), b2.parameters.expressions()):
            assert p1 == p2
# ============================================================================
# qadence/circuit.py  (new file in the original diff)
# ============================================================================
from __future__ import annotations

from dataclasses import dataclass
from itertools import chain as flatten
from pathlib import Path
from typing import Iterable

from sympy import Array, Basic

from qadence.blocks import AbstractBlock, AnalogBlock, CompositeBlock, chain
from qadence.blocks.utils import parameters, primitive_blocks
from qadence.parameters import Parameter
from qadence.register import Register

# Modules to be automatically added to the qadence namespace
__all__ = ["QuantumCircuit"]


@dataclass(eq=False)  # Avoid unhashability errors due to mutable attributes.
class QuantumCircuit:
    """A QuantumCircuit instance is completely abstract and it needs to be passed to a quantum
    backend in order to be executed.
    """

    block: AbstractBlock
    register: Register

    def __init__(self, support: int | Register, *blocks: AbstractBlock):
        """
        Arguments:
            support: `Register` or number of qubits. If an integer is provided, a register is
                constructed with `Register.all_to_all(x)`
            *blocks: (Possibly multiple) blocks to construct the circuit from.
        """
        self.block = chain(*blocks) if len(blocks) != 1 else blocks[0]
        self.register = Register(support) if isinstance(support, int) else support

        # a global analog block may legitimately span more qubits than the register
        global_block = isinstance(self.block, AnalogBlock) and self.block.qubit_support.is_global
        if not global_block and len(self.block) and self.block.n_qubits > self.register.n_qubits:
            raise ValueError(
                f"Register with {self.register.n_qubits} qubits is too small for the "
                f"given block with {self.block.n_qubits} qubits"
            )

    @property
    def n_qubits(self) -> int:
        return self.register.n_qubits

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, QuantumCircuit):
            raise TypeError(f"Cannot compare {type(self)} to {type(other)}.")
        if self.block != other.block:  # type: ignore[call-overload]
            return False
        if self.register != other.register:
            return False
        return True

    def __hash__(self) -> int:
        # hash the JSON serialization so equal circuits hash equally
        return hash(self._to_json())

    def __iter__(self) -> Iterable:
        if isinstance(self.block, CompositeBlock):
            yield from self.block
        else:
            yield self.block

    def __contains__(self, other: object) -> bool:
        if isinstance(other, AbstractBlock):
            if isinstance(self.block, CompositeBlock):
                return other in self.block
            else:
                return other == self.block
        elif isinstance(other, Parameter):
            return other in self.unique_parameters
        else:
            raise TypeError(f"Cannot compare {type(self)} to {type(other)}.")

    @property
    def unique_parameters(self) -> list[Parameter]:
        """Return the unique parameters in the circuit

        These parameters are the actual user-facing parameters which
        can be assigned by the user. Multiple gates can contain the
        same unique parameter

        Returns:
            list[Parameter]: List of unique parameters in the circuit
        """
        symbols = []
        for p in parameters(self.block):
            if isinstance(p, Array):
                continue
            elif not p.is_number and p not in symbols:
                symbols.append(p)
        return symbols

    @property
    def num_unique_parameters(self) -> int:
        return len(self.unique_parameters) if self.unique_parameters else 0

    @property
    def num_parameters(self) -> int:
        return len(self.parameters())

    def parameters(self) -> list[Parameter | Basic] | list[tuple[Parameter | Basic, ...]]:
        """Extract all parameters for primitive blocks in the circuit

        Notice that this function returns all the unique Parameters used
        in the quantum circuit. These can correspond to constants too.

        Returns:
            List[tuple[Parameter]]: A list of tuples containing the Parameter
                instance of each of the primitive blocks in the circuit
        """
        return parameters(self.block)

    def get_blocks_by_tag(self, tag: str) -> list[AbstractBlock]:
        """Extract one or more blocks using the human-readable tag

        This function recursively explores all composite blocks to find
        all the occurrences of a certain tag in the blocks

        Args:
            tag (str): the tag to look for

        Returns:
            list[AbstractBlock]: The block(s) corresponding to the given tag
        """

        def _get_block(block: AbstractBlock) -> list[AbstractBlock]:
            blocks = []
            if block.tag == tag:
                blocks += [block]
            if isinstance(block, CompositeBlock):
                blocks += flatten(*[_get_block(b) for b in block.blocks])
            return blocks

        return _get_block(self.block)

    def is_empty(self) -> bool:
        return len(primitive_blocks(self.block)) == 0

    def serialize(self) -> str:
        raise NotImplementedError

    @staticmethod
    def deserialize(json: str) -> QuantumCircuit:
        raise NotImplementedError

    def __repr__(self) -> str:
        return self.block.__repr__()

    def _to_dict(self) -> dict:
        return {
            "block": self.block._to_dict(),
            "register": self.register._to_dict(),
        }

    def _to_json(self, path: Path | str | None = None) -> str:
        import json

        qc_dumped = json.dumps(self._to_dict())
        if path is not None:
            path = Path(path)
            try:
                with open(path, "w") as file:
                    file.write(qc_dumped)
            except Exception as e:
                print(f"Unable to write QuantumCircuit to disk due to {e}")

        return qc_dumped

    @classmethod
    def _from_dict(cls, d: dict) -> QuantumCircuit:
        from qadence import blocks as qadenceblocks
        from qadence import operations

        # the root block type may live either in `operations` or in `blocks`
        RootBlock = (
            getattr(operations, d["block"]["type"])
            if hasattr(operations, d["block"]["type"])
            else getattr(qadenceblocks, d["block"]["type"])
        )

        return QuantumCircuit(
            Register._from_dict(d["register"]),
            RootBlock._from_dict(d["block"]),
        )

    @classmethod
    def _from_json(cls, path: str | Path) -> QuantumCircuit:
        import json

        loaded_dict: dict = {}
        if isinstance(path, str):
            path = Path(path)
        try:
            with open(path, "r") as file:
                loaded_dict = json.load(file)

        except Exception as e:
            # NOTE(review): on failure this falls through to _from_dict({})
            # which raises KeyError — consider re-raising instead
            print(f"Unable to load QuantumCircuit due to {e}")

        return QuantumCircuit._from_dict(loaded_dict)


# ============================================================================
# qadence/constructors/__init__.py  (new file in the original diff)
# ============================================================================
# flake8: noqa

from .feature_maps import (
    feature_map,
    chebyshev_feature_map,
    fourier_feature_map,
    tower_feature_map,
    exp_fourier_feature_map,
)

from .ansatze import hea, build_qnn

from .daqc import daqc_transform

from .hamiltonians import (
    hamiltonian_factory,
    ising_hamiltonian,
    single_z,
    total_magnetization,
    zz_hamiltonian,
)

from .qft import qft

# Modules to be automatically added to the qadence namespace
__all__ = [
    "feature_map",
    "chebyshev_feature_map",
    "fourier_feature_map",
    "tower_feature_map",
    "exp_fourier_feature_map",
    "hea",
    "build_qnn",
    "hamiltonian_factory",
    "ising_hamiltonian",
    "single_z",
    "total_magnetization",
    "zz_hamiltonian",
    "qft",
    "daqc_transform",
]
"tower_feature_map", + "exp_fourier_feature_map", + "hea", + "build_qnn", + "hamiltonian_factory", + "ising_hamiltonian", + "single_z", + "total_magnetization", + "zz_hamiltonian", + "qft", + "daqc_transform", +] diff --git a/qadence/constructors/ansatze.py b/qadence/constructors/ansatze.py new file mode 100644 index 000000000..e96b7d21f --- /dev/null +++ b/qadence/constructors/ansatze.py @@ -0,0 +1,385 @@ +from __future__ import annotations + +import itertools +from typing import Any, Optional, Type, Union + +from qadence.blocks import AbstractBlock, block_is_qubit_hamiltonian, chain, kron, tag +from qadence.operations import CNOT, CPHASE, CRX, CRY, CRZ, CZ, RX, RY, HamEvo +from qadence.types import Interaction, Strategy + +from .hamiltonians import hamiltonian_factory +from .utils import build_idx_fms + +DigitalEntanglers = Union[CNOT, CZ, CRZ, CRY, CRX] + + +def hea( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + support: tuple[int, ...] = None, + strategy: Strategy = Strategy.DIGITAL, + **strategy_args: Any, +) -> AbstractBlock: + """ + Factory function for the Hardware Efficient Ansatz (HEA). + + Args: + n_qubits: number of qubits in the block + depth: number of layers of the HEA + param_prefix: the base name of the variational parameters + support: qubit indexes where the HEA is applied + strategy: Strategy.Digital or Strategy.DigitalAnalog + **strategy_args: see below + + Keyword Arguments: + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. Valid for + Digital and DigitalAnalog HEA. + periodic (bool): if the qubits should be linked periodically. + periodic=False is not supported in emu-c. Valid for only + for Digital HEA. + entangler (AbstractBlock): + - Digital: 2-qubit entangling operation. Supports CNOT, CZ, + CRX, CRY, CRZ, CPHASE. Controlled rotations will have variational + parameters on the rotation angles. 
+ - DigitaAnalog | Analog: Hamiltonian generator for the + analog entangling layer. Defaults to global ZZ Hamiltonian. + Time parameter is considered variational. + + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import RZ, RX + from qadence import hea + + # create the circuit + n_qubits, depth = 2, 4 + ansatz = hea( + n_qubits=n_qubits, + depth=depth, + strategy="sDAQC", + operations=[RZ,RX,RZ] + ) + ``` + """ + + if support is None: + support = tuple(range(n_qubits)) + + hea_func_dict = { + Strategy.DIGITAL: hea_digital, + Strategy.SDAQC: hea_sDAQC, + Strategy.BDAQC: hea_bDAQC, + Strategy.ANALOG: hea_analog, + } + + try: + hea_func = hea_func_dict[strategy] + except KeyError: + raise KeyError(f"Strategy {strategy} not recognized.") + + hea_block: AbstractBlock = hea_func( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + **strategy_args, + ) # type: ignore + + return hea_block + + +############# +## DIGITAL ## +############# + + +def _rotations_digital( + n_qubits: int, + depth: int, + param_prefix: str = "theta", + support: tuple[int, ...] = None, + operations: list[Type[AbstractBlock]] = [RX, RY, RX], +) -> list[AbstractBlock]: + """ + Creates the layers of single qubit rotations in an HEA. 
+ """ + if support is None: + support = tuple(range(n_qubits)) + iterator = itertools.count() + rot_list: list[AbstractBlock] = [] + for d in range(depth): + rots = [ + kron( + gate(support[n], param_prefix + f"_{next(iterator)}") # type: ignore [arg-type] + for n in range(n_qubits) + ) + for gate in operations + ] + rot_list.append(chain(*rots)) + return rot_list + + +def _entangler( + control: int, + target: int, + param_str: str, + op: Type[DigitalEntanglers] = CNOT, +) -> AbstractBlock: + if op in [CNOT, CZ]: + return op(control, target) # type: ignore + elif op in [CRZ, CRY, CRX, CPHASE]: + return op(control, target, param_str) # type: ignore + else: + raise ValueError("Provided entangler not accepted for digital HEA.") + + +def _entanglers_digital( + n_qubits: int, + depth: int, + param_prefix: str = "theta", + support: tuple[int, ...] = None, + periodic: bool = False, + entangler: Type[DigitalEntanglers] = CNOT, +) -> list[AbstractBlock]: + """ + Creates the layers of digital entangling operations in an HEA. + """ + if support is None: + support = tuple(range(n_qubits)) + iterator = itertools.count() + ent_list: list[AbstractBlock] = [] + for d in range(depth): + ents = [] + ents.append( + kron( + _entangler( + control=support[n], + target=support[n + 1], + param_str=param_prefix + f"_ent_{next(iterator)}", + op=entangler, + ) + for n in range(n_qubits) + if not n % 2 and n < n_qubits - 1 + ) + ) + if n_qubits > 2: + ents.append( + kron( + _entangler( + control=support[n], + target=support[(n + 1) % n_qubits], + param_str=param_prefix + f"_ent_{next(iterator)}", + op=entangler, + ) + for n in range(n_qubits - (not periodic)) + if n % 2 + ) + ) + ent_list.append(chain(*ents)) + return ent_list + + +def hea_digital( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + periodic: bool = False, + operations: list[type[AbstractBlock]] = [RX, RY, RX], + support: tuple[int, ...] 
= None, + entangler: Type[DigitalEntanglers] = CNOT, +) -> AbstractBlock: + """ + Construct the Digital Hardware Efficient Ansatz (HEA). + + Args: + n_qubits (int): number of qubits in the block. + depth (int): number of layers of the HEA. + param_prefix (str): the base name of the variational parameters + periodic (bool): if the qubits should be linked periodically. + periodic=False is not supported in emu-c. + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + support (tuple): qubit indexes where the HEA is applied. + entangler (AbstractBlock): 2-qubit entangling operation. + Supports CNOT, CZ, CRX, CRY, CRZ. Controlld rotations + will have variational parameters on the rotation angles. + """ + try: + if entangler not in [CNOT, CZ, CRX, CRY, CRZ, CPHASE]: + raise ValueError( + "Please provide a valid two-qubit entangler operation for digital HEA." + ) + except TypeError: + raise ValueError("Please provide a valid two-qubit entangler operation for digital HEA.") + + rot_list = _rotations_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + operations=operations, + ) + + ent_list = _entanglers_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + periodic=periodic, + entangler=entangler, + ) + + layers = [] + for d in range(depth): + layers.append(rot_list[d]) + layers.append(ent_list[d]) + return tag(chain(*layers), "HEA") + + +########### +## sDAQC ## +########### + + +def _entanglers_analog( + depth: int, + param_prefix: str = "theta", + entangler: AbstractBlock | None = None, +) -> list[AbstractBlock]: + return [HamEvo(entangler, param_prefix + f"_t_{d}") for d in range(depth)] + + +def hea_sDAQC( + n_qubits: int, + depth: int = 1, + param_prefix: str = "theta", + operations: list[type[AbstractBlock]] = [RX, RY, RX], + support: tuple[int, ...] 
= None, + entangler: AbstractBlock | None = None, +) -> AbstractBlock: + """ + Construct the Hardware Efficient Ansatz (HEA) with analog entangling layers + using step-wise digital-analog computation. + + Args: + n_qubits (int): number of qubits in the block. + depth (int): number of layers of the HEA. + param_prefix (str): the base name of the variational parameters + operations (list): list of operations to cycle through in the + digital single-qubit rotations of each layer. + support (tuple): qubit indexes where the HEA is applied. + entangler (AbstractBlock): Hamiltonian generator for the + analog entangling layer. Defaults to global ZZ Hamiltonian. + Time parameter is considered variational. + """ + + # TODO: Add qubit support + if entangler is None: + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + try: + if not block_is_qubit_hamiltonian(entangler): + raise ValueError( + "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA." + ) + except NotImplementedError: + raise ValueError( + "Please provide a valid Pauli Hamiltonian generator for digital-analog HEA." 
+ ) + + rot_list = _rotations_digital( + n_qubits=n_qubits, + depth=depth, + param_prefix=param_prefix, + support=support, + operations=operations, + ) + + ent_list = _entanglers_analog( + depth=depth, + param_prefix=param_prefix, + entangler=entangler, + ) + + layers = [] + for d in range(depth): + layers.append(rot_list[d]) + layers.append(ent_list[d]) + return tag(chain(*layers), "HEA-sDA") + + +########### +## bDAQC ## +########### + + +def hea_bDAQC(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +############ +## ANALOG ## +############ + + +def hea_analog(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +######### +## QNN ## +######### + + +def build_qnn( + n_qubits: int, + n_features: int, + depth: int = None, + ansatz: Optional[AbstractBlock] = None, + fm_pauli: Type[RY] = RY, + spectrum: str = "simple", + basis: str = "fourier", + fm_strategy: str = "parallel", +) -> list[AbstractBlock]: + """Helper function to build a qadence QNN quantum circuit + + Args: + n_qubits (int): The number of qubits. + n_features (int): The number of input dimensions. + depth (int): The depth of the ansatz. + ansatz (Optional[AbstractBlock]): An optional argument to pass a custom qadence ansatz. + fm_pauli (str): The type of Pauli gate for the feature map. Must be one of 'RX', + 'RY', or 'RZ'. + spectrum (str): The desired spectrum of the feature map generator. The options simple, + tower and exponential produce a spectrum with linear, quadratic and exponential + eigenvalues with respect to the number of qubits. + basis (str): The encoding function. The options fourier and chebyshev correspond to Φ(x)=x + and arcos(x) respectively. + fm_strategy (str): The feature map encoding strategy. If "parallel", the features + are encoded in one block of rotation gates, with each feature given + an equal number of qubits. If "serial", the features are encoded + sequentially, with a HEA block between. 
+ + Returns: + A list of Abstract blocks to be used for constructing a quantum circuit + """ + depth = n_qubits if depth is None else depth + + idx_fms = build_idx_fms(basis, fm_pauli, fm_strategy, n_features, n_qubits, spectrum) + + if fm_strategy == "parallel": + _fm = kron(*idx_fms) + fm = tag(_fm, tag="FM") + + elif fm_strategy == "serial": + fm_components: list[AbstractBlock] = [] + for j, fm_idx in enumerate(idx_fms[:-1]): + fm_idx = tag(fm_idx, tag=f"FM{j}") # type: ignore[assignment] + fm_component = (fm_idx, hea(n_qubits, 1, f"theta_{j}")) + fm_components.extend(fm_component) + fm_components.append(tag(idx_fms[-1], tag=f"FM{len(idx_fms) - 1}")) + fm = chain(*fm_components) # type: ignore[assignment] + + ansatz = hea(n_qubits, depth=depth) if ansatz is None else ansatz + return [fm, ansatz] diff --git a/qadence/constructors/daqc/__init__.py b/qadence/constructors/daqc/__init__.py new file mode 100644 index 000000000..09acab7fb --- /dev/null +++ b/qadence/constructors/daqc/__init__.py @@ -0,0 +1,6 @@ +# flake8: noqa + +from .daqc import daqc_transform + +# Modules to be automatically added to the qadence namespace +__all__ = [] # type: ignore diff --git a/qadence/constructors/daqc/daqc.py b/qadence/constructors/daqc/daqc.py new file mode 100644 index 000000000..44b3addfe --- /dev/null +++ b/qadence/constructors/daqc/daqc.py @@ -0,0 +1,249 @@ +from __future__ import annotations + +import torch + +from qadence.blocks import AbstractBlock, add, chain, kron +from qadence.blocks.utils import block_is_qubit_hamiltonian +from qadence.constructors.hamiltonians import hamiltonian_factory +from qadence.logger import get_logger +from qadence.operations import HamEvo, I, N, X +from qadence.types import GenDAQC, Interaction, Strategy + +from .gen_parser import _check_compatibility, _parse_generator +from .utils import _build_matrix_M, _ix_map + +logger = get_logger(__name__) + + +def daqc_transform( + n_qubits: int, + gen_target: AbstractBlock, + t_f: float, + gen_build: 
AbstractBlock | None = None, + zero_tol: float = 1e-08, + strategy: Strategy = Strategy.SDAQC, + ignore_global_phases: bool = False, +) -> AbstractBlock: + """ + Implements the DAQC transform for representing an arbitrary 2-body Hamiltonian + with another fixed 2-body Hamiltonian. + + Reference for universality of 2-body Hamiltonians: + + -- https://arxiv.org/abs/quant-ph/0106064 + + Based on the transformation for Ising (ZZ) interactions, as described in the paper + + -- https://arxiv.org/abs/1812.03637 + + The transform translates a target weighted generator of the type: + + `gen_target = add(g_jk * kron(op(j), op(k)) for j < k)` + + To a circuit using analog evolutions with a fixed building block generator: + + `gen_build = add(f_jk * kron(op(j), op(k)) for j < k)` + + where `op = Z` or `op = N`. + + Args: + n_qubits: total number of qubits to use. + gen_target: target generator built with the structure above. The type + of the generator will be automatically evaluated when parsing. + t_f (float): total time for the gen_target evolution. + gen_build: fixed generator to act as a building block. Defaults to + constant NN: add(1.0 * kron(N(j), N(k)) for j < k). The type + of the generator will be automatically evaluated when parsing. + zero_tol: default "zero" for a missing interaction. Included for + numerical reasons, see notes below. + strategy: sDAQC or bDAQC, following definitions in the reference paper. + ignore_global_phases: if `True` the transform does not correct the global + phases coming from the mapping between ZZ and NN interactions. + + Notes: + + The paper follows an index convention of running from 1 to N. A few functions + here also use that convention to be consistent with the paper. However, for qadence + related things the indices are converted to [0, N-1]. + + The case for `n_qubits = 4` is an edge case where the sign matrix is not invertible. + There is a workaround for this described in the paper, but it is currently not implemented. 
+ + The current implementation may result in evolution times that are both positive or + negative. In practice, both can be represented by simply changing the signs of the + interactions. However, for a real implementation where the interactions should remain + fixed, the paper discusses a workaround that is not currently implemented. + + The transformation works by representing each interaction in the target hamiltonian by + a set of evolutions using the build hamiltonian. As a consequence, some care must be + taken when choosing the build hamiltonian. Some cases: + + - The target hamiltonian can have any interaction, as long as it is sufficiently + represented in the build hamiltonian. E.g., if the interaction `g_01 * kron(Z(0), Z(1))` + is in the target hamiltonian, the corresponding interaction `f_01 * kron(Z(0), Z(1))` + needs to be in the build hamiltonian. This is checked when the generators are parsed. + + - The build hamiltonian can have any interaction, irrespectively of it being needed + for the target hamiltonian. This is especially useful for designing local operations + through the repeated evolution of a "global" hamiltonian. + + - The parameter `zero_tol` controls what it means for an interaction to be "missing". + Any interaction strength smaller than `zero_tol` in the build hamiltonian will not be + considered, and thus that interaction is missing. + + - The various ratios `g_jk / f_jk` will influence the time parameter for the various + evolution slices, meaning that if there is a big discrepancy in the interaction strength + for a given qubit pair (j, k), the output circuit may require the usage of hamiltonian + evolutions with very large times. + + - A warning will be issued for evolution times larger than `1/sqrt(zero_tol)`. Evolution + times smaller than `zero_tol` will not be represented. 
+ + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import Z, N, daqc_transform + + n_qubits = 3 + + gen_build = 0.5 * (N(0)@N(1)) + 0.7 * (N(1)@N(2)) + 0.2 * (N(0)@N(2)) + + gen_target = 0.1 * (Z(1)@Z(2)) + + t_f = 2.0 + + transformed_circuit = daqc_transform( + n_qubits = n_qubits, + gen_target = gen_target, + t_f = t_f, + gen_build = gen_build, + ) + ``` + """ + + ################## + # Input controls # + ################## + + if strategy != Strategy.SDAQC: + raise NotImplementedError("Currently only the sDAQC transform is implemented.") + + if n_qubits == 4: + raise NotImplementedError("DAQC transform 4-qubit edge case not implemented.") + + if gen_build is None: + gen_build = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + + try: + if (not block_is_qubit_hamiltonian(gen_target)) or ( + not block_is_qubit_hamiltonian(gen_build) + ): + raise ValueError( + "Generator block is not a qubit Hamiltonian. Only ZZ or NN interactions allowed." + ) + except NotImplementedError: + # Happens when block_is_qubit_hamiltonian is called on something that is not a block. + raise TypeError( + "Generator block is not a qubit Hamiltonian. Only ZZ or NN interactions allowed." 
+ ) + + ##################### + # Generator parsing # + ##################### + + g_jk_target, mat_jk_target, target_type = _parse_generator(n_qubits, gen_target, 0.0) + g_jk_build, mat_jk_build, build_type = _parse_generator(n_qubits, gen_build, zero_tol) + + # Get the global phase hamiltonian and single-qubit detuning hamiltonian + if build_type == GenDAQC.NN: + h_phase_build, h_sq_build = _nn_phase_and_detunings(n_qubits, mat_jk_build) + + if target_type == GenDAQC.NN: + h_phase_target, h_sq_target = _nn_phase_and_detunings(n_qubits, mat_jk_target) + + # Time re-scalings + if build_type == GenDAQC.ZZ and target_type == GenDAQC.NN: + t_star = t_f / 4.0 + elif build_type == GenDAQC.NN and target_type == GenDAQC.ZZ: + t_star = 4.0 * t_f + else: + t_star = t_f + + # Check if target Hamiltonian can be mapped with the build Hamiltonian + assert _check_compatibility(g_jk_target, g_jk_build, zero_tol) + + ################## + # DAQC Transform # + ################## + + # Section III A of https://arxiv.org/abs/1812.03637: + + # Matrix M for the linear system, exemplified in Table I: + matrix_M = _build_matrix_M(n_qubits) + + # Linear system mapping interaction ratios -> evolution times. + t_slices = torch.linalg.solve(matrix_M, g_jk_target / g_jk_build) * t_star + + # ZZ-DAQC with ZZ or NN build Hamiltonian + daqc_slices = [] + for m in range(2, n_qubits + 1): + for n in range(1, m): + alpha = _ix_map(n_qubits, n, m) + t = t_slices[alpha - 1] + if abs(t) > zero_tol: + if abs(t) > (1 / (zero_tol**0.5)): + logger.warning( + """ +Transformed circuit with very long evolution time. 
+Make sure your target interactions are sufficiently +represented in the build Hamiltonian.""" + ) + x_gates = kron(X(n - 1), X(m - 1)) + analog_evo = HamEvo(gen_build, t) + # TODO: Fix repeated X-gates + if build_type == GenDAQC.NN: + # Local detuning at each DAQC layer for NN build Hamiltonian + sq_detuning_build = HamEvo(h_sq_build, t) + daqc_slices.append(chain(x_gates, sq_detuning_build, analog_evo, x_gates)) + elif build_type == GenDAQC.ZZ: + daqc_slices.append(chain(x_gates, analog_evo, x_gates)) + + daqc_circuit = chain(*daqc_slices) + + ######################## + # Phases and Detunings # + ######################## + + if target_type == GenDAQC.NN: + # Local detuning given a NN target Hamiltonian + sq_detuning_target = HamEvo(h_sq_target, t_f).dagger() + daqc_circuit = chain(sq_detuning_target, daqc_circuit) + + if not ignore_global_phases: + if build_type == GenDAQC.NN: + # Constant global phase given a NN build Hamiltonian + global_phase_build = HamEvo(h_phase_build, t_slices.sum()) + daqc_circuit = chain(global_phase_build, daqc_circuit) + + if target_type == GenDAQC.NN: + # Constant global phase and given a NN target Hamiltonian + global_phase_target = HamEvo(h_phase_target, t_f).dagger() + daqc_circuit = chain(global_phase_target, daqc_circuit) + + return daqc_circuit + + +def _nn_phase_and_detunings( + n_qubits: int, + mat_jk: torch.Tensor, +) -> tuple[torch.Tensor, torch.Tensor]: + # Constant global shift, leads to a global phase + global_shift = mat_jk.sum() / 8 + + # Strength of the local detunings + g_sq = mat_jk.sum(0) / 2 + + h_phase = global_shift * kron(I(i) for i in range(n_qubits)) + h_sq = add(-1.0 * g_sq[i] * N(i) for i in range(n_qubits)) + + return h_phase, h_sq diff --git a/qadence/constructors/daqc/gen_parser.py b/qadence/constructors/daqc/gen_parser.py new file mode 100644 index 000000000..d0258e212 --- /dev/null +++ b/qadence/constructors/daqc/gen_parser.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +import torch + +from 
qadence.blocks import AbstractBlock, KronBlock +from qadence.blocks.utils import unroll_block_with_scaling +from qadence.logger import get_logger +from qadence.operations import N, Z +from qadence.parameters import Parameter, evaluate +from qadence.types import GenDAQC + +from .utils import _ix_map + +logger = get_logger(__name__) + + +def _parse_generator( + n_qubits: int, + generator: AbstractBlock, + zero_tol: float, +) -> torch.Tensor: + """ + Parses the input generator to extract the `g_jk` weights + of the Ising model and the respective target qubits `(j, k)`. + """ + + flat_size = int(0.5 * n_qubits * (n_qubits - 1)) + g_jk_list = torch.zeros(flat_size) + g_jk_mat = torch.zeros(n_qubits, n_qubits) + + # This parser is heavily dependent on unroll_block_with_scaling + gen_list = unroll_block_with_scaling(generator) + + # Now we wish to check if generator is of the form: + # `add(g_jk * kron(op(j), op(k)) for j < k)` + # and determine if `op = Z` or `op = N` + + gen_type_Z = [] + gen_type_N = [] + + for block, scale in gen_list: + if isinstance(scale, Parameter): + raise TypeError("DAQC transform does not support parameterized Hamiltonians.") + + # First we check if all relevant blocks (with non-negligible scaling) + # are of type(KronBlock), since we only admit kron(Z, Z) or kron(N, N). + if not isinstance(block, KronBlock): + if abs(scale) < zero_tol: + continue + else: + raise TypeError( + "DAQC transform only supports ZZ or NN interaction Hamiltonians." + "Error found on block: {block}." + ) + + # Next we check and keep track of the contents of each KronBlock + for pauli in block.blocks: + if isinstance(pauli, Z): + gen_type_Z.append(True) + gen_type_N.append(False) + elif isinstance(pauli, N): + gen_type_N.append(True) + gen_type_Z.append(False) + else: + raise ValueError( + "DAQC transform only supports ZZ or NN interaction Hamiltonians." + "Error found on block: {block}." 
+ ) + + # We save the qubit support and interaction + # strength of each KronBlock to be used in DAQC + j, k = block.qubit_support + g_jk = torch.tensor(evaluate(scale), dtype=torch.get_default_dtype()) + + beta = _ix_map(n_qubits, j + 1, k + 1) + + # Flat list of interaction strength + g_jk_list[beta - 1] += g_jk + + # Symmetric matrix of interaction strength + g_jk_mat[j, k] += g_jk + g_jk_mat[k, j] += g_jk + + # Finally we check if all individual interaction terms were + # either ZZ or NN to determine the generator type. + if torch.tensor(gen_type_Z).prod() == 1 and len(gen_type_Z) > 0: + gen_type = GenDAQC.ZZ + elif torch.tensor(gen_type_N).prod() == 1 and len(gen_type_N) > 0: + gen_type = GenDAQC.NN + else: + raise ValueError( + "Wrong Hamiltonian structure provided. " + "Possible mixture of Z and N terms in the Hamiltonian." + ) + + g_jk_list[g_jk_list == 0.0] = zero_tol + + return g_jk_list, g_jk_mat, gen_type + + +def _check_compatibility( + g_jk_target: torch.Tensor, + g_jk_build: torch.Tensor, + zero_tol: float, +) -> bool: + """ + Checks if the build Hamiltonian is missing any interactions needed + for the transformation into the requested target Hamiltonian. 
+ """ + for g_t, g_b in zip(g_jk_target, g_jk_build): + if abs(g_t) > zero_tol and abs(g_b) <= zero_tol: + raise ValueError("Incompatible interactions between target and build Hamiltonians.") + return True diff --git a/qadence/constructors/daqc/utils.py b/qadence/constructors/daqc/utils.py new file mode 100644 index 000000000..44e4f13d8 --- /dev/null +++ b/qadence/constructors/daqc/utils.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import torch + +from qadence.logger import get_logger + +logger = get_logger(__name__) + + +def _k_d(a: int, b: int) -> int: + """Kronecker delta""" + return int(a == b) + + +def _ix_map(n: int, a: int, b: int) -> int: + """Maps `(a, b)` with `b` in [1, n] and `a < b` to range [1, n(n-1)/2]""" + return int(n * (a - 1) - 0.5 * a * (a + 1) + b) + + +def _build_matrix_M(n_qubits: int) -> torch.Tensor: + """Sign matrix used by the DAQC technique for the Ising model.""" + flat_size = int(0.5 * n_qubits * (n_qubits - 1)) + + def matrix_M_ix(j: int, k: int, n: int, m: int) -> float: + return (-1.0) ** (_k_d(n, j) + _k_d(n, k) + _k_d(m, j) + _k_d(m, k)) + + M = torch.zeros(flat_size, flat_size) + for k in range(2, n_qubits + 1): + for j in range(1, k): + for m in range(2, n_qubits + 1): + for n in range(1, m): + alpha = _ix_map(n_qubits, n, m) + beta = _ix_map(n_qubits, j, k) + M[alpha - 1, beta - 1] = matrix_M_ix(j, k, n, m) + return M diff --git a/qadence/constructors/feature_maps.py b/qadence/constructors/feature_maps.py new file mode 100644 index 000000000..b35cee315 --- /dev/null +++ b/qadence/constructors/feature_maps.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from typing import Type, Union + +import numpy as np +import sympy + +from qadence.blocks import AbstractBlock, KronBlock, chain, kron, tag +from qadence.operations import RX, RY, RZ, H +from qadence.parameters import FeatureParameter, Parameter + +Rotation = Union[RX, RY, RZ] + + +def feature_map( + n_qubits: int, + support: tuple[int, ...] 
= None, + param: str = "phi", + op: Type[Rotation] = RX, + fm_type: str = "fourier", +) -> KronBlock: + """Construct a feature map of a given type. + + Arguments: + n_qubits: Number of qubits the feature map covers. Results in `support=range(n_qubits)`. + support: Overrides `n_qubits`. Puts one rotation gate on every qubit in `support`. + param: Parameter of the feature map. + op: Rotation operation of the feature map. + fm_type: Determines the additional expression the final feature parameter (the addtional + term in front of `param`). `"fourier": param` (nothing is done to `param`) + `"chebyshev": 2*acos(param)`, `"tower": (i+1)*2*acos(param)` (where `i` is the qubit + index). + + Example: + ```python exec="on" source="material-block" result="json" + from qadence import feature_map + + fm = feature_map(3, fm_type="fourier") + print(f"{fm = }") + + fm = feature_map(3, fm_type="chebyshev") + print(f"{fm = }") + + fm = feature_map(3, fm_type="tower") + print(f"{fm = }") + ``` + """ + fparam = FeatureParameter(param) + if support is None: + support = tuple(range(n_qubits)) + + assert len(support) <= n_qubits, "Wrong qubit support supplied" + + if fm_type == "fourier": + fm = kron(*[op(qubit, fparam) for qubit in support]) + elif fm_type == "chebyshev": + fm = kron(*[op(qubit, 2 * sympy.acos(fparam)) for qubit in support]) + elif fm_type == "tower": + fm = kron(*[op(qubit, (i + 1) * 2 * sympy.acos(fparam)) for i, qubit in enumerate(support)]) + else: + raise NotImplementedError(f"Feature map {fm_type} not implemented") + fm.tag = "FM" + return fm + + +def fourier_feature_map( + n_qubits: int, support: tuple[int, ...] 
= None, param: str = "phi", op: Type[Rotation] = RX +) -> AbstractBlock: + """Construct a Fourier feature map + + Args: + n_qubits: number of qubits across which the FM is created + param: The base name for the feature `Parameter` + """ + fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="fourier") + return tag(fm, tag="FourierFM") + + +def chebyshev_feature_map( + n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: Type[Rotation] = RX +) -> AbstractBlock: + """Construct a Chebyshev feature map + + Args: + n_qubits: number of qubits across which the FM is created + support (Iterable[int]): The qubit support + param: The base name for the feature `Parameter` + """ + fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="chebyshev") + return tag(fm, tag="ChebyshevFM") + + +def tower_feature_map( + n_qubits: int, support: tuple[int, ...] = None, param: str = "phi", op: Type[Rotation] = RX +) -> AbstractBlock: + """Construct a Chebyshev tower feature map + + Args: + n_qubits: number of qubits across which the FM is created + param: The base name for the feature `Parameter` + """ + fm = feature_map(n_qubits, support=support, param=param, op=op, fm_type="tower") + return tag(fm, tag="TowerFM") + + +def exp_fourier_feature_map( + n_qubits: int, + support: tuple[int, ...] = None, + param: str = "x", + feature_range: tuple[float, float] = None, +) -> AbstractBlock: + """ + Exponential fourier feature map. 
+ + Args: + n_qubits: number of qubits in the feature + support: qubit support + param: name of feature `Parameter` + feature_range: min and max value of the feature, as floats in a Tuple + """ + + if feature_range is None: + feature_range = (0.0, 2.0**n_qubits) + + if support is None: + support = tuple(range(n_qubits)) + + xmax = max(feature_range) + xmin = min(feature_range) + + x = Parameter(param, trainable=False) + + # The feature map works on the range of 0 to 2**n + x_rescaled = 2 * np.pi * (x - xmin) / (xmax - xmin) + + hlayer = kron(H(qubit) for qubit in support) + rlayer = kron(RZ(support[i], x_rescaled * (2**i)) for i in range(n_qubits)) + + return tag(chain(hlayer, rlayer), f"ExpFourierFM({param})") diff --git a/qadence/constructors/hamiltonians.py b/qadence/constructors/hamiltonians.py new file mode 100644 index 000000000..8577affb0 --- /dev/null +++ b/qadence/constructors/hamiltonians.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +import warnings +from typing import List, Tuple, Type, Union + +import numpy as np +import torch + +from qadence.blocks import AbstractBlock, add +from qadence.logger import get_logger +from qadence.operations import N, X, Y, Z +from qadence.register import Register +from qadence.types import Interaction, TArray + +logger = get_logger(__name__) + + +def interaction_zz(i: int, j: int) -> AbstractBlock: + """Ising ZZ interaction.""" + return Z(i) @ Z(j) + + +def interaction_nn(i: int, j: int) -> AbstractBlock: + """Ising NN interaction.""" + return N(i) @ N(j) + + +def interaction_xy(i: int, j: int) -> AbstractBlock: + """XY interaction.""" + return X(i) @ X(j) + Y(i) @ Y(j) + + +def interaction_xyz(i: int, j: int) -> AbstractBlock: + """Heisenberg XYZ interaction.""" + return X(i) @ X(j) + Y(i) @ Y(j) + Z(i) @ Z(j) + + +INTERACTION_DICT = { + Interaction.ZZ: interaction_zz, + Interaction.NN: interaction_nn, + Interaction.XY: interaction_xy, + Interaction.XYZ: interaction_xyz, +} + + +ARRAYS = (list, np.ndarray, 
torch.Tensor) + +DETUNINGS = (N, X, Y, Z) + +TDetuning = Union[Type[N], Type[X], Type[Y], Type[Z]] + + +def hamiltonian_factory( + register: Register | int, + interaction: Interaction | None = None, + detuning: TDetuning | None = None, + interaction_strength: TArray | str | None = None, + detuning_strength: TArray | str | None = None, + random_strength: bool = False, + force_update: bool = False, +) -> AbstractBlock: + """ + General Hamiltonian creation function. Can be used to create Hamiltonians with 2-qubit + interactions and single-qubit detunings, both with arbitrary strength or parameterized. + + Arguments: + register: register of qubits with a specific graph topology, or number of qubits. + When passing a number of qubits a register with all-to-all connectivity + is created. + interaction: Interaction.ZZ, Interaction.NN, Interaction.XY, or Interacton.XYZ. + detuning: single-qubit operator N, X, Y, or Z. + interaction_strength: list of values to be used as the interaction strength for each + pair of qubits. Should be ordered following the order of `Register(n_qubits).edges`. + Alternatively, some string "x" can be passed, which will create a parameterized + interactions for each pair of qubits, each labelled as `"x_ij"`. + detuning_strength: list of values to be used as the detuning strength for each qubit. + Alternatively, some string "x" can be passed, which will create a parameterized + detuning for each qubit, each labelled as `"x_i"`. + random_strength: set random interaction and detuning strengths between -1 and 1. + force_update: force override register detuning and interaction strengths. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import hamiltonian_factory, Interaction, Register, Z + + n_qubits = 3 + + # Constant total magnetization observable: + observable = hamiltonian_factory(n_qubits, detuning = Z) + + # Parameterized total magnetization observable: + observable = hamiltonian_factory(n_qubits, detuning = Z, detuning_strength = "z") + + # Random all-to-all XY Hamiltonian generator: + generator = hamiltonian_factory( + n_qubits, + interaction = Interaction.XY, + random_strength = True, + ) + + # Parameterized NN Hamiltonian generator with a square grid interaction topology: + register = Register.square(qubits_side = n_qubits) + generator = hamiltonian_factory( + register, + interaction = Interaction.NN, + interaction_strength = "theta" + ) + ``` + """ + + if interaction is None and detuning is None: + raise ValueError("Please provide an interaction and/or detuning for the Hamiltonian.") + + # If number of qubits is given, creates all-to-all register + register = Register(register) if isinstance(register, int) else register + + # Get interaction function + try: + int_fn = INTERACTION_DICT[interaction] # type: ignore [index] + except (KeyError, ValueError) as error: + if interaction is None: + pass + else: + raise KeyError(f"Interaction {interaction} not supported.") + + # Check single-qubit detuning + if (detuning is not None) and (detuning not in DETUNINGS): + raise TypeError(f"Detuning of type {type(detuning)} not supported.") + + # Pre-process detuning and interaction strengths and update register + has_detuning_strength, detuning_strength = _preprocess_strengths( + register, detuning_strength, "nodes", force_update, random_strength + ) + has_interaction_strength, interaction_strength = _preprocess_strengths( + register, interaction_strength, "edges", force_update, random_strength + ) + + if (not has_detuning_strength) or force_update: + register = _update_detuning_strength(register, 
detuning_strength) + + if (not has_interaction_strength) or force_update: + register = _update_interaction_strength(register, interaction_strength) + + # Create single-qubit detunings: + single_qubit_terms: List[AbstractBlock] = [] + if detuning is not None: + for node in register.nodes: + block_sq = detuning(node) # type: ignore [operator] + strength_sq = register.nodes[node]["strength"] + single_qubit_terms.append(strength_sq * block_sq) + + # Create two-qubit interactions: + two_qubit_terms: List[AbstractBlock] = [] + if interaction is not None: + for edge in register.edges: + block_tq = int_fn(*edge) # type: ignore [operator] + strength_tq = register.edges[edge]["strength"] + two_qubit_terms.append(strength_tq * block_tq) + + return add(*single_qubit_terms, *two_qubit_terms) + + +def _preprocess_strengths( + register: Register, + strength: TArray | str | None, + nodes_or_edges: str, + force_update: bool, + random_strength: bool, +) -> Tuple[bool, Union[TArray | str]]: + data = getattr(register, nodes_or_edges) + + # Useful for error messages: + strength_target = "detuning" if nodes_or_edges == "nodes" else "interaction" + + # First we check if strength values already exist in the register + has_strength = any(["strength" in data[i] for i in data]) + if has_strength and not force_update: + if strength is not None: + logger.warning( + "Register already includes " + strength_target + " strengths. " + "Skipping update. Use `force_update = True` to override them." + ) + # Next we process the strength given in the input arguments + if strength is None: + if random_strength: + strength = 2 * torch.rand(len(data), dtype=torch.double) - 1 + else: + # None defaults to constant = 1.0 + strength = torch.ones(len(data), dtype=torch.double) + elif isinstance(strength, ARRAYS): + # If array is given, checks it has the correct length + if len(strength) != len(data): + message = "Array of " + strength_target + " strengths has incorrect size." 
+ raise ValueError(message) + elif isinstance(strength, str): + # Any string will be used as a prefix to variational parameters + pass + else: + # If not of the accepted types ARRAYS or str, we error out + raise TypeError( + "Incorrect " + strength_target + f" strength type {type(strength)}. " + "Please provide an array of strength values, or a string for " + "parameterized " + strength_target + "s." + ) + + return has_strength, strength + + +def _update_detuning_strength(register: Register, detuning_strength: TArray | str) -> Register: + for node in register.nodes: + if isinstance(detuning_strength, str): + register.nodes[node]["strength"] = detuning_strength + f"_{node}" + elif isinstance(detuning_strength, ARRAYS): + register.nodes[node]["strength"] = detuning_strength[node] + return register + + +def _update_interaction_strength( + register: Register, interaction_strength: TArray | str +) -> Register: + for idx, edge in enumerate(register.edges): + if isinstance(interaction_strength, str): + register.edges[edge]["strength"] = interaction_strength + f"_{edge[0]}{edge[1]}" + elif isinstance(interaction_strength, ARRAYS): + register.edges[edge]["strength"] = interaction_strength[idx] + return register + + +# FIXME: Previous hamiltonian / observable functions, now refactored, to be deprecated: + +DEPRECATION_MESSAGE = "This function will be removed in the future. " + + +def single_z(qubit: int = 0, z_coefficient: float = 1.0) -> AbstractBlock: + message = DEPRECATION_MESSAGE + "Please use `z_coefficient * Z(qubit)` directly." + warnings.warn(message, FutureWarning) + return Z(qubit) * z_coefficient + + +def total_magnetization(n_qubits: int, z_terms: np.ndarray | list | None = None) -> AbstractBlock: + message = ( + DEPRECATION_MESSAGE + + "Please use `hamiltonian_factory(n_qubits, detuning=Z, node_coeff=z_terms)`." 
+ ) + warnings.warn(message, FutureWarning) + return hamiltonian_factory(n_qubits, detuning=Z, detuning_strength=z_terms) + + +def zz_hamiltonian( + n_qubits: int, + z_terms: np.ndarray | None = None, + zz_terms: np.ndarray | None = None, +) -> AbstractBlock: + message = ( + DEPRECATION_MESSAGE + + """ +Please use `hamiltonian_factory(n_qubits, Interaction.ZZ, Z, interaction_strength, z_terms)`. \ +Note that the argument `zz_terms` in this function is a 2D array of size `(n_qubits, n_qubits)`, \ +while `interaction_strength` is expected as a 1D array of size `0.5 * n_qubits * (n_qubits - 1)`.""" + ) + warnings.warn(message, FutureWarning) + if zz_terms is not None: + register = Register(n_qubits) + interaction_strength = [zz_terms[edge[0], edge[1]] for edge in register.edges] + else: + interaction_strength = None + + return hamiltonian_factory(n_qubits, Interaction.ZZ, Z, interaction_strength, z_terms) + + +def ising_hamiltonian( + n_qubits: int, + x_terms: np.ndarray | None = None, + z_terms: np.ndarray | None = None, + zz_terms: np.ndarray | None = None, +) -> AbstractBlock: + message = ( + DEPRECATION_MESSAGE + + """ +You can build a general transverse field ising model with the `hamiltonian_factory` function. 
\ +Check the hamiltonian construction tutorial in the documentation for more information.""" + ) + warnings.warn(message, FutureWarning) + zz_ham = zz_hamiltonian(n_qubits, z_terms=z_terms, zz_terms=zz_terms) + x_ham = hamiltonian_factory(n_qubits, detuning=X, detuning_strength=x_terms) + return zz_ham + x_ham diff --git a/qadence/constructors/qft.py b/qadence/constructors/qft.py new file mode 100644 index 000000000..5604e223c --- /dev/null +++ b/qadence/constructors/qft.py @@ -0,0 +1,246 @@ +from __future__ import annotations + +from typing import Any + +import torch + +from qadence.blocks import AbstractBlock, add, chain, kron, tag +from qadence.operations import CPHASE, SWAP, H, HamEvo, I, Z +from qadence.types import Strategy + +from .daqc import daqc_transform + + +def qft( + n_qubits: int, + support: tuple[int, ...] = None, + inverse: bool = False, + reverse_in: bool = False, + swaps_out: bool = False, + strategy: Strategy = Strategy.DIGITAL, + gen_build: AbstractBlock | None = None, +) -> AbstractBlock: + """ + The Quantum Fourier Transform + + Depending on the application, user should be careful with qubit ordering + in the input and output. This can be controlled with reverse_in and swaps_out + arguments. + + Args: + n_qubits: number of qubits in the QFT + support: qubit support to use + inverse: True performs the inverse QFT + reverse_in: Reverses the input qubits to account for endianness + swaps_out: Performs swaps on the output qubits to match the "textbook" QFT. + strategy: Strategy.Digital or Strategy.sDAQC + gen_build: building block Ising Hamiltonian for the DAQC transform. + Defaults to constant all-to-all Ising. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import qft + + n_qubits = 3 + + qft_circuit = qft(n_qubits, strategy = "sDAQC") + ``` + """ + + if support is None: + support = tuple(range(n_qubits)) + + assert len(support) <= n_qubits, "Wrong qubit support supplied" + + if reverse_in: + support = support[::-1] + + qft_layer_dict = { + Strategy.DIGITAL: _qft_layer_digital, + Strategy.SDAQC: _qft_layer_sDAQC, + Strategy.BDAQC: _qft_layer_bDAQC, + Strategy.ANALOG: _qft_layer_analog, + } + + try: + layer_func = qft_layer_dict[strategy] + except KeyError: + raise KeyError(f"Strategy {strategy} not recognized.") + + qft_layers = reversed(range(n_qubits)) if inverse else range(n_qubits) + + qft_circ = chain( + layer_func( + n_qubits=n_qubits, support=support, layer=layer, inverse=inverse, gen_build=gen_build + ) # type: ignore + for layer in qft_layers + ) + + if swaps_out: + swap_ops = [SWAP(support[i], support[n_qubits - i - 1]) for i in range(n_qubits // 2)] + qft_circ = chain(*swap_ops, qft_circ) if inverse else chain(qft_circ, *swap_ops) + + return tag(qft_circ, tag="iQFT") if inverse else tag(qft_circ, tag="QFT") + + +######################## +# STANDARD DIGITAL QFT # +######################## + + +def _qft_layer_digital( + n_qubits: int, + support: tuple[int, ...], + layer: int, + inverse: bool, + gen_build: AbstractBlock | None = None, +) -> AbstractBlock: + """ + Applies the Hadamard gate followed by CPHASE gates + corresponding to one layer of the QFT. 
+ """ + qubit_range_layer = ( + reversed(range(layer + 1, n_qubits)) if inverse else range(layer + 1, n_qubits) + ) + rots = [] + for j in qubit_range_layer: + angle = torch.tensor( + ((-1) ** inverse) * 2 * torch.pi / (2 ** (j - layer + 1)), dtype=torch.cdouble + ) + rots.append(CPHASE(support[j], support[layer], angle)) # type: ignore + if inverse: + return chain(*rots, H(support[layer])) # type: ignore + return chain(H(support[layer]), *rots) # type: ignore + + +######################################## +# DIGITAL-ANALOG QFT (with sDAQC) # +# [1] https://arxiv.org/abs/1906.07635 # +######################################## + + +def _theta(k: int) -> float: + """Eq. (16) from [1]""" + return float(torch.pi / (2 ** (k + 1))) + + +def _alpha(c: int, m: int, k: int) -> float: + """Eq. (16) from [1]""" + if c == m: + return float(torch.pi / (2 ** (k - m + 2))) + else: + return 0.0 + + +def _sqg_gen(n_qubits: int, support: tuple[int, ...], m: int, inverse: bool) -> list[AbstractBlock]: + """ + Eq. (13) from [1] + + Creates the generator corresponding to single-qubit rotations coming + out of the CPHASE decomposition. The paper also includes the generator + for the Hadamard of each layer here, but we left it explicit at + the start of each layer. + """ + k_sqg_list = reversed(range(2, n_qubits - m + 2)) if inverse else range(2, n_qubits - m + 2) + + sqg_gen_list = [] + for k in k_sqg_list: + sqg_gen = ( + kron(I(support[j]) for j in range(n_qubits)) - Z(support[k + m - 2]) - Z(support[m - 1]) + ) + sqg_gen_list.append(_theta(k) * sqg_gen) + + return sqg_gen_list + + +def _tqg_gen(n_qubits: int, support: tuple[int, ...], m: int, inverse: bool) -> list[AbstractBlock]: + """ + Eq. (14) from [1] + + Creates the generator corresponding to the two-qubit ZZ + interactions coming out of the CPHASE decomposition. 
+ """ + k_tqg_list = reversed(range(2, n_qubits + 1)) if inverse else range(2, n_qubits + 1) + + tqg_gen_list = [] + for k in k_tqg_list: + for c in range(1, k): + tqg_gen = kron(Z(support[c - 1]), Z(support[k - 1])) + tqg_gen_list.append(_alpha(c, m, k) * tqg_gen) + + return tqg_gen_list + + +def _qft_layer_sDAQC( + n_qubits: int, + support: tuple[int, ...], + layer: int, + inverse: bool, + gen_build: AbstractBlock, +) -> AbstractBlock: + """ + QFT Layer using the sDAQC technique following the paper: + + -- [1] https://arxiv.org/abs/1906.07635 + + 4 - qubit edge case is not implemented. + + Note: the paper follows an index convention of running from 1 to N. A few functions + here also use that convention to be consistent with the paper. However, for qadence + related things the indices are converted to [0, N-1]. + """ + + # TODO: Properly check and include support for changing qubit support + allowed_support = tuple(range(n_qubits)) + if support != allowed_support and support != allowed_support[::-1]: + raise NotImplementedError("Changing support for DigitalAnalog QFT not yet supported.") + + m = layer + 1 # Paper index convention + + # Generator for the single-qubit rotations contributing to the CPHASE gate + sqg_gen_list = _sqg_gen(n_qubits=n_qubits, support=support, m=m, inverse=inverse) + + # Ising model representing the CPHASE gates two-qubit interactions + tqg_gen_list = _tqg_gen(n_qubits=n_qubits, support=support, m=m, inverse=inverse) + + if len(sqg_gen_list) > 0: + # Single-qubit rotations (leaving the Hadamard explicit) + sq_gate = chain(H(support[m - 1]), HamEvo(add(*sqg_gen_list), -1.0)) + + # Two-qubit interaction in the CPHASE converted with sDAQC + gen_cphases = add(*tqg_gen_list) + transformed_daqc_circuit = daqc_transform( + n_qubits=n_qubits, + gen_target=gen_cphases, + t_f=-1.0, + gen_build=gen_build, + ) + + layer_circ = chain( + sq_gate, + transformed_daqc_circuit, + ) + if inverse: + return layer_circ.dagger() + return layer_circ # type: 
ignore + else: + return chain(H(support[m - 1])) # type: ignore + + +######################################## +# DIGITAL-ANALOG QFT (with bDAQC) # +# [1] https://arxiv.org/abs/1906.07635 # +######################################## + + +def _qft_layer_bDAQC(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError + + +############ +## ANALOG ## +############ + + +def _qft_layer_analog(*args: Any, **kwargs: Any) -> Any: + raise NotImplementedError diff --git a/qadence/constructors/utils.py b/qadence/constructors/utils.py new file mode 100644 index 000000000..e1a011961 --- /dev/null +++ b/qadence/constructors/utils.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +from typing import Iterable, Type + +import numpy as np +import sympy + +from qadence.blocks import KronBlock, kron +from qadence.operations import RY +from qadence.parameters import FeatureParameter, Parameter + + +def generator_prefactor(spectrum: str, qubit_index: int) -> float | int: + """ + Converts a spectrum string (e.g., tower or exponential) to the correct generator prefactor. + """ + spectrum = spectrum.lower() + conversion_dict: dict[str, float | int] = { + "simple": 1, + "tower": qubit_index + 1, + "exponential": 2 * np.pi / (2 ** (qubit_index + 1)), + } + return conversion_dict[spectrum] + + +def basis_func(basis: str, x: Parameter) -> Parameter | sympy.Expr: + basis = basis.lower() + conversion_dict: dict[str, Parameter | sympy.Expr] = { + "fourier": x, + "chebyshev": 2 * sympy.acos(x), + } + return conversion_dict[basis] + + +def build_idx_fms( + basis: str, + fm_pauli: Type[RY], + fm_strategy: str, + n_features: int, + n_qubits: int, + spectrum: str, +) -> list[KronBlock]: + """Builds the index feature maps based on the given parameters. + + Args: + basis (str): Type of basis chosen for the feature map. + fm_pauli (PrimitiveBlock type): The chosen Pauli rotation type. + fm_strategy (str): The feature map strategy to be used. Possible values are + 'parallel' or 'serial'. 
+ n_features (int): The number of features. + n_qubits (int): The number of qubits. + spectrum (str): The chosen spectrum. + + Returns: + List[KronBlock]: The list of index feature maps. + """ + idx_fms = [] + for i in range(n_features): + target_qubits = get_fm_qubits(fm_strategy, i, n_qubits, n_features) + param = FeatureParameter(f"x{i}") + block = kron( + *[ + fm_pauli(qubit, generator_prefactor(spectrum, j) * basis_func(basis, param)) + for j, qubit in enumerate(target_qubits) + ] + ) + idx_fm = block + idx_fms.append(idx_fm) + return idx_fms + + +def get_fm_qubits(fm_strategy: str, i: int, n_qubits: int, n_features: int) -> Iterable: + """Returns the list of target qubits for the given feature map strategy and feature index + + Args: + fm_strategy (str): The feature map strategy to be used. Possible values + are 'parallel' or 'serial'. + i (int): The feature index. + n_qubits (int): The number of qubits. + n_features (int): The number of features. + + Returns: + List[int]: The list of target qubits. + + Raises: + ValueError: If the feature map strategy is not implemented. 
+ """ + if fm_strategy == "parallel": + n_qubits_per_feature = int(n_qubits / n_features) + target_qubits = range(i * n_qubits_per_feature, (i + 1) * n_qubits_per_feature) + elif fm_strategy == "serial": + target_qubits = range(0, n_qubits) + else: + raise ValueError(f"Feature map strategy {fm_strategy} not implemented.") + return target_qubits diff --git a/qadence/decompose.py b/qadence/decompose.py new file mode 100644 index 000000000..01f2fc753 --- /dev/null +++ b/qadence/decompose.py @@ -0,0 +1,145 @@ +from __future__ import annotations + +import itertools +from enum import Enum +from typing import Any, List, Tuple, Union + +import sympy + +from qadence.blocks import AbstractBlock +from qadence.blocks.utils import get_pauli_blocks, unroll_block_with_scaling +from qadence.logger import get_logger +from qadence.parameters import Parameter, evaluate + +# from qadence.types import TNumber, TParameter +from qadence.types import PI +from qadence.types import LTSOrder as Order + +logger = get_logger(__name__) + +# flatten a doubly-nested list +flatten = lambda a: list(itertools.chain(*a)) # noqa: E731 + + +def change_to_z_basis(block: AbstractBlock, sign: int) -> list[AbstractBlock]: + """A simple function to do basis transformation of blocks of X and Y + + This needs to be generalized beyond 2 terms""" + + # import here due to circular import issue + from qadence.operations import RX, H, X, Y + + qubit = block.qubit_support[0] + + if isinstance(block, X): + return [H(target=qubit)] + + elif isinstance(block, Y): + return [RX(parameter=sign * PI / 2.0, target=qubit)] + + return [] + + +def time_discretisation( + parameter: Parameter, max_steps: int = 10 +) -> Tuple[Union[float, complex, Any], int]: + """checks and returns a numerically stable + time step that is used in the product formula + """ + # the approximation gives better results for t -> 0 + # the delta t needs to be numerically stable + # ! constant time steps + # ! 
the more steps, the more computationally expensive circuit + # https://arxiv.org/pdf/1403.3469.pdf + + # check the time and log warning on duration if needed + time = evaluate(parameter) + + if (time / max_steps) > 1e-3: # type: ignore + logger.warning( + """Please consider running the H evolution for + a shorter time to get a better approximation.""" + ) + + t_delta = parameter / max_steps # ! check numerical stability + return t_delta, max_steps + + +def lie_trotter_suzuki( + block: AbstractBlock | List, parameter: Parameter, order: Enum = Order.BASIC +) -> list[AbstractBlock]: + # get the block and transform it to a list of blocks + # do the correct decomposition + # return a list of blocks + + if not isinstance(block, list): + block_list = unroll_block_with_scaling(block) + else: # recursive for 4th order + block_list = block + + if order == Order.BASIC: # Lie-Trotter 1st order + return decompose_pauli_exp(block_list, parameter) + + else: # Suzuki-Trotter 2nd and 4th order + # ! handle time properly, break up into small time steps + # get a useful numerically stable time step + + t_delta, t_steps = time_discretisation(parameter) + + if order == Order.ST2: # Suzuki-Trotter 2nd order + outer = decompose_pauli_exp(block_list[:-1], Parameter(t_delta / 2.0)) + inner = decompose_pauli_exp([block_list[-1]], Parameter(t_delta)) + return (outer + inner + list(reversed(outer))) * t_steps + + else: # Suzuki-Trotter 4th order + p2 = (4 - 4 ** (1 / 3)) ** -1 # minimises the 'ideal' error in the recursive formula + outer = lie_trotter_suzuki(block_list, Parameter((t_delta * p2)), order=Order.ST2) + inner = lie_trotter_suzuki( + block_list, Parameter((1 - 4 * p2) * t_delta), order=Order.ST2 + ) + return (2 * outer + inner + outer * 2) * t_steps + + +def decompose_pauli_exp(block_list: list, parameter: Parameter | sympy.Expr) -> list[AbstractBlock]: + """A simple function to do decompositions of Pauli exponential operators into digital gates""" + + # import here due to 
circular import issue + from qadence.operations import CNOT, RZ + + blocks = [] + + for bl, scale in block_list: + # extract Pauli operations and raise an error in case + # a non-Pauli operation is found since it cannot be + # decomposed + n_blocks = len(get_pauli_blocks(bl)) + + # ensure that we keep the parameter as trainable + fact = 2.0 * parameter * scale + + blist: list[AbstractBlock] = bl if n_blocks >= 2 else [bl] # type: ignore[assignment] + indx = [b.qubit_support[0] for b in blist] + ztarget = max(indx) + + cnot_sequence = [CNOT(i, i + 1) for i in range(min(indx), max(indx))] + basis_fwd = [change_to_z_basis(blist[i], 1) for i in range(len(blist))] + rot = [RZ(parameter=Parameter(fact), target=ztarget)] + basis_bkd = [change_to_z_basis(blist[i], -1) for i in range(len(blist) - 1, -1, -1)] + + # NOTE + # perform the following operations in sequence to perform the decomposition of a + # polynomial Pauli term, for more details, see: https://arxiv.org/abs/1001.3855 + # - change to Z basis for all the needed qubit operators + # - apply a CNOT ladder on the full qubit support where operators are acting + # - apply a RZ rotation on the last qubit + # - apply the reverse CNOT ladder + # - go back to the original basis + blocks.extend( + flatten(basis_fwd) + + cnot_sequence + + rot + + list(reversed(cnot_sequence)) + + flatten(basis_bkd) + ) + + return blocks diff --git a/qadence/divergences.py b/qadence/divergences.py new file mode 100644 index 000000000..95bc24d72 --- /dev/null +++ b/qadence/divergences.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from collections import Counter + +import numpy as np + + +def shannon_entropy(counter: Counter) -> float: + return float(-np.sum([count * np.log(count) for count in counter.values()])) + + +def js_divergence(counter_p: Counter, counter_q: Counter) -> float: + """ + Compute the Jensen-Shannon divergence between two probability distributions + represented as Counter objects. 
+ The JSD is calculated using only the shared keys between the two input Counter objects. + + Args: + counter_p (Counter): Counter of bitstring counts for probability mass function P. + counter_q (Counter): Counter of bitstring counts for probability mass function Q. + + Returns: + float: The Jensen-Shannon divergence between counter_p and counter_q. + """ + # Normalise counters + normalisation_p = np.sum([count for count in counter_p.values()]) + normalisation_q = np.sum([count for count in counter_q.values()]) + counter_p = Counter({k: v / normalisation_p for k, v in counter_p.items()}) + counter_q = Counter({k: v / normalisation_q for k, v in counter_q.items()}) + + average_proba_counter = counter_p + counter_q + average_proba_counter = Counter({k: v / 2.0 for k, v in average_proba_counter.items()}) + average_entropy = shannon_entropy(average_proba_counter) + + entropy_p = shannon_entropy(counter_p) + entropy_q = shannon_entropy(counter_q) + return float(average_entropy - (entropy_p + entropy_q) / 2.0) diff --git a/qadence/draw/__init__.py b/qadence/draw/__init__.py new file mode 100644 index 000000000..4378798fe --- /dev/null +++ b/qadence/draw/__init__.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import io +from typing import Any + +from graphviz import Graph + +from .themes import Dark, Light +from .utils import make_diagram +from .vizbackend import QuantumCircuitDiagram + + +def display(x: Any, *args: Any, **kwargs: Any) -> Graph: + return make_diagram(x, *args, **kwargs).show() + + +def savefig(x: Any, filename: str, *args: Any, **kwargs: Any) -> None: + make_diagram(x, *args, **kwargs).savefig(filename) + + +def html_string(x: Any, *args: Any, **kwargs: Any) -> str: + buffer = io.StringIO() + + qcd = make_diagram(x, *args, **kwargs) + qcd._build() + + buffer.write(qcd.graph.pipe(format="svg").decode("utf-8")) + buffer.seek(0) + return buffer.read() diff --git a/qadence/draw/assets/dark/measurement.png 
b/qadence/draw/assets/dark/measurement.png new file mode 100644 index 000000000..0f6ba0730 Binary files /dev/null and b/qadence/draw/assets/dark/measurement.png differ diff --git a/qadence/draw/assets/dark/measurement.svg b/qadence/draw/assets/dark/measurement.svg new file mode 100644 index 000000000..070cc66eb --- /dev/null +++ b/qadence/draw/assets/dark/measurement.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/qadence/draw/assets/light/measurement.png b/qadence/draw/assets/light/measurement.png new file mode 100644 index 000000000..bb63896f9 Binary files /dev/null and b/qadence/draw/assets/light/measurement.png differ diff --git a/qadence/draw/assets/light/measurement.svg b/qadence/draw/assets/light/measurement.svg new file mode 100644 index 000000000..03637e87b --- /dev/null +++ b/qadence/draw/assets/light/measurement.svg @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/qadence/draw/themes.py b/qadence/draw/themes.py new file mode 100644 index 000000000..c5462435e --- /dev/null +++ b/qadence/draw/themes.py @@ -0,0 +1,160 @@ +from __future__ import annotations + +import os +from pathlib import Path +from typing import Dict + + +class BaseTheme: + name = "" + background_color = "" + color = "" + fontname = "JetBrains Mono" + fontsize = "20" + primitive_node: dict[str, str] = {} + fixed_parametric_node: dict[str, str] = {} + feature_parametric_node: dict[str, str] = {} + variational_parametric_node: dict[str, str] = {} + hamevo_cluster: dict[str, str] = {} + add_cluster: dict[str, str] = {} + scale_cluster: dict[str, str] = {} + + @classmethod + def get_graph_attr(self) -> Dict[str, str]: + return { + "bgcolor": self.background_color, + "nodesep": "0.15", # This defines the distance between wires + } + + @classmethod + def get_node_attr(self) -> Dict[str, str]: + return { + "color": self.color, + "fontcolor": self.color, + "fontname": self.fontname, + "fontsize": self.fontsize, 
+ "width": "0.8", + "height": "0.8", + "style": "filled,rounded", + "shape": "box", + "penwidth": "2.7", + "margin": "0.3,0.1", + } + + @classmethod + def get_edge_attr(self) -> Dict[str, str]: + return {"color": self.color, "penwidth": "2.7"} + + @classmethod + def get_cluster_attr(self) -> Dict[str, str]: + return { + "color": self.color, + "fontcolor": self.color, + "fontname": self.fontname, + "fontsize": str(int(int(self.fontsize) * 2 / 3)), + "labelloc": "t", + "style": "rounded", + "penwidth": "2.0", + } + + @classmethod + def get_primitive_node_attr(self) -> Dict[str, str]: + attrs = self.get_node_attr() + attrs.update(self.primitive_node) + return attrs + + @classmethod + def get_fixed_parametric_node_attr(self) -> Dict[str, str]: + attrs = self.get_node_attr() + attrs.update(self.fixed_parametric_node) + return attrs + + @classmethod + def get_feature_parametric_node_attr(self) -> Dict[str, str]: + attrs = self.get_node_attr() + attrs.update(self.feature_parametric_node) + return attrs + + @classmethod + def get_variational_parametric_node_attr(self) -> Dict[str, str]: + attrs = self.get_node_attr() + attrs.update(self.variational_parametric_node) + return attrs + + @classmethod + def get_add_cluster_attr(self) -> Dict[str, str]: + attrs = self.get_cluster_attr() + attrs.update(self.add_cluster) + attrs["bgcolor"] = attrs["fillcolor"] + return attrs + + @classmethod + def get_scale_cluster_attr(self) -> Dict[str, str]: + attrs = self.get_cluster_attr() + attrs.update(self.scale_cluster) + attrs["bgcolor"] = attrs["fillcolor"] + return attrs + + @classmethod + def get_hamevo_cluster_attr(self) -> Dict[str, str]: + attrs = self.get_cluster_attr() + attrs.update(self.hamevo_cluster) + attrs["bgcolor"] = attrs["fillcolor"] + return attrs + + @classmethod + def load_measurement_icon(self) -> str: + basedir = Path(os.path.abspath(os.path.dirname(__file__))) + return os.path.join(basedir, "assets", self.name, "measurement.svg") + + +class Dark(BaseTheme): + name 
= "dark" + background_color = "black" + color = "white" + primitive_node = {"fillcolor": "#d03a2f", "color": "#f0c1be"} + variational_parametric_node = {"fillcolor": "#3182bd", "color": "#afd3e9"} + fixed_parametric_node = {"fillcolor": "#e6550d", "color": "#fed4b0"} + feature_parametric_node = {"fillcolor": "#2d954d", "color": "#b2e8c2"} + hamevo_cluster = {"fillcolor": "black", "color": "grey"} + add_cluster = {"fillcolor": "black", "color": "grey"} + scale_cluster = {"fillcolor": "black", "color": "grey"} + + +class Light(BaseTheme): + name = "light" + background_color = "white" + color = "black" + primitive_node = {"color": "#d03a2f", "fillcolor": "#f0c1be"} + variational_parametric_node = {"color": "#3182bd", "fillcolor": "#afd3e9"} + fixed_parametric_node = {"color": "#e6550d", "fillcolor": "#fed4b0"} + feature_parametric_node = {"color": "#2d954d", "fillcolor": "#b2e8c2"} + hamevo_cluster = {"color": "black", "fillcolor": "lightgrey"} + add_cluster = {"color": "black", "fillcolor": "lightgrey"} + scale_cluster = {"color": "black", "fillcolor": "lightgrey"} + + +class Black(BaseTheme): + name = "black" + background_color = "black" + color = "white" + primitive_node = {"color": "white", "fillcolor": "black"} + variational_parametric_node = {"color": "white", "fillcolor": "black"} + fixed_parametric_node = {"color": "white", "fillcolor": "black"} + feature_parametric_node = {"color": "white", "fillcolor": "black"} + hamevo_cluster = {"color": "white", "fillcolor": "black"} + add_cluster = {"color": "white", "fillcolor": "black"} + scale_cluster = {"color": "white", "fillcolor": "black"} + + +class White(BaseTheme): + name = "white" + background_color = "white" + color = "black" + primitive_node = {"color": "black", "fillcolor": "white"} + variational_parametric_node = {"color": "black", "fillcolor": "white"} + fixed_parametric_node = {"color": "black", "fillcolor": "white"} + feature_parametric_node = {"color": "black", "fillcolor": "white"} + hamevo_cluster = 
{"color": "black", "fillcolor": "white"} + add_cluster = {"color": "black", "fillcolor": "white"} + scale_cluster = {"color": "black", "fillcolor": "white"} diff --git a/qadence/draw/utils.py b/qadence/draw/utils.py new file mode 100644 index 000000000..137e0f882 --- /dev/null +++ b/qadence/draw/utils.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +import re +import shutil +import warnings +from copy import deepcopy +from functools import singledispatch +from tempfile import NamedTemporaryFile +from typing import Any + +import sympy + +from qadence.blocks import ( + AbstractBlock, + AddBlock, + CompositeBlock, + ControlBlock, + ParametricBlock, + ParametricControlBlock, + PrimitiveBlock, + ScaleBlock, + chain, +) +from qadence.blocks.analog import ConstantAnalogRotation, WaitBlock +from qadence.circuit import QuantumCircuit +from qadence.models import QuantumModel +from qadence.operations import RX, RY, RZ, SWAP, HamEvo, I +from qadence.transpile.block import fill_identities +from qadence.utils import format_parameter + +from .vizbackend import Cluster, QuantumCircuitDiagram + +# FIXME: move this to a config/theme? +USE_LATEX_LABELS = False +USE_SUBSCRIPTS = True + + +def fixed(expr: sympy.Basic) -> bool: + return expr.is_number # type: ignore + + +def trainable(expr: sympy.Basic) -> bool: + for s in expr.free_symbols: + if hasattr(s, "trainable") and s.trainable: + return True + return False + + +def _get_latex_label(block: AbstractBlock, color: str = "red", fontsize: int = 30) -> str: + from latex2svg import default_params, latex2svg + from lxml import etree + + # FIXME: use this and e.g. 
+ # qcd.create_node( + # wire, label="", image=_get_label(block, qcd.theme.color), block_type="primitive") + name = sympy.Function(type(block).__name__) + if isinstance(block, (RX, RY, RZ)): + p = block.parameters.parameter + expr = name(p) + else: + expr = name + + lx = sympy.latex(expr, mode="inline") + + latex_params = deepcopy(default_params) + preamble = r""" + \usepackage[utf8x]{inputenc} + \usepackage{amsmath} + \usepackage{amsfonts} + \usepackage{amssymb} + \usepackage{amstext} + \usepackage{xcolor} + \everymath{\color{black}} + \everydisplay{\color{black}} + \def\m@th{\normalcolor\mathsurround\z@} + + """ + preamble = preamble.replace("black", color) + preamble += r"\usepackage[bitstream-charter]{mathdesign}" + latex_params["preamble"] = preamble + + svg = latex2svg(lx, params=latex_params) + svgstr = svg["svg"] + root = etree.fromstring(svgstr) + # (x, y_min, width, height) = map(lambda x: fontsize*float(x), root.get("viewBox").split(" ")) + # root.set("viewBox", f"{x_min} {y_min} {width} {height}") + root.set("width", str(round(svg["width"] * fontsize))) + root.set("height", str(round(svg["height"] * fontsize))) + svgstr = etree.tostring(root, encoding="utf-8").decode("utf-8") + + fi = NamedTemporaryFile(delete=False, mode="w", suffix=".svg") + fi.write('') + fi.write(svgstr) + return fi.name + + +def _is_number(s: str) -> bool: + # match numbers with an optional minus sign and optional decimal part. 
+ pattern = re.compile(r"^-?\d+(\.\d+)?$") + return bool(pattern.match(s)) + + +def _subscript(x: str) -> str: + offset = ord("₁") - ord("1") + return chr(ord(x) + offset) if _is_number(x) else x + + +def _index_to_subscripts(x: str) -> str: + return re.sub(r"_\d+", lambda match: "".join(map(_subscript, match.group()[1:])), x) + + +def _expr_string(expr: sympy.Basic) -> str: + return _index_to_subscripts(format_parameter(expr)) + + +def _get_label(block: AbstractBlock) -> str: + name = sympy.Function(type(block).__name__) + if isinstance(block, ParametricBlock): + p = block.parameters.parameter + expr = name(p) + else: + expr = name + return _expr_string(expr) if USE_SUBSCRIPTS else format_parameter(expr) + + +@singledispatch +def make_diagram( + x: Any, + qcd: QuantumCircuitDiagram | Cluster | None = None, + layout: str = "LR", + theme: str = "light", + fill: bool = True, + **kwargs: Any, +) -> QuantumCircuitDiagram: + raise ValueError(f"Cannot construct circuit diagram for type: {type(x)}") + + +@make_diagram.register +def _(circuit: QuantumCircuit, *args: Any, **kwargs: Any) -> QuantumCircuitDiagram: + # FIXME: add register plot here + return make_diagram(circuit.block, *args, nb_wires=circuit.n_qubits, **kwargs) + + +@make_diagram.register +def _(model: QuantumModel, *args: Any, **kwargs: Any) -> QuantumCircuitDiagram: + raise ValueError("Not yet supported.") + + # FIXME: include measurement icon + if model.out_features > 1: + raise ValueError("Cannot visualize QuantumModel with more than one observable.") + + obs = deepcopy(model._observable[0].original) + obs.tag = "Obs." 
+ + block = chain(model._circuit.original.block, obs) + return make_diagram(block, *args, **kwargs) + + +@make_diagram.register +def _( + block: AbstractBlock, + qcd: QuantumCircuitDiagram = None, + layout: str = "LR", + theme: str = "light", + fill: bool = True, + **kwargs: Any, +) -> QuantumCircuitDiagram: + if fill: + from qadence.transpile import transpile + + block = transpile( + lambda b: fill_identities(b, 0, b.n_qubits), + # FIXME: enabling flatten can sometimes prevent wires from bending + # but flatten currently gets rid of some tags... fix that and comment in: + # flatten + )(block) + + if qcd is None: + nb_wires = kwargs.pop("nb_wires") if "nb_wires" in kwargs else block.n_qubits + qcd = QuantumCircuitDiagram(nb_wires=nb_wires, layout=layout, theme=theme, **kwargs) + + if isinstance(block, I): + wire = block.qubit_support[0] + qcd.create_identity_node(wire) + + elif isinstance(block, SWAP): + qcd.create_swap_gate(*block.qubit_support) # type: ignore + + elif isinstance(block, (ControlBlock, ParametricControlBlock)): + from_wire = block.qubit_support[-1] + to_wires = block.qubit_support[:-1] + (b,) = block.blocks + if isinstance(b, ParametricBlock): + if block.parameters.parameter.is_number: # type: ignore[union-attr] + attrs = qcd.theme.get_fixed_parametric_node_attr() + elif trainable(block.parameters.parameter): # type: ignore[union-attr] + attrs = qcd.theme.get_variational_parametric_node_attr() + else: + attrs = qcd.theme.get_feature_parametric_node_attr() + else: + attrs = qcd.theme.get_primitive_node_attr() + + qcd.create_control_gate( + from_wire, to_wires, label=_get_label(b), **attrs # type: ignore[arg-type] + ) + + elif isinstance(block, HamEvo): + labels = [block.name, f"t = {_expr_string(block.parameters.parameter)}"] + start, stop = min(block.qubit_support), block.n_qubits + _make_cluster(qcd, labels, start, stop, qcd.theme.get_hamevo_cluster_attr()) + + elif isinstance(block, AddBlock): + labels = ["AddBlock"] + start, stop = 
min(block.qubit_support), block.n_qubits + _make_cluster(qcd, labels, start, stop, qcd.theme.get_add_cluster_attr()) + + elif isinstance(block, WaitBlock): + labels = ["wait", f"t = {_expr_string(block.parameters.duration)}"] + is_global = block.qubit_support.is_global + start = 0 if is_global else min(block.qubit_support) + stop = qcd.nb_wires if is_global else block.n_qubits + _make_cluster(qcd, labels, start, stop, qcd.theme.get_add_cluster_attr()) + + elif isinstance(block, ConstantAnalogRotation): + labels = [ + "AnalogRot", + f"α = {_expr_string(block.parameters.alpha)}", + f"t = {_expr_string(block.parameters.duration)}", + f"Ω = {_expr_string(block.parameters.omega)}", + f"δ = {_expr_string(block.parameters.delta)}", + f"φ = {_expr_string(block.parameters.phase)}", + ] + is_global = block.qubit_support.is_global + start = 0 if is_global else min(block.qubit_support) + stop = qcd.nb_wires if is_global else block.n_qubits + _make_cluster(qcd, labels, start, stop, qcd.theme.get_add_cluster_attr()) + + elif isinstance(block, ScaleBlock): + s = f"[* {_expr_string(block.scale)}]" + label = s if block.tag is None else f"{block.tag}: {s}" + cluster = qcd.create_cluster(label, **qcd.theme.get_scale_cluster_attr()) # type: ignore + make_diagram(block.block, cluster) + + elif isinstance(block, ParametricBlock): + wire = block.qubit_support[0] + + if block.parameters.parameter.is_number: + attrs = qcd.theme.get_fixed_parametric_node_attr() + elif trainable(block.parameters.parameter): + attrs = qcd.theme.get_variational_parametric_node_attr() + else: + attrs = qcd.theme.get_feature_parametric_node_attr() + + if USE_LATEX_LABELS and shutil.which("latex"): + qcd.create_node( + wire, + label="", + image=_get_latex_label(block, attrs["color"]), + **attrs, # type: ignore[arg-type] + ) + else: + if USE_LATEX_LABELS: + warnings.warn( + "To get prettier circuit drawings, consider installing LaTeX.", UserWarning + ) + qcd.create_node(wire, label=_get_label(block), **attrs) # 
type: ignore[arg-type] + + elif isinstance(block, PrimitiveBlock): + wire = block.qubit_support[0] + attrs = qcd.theme.get_primitive_node_attr() + if USE_LATEX_LABELS and shutil.which("latex"): + qcd.create_node( + wire, + label="", + image=_get_latex_label(block, attrs["color"]), + **attrs, # type: ignore[arg-type] + ) + else: + if USE_LATEX_LABELS: + warnings.warn( + "To get prettier circuit drawings, consider installing LaTeX.", UserWarning + ) + qcd.create_node(wire, label=_get_label(block), **attrs) # type: ignore[arg-type] + + elif isinstance(block, CompositeBlock): + for inner_block in block: + if inner_block.tag is not None: + cluster = qcd.create_cluster( + inner_block.tag, **qcd.theme.get_cluster_attr() # type: ignore + ) + make_diagram(inner_block, cluster, fill=False) + else: + make_diagram(inner_block, qcd, fill=False) + + else: + raise ValueError(f"Don't know how to draw block of type {type(block)}.") + + return qcd + + +def _make_cluster( + qcd: QuantumCircuitDiagram, labels: list[str], start: int, stop: int, attrs: dict +) -> None: + """Construct a cluster with the list of labels centered vertically (in terms of wires). 
+ If there are fewer wires than labels, plot all lables in one line, assuming that the first + element in `labels` is the block type.""" + N = stop - start + + # draw labels line by line + if N > len(labels): + cluster = qcd.create_cluster("", show=False, **attrs) + before = (N - len(labels)) // 2 + after = N - len(labels) - before + lines = ["" for _ in range(before)] + labels + ["" for _ in range(after)] + for i, label in zip(range(start, stop), lines): + _attrs = deepcopy(qcd.theme.get_node_attr()) + _attrs["shape"] = "none" + _attrs["style"] = "rounded" + cluster.show = True + cluster.create_node(i, label=label, **_attrs) + cluster.show = False + + # draw all labels in one line if there are too few wires + else: + cluster = qcd.create_cluster("", show=False, **attrs) + label = f"{labels[0]}({', '.join(s.replace(' ', '') for s in labels[1:])})" + for i in range(start, stop): + if i == ((stop - start) // 2 + start): + _attrs = deepcopy(qcd.theme.get_node_attr()) + _attrs["shape"] = "none" + _attrs["style"] = "rounded" + cluster.show = True + cluster.create_node(i, label=label, **_attrs) + cluster.show = False + else: + cluster.create_node(i, label="I", **qcd.theme.get_node_attr()) diff --git a/qadence/draw/vizbackend.py b/qadence/draw/vizbackend.py new file mode 100644 index 000000000..cbabd7b6d --- /dev/null +++ b/qadence/draw/vizbackend.py @@ -0,0 +1,432 @@ +from __future__ import annotations + +import uuid +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union + +from graphviz import Graph + +from .themes import BaseTheme, Black, Dark, Light, White + + +class QuantumCircuitDiagram: + """ + This class plots a quantum circuit using Graphviz. 
+ """ + + __valid_layouts = ["LR", "TB"] + __themes = {"light": Light, "dark": Dark, "black": Black, "white": White} + + __default_graph_attr = { + "compound": "true", # This helps to draw edges when clusters do not have to show + "splines": "false", # This helps edges perpendicular to wires to occur on the same axis + } + + def __init__( + self, + layout: str = "LR", + nb_wires: int = 1, + theme: BaseTheme | str = "light", + measurement: bool = False, + **kwargs: Any, + ): + self._layout = self._validate_layout(layout) + self.nb_wires = self._validate_nb_wires(nb_wires) + self.theme = self._validate_theme(theme) + self.graph = Graph() + self.graph.attr(**kwargs) + # each wire is represented by a list of nodes + self._wires: List[List[Node]] = [[] for _ in range(self.nb_wires)] + self._clusters: List[Cluster] = [] + self._measurement: bool = measurement + self._start_nodes: List[Node] = [] + self._end_nodes: List[Node] = [] + + @staticmethod + def _validate_nb_wires(nb_wires: int) -> int: + if nb_wires < 1: + raise ValueError( + f"Invalid nb_wires {nb_wires}. Only positive integers greater " + f"than zero are supported." + ) + return nb_wires + + def _get_graph_attr(self) -> Dict: + graph_attr = { + "rankdir": self._layout, + } + graph_attr.update(self.__default_graph_attr) + graph_attr.update(self.theme.get_graph_attr()) + return graph_attr + + def _validate_layout(self, layout: str) -> str: + layout = layout.upper() + if layout not in self.__valid_layouts: + raise ValueError(f"Invalid layout {layout}. Supported: {self.__valid_layouts}") + return layout + + def _validate_theme(self, theme: BaseTheme | str) -> BaseTheme: + if isinstance(theme, BaseTheme): + return theme + + theme = theme.lower() + if theme not in self.__themes: + raise ValueError(f"Invalid theme {theme}. Supported: {list(self.__themes.keys())}") + return self.__themes[theme]() + + def _validate_wire(self, wire: int) -> int: + if wire >= self.nb_wires: + raise ValueError( + f"Invalid wire {wire}. 
This circuit has {self.nb_wires} wires numbered from 0 " + f"to {self.nb_wires - 1}" + ) + return wire + + def _validate_wires_couple(self, wire1: int, wire2: int) -> Tuple[int, int]: + """ + This helps to ensure we are not creating a swap gate or a control gate on the same wire. + """ + wire1 = self._validate_wire(wire1) + wire2 = self._validate_wire(wire2) + if wire1 == wire2: + raise ValueError("Invalid wires couple: they can't be the same.") + return wire1, wire2 + + def _create_start_node(self, wire: int) -> Node: + node = self.create_node( + wire, + append=False, + shape="plaintext", + label=f"{wire}", + color=self.theme.color, + fontcolor=self.theme.color, + fontname=self.theme.fontname, + fontsize=str(float(self.theme.fontsize) * 3 / 4), + ) + self._start_nodes.append(node) + node.generate() + return node + + def _create_end_node(self, wire: int) -> Node: + kwargs = {"label": "", "shape": "plaintext"} + if self._measurement: + kwargs.update({"image": self.theme.load_measurement_icon(), "fixedsize": "true"}) + node = self.create_node(wire, self, append=False, **kwargs) + self._end_nodes.append(node) + node.generate() + return node + + def _create_swap_departure_node( + self, wire: int, parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None + ) -> Node: + pw = self.theme.get_edge_attr()["penwidth"] + return self.create_node( + wire, + self if parent is None else parent, + shape="octagon", + width="0.03", + height="0.03", + style="filled", + color=self.theme.color, + fillcolor=self.theme.color, + penwidth="1", + label="", + ) + + def _create_swap_arrival_node( + self, + wire: int, + swap_departure_node: Node = None, + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + ) -> Node: + pw = self.theme.get_edge_attr()["penwidth"] + return self.create_node( + wire, + self if parent is None else parent, + swap_departure_node=swap_departure_node, + shape="octagon", + width="0.03", + height="0.03", + style="filled", + color=self.theme.color, + 
fillcolor=self.theme.color, + penwidth="1", + label="", + ) + + def _create_control_gate_departure_node( + self, + wire: int, + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + **kwargs: Any, + ) -> Node: + return self.create_node(wire=wire, parent=self if parent is None else parent, **kwargs) + + def _create_control_gate_arrival_node( + self, + wire: int, + control_gate_departure_node: Node, + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + ) -> None: + self.create_node( + wire=wire, + parent=self if parent is None else parent, + control_gate_departure_node=control_gate_departure_node, + label="", + shape="point", + width="0.2", + color=self.theme.color, + ) + + def _connect( + self, + node1: Node, + node2: Node, + **kwargs: str, + ) -> None: + """ + It creates an edge between node1 and node2 + """ + edge_attr = self.theme.get_edge_attr() + edge_attr.update(kwargs) + if ( + not getattr(node1.parent, "show", True) and node1.parent == node2.parent + ): # if we are connecting 2 nodes inside a cluster with show: false + edge_attr.update({"style": "invis"}) + else: + if not getattr(node1.parent, "show", True): + edge_attr.update({"ltail": node1.parent.get_name()}) + if not getattr(node2.parent, "show", True): + edge_attr.update({"lhead": node2.parent.get_name()}) + self.graph.edge(node1.id, node2.id, **edge_attr) + + def create_control_gate( + self, + from_wire: int, + to_wires: List[int], + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + **kwargs: Any, + ) -> None: + """ + A control gate consists of a node in a wire, and a list of nodes on other different wires, + all connected with vertical edges between them. 
+ """ + for to_wire in to_wires: + from_wire, to_wire = self._validate_wires_couple(from_wire, to_wire) + departure_node = self._create_control_gate_departure_node( + from_wire, parent=parent, **kwargs + ) + for to_wire in to_wires: + self._create_control_gate_arrival_node(to_wire, departure_node, parent=parent) + + def create_swap_gate( + self, + wire_1: int, + wire_2: int, + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + ) -> None: + """ + A swap consists in 4 invisible nodes, 2 invisible edges on the same wire, and 2 dotted + edges on the destination wires, empty edges on the others + """ + wire_1, wire_2 = self._validate_wires_couple(wire_1, wire_2) + departure_node_1 = self._create_swap_departure_node(wire_1, parent=parent) + departure_node_2 = self._create_swap_departure_node(wire_2, parent=parent) + self._create_swap_arrival_node(wire_2, swap_departure_node=departure_node_1, parent=parent) + self._create_swap_arrival_node(wire_1, swap_departure_node=departure_node_2, parent=parent) + + def create_node( + self, + wire: int, + parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None, + append: bool = True, + **kwargs: Union[str, Node, None], + ) -> Node: + """ + It creates a node on the specified wire for the Quantum Circuit Diagram or for a cluster + """ + self._validate_wire(wire) + node = Node(wire, self if parent is None else parent, **kwargs) + if append: + self._wires[wire].append(node) + return node + + def create_identity_node( + self, wire: int, parent: Optional[Union[QuantumCircuitDiagram, Cluster]] = None + ) -> "Node": + pw = self.theme.get_edge_attr()["penwidth"] + return self.create_node( + wire, + self if parent is None else parent, + shape="square", + label="", + width="0", + height="0", + style="filled", + color=self.theme.color, + penwidth=str(float(pw) * 0.5), + ) + + def create_cluster( + self, + label: str, + parent: Optional[Cluster] = None, + show: bool = True, + **kwargs: Any, + ) -> Cluster: + """ + A cluster 
is a tagged sub diagram of the Quantum Circuit Diagram. It's implemented using + Graphviz's sub graphs. + """ + cluster = Cluster( + self if parent is None else parent, + label, + show=show, + **kwargs, + ) + self._clusters.append(cluster) + return cluster + + def _build(self) -> None: + self.graph.graph_attr.update(self._get_graph_attr()) + # keep track of control gates to connect them after generation + control_gates: list[list[Node]] = [] + # keep track of swap gates to connect them after generation + swap_gates: list[list[Node]] = [] + for i, wire in enumerate(self._wires): + start_node = self._create_start_node(i) + prev_node = start_node + for j, node in enumerate(wire): + node.generate() + if getattr(node, "swap_departure_node", None): + self._connect(prev_node, node, style="invis") + else: + self._connect(prev_node, node) + prev_node = node + if getattr(node, "control_gate_departure_node", None): + control_gates.append([node.control_gate_departure_node, node]) # type:ignore + if getattr(node, "swap_departure_node", None): + swap_gates.append([node.swap_departure_node, node]) # type:ignore + end_node = self._create_end_node(i) + self._connect(wire[-1], end_node) + + for cluster in reversed(self._clusters): + cluster.generate() + + # Connect control and swap gates + for gate in control_gates: + self._connect(gate[0], gate[1], constraint="false") + for gate in swap_gates: + self._connect(gate[0], gate[1]) + + # Connect all starting and ending nodes with invisible edges, this keeps wires in order + def _linelink(nodes: list) -> None: + for i in range(1, len(nodes)): + self._connect(nodes[i - 1], nodes[i], constraint="false", style="invis") + + _linelink(self._start_nodes) + _linelink(self._end_nodes) + + @staticmethod + def _runtime() -> str: + try: + ipy_str = str(type(get_ipython())) # type: ignore[name-defined] # noqa + if "zmqshell" in ipy_str: + return "jupyter" + elif "terminal" in ipy_str: + return "ipython" + else: + raise + except Exception as e: + 
return "terminal" + + def show(self) -> Graph: + self._build() + if not self._runtime() == "jupyter": + self.graph.view() + return self.graph + + def savefig(self, filename: str) -> None: + fn = Path(filename) + self._build() + self.graph.format = fn.suffix[1:] + self.graph.render(str(fn.parent / fn.stem), view=False) + # self.graph.save(filename) + + +class Node: + def __init__( + self, + wire: int, + parent: Any, + **kwargs: Any, + ) -> None: + self.id = uuid.uuid4().hex + self.wire = wire + self.parent = parent + self.control_gate_departure_node = kwargs.pop("control_gate_departure_node", None) + self.swap_departure_node = kwargs.pop("swap_departure_node", None) + self.attrs = {"group": str(wire)} + self.attrs.update(kwargs) + + def generate(self) -> None: + self.parent.graph.node(self.id, **self.attrs) + + +class Cluster: + def __init__( + self, + parent: Any, + label: str, + show: bool = True, + **kwargs: str, + ) -> None: + self._id: str = uuid.uuid4().hex + self.parent: Union[QuantumCircuitDiagram, Cluster] = parent + self._label: str = label + graph_attr: Dict[str, str] = {"label": self._label} + graph_attr.update(kwargs) + self.graph: Graph = Graph(name=self.get_name(), graph_attr=graph_attr) + self.show: bool = show + + def __repr__(self) -> str: + return f"{self._id} - {self._label}" + + def _get_qcd(self) -> QuantumCircuitDiagram: + parent = self.parent + while isinstance(parent, Cluster): + parent = parent.parent + return parent + + @property + def theme(self) -> BaseTheme: + return self._get_qcd().theme + + @property + def nb_wires(self) -> int: + return self._get_qcd().nb_wires + + def get_name(self) -> str: + return f"cluster_{self._id}" + + def generate(self) -> None: + self.parent.graph.subgraph(self.graph) + + def create_node(self, wire: int, **kwargs: str) -> Node: + if not self.show: + kwargs.update({"style": "invis"}) + return self._get_qcd().create_node(wire, parent=self, append=True, **kwargs) + + def create_identity_node(self, wire: int) -> 
None: + self._get_qcd().create_identity_node(wire, self) + + def create_cluster(self, label: str, show: bool = True, **kwargs: Any) -> Cluster: + return self._get_qcd().create_cluster(label, parent=self, show=show, **kwargs) + + def create_control_gate( + self, from_wire: int, to_wires: List[int], label: str, **kwargs: Any + ) -> None: + self._get_qcd().create_control_gate(from_wire, to_wires, label=label, parent=self, **kwargs) + + def create_swap_gate(self, wire_1: int, wire_2: int) -> None: + self._get_qcd().create_swap_gate(wire_1, wire_2, self) diff --git a/qadence/errors/__init__.py b/qadence/errors/__init__.py new file mode 100644 index 000000000..dbc249b08 --- /dev/null +++ b/qadence/errors/__init__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from .errors import NotPauliBlockError, NotSupportedError, QadenceException + +# Modules to be automatically added to the qadence namespace +__all__ = [] # type: ignore diff --git a/qadence/errors/errors.py b/qadence/errors/errors.py new file mode 100644 index 000000000..e59b2c126 --- /dev/null +++ b/qadence/errors/errors.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +class QadenceException(Exception): + pass + + +class NotSupportedError(QadenceException): + pass + + +class NotPauliBlockError(QadenceException): + pass diff --git a/qadence/execution.py b/qadence/execution.py new file mode 100644 index 000000000..ecbeb2ff3 --- /dev/null +++ b/qadence/execution.py @@ -0,0 +1,264 @@ +from __future__ import annotations + +from collections import Counter +from functools import singledispatch +from typing import Any, Union + +from torch import Tensor, no_grad + +from qadence import backend_factory +from qadence.backend import BackendConfiguration, BackendName +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit +from qadence.register import Register +from qadence.types import DiffMode +from qadence.utils import Endianness + +# Modules to be automatically added to 
the qadence namespace +__all__ = ["run", "sample", "expectation"] + + +@singledispatch +def run( + x: Union[QuantumCircuit, AbstractBlock, Register, int], + *args: Any, + values: dict = {}, + state: Tensor = None, + backend: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> Tensor: + """Convenience wrapper for the `QuantumModel.run` method. This is a + `functools.singledispatch`ed function so it can be called with a number of different arguments. + See the examples of the [`expectation`][qadence.execution.expectation] function. This function + works exactly the same. + + Arguments: + x: Circuit, block, or (register+block) to run. + values: User-facing parameter dict. + state: Initial state. + backend: Name of the backend to run on. + endianness: The target device endianness. + configuration: The backend configuration. + + Returns: + A wavefunction + """ + raise ValueError(f"Cannot run {type(x)}") + + +@run.register +def _( + circuit: QuantumCircuit, + values: dict = {}, + state: Tensor = None, + backend: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> Tensor: + bknd = backend_factory(backend, configuration=configuration) + conv = bknd.convert(circuit) + with no_grad(): + return bknd.run( + circuit=conv.circuit, + param_values=conv.embedding_fn(conv.params, values), + state=state, + endianness=endianness, + ) + + +@run.register +def _(register: Register, block: AbstractBlock, **kwargs: Any) -> Tensor: + return run(QuantumCircuit(register, block), **kwargs) + + +@run.register +def _(n_qubits: int, block: AbstractBlock, **kwargs: Any) -> Tensor: + return run(Register(n_qubits), block, **kwargs) + + +@run.register +def _(block: AbstractBlock, **kwargs: Any) -> Tensor: + return run(Register(block.n_qubits), block, **kwargs) + + +@singledispatch +def sample( + x: 
Union[QuantumCircuit, AbstractBlock, Register, int], + *args: Any, + values: dict = {}, + state: Union[Tensor, None] = None, + n_shots: int = 100, + backend: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> list[Counter]: + """Convenience wrapper for the `QuantumModel.sample` method. This is a + `functools.singledispatch`ed function so it can be called with a number of different arguments. + See the examples of the [`expectation`][qadence.execution.expectation] function. This function + works exactly the same. + + Arguments: + x: Circuit, block, or (register+block) to run. + values: User-facing parameter dict. + state: Initial state. + n_shots: Number of shots per element in the batch. + backend: Name of the backend to run on. + endianness: The target device endianness. + configuration: The backend configuration. + + Returns: + A list of Counter instances with the sample results + """ + raise ValueError(f"Cannot sample from {type(x)}") + + +@sample.register +def _( + circuit: QuantumCircuit, + values: dict = {}, + state: Union[Tensor, None] = None, + n_shots: int = 100, + backend: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> list[Counter]: + bknd = backend_factory(backend, configuration=configuration) + conv = bknd.convert(circuit) + return bknd.sample( + circuit=conv.circuit, + param_values=conv.embedding_fn(conv.params, values), + n_shots=n_shots, + state=state, + endianness=endianness, + ) + + +@sample.register +def _(register: Register, block: AbstractBlock, **kwargs: Any) -> Tensor: + return sample(QuantumCircuit(register, block), **kwargs) + + +@sample.register +def _(n_qubits: int, block: AbstractBlock, **kwargs: Any) -> Tensor: + return sample(Register(n_qubits), block, **kwargs) + + +@sample.register +def _(block: AbstractBlock, **kwargs: Any) -> Tensor: 
+ reg = Register(block.n_qubits) + return sample(reg, block, **kwargs) + + +@singledispatch +def expectation( + x: Union[QuantumCircuit, AbstractBlock, Register, int], + observable: Union[list[AbstractBlock], AbstractBlock], + values: dict = {}, + state: Tensor = None, + backend: BackendName = BackendName.PYQTORCH, + diff_mode: Union[DiffMode, str, None] = None, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> Tensor: + """Convenience wrapper for the `QuantumModel.expectation` method. This is a + `functools.singledispatch`ed function so it can be called with a number of different arguments + (see in the examples). + + Arguments: + x: Circuit, block, or (register+block) to run. + observable: Observable(s) w.r.t. which the expectation is computed. + values: User-facing parameter dict. + state: Initial state. + backend: Name of the backend to run on. + diff_mode: Which differentiation mode to use. + endianness: The target device endianness. + configuration: The backend configuration. + + Returns: + A wavefunction + + + ```python exec="on" source="material-block" + from qadence import RX, Z, Register, QuantumCircuit, expectation + + reg = Register(1) + block = RX(0, 0.5) + observable = Z(0) + circ = QuantumCircuit(reg, block) + + # You can compute the expectation for a + # QuantumCircuit with a given observable. + expectation(circ, observable) + + # You can also use only a block. 
+ # In this case the register is constructed automatically to + # Register.line(block.n_qubits) + expectation(block, observable) + + # Or a register and block + expectation(reg, block, observable) + ```""" + + raise ValueError(f"Cannot execute {type(x)}") + + +@expectation.register +def _( + circuit: QuantumCircuit, + observable: Union[list[AbstractBlock], AbstractBlock], + values: dict = {}, + state: Tensor = None, + backend: BackendName = BackendName.PYQTORCH, + diff_mode: Union[DiffMode, str, None] = None, + endianness: Endianness = Endianness.BIG, + configuration: Union[BackendConfiguration, dict, None] = None, +) -> Tensor: + observable = observable if isinstance(observable, list) else [observable] + bknd = backend_factory(backend, configuration=configuration) + conv = bknd.convert(circuit, observable) + + def _expectation() -> Tensor: + return bknd.expectation( + circuit=conv.circuit, + observable=conv.observable, # type: ignore[arg-type] + param_values=conv.embedding_fn(conv.params, values), + state=state, + endianness=endianness, + ) + + # Do not compute gradients if no diff_mode is provided. 
+    # Do not differentiate when no diff_mode was requested.
+    if diff_mode is None:
+        with no_grad():
+            return _expectation()
+    else:
+        return _expectation()
+
+
+@expectation.register
+def _(
+    register: Register,
+    block: AbstractBlock,
+    observable: Union[list[AbstractBlock], AbstractBlock],
+    **kwargs: Any,
+) -> Tensor:
+    return expectation(QuantumCircuit(register, block), observable, **kwargs)
+
+
+@expectation.register
+def _(
+    n_qubits: int,
+    block: AbstractBlock,
+    observable: Union[list[AbstractBlock], AbstractBlock],
+    **kwargs: Any,
+) -> Tensor:
+    reg = Register(n_qubits)
+    return expectation(QuantumCircuit(reg, block), observable, **kwargs)
+
+
+@expectation.register
+def _(
+    block: AbstractBlock, observable: Union[list[AbstractBlock], AbstractBlock], **kwargs: Any
+) -> Tensor:
+    reg = Register(block.n_qubits)
+    return expectation(QuantumCircuit(reg, block), observable, **kwargs)
diff --git a/qadence/extensions.py b/qadence/extensions.py
new file mode 100644
index 000000000..dd564c443
--- /dev/null
+++ b/qadence/extensions.py
@@ -0,0 +1,99 @@
+from __future__ import annotations
+
+import importlib
+from string import Template
+from typing import TypeVar
+
+from qadence.backend import Backend
+from qadence.blocks import (
+    AbstractBlock,
+)
+from qadence.types import BackendName, DiffMode
+
+TAbstractBlock = TypeVar("TAbstractBlock", bound=AbstractBlock)
+
+backends_namespace = Template("qadence.backends.$name")
+
+
+def _available_backends() -> dict:
+    """Return a mapping of importable backend names to their Backend class."""
+    res = {}
+    for backend in BackendName.list():
+        module_path = f"qadence.backends.{backend}.backend"
+        try:
+            module = importlib.import_module(module_path)
+            BackendCls = getattr(module, "Backend")
+            res[backend] = BackendCls
+        except (ImportError, ModuleNotFoundError):
+            pass
+
+    return res
+
+
+def _supported_gates(name: BackendName | str) -> list[TAbstractBlock]:
+    """Return the operation classes supported by the backend `name`."""
+    from qadence import operations
+
+    name = str(BackendName(name).name.lower())
+
+    # NOTE: previously `except KeyError: pass` swallowed the error and left
+    # `module` unbound (NameError); import failures raise ModuleNotFoundError.
+    try:
+        backend_namespace = backends_namespace.substitute(name=name)
+        module = importlib.import_module(backend_namespace)
+    except (KeyError, ModuleNotFoundError) as e:
+        raise ValueError(f"The backend '{name}' is not available.") from e
+    _supported_gates = getattr(module, "supported_gates", None)
+    assert (
+        _supported_gates is not None
+    ), f"{name} backend should define a 'supported_gates' variable"
+    return [getattr(operations, gate) for gate in _supported_gates]
+
+
+def _gpsr_fns() -> dict:
+    # avoid circular import
+    from qadence.backends.gpsr import general_psr
+
+    return {DiffMode.GPSR: general_psr}
+
+
+def _validate_diff_mode(backend: Backend, diff_mode: DiffMode) -> None:
+    if not backend.supports_ad and diff_mode == DiffMode.AD:
+        raise TypeError(f"Backend {backend.name} does not support diff_mode {DiffMode.AD}.")
+
+
+def _set_backend_config(backend: Backend, diff_mode: DiffMode) -> None:
+    """Set the backend configuration consistently with the differentiation mode.
+
+    Args:
+        backend (Backend): backend whose configuration is mutated in-place.
+        diff_mode (DiffMode): differentiation mode to validate and configure for.
+    """
+
+    _validate_diff_mode(backend, diff_mode)
+
+    if not backend.supports_ad or diff_mode != DiffMode.AD:
+        backend.config._use_gate_params = True
+
+    # (1) When using PSR with any backend or (2) we use the backends Pulser or Braket,
+    # we have to use gate-level parameters
+
+    else:
+        assert diff_mode == DiffMode.AD
+        backend.config._use_gate_params = False
+        # We can use expression-level parameters for AD.
+        if backend.name == BackendName.PYQTORCH:
+            backend.config.use_single_qubit_composition = True
+
+        # For pyqtorch, we enable some specific transpilation passes.
+
+
+# if proprietary qadence_plus is available import the
+# right function since more backends are supported
+try:
+    module = importlib.import_module("qadence_extensions.extensions")
+    available_backends = getattr(module, "available_backends")
+    supported_gates = getattr(module, "supported_gates")
+    get_gpsr_fns = getattr(module, "gpsr_fns")
+    set_backend_config = getattr(module, "set_backend_config")
+except ModuleNotFoundError:
+    available_backends = _available_backends
+    supported_gates = _supported_gates
+    get_gpsr_fns = _gpsr_fns
+    set_backend_config = _set_backend_config
diff --git a/qadence/logger.py b/qadence/logger.py
new file mode 100644
index 000000000..3963e2d94
--- /dev/null
+++ b/qadence/logger.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import logging
+import os
+import sys
+
+logging_levels = {
+    "DEBUG": logging.DEBUG,
+    "INFO": logging.INFO,
+    "WARNING": logging.WARNING,
+    "ERROR": logging.ERROR,
+    "CRITICAL": logging.CRITICAL,
+}
+
+LOG_STREAM_HANDLER = sys.stdout
+
+DEFAULT_LOGGING_LEVEL = logging.INFO
+
+# FIXME: introduce a better handling of the configuration
+LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", "warning").upper()
+
+
+def get_logger(name: str) -> logging.Logger:
+    """Return a logger configured with the qadence stream handler.
+
+    The handler is attached only when the logger has none yet, so repeated
+    calls with the same name no longer stack duplicate handlers (which
+    multiplied every emitted log record).
+    """
+    logger: logging.Logger = logging.getLogger(name)
+
+    level = logging_levels.get(LOGGING_LEVEL, DEFAULT_LOGGING_LEVEL)
+    logger.setLevel(level)
+
+    if not logger.handlers:
+        formatter = logging.Formatter(
+            "%(levelname) -5s %(asctime)s: %(message)s", "%Y-%m-%d %H:%M:%S"
+        )
+        sh = logging.StreamHandler(LOG_STREAM_HANDLER)
+        sh.setFormatter(formatter)
+        logger.addHandler(sh)
+
+    return logger
diff --git a/qadence/measurements/__init__.py b/qadence/measurements/__init__.py
new file mode 100644
index 000000000..dde75742f
--- /dev/null
+++ b/qadence/measurements/__init__.py
@@ -0,0 +1,6 @@
+from __future__ import annotations
+
+from .protocols import Measurements
+
+# Modules to be automatically added to the qadence namespace
+__all__ = 
["Measurements"] diff --git a/qadence/measurements/protocols.py b/qadence/measurements/protocols.py new file mode 100644 index 000000000..cd2ea6fa8 --- /dev/null +++ b/qadence/measurements/protocols.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import importlib +from dataclasses import dataclass +from typing import Callable, cast + +PROTOCOL_TO_MODULE = { + "tomography": "qadence.measurements.tomography", + "shadow": "qadence.measurements.shadow", +} + + +# TODO: make this a StrEnum to keep consistency with the rest of the interface +@dataclass +class Measurements: + TOMOGRAPHY = "tomography" + SHADOW = "shadow" + + def __init__(self, protocol: str, options: dict) -> None: + self.protocol: str = protocol + self.options: dict = options + + def get_measurement_fn(self) -> Callable: + try: + module = importlib.import_module(PROTOCOL_TO_MODULE[self.protocol]) + except KeyError: + ImportError(f"The module corresponding to the protocol {self.protocol} is not found.") + fn = getattr(module, "compute_expectation") + return cast(Callable, fn) + + def _to_dict(self) -> dict: + return {"protocol": self.protocol, "options": self.options} + + @classmethod + def _from_dict(cls, d: dict) -> Measurements | None: + if d: + return cls(d["protocol"], **d["options"]) + return None diff --git a/qadence/measurements/shadow.py b/qadence/measurements/shadow.py new file mode 100644 index 000000000..f49e7c56e --- /dev/null +++ b/qadence/measurements/shadow.py @@ -0,0 +1,356 @@ +from __future__ import annotations + +from collections import Counter +from functools import reduce + +import numpy as np +import torch +from torch import Tensor + +from qadence import BackendName, DiffMode +from qadence.backends import backend_factory +from qadence.blocks.abstract import AbstractBlock +from qadence.blocks.block_to_tensor import ( + HMAT, + IMAT, + SDAGMAT, + ZMAT, + block_to_tensor, +) +from qadence.blocks.composite import CompositeBlock +from qadence.blocks.primitive import 
PrimitiveBlock +from qadence.blocks.utils import get_pauli_blocks, unroll_block_with_scaling +from qadence.circuit import QuantumCircuit +from qadence.operations import X, Y, Z, chain, kron +from qadence.states import one_state, zero_state +from qadence.types import Endianness + +pauli_gates = [X, Y, Z] + + +UNITARY_TENSOR = [ + ZMAT @ HMAT, + SDAGMAT.squeeze(dim=0) @ HMAT, + IMAT, +] + + +# Projector matrices in Big-Endian convention. +PROJECTOR_MATRICES = { + "0": zero_state(n_qubits=1).t() @ zero_state(n_qubits=1), + "1": one_state(n_qubits=1).t() @ one_state(n_qubits=1), +} + + +def identity(n_qubits: int) -> Tensor: + return torch.eye(2**n_qubits, dtype=torch.complex128) + + +def _max_observable_weight(observable: AbstractBlock) -> int: + """ + Get the maximal weight for the given observable. + + The weight is a measure of the locality of the observable, + a count of the number of qubits on which the observable acts + non-trivially. + + See https://arxiv.org/pdf/2002.08953.pdf + Supplementary Material 1 and Eq. (S17). 
+ """ + pauli_decomposition = unroll_block_with_scaling(observable) + weights = [] + for pauli_term in pauli_decomposition: + weight = 0 + block = pauli_term[0] + if isinstance(block, PrimitiveBlock): + if isinstance(block, (X, Y, Z)): + weight += 1 + weights.append(weight) + else: + pauli_blocks = get_pauli_blocks(block=block) + weight = 0 + for block in pauli_blocks: + if isinstance(block, (X, Y, Z)): + weight += 1 + weights.append(weight) + return max(weights) + + +def maximal_weight(observables: list[AbstractBlock]) -> int: + """Return the maximal weight if a list of observables is provided.""" + return max([_max_observable_weight(observable=observable) for observable in observables]) + + +def number_of_samples( + observables: list[AbstractBlock], accuracy: float, confidence: float +) -> tuple[int, ...]: + """ + Estimate an optimal shot budget and a shadow partition size + to guarantee given accuracy on all observables expectation values + within 1 - confidence range. + + See https://arxiv.org/pdf/2002.08953.pdf + Supplementary Material 1 and Eqs. (S23)-(S24). + """ + max_k = maximal_weight(observables=observables) + N = round(3**max_k * 34.0 / accuracy**2) + K = round(2.0 * np.log(2.0 * len(observables) / confidence)) + return N, K + + +def local_shadow(sample: Counter, unitary_ids: list) -> Tensor: + """ + Compute local shadow by inverting the quantum channel for each projector state. + + See https://arxiv.org/pdf/2002.08953.pdf + Supplementary Material 1 and Eqs. (S17,S44). + + Expects a sample bitstring in ILO. 
+ """ + bitstring = list(sample.keys())[0] + local_density_matrices = [] + for bit, unitary_id in zip(bitstring, unitary_ids): + proj_mat = PROJECTOR_MATRICES[bit] + unitary_tensor = UNITARY_TENSOR[unitary_id].squeeze(dim=0) + local_density_matrices.append( + 3 * (unitary_tensor.adjoint() @ proj_mat @ unitary_tensor) - identity(1) + ) + if len(local_density_matrices) == 1: + return local_density_matrices[0] + else: + return reduce(torch.kron, local_density_matrices) + + +def classical_shadow( + shadow_size: int, + circuit: QuantumCircuit, + param_values: dict, + state: Tensor | None = None, + backend_name: BackendName = BackendName.PYQTORCH, + # FIXME: Changed below from Little to Big, double-check when Roland is back + endianness: Endianness = Endianness.BIG, +) -> list: + shadow: list = [] + backend = backend_factory(backend=backend_name, diff_mode=DiffMode.GPSR) + # TODO: Parallelize embarrassingly parallel loop. + for _ in range(shadow_size): + unitary_ids = np.random.randint(0, 3, size=(1, circuit.n_qubits))[0] + random_unitary = [ + pauli_gates[unitary_ids[qubit]](qubit) for qubit in range(circuit.n_qubits) + ] + if len(random_unitary) == 1: + random_unitary_block = random_unitary[0] + else: + random_unitary_block = kron(*random_unitary) + rotated_circuit = QuantumCircuit( + circuit.n_qubits, + chain(circuit.block, random_unitary_block), + ) + # Reverse endianness to get sample bitstrings in ILO. + conv_circ = backend.circuit(rotated_circuit) + samples = backend.sample( + circuit=conv_circ, + param_values=param_values, + n_shots=1, + state=state, + endianness=endianness, + ) + batched_shadow = [] + for batch in samples: + batched_shadow.append(local_shadow(sample=batch, unitary_ids=unitary_ids)) + shadow.append(batched_shadow) + + # Reshape the shadow by batches of samples instead of samples of batches. + # FIXME: Improve performance. 
+ return [list(s) for s in zip(*shadow)] + + +def reconstruct_state(shadow: list) -> Tensor: + """Reconstruct the state density matrix for the given shadow.""" + return reduce(torch.add, shadow) / len(shadow) + + +def compute_traces( + qubit_support: tuple, + N: int, + K: int, + shadow: list, + observable: AbstractBlock, + endianness: Endianness = Endianness.BIG, +) -> list: + floor = int(np.floor(N / K)) + traces = [] + # TODO: Parallelize embarrassingly parallel loop. + for k in range(K): + reconstructed_state = reconstruct_state(shadow=shadow[k * floor : (k + 1) * floor]) + # Reshape the observable matrix to fit the density matrix dimensions + # by filling indentites. + # Please note the endianness is also flipped to get results in LE. + # FIXME: Changed below from Little to Big, double-check when Roland is back + # FIXME: Correct these comments. + trace = ( + ( + block_to_tensor( + block=observable, + qubit_support=qubit_support, + endianness=Endianness.BIG, + ).squeeze(dim=0) + @ reconstructed_state + ) + .trace() + .real + ) + traces.append(trace) + return traces + + +def estimators( + qubit_support: tuple, + N: int, + K: int, + shadow: list, + observable: AbstractBlock, + endianness: Endianness = Endianness.BIG, +) -> Tensor: + """ + Return estimators (traces of observable times mean density matrix) + for K equally-sized shadow partitions. + + See https://arxiv.org/pdf/2002.08953.pdf + Algorithm 1. + """ + # If there is no Pauli-Z operator in the observable, + # the sample can't "hit" that measurement. 
+ if isinstance(observable, PrimitiveBlock): + if type(observable) == Z: + traces = compute_traces( + qubit_support=qubit_support, + N=N, + K=K, + shadow=shadow, + observable=observable, + endianness=endianness, + ) + else: + traces = [torch.tensor(0.0)] + elif isinstance(observable, CompositeBlock): + if Z in observable: + traces = compute_traces( + qubit_support=qubit_support, + N=N, + K=K, + shadow=shadow, + observable=observable, + endianness=endianness, + ) + else: + traces = [torch.tensor(0.0)] + return torch.tensor(traces, dtype=torch.get_default_dtype()) + + +def estimations( + circuit: QuantumCircuit, + observables: list[AbstractBlock], + param_values: dict, + shadow_size: int | None = None, + accuracy: float = 0.1, + confidence: float = 0.1, + state: Tensor | None = None, + backend_name: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, +) -> Tensor: + """Compute expectation values for all local observables using median of means.""" + # N is the estimated shot budget for the classical shadow to + # achieve desired accuracy for all L = len(observables) within 1 - confidence probablity. + # K is the size of the shadow partition. + N, K = number_of_samples(observables=observables, accuracy=accuracy, confidence=confidence) + if shadow_size is not None: + N = shadow_size + shadow = classical_shadow( + shadow_size=N, + circuit=circuit, + param_values=param_values, + state=state, + backend_name=backend_name, + endianness=endianness, + ) + estimations = [] + for observable in observables: + pauli_decomposition = unroll_block_with_scaling(observable) + batch_estimations = [] + for batch in shadow: + pauli_term_estimations = [] + for pauli_term in pauli_decomposition: + # Get the estimators for the current Pauli term. + # This is a tensor of size K. 
+                estimation = estimators(
+                    qubit_support=circuit.block.qubit_support,
+                    N=N,
+                    K=K,
+                    shadow=batch,
+                    observable=pauli_term[0],
+                    endianness=endianness,
+                )
+                # Compute the median of means for the current Pauli term.
+                # Weigh the median by the Pauli term scaling.
+                pauli_term_estimations.append(torch.median(estimation) * pauli_term[1])
+            # Sum the expectations for each Pauli term to get the expectation for the
+            # current batch.
+            batch_estimations.append(sum(pauli_term_estimations))
+        estimations.append(batch_estimations)
+    return torch.transpose(torch.tensor(estimations, dtype=torch.get_default_dtype()), 1, 0)
+
+
+def compute_expectation(
+    circuit: QuantumCircuit,
+    observables: list[AbstractBlock],
+    param_values: dict,
+    options: dict,
+    state: Tensor | None = None,
+    backend_name: BackendName = BackendName.PYQTORCH,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    """
+    Construct a classical shadow of a state to estimate observable expectation values.
+
+    Args:
+        circuit (QuantumCircuit): a circuit to prepare the state.
+        observables (list[AbstractBlock]): a list of observables
+            to estimate the expectation values from.
+        param_values (dict): a dict of values to substitute the
+            symbolic parameters for.
+        options (dict): a dict of options for the measurement protocol.
+            Here, shadow_size (int), accuracy (float) and confidence (float) are supported.
+        state (Tensor | None): an initial input state.
+        backend_name (BackendName): a backend name to retrieve computations from.
+
+    Returns:
+        expectations (Tensor): an estimation of the expectation values.
+    """
+    if not isinstance(observables, list):
+        raise TypeError(
+            "Observables must be of type <class 'list'>. Got {}.".format(
+                type(observables)
+            )
+        )
+    shadow_size = options.get("shadow_size", None)
+    accuracy = options.get("accuracy", None)
+    if shadow_size is None and accuracy is None:
+        raise KeyError(
+            "Shadow protocol requires either an option "
+            "'shadow_size' of type 'int' or 'accuracy' of type 'float'."
+        )
+    confidence = options.get("confidence", None)
+    if confidence is None:
+        raise KeyError("Shadow protocol requires a 'confidence' kwarg of type 'float'.")
+    return estimations(
+        circuit=circuit,
+        observables=observables,
+        param_values=param_values,
+        shadow_size=shadow_size,
+        accuracy=accuracy,
+        confidence=confidence,
+        state=state,
+        backend_name=backend_name,
+        endianness=endianness,
+    )
diff --git a/qadence/measurements/tomography.py b/qadence/measurements/tomography.py
new file mode 100644
index 000000000..2a8aa57ff
--- /dev/null
+++ b/qadence/measurements/tomography.py
@@ -0,0 +1,161 @@
+from __future__ import annotations
+
+from collections import Counter
+from functools import reduce
+
+import numpy as np
+import torch
+from torch import Tensor
+
+from qadence import BackendName, DiffMode
+from qadence.backends import backend_factory
+from qadence.blocks import AbstractBlock, PrimitiveBlock
+from qadence.blocks.utils import unroll_block_with_scaling
+from qadence.circuit import QuantumCircuit
+from qadence.operations import H, SDagger, X, Y, Z, chain
+from qadence.parameters import evaluate
+from qadence.utils import Endianness
+
+
+def get_qubit_indices_for_op(pauli_term: tuple, op: PrimitiveBlock | None = None) -> list[int]:
+    """Get qubit indices for the given op in the Pauli term if any."""
+    indices = []
+    blocks = getattr(pauli_term[0], "blocks", None)
+    if blocks is not None:
+        for block in blocks:
+            if op is None:
+                indices.append(block.qubit_support[0])
+            if isinstance(block, type(op)):
+                indices.append(block.qubit_support[0])
+    else:
+        block = pauli_term[0]
+        if op is None:
+            indices.append(block.qubit_support[0])
+        if isinstance(block, type(op)):
+            indices.append(block.qubit_support[0])
+    return indices
+
+
+def rotate(circuit: QuantumCircuit, pauli_term: tuple) -> QuantumCircuit:
+    """Rotate circuit to measurement basis and return the qubit support."""
+    rotations = []
+
+    # Mypy expects concrete types. Although there definitely should be
+    # a better way to pass the operation type.
+    for op, gate in [(X(0), Z), (Y(0), SDagger)]:
+        qubit_indices = get_qubit_indices_for_op(pauli_term, op=op)
+        for index in qubit_indices:
+            rotations.append(gate(index) * H(index))
+    rotated_block = chain(circuit.block, *rotations)
+    return QuantumCircuit(circuit.register, rotated_block)
+
+
+def get_counts(samples: list, support: list) -> list:
+    """Marginalise the probability mass function to the support."""
+    counts = []
+    for sample in samples:
+        sample_counts = []
+        for k, v in sample.items():
+            sample_counts.append(Counter({"".join([k[i] for i in support]): sample[k]}))
+        reduced_counts = reduce(lambda x, y: x + y, sample_counts)
+        counts.append(reduced_counts)
+    return counts
+
+
+def empirical_average(samples: list, support: list) -> Tensor:
+    """Compute the empirical average."""
+    counters = get_counts(samples, support)
+    expectations = []
+    n_shots = np.sum(list(counters[0].values()))
+    parity = -1
+    for counter in counters:
+        counter_exps = []
+        for bitstring, count in counter.items():
+            counter_exps.append(count * parity ** (np.sum([int(bit) for bit in bitstring])))
+        expectations.append(np.sum(counter_exps) / n_shots)
+    return torch.tensor(expectations)
+
+
+def iterate_pauli_decomposition(
+    circuit: QuantumCircuit,
+    param_values: dict,
+    pauli_decomposition: list,
+    n_shots: int,
+    state: Tensor | None = None,
+    backend_name: BackendName = BackendName.PYQTORCH,
+    endianness: Endianness = Endianness.BIG,
+) -> Tensor:
+    """Estimate total expectation value by averaging all Pauli terms."""
+
+    estimated_values = []
+
+    backend = backend_factory(backend=backend_name, diff_mode=DiffMode.GPSR)
+    for pauli_term in pauli_decomposition:
+        if pauli_term[0].is_identity:
+            estimated_values.append(evaluate(pauli_term[1], as_torch=True))
+        else:
+            # Get the full qubit support for the Pauli term.
+ # Note: duplicates must be kept here to allow for + # observables chaining multiple operations on the same qubit + # such as `b = chain(Z(0), Z(0))` + support = get_qubit_indices_for_op(pauli_term) + # Rotate the circuit according to the given observable term. + rotated_circuit = rotate(circuit=circuit, pauli_term=pauli_term) + # Use the low-level backend API to avoid embedding of parameters + # already performed at the higher QuantumModel level. + # Therefore, parameters passed here have already been embedded. + conv_circ = backend.circuit(rotated_circuit) + samples = backend.sample( + circuit=conv_circ, + param_values=param_values, + n_shots=n_shots, + state=state, + endianness=endianness, + ) + estim_values = empirical_average(samples=samples, support=support) + # TODO: support for parametric observables to be tested + estimated_values.append(estim_values * evaluate(pauli_term[1])) + res = torch.sum(torch.stack(estimated_values), axis=0) + # Allow for automatic differentiation. + res.requires_grad = True + return res + + +def compute_expectation( + circuit: QuantumCircuit, + observables: list[AbstractBlock], + param_values: dict, + options: dict, + state: Tensor | None = None, + backend_name: BackendName = BackendName.PYQTORCH, + endianness: Endianness = Endianness.BIG, +) -> Tensor: + """Basic tomography protocol with rotations + + Given a circuit and a list of observables, apply basic tomography protocol to estimate + the expectation values. + """ + if not isinstance(observables, list): + raise TypeError( + "Observables must be of type . 
Got {}.".format( + type(observables) + ) + ) + n_shots = options.get("n_shots") + if n_shots is None: + raise KeyError("Tomography protocol requires a 'n_shots' kwarg of type 'int'.") + estimated_values = [] + for observable in observables: + pauli_decomposition = unroll_block_with_scaling(observable) + estimated_values.append( + iterate_pauli_decomposition( + circuit=circuit, + param_values=param_values, + pauli_decomposition=pauli_decomposition, + n_shots=n_shots, + state=state, + backend_name=backend_name, + endianness=endianness, + ) + ) + return torch.transpose(torch.vstack(estimated_values), 1, 0) diff --git a/qadence/ml_tools/__init__.py b/qadence/ml_tools/__init__.py new file mode 100644 index 000000000..6c045f799 --- /dev/null +++ b/qadence/ml_tools/__init__.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +from .config import TrainConfig +from .data import DictDataLoader +from .optimize_step import optimize_step as default_optimize_step +from .parameters import get_parameters, num_parameters, set_parameters +from .printing import print_metrics, write_tensorboard +from .saveload import load_checkpoint, load_model, write_checkpoint +from .tensors import numpy_to_tensor, promote_to, promote_to_tensor +from .train_grad import train as train_with_grad +from .train_no_grad import train as train_gradient_free + +# Modules to be automatically added to the qadence namespace +__all__ = [ + "TrainConfig", + "DictDataLoader", + "train_with_grad", + "train_gradient_free", + "load_checkpoint", + "write_checkpoint", +] diff --git a/qadence/ml_tools/config.py b/qadence/ml_tools/config.py new file mode 100644 index 000000000..dfbb71199 --- /dev/null +++ b/qadence/ml_tools/config.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +import datetime +import os +from dataclasses import dataclass +from pathlib import Path +from typing import Callable, Optional + + +@dataclass +class TrainConfig: + """Default config for the train function. 
The default value of + each field can be customize with the constructor: + + ```python exec="on" source="material-block" result="json" + from qadence.ml_tools import TrainConfig + c = TrainConfig(folder="/tmp/train") + print(str(c)) # markdown-exec: hide + ``` + """ + + max_iter: int = 10000 + """Number of training iterations.""" + print_every: int = 1000 + """Print loss/metrics.""" + write_every: int = 50 + """Write tensorboard logs""" + checkpoint_every: int = 5000 + """Write model/optimizer checkpoint""" + folder: Optional[Path] = None + """Checkpoint/tensorboard logs folder""" + create_subfolder_per_run: bool = False + """Checkpoint/tensorboard logs stored in subfolder with name `_`. + Prevents continuing from previous checkpoint, useful for fast prototyping.""" + checkpoint_best_only: bool = False + """Write model/optimizer checkpoint only if a metric has improved""" + validation_criterion: Optional[Callable] = None + """A boolean function which evaluates a given validation metric is satisfied""" + trainstop_criterion: Optional[Callable] = None + """A boolean function which evaluates a given training stopping metric is satisfied""" + batch_size: int = 1 + """The batch_size to use when passing a list/tuple of torch.Tensors.""" + + def __post_init__(self) -> None: + if self.folder: + if isinstance(self.folder, str): # type: ignore [unreachable] + self.folder = Path(self.folder) # type: ignore [unreachable] + if self.create_subfolder_per_run: + subfoldername = ( + datetime.datetime.now().strftime("%Y%m%dT%H%M%S") + "_" + hex(os.getpid())[2:] + ) + self.folder = self.folder / subfoldername + if self.trainstop_criterion is None: + self.trainstop_criterion = lambda x: x <= self.max_iter + if self.validation_criterion is None: + self.validation_criterion = lambda x: False diff --git a/qadence/ml_tools/data.py b/qadence/ml_tools/data.py new file mode 100644 index 000000000..17d9739ba --- /dev/null +++ b/qadence/ml_tools/data.py @@ -0,0 +1,32 @@ +from __future__ import 
annotations + +from dataclasses import dataclass + +import torch +from torch.utils.data import DataLoader, TensorDataset + + +@dataclass +class DictDataLoader: + """This class only holds a dictionary of `DataLoader`s and samples from them""" + + dataloaders: dict[str, DataLoader] + + # this flag indicates that the dictionary contains dataloaders + # which can automatically iterate at each epoch without having to + # redefine the iterator itself (so basically no StopIteration exception + # will occur). This is the case of the Flow library where the dataloader + # is actually mostly used, so it is set to True by default + has_automatic_iter: bool = True + + def __iter__(self) -> DictDataLoader: + self.iters = {key: iter(dl) for key, dl in self.dataloaders.items()} + return self + + def __next__(self) -> dict[str, torch.Tensor]: + return {key: next(it) for key, it in self.iters.items()} + + +def to_dataloader(x: torch.Tensor, y: torch.Tensor, batch_size: int = 1) -> DataLoader: + """Convert two torch tensors x and y to a Dataloader.""" + return DataLoader(TensorDataset(x, y), batch_size=batch_size) diff --git a/qadence/ml_tools/models.py b/qadence/ml_tools/models.py new file mode 100644 index 000000000..5de9eed12 --- /dev/null +++ b/qadence/ml_tools/models.py @@ -0,0 +1,267 @@ +from __future__ import annotations + +from typing import Any, Counter, List + +import numpy as np +import torch +from torch import Tensor +from torch.nn import Parameter as TorchParam + +from qadence.backend import ConvertedObservable +from qadence.logger import get_logger +from qadence.measurements import Measurements +from qadence.ml_tools import promote_to_tensor +from qadence.models import QNN, QuantumModel +from qadence.utils import Endianness + +logger = get_logger(__name__) + + +def _set_fixed_operation( + dim: int, + x: float | np.ndarray | Tensor | None = None, + operation_name: str = "scale", +) -> Tensor: + dim = dim if dim > 0 else 1 + if x is None: + if operation_name == "shift": + 
x = torch.zeros(dim) + elif operation_name == "scale": + x = torch.ones(dim) + else: + NotImplementedError + res = promote_to_tensor(x, requires_grad=False).squeeze(0) + assert ( + res.numel() == dim + ), f"Number of {operation_name} values is {res.numel()}\ + and does not match number of dimensions = {dim}." + return res + + +class TransformedModule(torch.nn.Module): + """ + This class accepts a torch.nn.Module or a QuantumModel/QNN and wraps it with + either non-trainble or trainable scaling and shifting parameters for both input and output. + When given a torch.nn.Module, in_features and out_features need to be passed. + + Args: + model: The original model to transform. + in_features: The number of input dimensions of the model. + out_features: The number of output dimensions of the model. + input_scaling: The rescaling factor for the model input. Defaults to None. + input_shifting: The translation factor for the model input. Defaults to None. + output_scaling: The rescaling factor for the model output. Defaults to None. + output_shifting: The translation factor for the model output. Defaults to None. 
+ + Example: + ``` + import torch + from torch.nn import Parameter as TorchParam + from qadence.models import QNN, TransformedModule + from qadence.circuit import QuantumCircuit + from qadence.blocks import chain + from qadence.constructors import hamiltonian_factory, hea + from qadence import Parameter, QuantumCircuit, Z + + n_qubits = 2 + phi = Parameter("phi", trainable=False) + fm = chain(*[RY(i, phi) for i in range(n_qubits)]) + ansatz = hea(n_qubits=n_qubits, depth=3) + observable = hamiltonian_factory(n_qubits, detuning = Z) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + + model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + assert not torch.isnan(pred) + + transformed_model = TransformedModule( + model=model, + in_features=None, + out_features=None, + input_scaling=TorchParam(torch.tensor(1.0)), + input_shifting=0.0, + output_scaling=1.0, + output_shifting=TorchParam(torch.tensor(0.0)) + ) + pred_transformed = transformed_model(input_values) + ``` + """ + + def __init__( + self, + model: torch.nn.Module | QuantumModel | QNN, + in_features: int | None = None, + out_features: int | None = None, + input_scaling: TorchParam | float | int | torch.Tensor | None = None, + input_shifting: TorchParam | float | int | torch.Tensor | None = None, + output_scaling: TorchParam | float | int | torch.Tensor | None = None, + output_shifting: TorchParam | float | int | torch.Tensor | None = None, + ) -> None: + super().__init__() + self.model = model + if in_features is None and out_features is None: + assert isinstance(model, (QuantumModel, QNN)) + self.in_features = model.in_features + self.out_features = model.out_features if model.out_features else 1 + else: + self.in_features = in_features # type: ignore[assignment] + self.out_features = out_features # type: ignore[assignment] + if isinstance(input_scaling, (float, int)) or 
input_scaling is None: + self.register_buffer( + "_input_scaling", + _set_fixed_operation(self.in_features, input_scaling, "scale"), + ) + else: + self._input_scaling = input_scaling + if isinstance(input_shifting, (float, int)) or input_shifting is None: + self.register_buffer( + "_input_shifting", + _set_fixed_operation(self.in_features, input_shifting, "shift"), + ) + else: + self._input_shifting = input_shifting + if isinstance(output_scaling, (float, int)) or output_scaling is None: + self.register_buffer( + "_output_scaling", + _set_fixed_operation(self.out_features, output_scaling, "scale"), + ) + else: + self._output_scaling = output_scaling + if isinstance(output_shifting, (float, int)) or output_shifting is None: + self.register_buffer( + "_output_shifting", + _set_fixed_operation(self.out_features, output_shifting, "shift"), + ) + else: + self._output_shifting = output_shifting + + def _format_to_dict(self, values: Tensor) -> dict[str, Tensor]: + """Format an input tensor into the format required by the forward pass + + The tensor is assumed to have dimensions: n_batches x in_features where in_features + corresponds to the number of input features of the QNN + """ + + if len(values.size()) == 1: + values = values.reshape(-1, 1) + if len(values.size()) != 2 or values.shape[1] != len(self.model.inputs): + raise ValueError( + f"Model expects in_features={self.model.in_features} but got {values.size()[1]}." + ) + names = [p.name for p in self.model.inputs] + res = {} + for i, name in enumerate(names): + res[name] = values[:, i] + return res + + def _transform_x(self, x: dict[str, torch.Tensor] | Tensor) -> dict[str, Tensor] | Tensor: + """ + x can either be a torch Tensor in when using torch.nn.Module, or a standard values dict. + Scales and shifts the tensors in the values dict, containing Featureparameters. 
+ Transformation of inputs can be used to speed up training and avoid potential issues + with numerical stability that can arise due to differing feature scales. + If none are provided, it uses 0. for shifting and 1. for scaling (hence, identity). + + Arguments: + values: A torch Tensor or a dict containing values for Featureparameters. + + Returns: + A Tensor or dict containing transformed (scaled and/or shifted) Featureparameters. + + """ + + if isinstance(self.model, (QuantumModel, QNN)): + if not isinstance(x, dict): + x = self._format_to_dict(x) + return { + key: self._input_scaling * (val + self._input_shifting) for key, val in x.items() + } + + else: + assert isinstance(self.model, torch.nn.Module) and isinstance(x, Tensor) + return self._input_scaling * (x + self._input_shifting) + + def forward(self, x: dict[str, Tensor] | Tensor, *args: Any, **kwargs: Any) -> Tensor: + y = self.model(self._transform_x(x), *args, **kwargs) + return self._output_scaling * y + self._output_shifting + + def run( + self, + values: dict[str, torch.Tensor], + state: torch.Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + return self.model.run(values=self._transform_x(values), state=state, endianness=endianness) + + def sample( + self, + values: dict[str, torch.Tensor], + n_shots: int = 1000, + state: torch.Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + return self.model.sample( # type: ignore[no-any-return] + values=self._transform_x(values), + n_shots=n_shots, + state=state, + endianness=endianness, + ) + + def expectation( + self, + values: dict[str, torch.Tensor], + observable: List[ConvertedObservable] | ConvertedObservable | None = None, + state: torch.Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """ + Computes standard expectation, however scales and shifts the output tensor + of the underlying model. 
If none are provided, it uses 0. for shifting and 1. for scaling. + Transformation of ouputs can be used if the magnitude + of the targets exceeds the domain (-1,1). + + """ + exp = self.model.expectation( + values=self._transform_x(values), + observable=observable if observable is not None else self.model._observable, + state=state, + protocol=protocol, + endianness=endianness, + ) + return self._output_scaling * (exp + self._output_shifting) + + def _to_dict(self, save_params: bool = True) -> dict: + from qadence.serialization import serialize + + def store_fn(x: torch.Tensor) -> list[float]: + res: list[float] + if x.requires_grad: + res = x.detach().numpy().tolist() + else: + res = x.numpy().tolist() + return res # type: ignore[no-any-return] + + _d = serialize(self.model, save_params=save_params) + return { + self.__class__.__name__: _d, + "_input_scaling": store_fn(self._input_scaling), + "_output_scaling": store_fn(self._output_scaling), + "_input_shifting": store_fn(self._input_shifting), + "_output_shifting": store_fn(self._output_shifting), + } + + @classmethod + def _from_dict(cls, d: dict, as_torch: bool = False) -> TransformedModule: + from qadence.serialization import deserialize + + _m: QuantumModel | QNN = deserialize(d[cls.__name__], as_torch) # type: ignore[assignment] + return cls( + _m, + input_scaling=torch.tensor(d["_input_scaling"]), + output_scaling=torch.tensor(d["_output_scaling"]), + input_shifting=torch.tensor(d["_input_shifting"]), + output_shifting=torch.tensor(d["_output_shifting"]), + ) diff --git a/qadence/ml_tools/optimize_step.py b/qadence/ml_tools/optimize_step.py new file mode 100644 index 000000000..fcc68d94a --- /dev/null +++ b/qadence/ml_tools/optimize_step.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from functools import singledispatch +from typing import Any, Callable + +import torch +from torch.nn import Module +from torch.optim import Optimizer + + +@singledispatch +def data_to_model(xs: Any, device: str = 
"cpu") -> Any: + """Default behavior for single-dispatched function + + Just return the given data independently on the type + + Args: + xs (Any): the input data + device (str, optional): The torch device. Not used in this implementation. + + Returns: + Any: the `xs` argument untouched + """ + return xs + + +@data_to_model.register(list) +def _(xs: list, device: str = "cpu") -> list: + xs_to_device = xs + + for x in xs_to_device: + if torch.is_tensor(x): + x.to(device, non_blocking=True) + + return xs_to_device + + +@data_to_model.register(dict) +def _(xs: dict, device: str = "cpu") -> dict: + # TODO: Make sure that they are tensors before calling .to() method + to_device = {key: [x.to(device, non_blocking=True) for x in val] for key, val in xs.items()} + return to_device + + +def optimize_step( + model: Module, + optimizer: Optimizer, + loss_fn: Callable, + xs: dict | list | torch.Tensor | None, + device: str = "cpu", +) -> tuple[torch.Tensor | float, dict | None]: + """Default Torch optimize step with closure + + This is the default optimization step which should work for most + of the standard use cases of optimization of Torch models + + Args: + model (Module): The input model + optimizer (Optimizer): The chosen Torch optimizer + loss_fn (Callable): A custom loss function + xs (dict | list | torch.Tensor | None): the input data. If None it means + that the given model does not require any input data + device (str, optional): The device were computations are executed. + Defaults to "cpu". + + Returns: + tuple: tuple containing the model, the optimizer, a dictionary with + the collected metrics and the compute value loss + """ + + loss, metrics = None, {} + + def closure() -> Any: + # NOTE: We need the nonlocal as we can't return a metric dict and + # because e.g. LBFGS calls this closure multiple times but for some + # reason the returned loss is always the first one... 
+ nonlocal metrics, loss + optimizer.zero_grad() + loss, metrics = loss_fn(model, xs) + loss.backward(retain_graph=True) + return loss.item() + + optimizer.step(closure) + # return the loss/metrics that are being mutated inside the closure... + return loss, metrics diff --git a/qadence/ml_tools/parameters.py b/qadence/ml_tools/parameters.py new file mode 100644 index 000000000..b53a7dec6 --- /dev/null +++ b/qadence/ml_tools/parameters.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import torch +from torch import Tensor +from torch.nn import Module + + +def get_parameters(model: Module) -> Tensor: + """Retrieve all trainable model parameters in a single vector + + Args: + model (Module): the input PyTorch model + + Returns: + Tensor: a 1-dimensional tensor with the parameters + """ + ps = [p.reshape(-1) for p in model.parameters() if p.requires_grad] + return torch.concat(ps) + + +def set_parameters(model: Module, theta: Tensor) -> None: + """Set all trainable parameters of a model from a single vector + + Notice that this function assumes prior knowledge of right number + of parameters in the model + + Args: + model (Module): the input PyTorch model + theta (Tensor): the parameters to assign + """ + + with torch.no_grad(): + idx = 0 + for ps in model.parameters(): + if ps.requires_grad: + n = torch.numel(ps) + if ps.ndim == 0: + ps[()] = theta[idx : idx + n] + else: + ps[:] = theta[idx : idx + n].reshape(ps.size()) + idx += n + + +def num_parameters(model: Module) -> int: + """Return the total number of parameters of the given model""" + return len(get_parameters(model)) diff --git a/qadence/ml_tools/printing.py b/qadence/ml_tools/printing.py new file mode 100644 index 000000000..333c8720f --- /dev/null +++ b/qadence/ml_tools/printing.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from torch.utils.tensorboard import SummaryWriter + + +def print_metrics(loss: float | None, metrics: dict, iteration: int) -> None: + msg = " ".join( + 
[f"Iteration {iteration: >7} | Loss: {loss:.7f} -"] + + [f"{k}: {v.item():.7f}" for k, v in metrics.items()] + ) + print(msg) + + +def write_tensorboard( + writer: SummaryWriter, loss: float | None, metrics: dict, iteration: int +) -> None: + writer.add_scalar("loss", loss, iteration) + for key, arg in metrics.items(): + writer.add_scalar(key, arg, iteration) + + +def log_hyperparams(writer: SummaryWriter, hyperparams: dict, metrics: dict) -> None: + writer.add_hparams(hyperparams, metrics) diff --git a/qadence/ml_tools/saveload.py b/qadence/ml_tools/saveload.py new file mode 100644 index 000000000..5dd0fd3ba --- /dev/null +++ b/qadence/ml_tools/saveload.py @@ -0,0 +1,132 @@ +from __future__ import annotations + +import os +import re +from pathlib import Path +from typing import Any + +import torch +from nevergrad.optimization.base import Optimizer as NGOptimizer +from torch.nn import Module +from torch.optim import Optimizer + +from qadence.logger import get_logger + +logger = get_logger(__name__) + + +def get_latest_checkpoint_name(folder: Path, type: str) -> Path: + file = Path("") + files = [f for f in os.listdir(folder) if f.endswith(".pt") and type in f] + if len(files) == 0: + logger.error(f"Directory {folder} does not contain any {type} checkpoints.") + if len(files) == 1: + file = Path(files[0]) + else: + pattern = re.compile(".*_(\d+).pt$") + max_index = -1 + for f in files: + match = pattern.search(f) + if match: + index_str = match.group(1).replace("_", "") + index = int(index_str) + if index > max_index: + max_index = index + file = Path(f) + return Path(file) + + +def load_checkpoint( + folder: Path, + model: Module, + optimizer: Optimizer | NGOptimizer, + model_ckpt_name: str | Path = "", + opt_ckpt_name: str | Path = "", +) -> tuple[Module, Optimizer | NGOptimizer, int]: + if isinstance(folder, str): + folder = Path(folder) + if not folder.exists(): + folder.mkdir(parents=True) + return model, optimizer, 0 + model, iter = load_model(folder, model, 
model_ckpt_name) + optimizer = load_optimizer(folder, optimizer, opt_ckpt_name) + return model, optimizer, iter + + +def write_checkpoint( + folder: Path, model: Module, optimizer: Optimizer | NGOptimizer, iteration: int +) -> None: + from qadence.ml_tools.models import TransformedModule + from qadence.models import QNN, QuantumModel + + model_checkpoint_name: str = f"model_{type(model).__name__}_ckpt_" + f"{iteration:03n}" + ".pt" + opt_checkpoint_name: str = f"opt_{type(optimizer).__name__}_ckpt_" + f"{iteration:03n}" + ".pt" + try: + d = ( + model._to_dict(save_params=True) + if isinstance(model, (QNN, QuantumModel)) or isinstance(model, TransformedModule) + else model.state_dict() + ) + torch.save((iteration, d), folder / model_checkpoint_name) + logger.info(f"Writing {type(model).__name__} checkpoint {model_checkpoint_name}") + except Exception as e: + logger.exception(e) + try: + if isinstance(optimizer, Optimizer): + torch.save( + (iteration, type(optimizer), optimizer.state_dict()), folder / opt_checkpoint_name + ) + elif isinstance(optimizer, NGOptimizer): + optimizer.dump(folder / opt_checkpoint_name) + logger.info(f"Writing {type(optimizer).__name__} to checkpoint {opt_checkpoint_name}") + except Exception as e: + logger.exception(e) + + +def load_model( + folder: Path, model: Module, model_ckpt_name: str | Path = "", *args: Any, **kwargs: Any +) -> tuple[Module, int]: + from qadence.ml_tools.models import TransformedModule + from qadence.models import QNN, QuantumModel + + iteration = 0 + if model_ckpt_name == "": + model_ckpt_name = get_latest_checkpoint_name(folder, "model") + + try: + iteration, model_dict = torch.load(folder / model_ckpt_name, *args, **kwargs) + if isinstance(model, (QuantumModel, QNN, TransformedModule)): + model._from_dict(model_dict, as_torch=True) + elif isinstance(model, Module): + model.load_state_dict(model_dict, strict=True) + + except Exception as e: + msg = f"Unable to load state dict due to {e}.\ + No corresponding 
pre-trained model found. Returning the un-trained model." + import warnings + + warnings.warn(msg, UserWarning) + logger.warning(msg) + return model, iteration + + +def load_optimizer( + folder: Path, + optimizer: Optimizer | NGOptimizer, + opt_ckpt_name: str | Path = "", +) -> Optimizer | NGOptimizer: + if opt_ckpt_name == "": + opt_ckpt_name = get_latest_checkpoint_name(folder, "opt") + if os.path.isfile(folder / opt_ckpt_name): + if isinstance(optimizer, Optimizer): + (_, OptType, optimizer_state) = torch.load(folder / opt_ckpt_name) + if isinstance(optimizer, OptType): + optimizer.load_state_dict(optimizer_state) + + elif isinstance(optimizer, NGOptimizer): + loaded_optimizer = NGOptimizer.load(folder / opt_ckpt_name) + if loaded_optimizer.name == optimizer.name: + optimizer = loaded_optimizer + else: + raise NotImplementedError + return optimizer diff --git a/qadence/ml_tools/tensors.py b/qadence/ml_tools/tensors.py new file mode 100644 index 000000000..150bbc333 --- /dev/null +++ b/qadence/ml_tools/tensors.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np +import torch +from torch import Tensor + + +def numpy_to_tensor( + x: np.ndarray, + device: torch.device = torch.device("cpu"), + dtype: torch.dtype = torch.float64, + requires_grad: bool = False, +) -> Tensor: + """This only copies the numpy array if device or dtype are different than the ones of x.""" + return torch.as_tensor(x, dtype=dtype, device=device).requires_grad_(requires_grad) + + +def promote_to_tensor(x: Tensor | np.ndarray | float, requires_grad: bool = True) -> Tensor: + """Convert the given type into a torch.Tensor""" + if isinstance(x, float): + return torch.tensor([[x]], requires_grad=requires_grad) + elif isinstance(x, np.ndarray): + return numpy_to_tensor(x, requires_grad=requires_grad) + elif isinstance(x, Tensor): + return x.requires_grad_(requires_grad) + else: + raise ValueError(f"Don't know how to promote {type(x)} to Tensor") + +
+def promote_to(x: Tensor, dtype: Any) -> float | np.ndarray | Tensor: + if dtype == float: + assert x.size() == (1, 1) + return x[0, 0].item() + elif dtype == np.ndarray: + return x.detach().cpu().numpy() + elif dtype == Tensor: + return x + else: + raise ValueError(f"Don't know how to convert Tensor to {dtype}") diff --git a/qadence/ml_tools/train_grad.py b/qadence/ml_tools/train_grad.py new file mode 100644 index 000000000..b47b870d4 --- /dev/null +++ b/qadence/ml_tools/train_grad.py @@ -0,0 +1,199 @@ +from __future__ import annotations + +from typing import Callable + +from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn +from torch import Tensor +from torch.nn import Module +from torch.optim import Optimizer +from torch.utils.data import DataLoader +from torch.utils.tensorboard import SummaryWriter + +from qadence.logger import get_logger +from qadence.ml_tools.config import TrainConfig +from qadence.ml_tools.data import DictDataLoader +from qadence.ml_tools.optimize_step import optimize_step +from qadence.ml_tools.printing import print_metrics, write_tensorboard +from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint + +logger = get_logger(__name__) + + +def train( + model: Module, + dataloader: DictDataLoader | DataLoader | list[Tensor] | tuple[Tensor, Tensor] | None, + optimizer: Optimizer, + config: TrainConfig, + loss_fn: Callable, + device: str = "cpu", + optimize_step: Callable = optimize_step, + write_tensorboard: Callable = write_tensorboard, +) -> tuple[Module, Optimizer]: + """Runs the training loop with gradient-based optimizer + + Assumes that `loss_fn` returns a tuple of (loss, + metrics: dict), where `metrics` is a dict of scalars. Loss and metrics are + written to tensorboard. Checkpoints are written every + `config.checkpoint_every` steps (and after the last training step). If a + checkpoint is found at `config.folder` we resume training from there. 
The + tensorboard logs can be viewed via `tensorboard --logdir /path/to/folder`. + + Args: + model: The model to train. + dataloader: dataloader of different types. If None, no data is required by + the model + optimizer: The optimizer to use. + config: `TrainConfig` with additional training options. + loss_fn: Loss function returning (loss: float, metrics: dict[str, float]) + device: String defining device to train on, pass 'cuda' for GPU. + optimize_step: Customizable optimization callback which is called at every iteration. + The function must have the signature `optimize_step(model, + optimizer, loss_fn, xs, device="cpu")` (see the example below). + Apart from the default we already supply three other optimization + functions `optimize_step_evo`, `optimize_step_grad_norm`, and + `optimize_step_inv_dirichlet`. Learn more about how to use this in + the [Advanced features](../../tutorials/advanced) tutorial of the + documentation. + write_tensorboard: Customizable tensorboard logging callback which is + called every `config.write_every` iterations. The function must have + the signature `write_tensorboard(writer, loss, metrics, iteration)` + (see the example below).
+ + Example: + ```python exec="on" source="material-block" + from pathlib import Path + import torch + from itertools import count + from qadence.constructors import hamiltonian_factory, hea, feature_map + from qadence import chain, Parameter, QuantumCircuit, Z + from qadence.models import QNN + from qadence.ml_tools import train_with_grad, TrainConfig + + n_qubits = 2 + fm = feature_map(n_qubits) + ansatz = hea(n_qubits=n_qubits, depth=3) + observable = hamiltonian_factory(n_qubits, detuning = Z) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + + model = QNN(circuit, observable, backend="pyqtorch", diff_mode="ad") + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + + ## lets prepare the train routine + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + tmp_path = Path("/tmp") + n_epochs = 5 + config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, + ) + batch_size = 25 + x = torch.linspace(0, 1, batch_size).reshape(-1, 1) + y = torch.sin(x) + train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) + ``` + """ + + assert loss_fn is not None, "Provide a valid loss function" + + # Move model to device before optimizer is loaded + model = model.to(device) + + # load available checkpoint + init_iter = 0 + if config.folder: + model, optimizer, init_iter = load_checkpoint(config.folder, model, optimizer) + logger.debug(f"Loaded model and optimizer from {config.folder}") + # initialize tensorboard + writer = SummaryWriter(config.folder, purge_step=init_iter) + + ## Training + progress = Progress( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + 
TaskProgressColumn(), + TimeRemainingColumn(elapsed_when_finished=True), + ) + if isinstance(dataloader, (list, tuple)): + from qadence.ml_tools.data import to_dataloader + + assert len(dataloader) == 2, "Please provide exactly two torch tensors." + x, y = dataloader + dataloader = to_dataloader(x=x, y=y, batch_size=config.batch_size) + with progress: + dl_iter = iter(dataloader) if isinstance(dataloader, DictDataLoader) else None + + # outer epoch loop + for iteration in progress.track(range(init_iter, init_iter + config.max_iter)): + try: + # in case there is not data needed by the model + # this is the case, for example, of quantum models + # which do not have classical input data (e.g. chemistry) + if dataloader is None: + loss, metrics = optimize_step( + model, optimizer, loss_fn, dataloader, device=device + ) + + # single epoch with DictDataloader using a single iteration method + # DictDataloader returns a single sample of the data + # with a given batch size decided when the dataloader is defined + elif isinstance(dataloader, DictDataLoader): + # resample all the time from the dataloader + # by creating a fresh iterator if the dataloader + # does not support automatically iterating datasets + if not dataloader.has_automatic_iter: + dl_iter = iter(dataloader) + data = next(dl_iter) # type: ignore[arg-type] + loss, metrics = optimize_step(model, optimizer, loss_fn, data, device=device) + + elif isinstance(dataloader, DataLoader): + # single-epoch with standard DataLoader + # otherwise a standard PyTorch DataLoader behavior + # is assumed with optional mini-batches + running_loss = 0.0 + for i, data in enumerate(dataloader): + # TODO: make sure to average metrics as well + loss, metrics = optimize_step( + model, optimizer, loss_fn, data, device=device + ) + running_loss += loss.item() + loss = running_loss / (i + 1) + + else: + raise NotImplementedError("Unsupported dataloader type!") + + if iteration % config.print_every == 0: + print_metrics(loss, metrics, 
iteration) + + if iteration % config.write_every == 0: + write_tensorboard(writer, loss, metrics, iteration) + + if config.folder: + if iteration % config.checkpoint_every == 0: + write_checkpoint(config.folder, model, optimizer, iteration) + + except KeyboardInterrupt: + print("Terminating training gracefully after the current iteration.") + break + + # Final writing and checkpointing + if config.folder: + write_checkpoint(config.folder, model, optimizer, iteration) + write_tensorboard(writer, loss, metrics, iteration) + writer.close() + + return model, optimizer diff --git a/qadence/ml_tools/train_no_grad.py b/qadence/ml_tools/train_no_grad.py new file mode 100644 index 000000000..ebffef52a --- /dev/null +++ b/qadence/ml_tools/train_no_grad.py @@ -0,0 +1,134 @@ +from __future__ import annotations + +from typing import Callable + +import nevergrad as ng +from nevergrad.optimization.base import Optimizer as NGOptimizer +from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeRemainingColumn +from torch import Tensor +from torch.nn import Module +from torch.utils.data import DataLoader +from torch.utils.tensorboard import SummaryWriter + +from qadence.logger import get_logger +from qadence.ml_tools.config import TrainConfig +from qadence.ml_tools.data import DictDataLoader +from qadence.ml_tools.parameters import get_parameters, set_parameters +from qadence.ml_tools.printing import print_metrics, write_tensorboard +from qadence.ml_tools.saveload import load_checkpoint, write_checkpoint +from qadence.ml_tools.tensors import promote_to_tensor + +logger = get_logger(__name__) + + +def train( + model: Module, + dataloader: DictDataLoader | DataLoader | None, + optimizer: NGOptimizer, + config: TrainConfig, + loss_fn: Callable, +) -> tuple[Module, NGOptimizer]: + """Runs the training loop with a gradient-free optimizer + + Assumes that `loss_fn` returns a tuple of (loss, metrics: dict), where + `metrics` is a dict of scalars. 
Loss and metrics are written to + tensorboard. Checkpoints are written every `config.checkpoint_every` steps + (and after the last training step). If a checkpoint is found at `config.folder` + we resume training from there. The tensorboard logs can be viewed via + `tensorboard --logdir /path/to/folder`. + + Args: + model: The model to train + dataloader: Dataloader constructed via `dictdataloader` + optimizer: The optimizer to use taken from the Nevergrad library. If this is not + the case the function will raise an AssertionError + loss_fn: Loss function returning (loss: float, metrics: dict[str, float]) + """ + init_iter = 0 + if config.folder: + model, optimizer, init_iter = load_checkpoint(config.folder, model, optimizer) + logger.debug(f"Loaded model and optimizer from {config.folder}") + + def _update_parameters( + data: Tensor | None, ng_params: ng.p.Array + ) -> tuple[float, dict, ng.p.Array]: + loss, metrics = loss_fn(model, data) # type: ignore[misc] + optimizer.tell(ng_params, float(loss)) + ng_params = optimizer.ask() # type: ignore [assignment] + params = promote_to_tensor(ng_params.value, requires_grad=False) + set_parameters(model, params) + return loss, metrics, ng_params + + assert loss_fn is not None, "Provide a valid loss function" + # TODO: support also Scipy optimizers + assert isinstance(optimizer, NGOptimizer), "Use only optimizers from the Nevergrad library" + + # initialize tensorboard + writer = SummaryWriter(config.folder, purge_step=init_iter) + + # set optimizer configuration and initial parameters + optimizer.budget = config.max_iter + optimizer.enable_pickling() + + # TODO: Make it GPU compatible if possible + params = get_parameters(model).detach().numpy() + ng_params = ng.p.Array(init=params) + + # serial training + # TODO: Add a parallelization using the num_workers argument in Nevergrad + progress = Progress( + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TaskProgressColumn(), + 
TimeRemainingColumn(elapsed_when_finished=True), + ) + with progress: + dl_iter = iter(dataloader) if isinstance(dataloader, DictDataLoader) else None + + for iteration in progress.track(range(init_iter, init_iter + config.max_iter)): + if dataloader is None: + loss, metrics, ng_params = _update_parameters(None, ng_params) + + elif isinstance(dataloader, DictDataLoader): + # resample all the time from the dataloader + # by creating a fresh iterator if the dataloader + # does not support automatically iterating datasets + if not dataloader.has_automatic_iter: + dl_iter = iter(dataloader) + + data = next(dl_iter) # type: ignore[arg-type] + loss, metrics, ng_params = _update_parameters(data, ng_params) + + elif isinstance(dataloader, DataLoader): + # single-epoch with standard DataLoader + # otherwise a standard PyTorch DataLoader behavior + # is assumed with optional mini-batches + running_loss = 0.0 + for i, data in enumerate(dataloader): + loss, metrics, ng_params = _update_parameters(data, ng_params) + running_loss += loss + loss = running_loss / (i + 1) + + else: + raise NotImplementedError("Unsupported dataloader type!") + + if iteration % config.print_every == 0: + print_metrics(loss, metrics, iteration) + + if iteration % config.write_every == 0: + write_tensorboard(writer, loss, metrics, iteration) + + if config.folder: + if iteration % config.checkpoint_every == 0: + write_checkpoint(config.folder, model, optimizer, iteration) + + if iteration >= init_iter + config.max_iter: + break + + ## Final writing and stuff + if config.folder: + write_checkpoint(config.folder, model, optimizer, iteration) + write_tensorboard(writer, loss, metrics, iteration) + writer.close() + + return model, optimizer diff --git a/qadence/ml_tools/utils.py b/qadence/ml_tools/utils.py new file mode 100644 index 000000000..96db165bd --- /dev/null +++ b/qadence/ml_tools/utils.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +from functools import singledispatch +from typing 
import Any + +from torch import Tensor, rand + +from qadence.blocks import AbstractBlock, parameters +from qadence.circuit import QuantumCircuit +from qadence.ml_tools.models import TransformedModule +from qadence.models import QNN, QuantumModel +from qadence.parameters import Parameter, stringify + + +@singledispatch +def rand_featureparameters( + x: QuantumCircuit | AbstractBlock | QuantumModel | QNN | TransformedModule, *args: Any +) -> dict[str, Tensor]: + raise NotImplementedError(f"Unable to generate random featureparameters for object {type(x)}.") + + +@rand_featureparameters.register +def _(block: AbstractBlock, batch_size: int = 1) -> dict[str, Tensor]: + non_number_params = [p for p in parameters(block) if not p.is_number] + feat_params: list[Parameter] = [p for p in non_number_params if not p.trainable] + return {stringify(p): rand(batch_size, requires_grad=False) for p in feat_params} + + +@rand_featureparameters.register +def _(circuit: QuantumCircuit, batch_size: int = 1) -> dict[str, Tensor]: + return rand_featureparameters(circuit.block, batch_size) + + +@rand_featureparameters.register +def _(qm: QuantumModel, batch_size: int = 1) -> dict[str, Tensor]: + return rand_featureparameters(qm._circuit.abstract, batch_size) + + +@rand_featureparameters.register +def _(qnn: QNN, batch_size: int = 1) -> dict[str, Tensor]: + return rand_featureparameters(qnn._circuit.abstract, batch_size) + + +@rand_featureparameters.register +def _(tm: TransformedModule, batch_size: int = 1) -> dict[str, Tensor]: + return rand_featureparameters(tm.model, batch_size) diff --git a/qadence/models/__init__.py b/qadence/models/__init__.py new file mode 100644 index 000000000..b6ef2940d --- /dev/null +++ b/qadence/models/__init__.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from .qnn import QNN +from .quantum_model import QuantumModel + +# Modules to be automatically added to the qadence namespace +__all__ = ["QNN", "QuantumModel"] diff --git a/qadence/models/qnn.py 
b/qadence/models/qnn.py new file mode 100644 index 000000000..1cd141585 --- /dev/null +++ b/qadence/models/qnn.py @@ -0,0 +1,139 @@ +from __future__ import annotations + +from typing import Callable + +from torch import Tensor + +from qadence.backend import BackendConfiguration, BackendName +from qadence.backends.pytorch_wrapper import DiffMode +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit +from qadence.measurements import Measurements +from qadence.models.quantum_model import QuantumModel +from qadence.utils import Endianness + + +class QNN(QuantumModel): + """Quantum neural network model for n-dimensional inputs + + Examples: + ```python exec="on" source="material-block" result="json" + import torch + from qadence import QuantumCircuit, QNN + from qadence import hea, feature_map, hamiltonian_factory, Z + + # create the circuit + n_qubits, depth = 2, 4 + fm = feature_map(n_qubits) + ansatz = hea(n_qubits=n_qubits, depth=depth) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + obs_base = hamiltonian_factory(n_qubits, detuning = Z) + + # the QNN will yield two outputs + obs = [2.0 * obs_base, 4.0 * obs_base] + + # initialize and use the model + qnn = QNN(circuit, obs, diff_mode="ad", backend="pyqtorch") + y = qnn.expectation({"phi": torch.rand(3)}) + print(str(y)) # markdown-exec: hide + ``` + """ + + def __init__( + self, + circuit: QuantumCircuit, + observable: list[AbstractBlock] | AbstractBlock, + transform: Callable[[Tensor], Tensor] = None, # transform output of the QNN + backend: BackendName = BackendName.PYQTORCH, + diff_mode: DiffMode = DiffMode.AD, + protocol: Measurements | None = None, + configuration: BackendConfiguration | dict | None = None, + ): + """Initialize the QNN + + The number of inputs is determined by the feature parameters in the input + quantum circuit while the number of outputs is determined by how many + observables are provided as input + + Args: + circuit: The quantum circuit to use for the 
QNN. + transform: A transformation applied to the output of the QNN. + backend: The chosen quantum backend. + diff_mode: The differentiation engine to use. Choices 'gpsr' or 'ad'. + protocol: optional measurement protocol. If None, + use exact expectation value with a statevector simulator + configuration: optional configuration for the backend + + """ + super().__init__( + circuit=circuit, + observable=observable, + backend=backend, + diff_mode=diff_mode, + protocol=protocol, + configuration=configuration, + ) + + if self.out_features is None: + raise ValueError("You need to provide at least one observable in the QNN constructor") + + self.transform = transform if transform else lambda x: x + + def forward( + self, + values: dict[str, Tensor] | Tensor = None, + state: Tensor | None = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Forward pass of the model + + This returns the (differentiable) expectation value of the given observable + operator defined in the constructor. Differently from the base QuantumModel + class, the QNN accepts also a tensor as input for the forward pass. The + tensor is expected to have shape: `n_batches x in_features` where `n_batches` + is the number of data points and `in_features` is the dimensionality of the problem + + The output of the forward pass is the expectation value of the input + observable(s). 
If a single observable is given, the output shape is + `n_batches` while if multiple observables are given the output shape + is instead `n_batches x n_observables` + + Args: + values (dict[str, Tensor] | Tensor): the values of the feature parameters + + Returns: + Tensor: a tensor with the expectation value of the observables passed + in the constructor of the model + """ + if values is None: + values = {} + if not isinstance(values, dict): + values = self._format_to_dict(values) + if protocol is None: + protocol = self._protocol + + return self.transform( + self.expectation(values=values, state=state, protocol=protocol, endianness=endianness) + ) + + def _format_to_dict(self, values: Tensor) -> dict[str, Tensor]: + """Format an input tensor into the format required by the forward pass + + The tensor is assumed to have dimensions: n_batches x in_features where in_features + corresponds to the number of input features of the QNN + """ + + if len(values.size()) == 1: + values = values.reshape(-1, 1) + msg = f"Model expects in_features={self.in_features} but got {values.size()[1]}." + assert len(values.size()) == 2, msg + assert values.size()[1] == self.in_features, msg + + names = [p.name for p in self.inputs] + res = {} + for i, name in enumerate(names): + res[name] = values[:, i] + return res + + # TODO: Implement derivatives w.r.t. 
to inputs diff --git a/qadence/models/quantum_model.py b/qadence/models/quantum_model.py new file mode 100644 index 000000000..bd549a5fa --- /dev/null +++ b/qadence/models/quantum_model.py @@ -0,0 +1,286 @@ +from __future__ import annotations + +import os +from collections import Counter, OrderedDict +from dataclasses import asdict +from pathlib import Path +from typing import Any, Callable, Optional, Sequence + +import torch +from torch import Tensor, nn + +from qadence.backend import ( + Backend, + BackendConfiguration, + BackendName, + ConvertedCircuit, + ConvertedObservable, +) +from qadence.backends import backend_factory, config_factory +from qadence.backends.pytorch_wrapper import DiffMode +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit +from qadence.logger import get_logger +from qadence.measurements import Measurements +from qadence.utils import Endianness + +logger = get_logger(__name__) + + +class QuantumModel(nn.Module): + """The central class of qadence that executes `QuantumCircuit`s and make them differentiable. + + This class should be used as base class for any new quantum model supported in the qadence + framework for information on the implementation of custom models see + [here](/advanced_tutorials/custom-models.md). + """ + + backend: Backend + embedding_fn: Callable + _params: nn.ParameterDict + _circuit: ConvertedCircuit + _observable: list[ConvertedObservable] | None + + def __init__( + self, + circuit: QuantumCircuit, + observable: list[AbstractBlock] | AbstractBlock | None = None, + backend: BackendName | str = BackendName.PYQTORCH, + diff_mode: DiffMode = DiffMode.AD, + protocol: Measurements | None = None, + configuration: BackendConfiguration | dict | None = None, + ): + """Initialize a generic QuantumModel instance. + + Arguments: + circuit: The circuit that is executed. + observable: Optional observable(s) that are used only in the `expectation` method. 
You + can also provide observables on the fly to the expectation call directly. + backend: A backend for circuit execution. + diff_mode: A differentiability mode. Parameter shift based modes work on all backends. + AD based modes only on PyTorch based backends. + protocol: Optional measurement protocol. If None, use + exact expectation value with a statevector simulator. + configuration: Configuration for the backend. + + Raises: + ValueError: if the `diff_mode` argument is set to None + """ + super().__init__() + + if not isinstance(circuit, QuantumCircuit): + raise TypeError( + f"The circuit should be of type 'QuantumCircuit'. Got {type(circuit)}." + ) + + self.inputs = [p for p in circuit.unique_parameters if not p.trainable and not p.is_number] + if diff_mode is None: + raise ValueError("`diff_mode` cannot be `None` in a `QuantumModel`.") + + self.backend = backend_factory( + backend=backend, diff_mode=diff_mode, configuration=configuration + ) + + if isinstance(observable, list) or observable is None: + observable = observable + else: + observable = [observable] + + conv = self.backend.convert(circuit, observable) + self.embedding_fn = conv.embedding_fn + self._circuit = conv.circuit + self._observable = conv.observable + self._backend_name = backend + self._diff_mode = diff_mode + self._protocol = protocol + + self._params = nn.ParameterDict( + { + str(key): nn.Parameter(val, requires_grad=val.requires_grad) + for key, val in conv.params.items() + } + ) + + @property + def vparams(self) -> OrderedDict: + return OrderedDict({k: v.data for k, v in self._params.items() if v.requires_grad}) + + @property + def vals_vparams(self) -> Tensor: + """Flattened tensor of the parameters which are actually updated during optimization""" + vals = torch.tensor([v for v in self._params.values() if v.requires_grad]) + vals.requires_grad = False + return vals.flatten() + + @property + def in_features(self) -> int: + """Number of inputs.""" + return len(self.inputs) + + @property + def out_features(self)
-> int | None: + """Number of outputs""" + return 0 if self._observable is None else len(self._observable) + + @property + def num_vparams(self) -> int: + """The number of variational parameters""" + return len(self.vals_vparams) + + def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit: + return self.backend.circuit(circuit) + + def observable(self, observable: AbstractBlock, n_qubits: int) -> Any: + return self.backend.observable(observable, n_qubits) + + def reset_vparams(self, values: Sequence) -> None: + """Reset all the variational parameters with a given list of values""" + current_vparams = OrderedDict({k: v for k, v in self._params.items() if v.requires_grad}) + + assert ( + len(values) == self.num_vparams + ), "Pass an iterable with the values of all variational parameters" + for i, k in enumerate(current_vparams.keys()): + current_vparams[k].data = torch.tensor([values[i]]) + + def forward(self, *args: Any, **kwargs: Any) -> Tensor: + return self.run(*args, **kwargs) + + def run( + self, + values: dict[str, Tensor] = None, + state: Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + if values is None: + values = {} + params = self.embedding_fn(self._params, values) + return self.backend.run(self._circuit, params, state=state, endianness=endianness) + + def sample( + self, + values: dict[str, torch.Tensor] = {}, + n_shots: int = 1000, + state: torch.Tensor | None = None, + endianness: Endianness = Endianness.BIG, + ) -> list[Counter]: + params = self.embedding_fn(self._params, values) + return self.backend.sample( + self._circuit, params, n_shots=n_shots, state=state, endianness=endianness + ) + + def expectation( + self, + values: dict[str, Tensor] = {}, + observable: list[ConvertedObservable] | ConvertedObservable | None = None, + state: Optional[Tensor] = None, + protocol: Measurements | None = None, + endianness: Endianness = Endianness.BIG, + ) -> Tensor: + """Compute expectation using the given backend. 
+ + Returns: + A torch.Tensor of shape n_batches x n_obs + """ + if observable is None: + if self._observable is None: + raise ValueError( + "Provide an AbstractBlock as the observable to compute expectation." + "Either pass a 'native_observable' directly to 'QuantumModel.expectation'" + "or pass a (non-native) '' to the 'QuantumModel.__init__'." + ) + observable = self._observable + + params = self.embedding_fn(self._params, values) + if protocol is None: + protocol = self._protocol + + return self.backend.expectation( + circuit=self._circuit, + observable=observable, + param_values=params, + state=state, + protocol=protocol, + endianness=endianness, + ) + + def overlap(self) -> Tensor: + raise NotImplementedError("The overlap method is not implemented for this model.") + + def _to_dict(self, save_params: bool = False) -> dict[str, Any]: + if isinstance(self._observable, list): + abs_obs = [obs.abstract._to_dict() for obs in self._observable] + else: + abs_obs = [dict()] + + d = { + "circuit": self._circuit.abstract._to_dict(), + "observable": abs_obs, + "backend": self._backend_name, + "diff_mode": self._diff_mode, + "protocol": self._protocol._to_dict() if self._protocol is not None else {}, + "backend_configuration": asdict(self.backend.backend.config), # type: ignore + } + param_dict_conv = {} + if save_params: + param_dict_conv = {name: param.data for name, param in self._params.items()} + return {self.__class__.__name__: d, "param_dict": param_dict_conv} + + @classmethod + def _from_dict(cls, d: dict, as_torch: bool = False) -> QuantumModel: + from qadence.serialization import deserialize + + qm_dict = d[cls.__name__] + qm = cls( + circuit=QuantumCircuit._from_dict(qm_dict["circuit"]), + observable=( + None + if not isinstance(qm_dict["observable"], list) + else [deserialize(q_obs) for q_obs in qm_dict["observable"]] # type: ignore[misc] + ), + backend=qm_dict["backend"], + diff_mode=qm_dict["diff_mode"], + 
protocol=Measurements._from_dict(qm_dict["protocol"]), + configuration=config_factory(qm_dict["backend"], qm_dict["backend_configuration"]), + ) + + if as_torch: + conv_pd = torch.nn.ParameterDict() + param_dict = d["param_dict"] + for n, param in param_dict.items(): + conv_pd[n] = torch.nn.Parameter(param) + qm._params = conv_pd + return qm + + def save( + self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True + ) -> None: + if not os.path.isdir(folder): + raise FileNotFoundError + try: + torch.save(self._to_dict(save_params), folder / Path(file_name)) + except Exception as e: + print(f"Unable to write QuantumModel to disk due to {e}") + + @classmethod + def load( + cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu" + ) -> QuantumModel: + qm_pt = {} + if isinstance(file_path, str): + file_path = Path(file_path) + if os.path.isdir(file_path): + from qadence.ml_tools.saveload import get_latest_checkpoint_name + + file_path = file_path / get_latest_checkpoint_name(file_path, "model") + + try: + qm_pt = torch.load(file_path, map_location=map_location) + except Exception as e: + print(f"Unable to load QuantumModel due to {e}") + return cls._from_dict(qm_pt, as_torch) + + def assign_parameters(self, values: dict[str, Tensor]) -> Any: + """Return the final, assigned circuit that is used in e.g. 
`backend.run`""" + params = self.embedding_fn(self._params, values) + return self.backend.assign_parameters(self._circuit, params) diff --git a/qadence/operations.py b/qadence/operations.py new file mode 100644 index 000000000..8caaebb5b --- /dev/null +++ b/qadence/operations.py @@ -0,0 +1,1284 @@ +"""Basic operations to be implemented by backends.""" +from __future__ import annotations + +from copy import deepcopy +from dataclasses import dataclass +from functools import cached_property +from typing import Any, Tuple, Union + +import numpy as np +import sympy +import torch +from rich.console import Console, RenderableType +from rich.padding import Padding +from rich.panel import Panel +from rich.tree import Tree +from sympy import Basic +from torch import Tensor, cdouble, tensor + +from qadence.blocks import ( + AbstractBlock, + ControlBlock, + ParametricBlock, + ParametricControlBlock, + PrimitiveBlock, + TimeEvolutionBlock, +) +from qadence.blocks.analog import ( + AnalogBlock, + ConstantAnalogRotation, + QubitSupport, + WaitBlock, +) +from qadence.blocks.block_to_tensor import block_to_tensor +from qadence.blocks.utils import ( + add, # noqa + block_is_commuting_hamiltonian, + block_is_qubit_hamiltonian, + chain, + expressions, + kron, +) +from qadence.decompose import lie_trotter_suzuki +from qadence.logger import get_logger +from qadence.parameters import ( + Parameter, + ParamMap, + evaluate, + extract_original_param_entry, +) +from qadence.types import LTSOrder, OpName, TGenerator, TNumber, TParameter +from qadence.utils import eigenvalues + +logger = get_logger(__name__) + + +# Modules to be automatically added to the qadence namespace +__all__ = [ + "X", + "Y", + "Z", + "N", + "H", + "I", + "Zero", + "RX", + "RY", + "RZ", + "U", + "CNOT", + "CZ", + "MCZ", + "HamEvo", + "CRX", + "MCRX", + "CRY", + "MCRY", + "CRZ", + "MCRZ", + "T", + "TDagger", + "S", + "SDagger", + "SWAP", + "PHASE", + "CPHASE", + "MCPHASE", + "wait", + "entangle", + "AnalogEntanglement", 
+ "AnalogRot", + "AnalogRX", + "AnalogRY", + "AnalogRZ", + "AnalogSWAP", +] + + +class X(PrimitiveBlock): + """The X gate""" + + name = OpName.X + + def __init__(self, target: int): + super().__init__((target,)) + + @property + def generator(self) -> AbstractBlock: + return self + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return tensor([-1, 1], dtype=cdouble) + + def dagger(self) -> X: + return self + + +class Y(PrimitiveBlock): + """The Y gate""" + + name = OpName.Y + + def __init__(self, target: int): + super().__init__((target,)) + + @property + def generator(self) -> AbstractBlock: + return self + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return tensor([-1, 1], dtype=cdouble) + + def dagger(self) -> Y: + return self + + +class Z(PrimitiveBlock): + """The Z gate""" + + name = OpName.Z + + def __init__(self, target: int): + super().__init__((target,)) + + @property + def generator(self) -> AbstractBlock: + return self + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return tensor([-1, 1], dtype=cdouble) + + def dagger(self) -> Z: + return self + + +class N(PrimitiveBlock): + """The N = (1/2)(I-Z) operator""" + + name = OpName.N + + def __init__(self, target: int): + super().__init__((target,)) + + @property + def generator(self) -> None: + raise ValueError("Property `generator` not available for non-unitary operator.") + return None + + @property + def eigenvalues_generator(self) -> None: + raise ValueError("Property `eigenvalues_generator` not available for non-unitary operator.") + return None + + @property + def eigenvalues(self) -> Tensor: + return tensor([0, 1], dtype=cdouble) + + def dagger(self) -> N: + return self + + +class 
S(PrimitiveBlock): + """The S / Phase gate""" + + name = OpName.S + + def __init__(self, target: int): + self.generator = I(target) - Z(target) + super().__init__((target,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([0, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return tensor([1, 1j], dtype=cdouble) + + def dagger(self) -> SDagger: + return SDagger(*self.qubit_support) + + +class SDagger(PrimitiveBlock): + """The Hermitian adjoint/conjugate transpose of the S / Phase gate""" + + name = OpName.SDAGGER + + def __init__(self, target: int): + self.generator = I(target) - Z(target) + super().__init__((target,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([0, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return tensor([1, -1j], dtype=cdouble) + + def dagger(self) -> S: + return S(*self.qubit_support) + + +class PHASE(ParametricBlock): + """The Parametric Phase / S gate""" + + name = OpName.PHASE + + def __init__(self, target: int, parameter: Parameter | TNumber | sympy.Expr | str): + self.parameters = ParamMap(parameter=parameter) + self.generator = I(target) - Z(target) + super().__init__((target,)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([0, 2], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + lmbda = torch.exp(1j * evaluate(self.parameters.parameter, as_torch=True)) + return torch.cat((torch.ones(1), lmbda)) + + +class I(PrimitiveBlock): + """The identity gate""" + + name = OpName.I + + def __init__(self, target: int): + super().__init__((target,)) + + def __ixor__(self, other: AbstractBlock | int) -> AbstractBlock: + if not isinstance(other, AbstractBlock): + raise ValueError( + f"Can only initialize a kron block with another block. Got {type(other)}." 
+ ) + return other + + def __imul__(self, other: AbstractBlock | TNumber | Parameter) -> AbstractBlock: + if not isinstance(other, AbstractBlock): + raise ValueError( + "In-place multiplication is available " "only for AbstractBlock instances" + ) + return other + + @property + def generator(self) -> AbstractBlock: + return I(*self.qubit_support) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.ones(2, dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.ones(2, dtype=cdouble) + + def __ascii__(self, console: Console) -> Padding: + return Padding("──────", (1, 1, 1, 1)) + + def dagger(self) -> I: + return I(*self.qubit_support) + + +TPauliBlock = Union[X, Y, Z, I, N] + + +class H(PrimitiveBlock): + """The Hadamard or H gate""" + + name = OpName.H + + def __init__(self, target: int): + self.generator = (1 / np.sqrt(2)) * (X(target) + Z(target) - np.sqrt(2) * I(target)) + super().__init__((target,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-2, 0], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + def dagger(self) -> H: + return H(*self.qubit_support) + + +class Zero(PrimitiveBlock): + name = OpName.ZERO + + def __init__(self) -> None: + self.generator = 0 * I(0) + super().__init__((0,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.zeros(2, dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.zeros(2, dtype=cdouble) + + def __add__(self, other: AbstractBlock) -> AbstractBlock: + return other + + def __iadd__(self, other: AbstractBlock) -> AbstractBlock: + return other + + def __sub__(self, other: AbstractBlock) -> AbstractBlock: + return -other + + def __isub__(self, other: AbstractBlock) -> AbstractBlock: + return -other + + def __mul__(self, other: AbstractBlock | TNumber | Parameter) -> AbstractBlock: + return self + + def __imul__(self, other: AbstractBlock 
| TNumber | Parameter) -> AbstractBlock: + return self + + def __ixor__(self, other: AbstractBlock | TNumber | Parameter) -> AbstractBlock: + return self + + def __pow__(self, power: int) -> AbstractBlock: + return self + + def dagger(self) -> Zero: + return Zero() + + +class RX(ParametricBlock): + """The Rx gate""" + + name = OpName.RX + + def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap): + # TODO: should we give them more meaningful names? like 'angle'? + self.parameters = ( + parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter) + ) + self.generator = X(target) + super().__init__((target,)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((lmbd, lmbd.conj())) + + +class RY(ParametricBlock): + """The Ry gate""" + + name = OpName.RY + + def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap): + self.parameters = ( + parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter) + ) + self.generator = Y(target) + super().__init__((target,)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((lmbd, lmbd.conj())) + + +class RZ(ParametricBlock): + """The Rz gate""" + + name = OpName.RZ + + def __init__(self, target: int, parameter: Parameter | TParameter | ParamMap): + self.parameters = ( + parameter if isinstance(parameter, ParamMap) else ParamMap(parameter=parameter) + ) + 
self.generator = Z(target) + super().__init__((target,)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((lmbd, lmbd.conj())) + + +class U(ParametricBlock): + """Arbitrary one-qubit rotation in the Bloch sphere + + This operation accepts 3 parameters (phi, theta, omega)""" + + name = OpName.U + + def __init__( + self, + target: int, + phi: Parameter | TParameter, + theta: Parameter | TParameter, + omega: Parameter | TParameter, + ): + self.parameters = ParamMap(phi=phi, theta=theta, omega=omega) + self.generator = chain(Z(target), Y(target), Z(target)) + super().__init__((target,)) + + @classmethod + def num_parameters(cls) -> int: + return 3 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-1, 1], dtype=torch.cdouble) + + @property + def eigenvalues(self) -> Tensor: + phi = evaluate(self.parameters.phi) + theta = evaluate(self.parameters.theta) + omega = evaluate(self.parameters.omega) + lmbd = np.exp(-1j * (phi + omega) / 2) * np.cos(theta / 2) + return torch.cat((lmbd, lmbd.conj())) + + @property + def n_qubits(self) -> int: + return 1 + + def digital_decomposition(self) -> AbstractBlock: + return chain( + RZ(self.qubit_support[0], self.parameters.phi), + RY(self.qubit_support[0], self.parameters.theta), + RZ(self.qubit_support[0], self.parameters.omega), + ) + + +class HamEvo(TimeEvolutionBlock): + """ + A block implementing the Hamiltonian evolution operation H where: + H = exp(-iG, t) + where G represents a square generator and t represents the time parameter + which can be parametrized. + + Arguments: + generator: Either a AbstractBlock, torch.Tensor or numpy.ndarray. 
+ parameter: A scalar or vector of numeric or torch.Tensor type. + qubit_support: The qubits on which the evolution will be performed on. + + Examples: + + ```python exec="on" source="material-block" result="json" + from qadence import RX, HamEvo, run + import torch + hevo = HamEvo(generator=RX(0, torch.pi), parameter=torch.rand(2)) + print(run(hevo)) + # Now lets use a torch.Tensor as a generator, Now we have to pass the support + gen = torch.rand(2,2, dtype=torch.complex128) + hevo = HamEvo(generator=gen, parameter=torch.rand(2), qubit_support=(0,)) + print(run(hevo)) + ``` + """ + + name = OpName.HAMEVO + draw_generator: bool = False + + def __init__( + self, + generator: Union[TGenerator, AbstractBlock], + parameter: TParameter, + qubit_support: tuple[int, ...] = None, + ): + gen_exprs = {} + if qubit_support is None and not isinstance(generator, AbstractBlock): + raise ValueError("You have to supply a qubit support for non-block generators.") + super().__init__(qubit_support if qubit_support else generator.qubit_support) + if isinstance(generator, AbstractBlock): + qubit_support = generator.qubit_support + if generator.is_parametric: + gen_exprs = {str(e): e for e in expressions(generator)} + elif isinstance(generator, torch.Tensor): + msg = "Please provide a square generator." + if len(generator.shape) == 2: + assert generator.shape[0] == generator.shape[1], msg + elif len(generator.shape) == 3: + assert generator.shape[1] == generator.shape[2], msg + assert generator.shape[0] == 1, "Qadence doesnt support batched generators." + else: + raise TypeError( + "Only 2D or 3D generators are supported.\ + In case of a 3D generator, the batch dim\ + is expected to be at dim 0." 
+ ) + gen_exprs = {str(generator.__hash__()): generator} + elif isinstance(generator, (sympy.Basic, sympy.Array)): + gen_exprs = {str(generator): generator} + else: + raise TypeError( + f"Generator of type {type(generator)} not supported.\ + If you're using a numpy.ndarray, please cast it to a torch tensor." + ) + ps = {"parameter": Parameter(parameter), **gen_exprs} + self.parameters = ParamMap(**ps) + self.generator = generator + + @classmethod + def num_parameters(cls) -> int: + return 2 + + @cached_property + def eigenvalues_generator( + self, max_num_evals: int | None = None, max_num_gaps: int | None = None + ) -> Tensor: + if isinstance(self.generator, AbstractBlock): + generator_tensor = block_to_tensor(self.generator) + elif isinstance(self.generator, Tensor): + generator_tensor = self.generator + return eigenvalues(generator_tensor, max_num_evals, max_num_gaps) + + @property + def eigenvalues(self) -> Tensor: + return torch.exp( + -1j * evaluate(self.parameters.parameter, as_torch=True) * self.eigenvalues_generator + ) + + @property + def n_qubits(self) -> int: + if isinstance(self.generator, Tensor): + n_qubits = int(np.log2(self.generator.shape[-1])) + else: + n_qubits = self.generator.n_qubits # type: ignore [union-attr] + + return n_qubits + + def dagger(self) -> Any: + p = list(self.parameters.expressions())[0] + return HamEvo(deepcopy(self.generator), -extract_original_param_entry(p)) + + def digital_decomposition(self, approximation: LTSOrder = LTSOrder.ST4) -> AbstractBlock: + """Decompose the Hamiltonian evolution into digital gates + + Args: + approximation (str, optional): Choose the type of decomposition. Defaults to "st4". + Available types are: + * 'basic' = apply first-order Trotter formula and decompose each term of + the exponential into digital gates. It is exact only if applied to an + operator whose terms are mutually commuting. + * 'st2' = Trotter-Suzuki 2nd order formula for approximating non-commuting + Hamiltonians. 
+ * 'st4' = Trotter-Suzuki 4th order formula for approximating non-commuting + Hamiltonians. + + Returns: + AbstractBlock: a block with the digital decomposition + """ + + # psi(t) = exp(-i * H * t * psi0) + # psi(t) = exp(-i * lambda * t * psi0) + # H = sum(Paulin) + sum(Pauli1*Pauli2) + logger.info("Quantum simulation of the time-independent Schrödinger equation.") + + blocks = [] + + # how to change the type/dict to enum effectively + + # when there is a term including non-commuting matrices use st2 or st4 + + # 1) should check that the given generator respects the constraints + # single-qubit gates + + assert isinstance( + self.generator, AbstractBlock + ), "Only a generator represented as a block can be decomposed" + + if block_is_qubit_hamiltonian(self.generator): + try: + block_is_commuting_hamiltonian(self.generator) + approximation = LTSOrder.BASIC # use the simpler approach if the H is commuting + except TypeError: + logger.warning( + """Non-commuting terms in the Pauli operator. + The Suzuki-Trotter approximation is applied.""" + ) + + blocks.extend( + lie_trotter_suzuki( + block=self.generator, + parameter=self.parameters.parameter, + order=LTSOrder[approximation], + ) + ) + + # 2) return an AbstractBlock instance with the set of gates + # resulting from the decomposition + + return chain(*blocks) + else: + raise NotImplementedError( + "The current digital decomposition can be applied only to Pauli Hamiltonians." 
+ ) + + +class CNOT(ControlBlock): + """The CNot, or CX, gate""" + + name = OpName.CNOT + + def __init__(self, control: int, target: int) -> None: + self.generator = kron((I(control) - Z(control)) * 0.5, X(target) - I(target)) + super().__init__((control,), X(target)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-2, 0, 0, 0], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor([-1, 1, 1, 1], dtype=cdouble) + + def __ascii__(self, console: Console) -> RenderableType: + (target, control) = self.qubit_support + h = abs(target - control) + 1 + return Panel(self._block_title, expand=False, height=3 * h) + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + return Tree(self._block_title) + else: + tree.add(self._block_title) + return tree + + def dagger(self) -> CNOT: + return CNOT(*self.qubit_support) + + +class MCZ(ControlBlock): + name = OpName.MCZ + + def __init__(self, control: tuple[int, ...], target: int) -> None: + self.generator = kron( + *[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], Z(target) - I(target) + ) + super().__init__(control, Z(target)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.cat((torch.tensor(-2, dtype=cdouble), torch.zeros(2**self.n_qubits - 1))) + + @property + def eigenvalues(self) -> Tensor: + torch.cat((torch.tensor(-1, dtype=cdouble), torch.ones(2**self.n_qubits - 1))) + + def __ascii__(self, console: Console) -> RenderableType: + (target, control) = self.qubit_support + h = abs(target - control) + 1 + return Panel(self._block_title, expand=False, height=3 * h) + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + return Tree(self._block_title) + else: + tree.add(self._block_title) + return tree + + def dagger(self) -> MCZ: + return MCZ(self.qubit_support[:-1], self.qubit_support[-1]) + + +class CZ(MCZ): + """The CZ gate""" + + name = OpName.CZ + + def __init__(self, control: int, target: 
int) -> None: + super().__init__((control,), target) + + def dagger(self) -> CZ: + return CZ(self.qubit_support[-2], self.qubit_support[-1]) + + +class MCRX(ParametricControlBlock): + name = OpName.MCRX + + def __init__( + self, + control: tuple[int, ...], + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ) -> None: + self.generator = kron(*[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], X(target)) + super().__init__(control, RX(target, parameter)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.cat( + (torch.zeros(2**self.n_qubits - 2), torch.tensor([1, -1], dtype=cdouble)) + ) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((torch.ones(2**self.n_qubits - 2), lmbd, lmbd.conj())) + + +class CRX(MCRX): + """The CRX gate""" + + name = OpName.CRX + + def __init__( + self, + control: int, + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ): + super().__init__((control,), target, parameter) + + +class MCRY(ParametricControlBlock): + name = OpName.MCRY + + def __init__( + self, + control: tuple[int, ...], + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ) -> None: + self.generator = kron(*[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], Y(target)) + super().__init__(control, RY(target, parameter)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.cat( + (torch.zeros(2**self.n_qubits - 2), torch.tensor([1, -1], dtype=cdouble)) + ) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((torch.ones(2**self.n_qubits - 2), lmbd, lmbd.conj())) + + +class CRY(MCRY): + """The CRY 
gate""" + + name = OpName.CRY + + def __init__( + self, + control: int, + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ): + super().__init__((control,), target, parameter) + + +class MCRZ(ParametricControlBlock): + name = OpName.MCRZ + + def __init__( + self, + control: tuple[int, ...], + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ) -> None: + self.generator = kron(*[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], Z(target)) + super().__init__(control, RZ(target, parameter)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.cat( + (torch.zeros(2**self.n_qubits - 2), torch.tensor([1, -1], dtype=cdouble)) + ) + + @property + def eigenvalues(self) -> Tensor: + val = evaluate(self.parameters.parameter, as_torch=True) + lmbd = torch.cos(val / 2.0) - 1j * torch.sin(val / 2.0) + return torch.cat((torch.ones(2**self.n_qubits - 2), lmbd, lmbd.conj())) + + +class CRZ(MCRZ): + """The CRZ gate""" + + name = OpName.CRZ + + def __init__( + self, + control: int, + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ): + super().__init__((control,), target, parameter) + + +class CSWAP(ControlBlock): + """The CSWAP (Control-SWAP) gate.""" + + name = OpName.CSWAP + + def __init__(self, control: int | tuple[int, ...], target1: int, target2: int) -> None: + if isinstance(control, tuple): + control = control[0] + + a00m = 0.5 * (Z(control) - I(control)) + a00p = -0.5 * (Z(control) + I(control)) + a11 = 0.5 * (Z(target1) - I(target1)) + a22 = -0.5 * (Z(target2) + I(target2)) + a12 = 0.5 * (chain(X(target1), Z(target1)) + X(target1)) + a21 = 0.5 * (chain(Z(target2), X(target2)) + X(target2)) + no_effect = kron(a00m, I(target1), I(target2)) + swap_effect = ( + kron(a00p, -1.0 * a22, a11) + + kron(a00p, -1.0 * a11, a22) + + kron(a00p, a12, a21) + + kron(a00p, a21, a12) + ) + self.generator = no_effect + swap_effect + 
super().__init__((control,), SWAP(target1, target2)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor((1, -1, 1, 1, 1, 1, 1, 1), dtype=torch.cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor((1, -1, 1, 1, 1, 1, 1, 1), dtype=torch.cdouble) + + @property + def nqubits(self) -> int: + return 3 + + def dagger(self) -> CSWAP: + return CSWAP(*self.qubit_support) + + +class T(PrimitiveBlock): + """The T gate""" + + name = OpName.T + + def __init__(self, target: int): + self.generator = I(target) - Z(target) + super().__init__((target,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([0, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor([1.0, torch.sqrt(torch.tensor([1j]))], dtype=cdouble) + + @property + def n_qubits(self) -> int: + return 1 + + def dagger(self) -> TDagger: + return TDagger(*self.qubit_support) + + +class TDagger(PrimitiveBlock): + """The Hermitian adjoint/conjugate transpose of the T gate""" + + # FIXME: this gate is not support by any backend + name = "T_dagger" + + def __init__(self, target: int): + self.generator = I(target) - Z(target) + super().__init__((target,)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([0, 1], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor([1.0, torch.sqrt(torch.tensor([-1j]))], dtype=cdouble) + + @property + def n_qubits(self) -> int: + return 1 + + def dagger(self) -> T: + return T(*self.qubit_support) + + +class SWAP(PrimitiveBlock): + """The SWAP gate""" + + name = OpName.SWAP + + def __init__(self, control: int, target: int) -> None: + a11 = 0.5 * (Z(control) - I(control)) + a22 = -0.5 * (Z(target) + I(target)) + a12 = 0.5 * (chain(X(control), Z(control)) + X(control)) + a21 = 0.5 * (chain(Z(target), X(target)) + X(target)) + self.generator = ( + kron(-1.0 * a22, a11) + kron(-1.0 * a11, a22) + kron(a12, a21) + 
kron(a21, a12) + ) + super().__init__((control, target)) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor([-2, 0, 0, 0], dtype=cdouble) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor([-1, 1, 1, 1], dtype=cdouble) + + @property + def n_qubits(self) -> int: + return 2 + + @property + def _block_title(self) -> str: + c, t = self.qubit_support + s = f"{self.name}({c}, {t})" + return s if self.tag is None else (s + rf" \[tag: {self.tag}]") + + def dagger(self) -> SWAP: + return SWAP(*self.qubit_support) + + +class AnalogSWAP(HamEvo): + """ + Single time-independent Hamiltonian evolution over a Rydberg Ising + hamiltonian yielding a SWAP (up to global phase). + + Derived from + [Bapat et al.](https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.4.L012023) + where it is applied to XX-type Hamiltonian + """ + + name = OpName.ANALOGSWAP + + def __init__(self, control: int, target: int, parameter: TParameter = 3 * np.pi / 4): + rydberg_ising_hamiltonian_generator = ( + 4.0 * kron((I(control) - Z(control)) / 2.0, (I(target) - Z(target)) / 2.0) + + (2.0 / 3.0) * np.sqrt(2.0) * X(control) + + (2.0 / 3.0) * np.sqrt(2.0) * X(target) + + (1.0 + np.sqrt(5.0) / 3) * Z(control) + + (1.0 + np.sqrt(5.0) / 3) * Z(target) + ) + super().__init__(rydberg_ising_hamiltonian_generator, parameter, (control, target)) + + +class MCPHASE(ParametricControlBlock): + name = OpName.MCPHASE + + def __init__( + self, + control: tuple[int, ...], + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ) -> None: + self.generator = kron( + *[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], Z(target) - I(target) + ) + super().__init__(control, PHASE(target, parameter)) + + @classmethod + def num_parameters(cls) -> int: + return 1 + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.cat( + (torch.tensor([-2, 0], dtype=cdouble), (torch.zeros(2**self.n_qubits - 2))) + ) + + @property + def 
eigenvalues(self) -> Tensor: + v = evaluate(self.parameters.parameter, as_torch=True) + return torch.cat((torch.ones(2**self.n_qubits - 1), torch.exp(1j * v))) + + def __rich_tree__(self, tree: Tree = None) -> Tree: + if tree is None: + return Tree(self._block_title) + else: + tree.add(self._block_title) + return tree + + def __ascii__(self, console: Console) -> RenderableType: + (target, control) = self.qubit_support + h = abs(target - control) + 1 + return Panel(self._block_title, expand=False, height=3 * h) + + +class CPHASE(MCPHASE): + """The CPHASE gate""" + + name = OpName.CPHASE + + def __init__( + self, + control: int, + target: int, + parameter: Parameter | TNumber | sympy.Expr | str, + ): + super().__init__((control,), target, parameter) + + +class Toffoli(ControlBlock): + name = OpName.TOFFOLI + + def __init__(self, control: tuple[int, ...], target: int) -> None: + self.generator = kron( + *[(I(qubit) - Z(qubit)) * 0.5 for qubit in control], X(target) - I(target) + ) + super().__init__(control, X(target)) + + def dagger(self) -> Toffoli: + return Toffoli(self.qubit_support[:-1], self.qubit_support[-1]) + + @property + def n_qubits(self) -> int: + return len(self.qubit_support) + + @property + def eigenvalues_generator(self) -> Tensor: + return torch.tensor( + [-2, *[0 for _ in range(2 ** len(self.qubit_support) - 1)]], dtype=cdouble + ) + + @property + def eigenvalues(self) -> Tensor: + return torch.tensor( + [-1, *[1 for _ in range(2 ** len(self.qubit_support) - 1)]], dtype=cdouble + ) + + +# FIXME: better name that stresses difference to `Wait`? 
+@dataclass(eq=False, repr=False) +class AnalogEntanglement(AnalogBlock): + parameters: ParamMap = ParamMap(duration=1.0) + qubit_support: QubitSupport = QubitSupport("global") + + @property + def eigenvalues_generator(self) -> torch.Tensor: + return torch.tensor([0.0], dtype=cdouble) + + @property + def duration(self) -> Basic: + return self.parameters.duration + + +def _cast(T: Any, val: Any) -> Any: + return val if isinstance(val, T) else T(val) + + +def wait( + duration: TNumber | sympy.Basic, + qubit_support: str | QubitSupport | tuple = "global", +) -> WaitBlock: + """Constructs a [`WaitBlock`][qadence.blocks.analog.WaitBlock]. + + Arguments: + duration: Time to wait in nanoseconds. + qubit_support: Qubits the `WaitBlock` is applied to. Can be either + `"global"` to apply the wait block to all qubits or a tuple of integers. + + Returns: + a `WaitBlock` + """ + q = _cast(QubitSupport, qubit_support) + ps = ParamMap(duration=duration) + return WaitBlock(parameters=ps, qubit_support=q) + + +def entangle( + duration: Any, + qubit_support: str | QubitSupport | Tuple = "global", +) -> AnalogEntanglement: + q = _cast(QubitSupport, qubit_support) + ps = ParamMap(duration=duration) + return AnalogEntanglement(parameters=ps, qubit_support=q) + + +def AnalogRot( + duration: float | str | Parameter = 1000.0, + omega: float | str | Parameter = 0, + delta: float | str | Parameter = 0, + phase: float | str | Parameter = 0, + qubit_support: str | QubitSupport | Tuple = "global", +) -> ConstantAnalogRotation: + """General analog rotation operation. + + Arguments: + duration: Duration of the rotation [ns]. 
+ omega: Rotation frequency [rad/μs] + delta: Rotation frequency [rad/μs] + phase: Phase angle [rad] + qubit_support: Defines the (local/global) qubit support + + Returns: + ConstantAnalogRotation + """ + q = _cast(QubitSupport, qubit_support) + if isinstance(duration, str): + duration = Parameter(duration) + alpha = duration * sympy.sqrt(omega**2 + delta**2) / 1000 # type: ignore [operator] + + ps = ParamMap(alpha=alpha, duration=duration, omega=omega, delta=delta, phase=phase) + return ConstantAnalogRotation(parameters=ps, qubit_support=q) + + +def _analog_rot( + angle: float | str | Parameter, + qubit_support: str | QubitSupport | Tuple, + phase: float, +) -> ConstantAnalogRotation: + q = _cast(QubitSupport, qubit_support) + # assuming some arbitrary omega = π rad/μs + alpha = _cast(Parameter, angle) + + omega = np.pi + duration = alpha / omega * 1000 + + # FIXME: once https://gitlab.pasqal.com/pqs/qadence/-/issues/402 is fixed set default duration + # in the function arguments to: + # duration = Parameter(160) + # and compute omega like this: + # omega = alpha / duration * 1000 + ps = ParamMap(alpha=alpha, duration=duration, omega=omega, delta=0, phase=phase) + return ConstantAnalogRotation(parameters=ps, qubit_support=q) + + +def AnalogRX( + angle: float | str | Parameter, + qubit_support: str | QubitSupport | Tuple = "global", +) -> ConstantAnalogRotation: + """Analog X rotation. Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]: + + ```python + φ=2.4; Ω=π; t = φ/Ω * 1000 + AnalogRot(duration=t, omega=Ω) + ``` + + Arguments: + angle: Rotation angle [rad] + qubit_support: Defines the (local/global) qubit support + + Returns: + ConstantAnalogRotation + """ + return _analog_rot(angle, qubit_support, phase=0) + + +def AnalogRY( + angle: float | str | Parameter, + qubit_support: str | QubitSupport | Tuple = "global", +) -> ConstantAnalogRotation: + """Analog Y rotation. 
Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]: + + ```python + φ=2.4; Ω=π; t = φ/Ω * 1000 + AnalogRot(duration=t, omega=Ω, phase=-π/2) + ``` + Arguments: + angle: Rotation angle [rad] + qubit_support: Defines the (local/global) qubit support + + Returns: + ConstantAnalogRotation + """ + return _analog_rot(angle, qubit_support, phase=-np.pi / 2) + + +def AnalogRZ( + angle: float | str | Parameter, + qubit_support: str | QubitSupport | Tuple = "global", +) -> ConstantAnalogRotation: + """Analog Z rotation. Shorthand for [`AnalogRot`][qadence.operations.AnalogRot]: + ``` + φ=2.4; δ=π; t = φ/δ * 100) + AnalogRot(duration=t, delta=δ, phase=π/2) + ``` + """ + q = _cast(QubitSupport, qubit_support) + alpha = _cast(Parameter, angle) + delta = np.pi + duration = alpha / delta * 1000 + ps = ParamMap(alpha=alpha, duration=duration, omega=0, delta=delta, phase=np.pi / 2) + return ConstantAnalogRotation(qubit_support=q, parameters=ps) + + +# gate sets +# FIXME: this could be inferred by the number of qubits if we had +# a class property for each operation. 
The number of qubits can default +# to None for operations which do not have it by default +# this would allow to greatly simplify the tests +pauli_gateset: list = [I, X, Y, Z] +# FIXME: add Tdagger when implemented +single_qubit_gateset = [X, Y, Z, H, I, RX, RY, RZ, U, S, SDagger, T, PHASE] +two_qubit_gateset = [CNOT, SWAP, CZ, CRX, CRY, CRZ, CPHASE] +three_qubit_gateset = [CSWAP] +multi_qubit_gateset = [Toffoli, MCRX, MCRY, MCRZ, MCPHASE, MCZ] +analog_gateset = [ + HamEvo, + ConstantAnalogRotation, + AnalogEntanglement, + AnalogSWAP, + AnalogRX, + AnalogRY, + AnalogRZ, + entangle, + wait, +] +non_unitary_gateset = [Zero, N] diff --git a/qadence/overlap.py b/qadence/overlap.py new file mode 100644 index 000000000..c5fa96151 --- /dev/null +++ b/qadence/overlap.py @@ -0,0 +1,453 @@ +from __future__ import annotations + +from collections import Counter +from typing import Any, Callable + +import numpy as np +import torch +from torch import Tensor + +from qadence.backend import BackendConfiguration, BackendName +from qadence.backends.pytorch_wrapper import DiffMode +from qadence.blocks import AbstractBlock, chain, kron, tag +from qadence.circuit import QuantumCircuit +from qadence.divergences import js_divergence +from qadence.measurements import Measurements +from qadence.models import QuantumModel +from qadence.operations import SWAP, H, I, S, Z +from qadence.transpile import reassign +from qadence.types import OverlapMethod + +# Modules to be automatically added to the qadence namespace +__all__ = ["Overlap", "OverlapMethod"] + + +def _cswap(control: int, target1: int, target2: int) -> AbstractBlock: + # define projectors on control qubit + p0 = 0.5 * I(control) + 0.5 * Z(control) + p1 = 0.5 * I(control) + (-0.5) * Z(control) + + # construct controlled-SWAP block + cswap_blocks = kron(p0, I(target1), I(target2)) + kron(p1, SWAP(target1, target2)) + cswap = tag(cswap_blocks, f"CSWAP({control}, {target1}, {target2})") + + return cswap + + +def 
_controlled_unitary(control: int, unitary_block: AbstractBlock) -> AbstractBlock: + n_qubits = unitary_block.n_qubits + + # define projectors on control qubit + p0 = 0.5 * I(control) + 0.5 * Z(control) + p1 = 0.5 * I(control) + (-0.5) * Z(control) + + # shift qubit support of unitary + shifted_unitary_block = reassign(unitary_block, {i: control + i + 1 for i in range(n_qubits)}) + + # construct controlled-U block + cu_blocks = kron(p0, *[I(control + i + 1) for i in range(n_qubits)]) + kron( + p1, shifted_unitary_block + ) + cu = tag(cu_blocks, f"c-U({control}, {shifted_unitary_block.qubit_support})") + + return cu + + +def _is_counter_list(lst: list[Counter]) -> bool: + return all(map(lambda x: isinstance(x, Counter), lst)) and isinstance(lst, list) + + +def _select_overlap_method( + method: OverlapMethod, + backend: BackendName, + bra_circuit: QuantumCircuit, + ket_circuit: QuantumCircuit, +) -> tuple[Callable, QuantumCircuit, QuantumCircuit]: + if method == OverlapMethod.EXACT: + fn = overlap_exact + + def _overlap_fn( + param_values: dict, + bra_calc_fn: Callable, + bra_state: Tensor | None, + ket_calc_fn: Callable, + ket_state: Tensor | None, + ) -> Tensor: + bras = bra_calc_fn(param_values["bra"], bra_state) + kets = ket_calc_fn(param_values["ket"], ket_state) + overlap = fn(bras, kets) + return overlap + + elif method == OverlapMethod.JENSEN_SHANNON: + + def _overlap_fn( + param_values: dict, + bra_calc_fn: Callable, + bra_state: Tensor | None, + ket_calc_fn: Callable, + ket_state: Tensor | None, + ) -> Tensor: + bras = bra_calc_fn(param_values["bra"], bra_state) + kets = ket_calc_fn(param_values["ket"], ket_state) + overlap = overlap_jensen_shannon(bras, kets) + return overlap + + elif method == OverlapMethod.COMPUTE_UNCOMPUTE: + # create a single circuit from bra and ket circuits + bra_circuit = QuantumCircuit( + bra_circuit.n_qubits, bra_circuit.block, ket_circuit.block.dagger() + ) + ket_circuit = None # type: ignore[assignment] + + def _overlap_fn( # 
type: ignore [misc] + param_values: dict, bra_calc_fn: Callable, bra_state: Tensor | None, *_: Any + ) -> Tensor: + bras = bra_calc_fn(param_values["bra"], bra_state) + overlap = overlap_compute_uncompute(bras) + return overlap + + elif method == OverlapMethod.SWAP_TEST: + if backend == BackendName.BRAKET: + raise ValueError("SWAP test method is not supported by the Braket backend.") + + n_qubits = bra_circuit.n_qubits + + # shift qubit support of bra and ket circuit blocks + shifted_bra_block = reassign(bra_circuit.block, {i: i + 1 for i in range(n_qubits)}) + shifted_ket_block = reassign( + ket_circuit.block, {i: i + n_qubits + 1 for i in range(n_qubits)} + ) + ket_circuit = None # type: ignore[assignment] + + # construct swap test circuit + state_blocks = kron(shifted_bra_block, shifted_ket_block) + cswap_blocks = chain(*[_cswap(0, n + 1, n + 1 + n_qubits) for n in range(n_qubits)]) + swap_test_blocks = chain(H(0), state_blocks, cswap_blocks, H(0)) + bra_circuit = QuantumCircuit(2 * n_qubits + 1, swap_test_blocks) + + def _overlap_fn( # type: ignore [misc] + param_values: dict, bra_calc_fn: Callable, bra_state: Tensor | None, *_: Any + ) -> Tensor: + bras = bra_calc_fn(param_values["bra"], bra_state) + overlap = overlap_swap_test(bras) + return overlap + + elif method == OverlapMethod.HADAMARD_TEST: + if backend == BackendName.BRAKET: + raise ValueError("Hadamard test method is not supported by the Braket backend.") + + n_qubits = bra_circuit.n_qubits + + # construct controlled bra and ket blocks + c_bra_block = _controlled_unitary(0, bra_circuit.block) + c_ket_block = _controlled_unitary(0, ket_circuit.block.dagger()) + + # construct swap test circuit for Re part + re_blocks = chain(H(0), c_bra_block, c_ket_block, H(0)) + bra_circuit = QuantumCircuit(n_qubits + 1, re_blocks) + + # construct swap test circuit for Im part + im_blocks = chain(H(0), c_bra_block, c_ket_block, S(0), H(0)) + ket_circuit = QuantumCircuit(n_qubits + 1, im_blocks) + + def _overlap_fn( + 
param_values: dict, + bra_calc_fn: Callable, + bra_state: Tensor | None, + ket_calc_fn: Callable, + ket_state: Tensor | None, + ) -> Tensor: + bras = bra_calc_fn(param_values["bra"], bra_state) + kets = ket_calc_fn(param_values["ket"], ket_state) + overlap = overlap_hadamard_test(bras, kets) + return overlap + + return _overlap_fn, bra_circuit, ket_circuit + + +def overlap_exact(bras: Tensor, kets: Tensor) -> Tensor: + """Calculate overlap using exact quantum mechanical definition. + + Args: + bras (Tensor): full bra wavefunctions + kets (Tensor): full ket wavefunctions + + Returns: + Tensor: overlap tensor containing values of overlap of each bra with each ket + """ + return torch.abs(torch.sum(bras.conj() * kets, dim=1)) ** 2 + + +def fidelity(bras: Tensor, kets: Tensor) -> Tensor: + return overlap_exact(bras, kets) + + +def overlap_jensen_shannon(bras: list[Counter], kets: list[Counter]) -> Tensor: + """Calculate overlap from bitstring counts using Jensen-Shannon divergence method. + + Args: + bras (list[Counter]): bitstring counts corresponding to bra wavefunctions + kets (list[Counter]): bitstring counts corresponding to ket wavefunctions + + Returns: + Tensor: overlap tensor containing values of overlap of each bra with each ket + """ + return 1 - torch.tensor([js_divergence(p, q) for p, q in zip(bras, kets)]) + + +def overlap_compute_uncompute(bras: Tensor | list[Counter]) -> Tensor: + """Calculate overlap using compute-uncompute method from full wavefunctions or + bitstring counts. 
+ + Args: + bras (Tensor | list[Counter]): full bra wavefunctions or bitstring counts + + Returns: + Tensor: overlap tensor containing values of overlap of each bra with zeros ket + """ + if isinstance(bras, Tensor): + # calculate exact overlap of full bra wavefunctions with |0> state + overlap = torch.abs(bras[:, 0]) ** 2 + + elif isinstance(bras, list): + # estimate overlap as the fraction of shots when "0..00" bitstring was observed + n_qubits = len(list(bras[0].keys())[0]) + n_shots = sum(list(bras[0].values())) + overlap = torch.tensor([p["0" * n_qubits] / n_shots for p in bras]) + + return overlap + + +def overlap_swap_test(bras: Tensor | list[Counter]) -> Tensor: + """Calculate overlap using swap test method from full wavefunctions or + bitstring counts. + + Args: + bras (Tensor | list[Counter]): full bra wavefunctions or bitstring counts + + Returns: + Tensor: overlap tensor + """ + if isinstance(bras, Tensor): + n_qubits = int(np.log2(bras.shape[1])) + + # define measurement operator |0><0| x I + proj_op = torch.tensor([[1.0, 0.0], [0.0, 0.0]]) + ident_op = torch.diag(torch.tensor([1.0 for _ in range(2 ** (n_qubits - 1))])) + meas_op = torch.kron(proj_op, ident_op).type(torch.complex128) + + # estimate overlap from ancilla qubit measurement + prob0 = (bras.conj() * torch.matmul(meas_op, bras.t()).t()).sum(dim=1).real + + elif _is_counter_list(bras): + # estimate overlap as the fraction of shots when 0 was observed on ancilla qubit + n_qubits = len(list(bras[0].keys())[0]) + n_shots = sum(list(bras[0].values())) + prob0 = torch.tensor( + [ + sum(map(lambda k, v: v if k[0] == "0" else 0, p.keys(), p.values())) / n_shots + for p in bras + ] + ) + else: + raise TypeError("Incorrect type passed for bras argument.") + + # construct final overlap tensor + overlap = 2 * prob0 - 1 + + return overlap + + +def overlap_hadamard_test( + bras_re: Tensor | list[Counter], bras_im: Tensor | list[Counter] +) -> Tensor: + """Calculate overlap using Hadamard test method from 
full wavefunctions or + bitstring counts. + + Args: + bras_re (Tensor | list[Counter]): full bra wavefunctions or bitstring counts + for estimation of overlap's real part + bras_im (Tensor | list[Counter]): full bra wavefunctions or bitstring counts + for estimation of overlap's imaginary part + + Returns: + Tensor: overlap tensor + """ + if isinstance(bras_re, Tensor) and isinstance(bras_im, Tensor): + n_qubits = int(np.log2(bras_re.shape[1])) + + # define measurement operator |0><0| x I + proj_op = torch.tensor([[1.0, 0.0], [0.0, 0.0]]) + ident_op = torch.diag(torch.tensor([1.0 for _ in range(2 ** (n_qubits - 1))])) + meas_op = torch.kron(proj_op, ident_op).type(torch.complex128) + + # estimate overlap from ancilla qubit measurement + prob0_re = (bras_re * torch.matmul(meas_op, bras_re.conj().t()).t()).sum(dim=1).real + prob0_im = (bras_im * torch.matmul(meas_op, bras_im.conj().t()).t()).sum(dim=1).real + + elif _is_counter_list(bras_re) and _is_counter_list(bras_im): + # estimate overlap as the fraction of shots when 0 was observed on ancilla qubit + n_qubits = len(list(bras_re[0].keys())[0]) + n_shots = sum(list(bras_re[0].values())) + prob0_re = torch.tensor( + [ + sum(map(lambda k, v: v if k[0] == "0" else 0, p.keys(), p.values())) / n_shots + for p in bras_re + ] + ) + prob0_im = torch.tensor( + [ + sum(map(lambda k, v: v if k[0] == "0" else 0, p.keys(), p.values())) / n_shots + for p in bras_im + ] + ) + else: + raise TypeError("Incorrect types passed for bras_re and kets_re arguments.") + + # construct final overlap tensor + overlap = (2 * prob0_re - 1) ** 2 + (2 * prob0_im - 1) ** 2 + + return overlap + + +class Overlap(QuantumModel): + def __init__( + self, + bra_circuit: QuantumCircuit, + ket_circuit: QuantumCircuit, + backend: BackendName = BackendName.PYQTORCH, + diff_mode: DiffMode = DiffMode.AD, + protocol: Measurements | None = None, + configuration: BackendConfiguration | dict | None = None, + method: OverlapMethod = OverlapMethod.EXACT, + ): + 
self.backend_name = backend + self.method = method + + overlap_fn, bra_circuit, ket_circuit = _select_overlap_method( + method, backend, bra_circuit, ket_circuit + ) + self.overlap_fn = overlap_fn + + super().__init__( + bra_circuit, + backend=backend, + diff_mode=diff_mode, + protocol=protocol, + configuration=configuration, + ) + self.bra_feat_param_names = set([inp.name for inp in self.inputs]) + + if ket_circuit: + self.ket_model = QuantumModel( + ket_circuit, + backend=backend, + diff_mode=diff_mode, + protocol=protocol, + configuration=configuration, + ) + self.ket_feat_param_names = set([inp.name for inp in self.ket_model.inputs]) + else: + self.ket_model = None # type: ignore [assignment] + self.ket_feat_param_names = set([]) + + def _process_param_values( + self, bra_param_values: dict[str, Tensor], ket_param_values: dict[str, Tensor] + ) -> dict: + # we assume that either batch sizes are equal or 0 in case when no user params + # are present in bra/ket + bra_param_values = { + k: v.reshape(-1) if v.shape == () else v for k, v in bra_param_values.items() + } + batch_size_bra = ( + len(list(bra_param_values.values())[0]) if len(bra_param_values) != 0 else 0 + ) + ket_param_values = { + k: v.reshape(-1) if v.shape == () else v for k, v in ket_param_values.items() + } + batch_size_ket = ( + len(list(ket_param_values.values())[0]) if len(ket_param_values) != 0 else 0 + ) + new_bra_param_values = bra_param_values.copy() + new_ket_param_values = ket_param_values.copy() + + # if len(self.bra_feat_param_names) + len(self.ket_feat_param_names) <= 2: + + if len(self.bra_feat_param_names.union(self.ket_feat_param_names)) == 2: + # extend bra parameter tensors + for param_name in new_bra_param_values.keys(): + new_bra_param_values[param_name] = new_bra_param_values[param_name].repeat( + batch_size_ket + ) + + # extend ket parameter tensors + for param_name in new_ket_param_values.keys(): + idxs = torch.cat( + [ + torch.ones(batch_size_bra, dtype=torch.int64) * i + for 
i in range(batch_size_ket) + ] + ) + new_ket_param_values[param_name] = new_ket_param_values[param_name][idxs] + + if self.method in [OverlapMethod.EXACT, OverlapMethod.JENSEN_SHANNON]: + param_values = {"bra": new_bra_param_values, "ket": new_ket_param_values} + elif self.method in [ + OverlapMethod.COMPUTE_UNCOMPUTE, + OverlapMethod.SWAP_TEST, + OverlapMethod.HADAMARD_TEST, + ]: + # merge bra and ket param values to simulate all wavefunctions in one batch + new_bra_param_values.update(new_ket_param_values) + param_values = {"bra": new_bra_param_values} + if self.method == OverlapMethod.HADAMARD_TEST: + param_values["ket"] = new_bra_param_values + + elif len(self.bra_feat_param_names.union(self.ket_feat_param_names)) < 2: + if batch_size_bra == batch_size_ket or batch_size_bra == 0 or batch_size_ket == 0: + param_values = {"bra": bra_param_values, "ket": ket_param_values} + else: + raise ValueError("Batch sizes of both bra and ket parameters must be equal.") + + else: + raise ValueError("Multiple feature parameters for bra/ket are not currently supported.") + + return param_values + + def forward( # type: ignore [override] + self, + bra_param_values: dict[str, Tensor] = {}, + ket_param_values: dict[str, Tensor] = {}, + bra_state: Tensor | None = None, + ket_state: Tensor | None = None, + n_shots: int = 0, + ) -> Tensor: + # reformat parameters + param_values = self._process_param_values(bra_param_values, ket_param_values) + + # determine bra and ket calculation functions + if n_shots == 0: + bra_calc_fn = getattr(self, "run") + ket_calc_fn = getattr(self.ket_model, "run", None) + else: + + def bra_calc_fn(values: dict, state: Tensor) -> Any: + return getattr(self, "sample")(values, n_shots, state) + + def ket_calc_fn(values: dict, state: Tensor) -> Any: + return getattr(self.ket_model, "sample", lambda *_: _)(values, n_shots, state) + + # calculate overlap + overlap = self.overlap_fn( + param_values, bra_calc_fn, bra_state, ket_calc_fn, ket_state # type: ignore 
[arg-type] + ) + + # reshape output if needed + if len(self.bra_feat_param_names.union(self.ket_feat_param_names)) < 2: + overlap = overlap[:, None] + else: + batch_size_bra = max(len(list(bra_param_values.values())[0]), 1) + batch_size_ket = max(len(list(ket_param_values.values())[0]), 1) + overlap = overlap.reshape((batch_size_ket, batch_size_bra)).t() + + return overlap diff --git a/qadence/parameters.py b/qadence/parameters.py new file mode 100644 index 000000000..9242de06a --- /dev/null +++ b/qadence/parameters.py @@ -0,0 +1,350 @@ +from __future__ import annotations + +from typing import Any, ItemsView, KeysView, ValuesView, get_args +from uuid import uuid4 + +import jsonschema +import numpy as np +import sympy +import torch +from sympy import * +from sympy import Array, Basic, Expr, Symbol, sympify +from sympytorch import SymPyModule +from torch import Tensor + +from qadence.logger import get_logger +from qadence.types import TNumber + +# Modules to be automatically added to the qadence namespace +__all__ = ["FeatureParameter", "Parameter", "VariationalParameter"] + +logger = get_logger(__file__) + + +ParameterJSONSchema = { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "name": {"type": "string"}, + "trainable": {"type": "string"}, + "value": {"type": "string"}, + }, + "oneOf": [ + { + "allOf": [ + {"required": ["name"]}, + {"not": {"required": ["trainable"]}}, + {"not": {"required": ["value"]}}, + ] + }, + {"allOf": [{"required": ["name", "trainable"]}, {"not": {"required": ["value"]}}]}, + {"required": ["name", "trainable", "value"]}, + ], +} + + +class Parameter(Symbol): + """ + A wrapper on top of `sympy.Symbol` to include two additional keywords: `trainable` and + `value`. This class is to define both feature parameter and variational parameters. + """ + + trainable: bool + """Trainable parameters are *variational* parameters. 
Non-trainable parameters are *feature* + parameters.""" + value: TNumber + """(Initial) value of the parameter.""" + + def __new__( + cls, name: str | TNumber | Tensor | Basic | Parameter, **assumptions: Any + ) -> Parameter | Basic | Expr | Array: + """ + Arguments: + name: When given a string only, the class + constructs a trainable Parameter with a a randomly initialized value. + **assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption + kwargs are supported by this constructor: `trainable: bool`, and `value: TNumber`. + + Example: + ```python exec="on" source="material-block" result="json" + from qadence import Parameter, VariationalParameter + + theta = Parameter("theta") + print(f"{theta}: trainable={theta.trainable} value={theta.value}") + assert not theta.is_number + + # you can specify both trainable/value in the constructor + theta = Parameter("theta", trainable=True, value=2.0) + print(f"{theta}: trainable={theta.trainable} value={theta.value}") + + # VariationalParameter/FeatureParameter are constructing + # trainable/untrainable Parameters + theta = VariationalParameter("theta", value=2.0) + assert theta == Parameter("theta", trainable=True, value=2.0) + + # When provided with a numeric type, Parameter constructs a sympy numeric type": + constant_zero = Parameter(0) + assert constant_zero.is_number + + # When passed a Parameter or a sympy expression, it just returns it. 
+ expr = Parameter("x") * Parameter("y") + print(f"{expr=} : {expr.free_symbols}") + ``` + """ + p: Parameter + if isinstance(name, get_args(TNumber)): + return sympify(name) + elif isinstance(name, Tensor): + if name.numel() == 1: + return sympify(name) + else: + return Array(name.detach().numpy()) + elif isinstance(name, Parameter): + p = super().__new__(cls, name.name, **assumptions) + p.name = name.name + p.trainable = name.trainable + p.value = name.value + return p + elif isinstance(name, (Basic, Expr)): + if name.is_number: + return sympify(evaluate(name)) + return name + elif isinstance(name, str): + p = super().__new__(cls, name, **assumptions) + p.trainable = assumptions.get("trainable", True) + p.value = assumptions.get("value", None) + if p.value is None: + p.value = torch.rand(1).item() + return p + else: + raise TypeError(f"Parameter does not support type {type(name)}") + + def __eq__(self, other: object) -> bool: + from qadence.utils import isclose + + if isinstance(other, str): + return self.name == other # type: ignore[no-any-return] + + elif isinstance(other, Parameter): + return ( + self.name == other.name + and self.trainable == other.trainable + and isclose(self.value, other.value) + ) + elif isinstance(other, Expr): + return self in other.free_symbols + elif isinstance(other, Symbol): + return self.name == other.name # type: ignore[no-any-return] + + return False + + def __hash__(self) -> Any: + return super().__hash__() + + def _to_dict(self) -> dict: + d = {"name": self.name, "trainable": str(self.trainable), "value": str(self.value)} + try: + jsonschema.validate(d, ParameterJSONSchema) + return d + except jsonschema.exceptions.ValidationError as e: + logger.exception(f"Parameter dict {d} doesnt comply to {ParameterJSONSchema} with {e}.") + return {} + + @classmethod + def _from_dict(cls, d: dict) -> Parameter | None: + try: + jsonschema.validate(d, ParameterJSONSchema) + trainable = True if d["trainable"] == "True" else False + return 
cls(name=d["name"], trainable=trainable, value=float(d["value"])) + except jsonschema.exceptions.ValidationError as e: + logger.exception(f"Parameter dict {d} doesnt comply to {ParameterJSONSchema} with {e}.") + return None + + +def FeatureParameter(name: str, **kwargs: Any) -> Parameter: + """Shorthand for `Parameter(..., trainable=False)`.""" + return Parameter(name, trainable=False, **kwargs) + + +def VariationalParameter(name: str, **kwargs: Any) -> Parameter: + """Shorthand for `Parameter(..., trainable=True)`.""" + return Parameter(name, trainable=True, **kwargs) + + +def extract_original_param_entry( + param: Expr, +) -> TNumber | Tensor | Expr: + """ + Given an Expression, what was the original "param" given by the user? It is either + going to be a numeric value, or a sympy Expression (in case a string was given, + it was converted via Parameter("string"). + """ + return param if not param.is_number else evaluate(param) + + +def torchify(expr: Expr) -> SymPyModule: + """ + Arguments: + expr: An expression consisting of Parameters. + + Returns: + A torchified, differentiable Expression. + """ + extra_funcs = {sympy.core.numbers.ImaginaryUnit: 1.0j} + return SymPyModule(expressions=[expr], extra_funcs=extra_funcs) + + +def sympy_to_numeric(expr: Basic) -> TNumber: + if expr.as_real_imag()[1] != 0: + return complex(expr) + else: + return float(expr) + + +def evaluate(expr: Expr, values: dict = {}, as_torch: bool = False) -> TNumber | Tensor: + """ + Arguments: + expr: An expression consisting of Parameters. + values: values dict which contains values for the Parameters, + if empty, Parameter.value will be used. + as_torch: Whether to retrieve a torch-differentiable expression result. 
+ + Example: + ```python exec="on" source="material-block" result="json" + from qadence.parameters import Parameter, evaluate + + expr = Parameter("x") * Parameter("y") + + # Unless specified, Parameter initialized random values + # Lets evaluate this expression and see what the result is + res = evaluate(expr) + print(res) + + # We can also evaluate the expr using a custom dict + d = {"x": 1, "y":2} + res = evaluate(expr, d) + print(res) + + # Lastly, if we want a differentiable result, lets put the as_torch flag + res = evaluate(expr, d, as_torch=True) + print(res) + ``` + """ + res: Basic + res_value: TNumber | Tensor + query: dict[Parameter, TNumber | Tensor] = {} + if isinstance(expr, Array): + return torch.Tensor(expr.tolist()) + else: + if not expr.is_number: + for s in expr.free_symbols: + if s.name in values.keys(): + query[s] = values[s.name] + elif hasattr(s, "value"): + query[s] = s.value + else: + raise ValueError(f"No value provided for symbol {s.name}") + if as_torch: + res_value = torchify(expr)(**{s.name: torch.tensor(v) for s, v in query.items()}) + else: + res = expr.subs(query) + res_value = sympy_to_numeric(res) + return res_value + + +def stringify(expr: Basic) -> str: + name: str = "" + if isinstance(expr, Array): + return str(np.array(expr.tolist())).replace(".", "_") + else: + if expr.is_number: + expr_hash = hash(sympy_to_numeric(expr)) + name = "fix_" + str(expr_hash) + else: + name = str(expr).replace(".", "_") + return name + + +class ParamMap: + """Connects UUIDs of parameters to their expressions and names. This class is not user-facing + and only needed for more complex block definitions. It provides convenient access to + expressions/UUIDs/names needed in different backends. + + Arguments: + kwargs: Parameters. 
+ + Example: + ```python exec="on" source="material-block" result="json" + import sympy + from qadence.parameters import ParamMap + + (x,y) = sympy.symbols("x y") + ps = ParamMap(omega=2.0, duration=x+y) + + print(f"{ps.names() = }") + print(f"{ps.expressions() = }") + print(f"{ps.uuids() = }") + ``` + """ + + def __init__(self, **kwargs: str | TNumber | Tensor | Basic | Parameter): + self._name_dict: dict[str, tuple[str, Basic]] = {} + self._uuid_dict: dict[str, str] = {} + for name, v in kwargs.items(): + param = v if isinstance(v, sympy.Basic) else Parameter(v) + uuid = str(uuid4()) + self._name_dict[name] = (uuid, param) + self._uuid_dict[uuid] = param + + def __getattr__(self, name: str) -> Basic: + _name_dict = self.__getattribute__("_name_dict") + if name in _name_dict: + (_, param) = _name_dict[name] + return param + else: + return self.__getattribute__(name) + + def uuid(self, name: str) -> str: + (_uuid, _) = self._name_dict[name] + return _uuid + + def param_str(self, name: str) -> str: + return stringify(self.param(name)) + + def uuid_param(self, name: str) -> tuple[str, Basic]: + return self._name_dict[name] + + def names(self) -> KeysView: + return self._name_dict.keys() + + def uuids(self) -> KeysView: + return self._uuid_dict.keys() + + def expressions(self) -> ValuesView: + return self._uuid_dict.values() + + def items(self) -> ItemsView: + return self._uuid_dict.items() + + def __repr__(self) -> str: + s = repr(self._name_dict) + s = s.replace("{", "(") + s = s.replace("}", ")") + return "ParamMap" + s + + def _to_dict(self) -> dict: + from qadence.serialization import serialize + + d = {name: (uuid, serialize(expr)) for (name, (uuid, expr)) in self._name_dict.items()} + return {"_name_dict": d} + + @classmethod + def _from_dict(cls, d: dict) -> ParamMap: + from qadence.serialization import deserialize + + res = ParamMap() + for name, (uuid, v) in d["_name_dict"].items(): + param: Parameter = deserialize(v) # type: ignore[assignment] + 
res._name_dict[name] = (uuid, param) + res._uuid_dict[uuid] = param + return res diff --git a/qadence/py.typed b/qadence/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/qadence/qubit_support.py b/qadence/qubit_support.py new file mode 100644 index 000000000..b7a0f0fdc --- /dev/null +++ b/qadence/qubit_support.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from typing import Any, Union + +from qadence.types import QubitSupportType + + +def _is_valid_support(t: Any) -> bool: + return isinstance(t, tuple) and all(i >= 0 for i in t) + + +class QubitSupport(tuple): + def __new__(cls, *support: Union[QubitSupportType, str, int, tuple]) -> QubitSupport: + if len(support) == 1: + if isinstance(support[0], tuple): + return QubitSupport(*support[0]) + if support[0] == "global": + support = (QubitSupportType.GLOBAL,) + valid = True + elif support[0] >= 0: # type: ignore[operator] + valid = True + else: + valid = False + else: + valid = _is_valid_support(support) + + if not valid: + raise ValueError( + "QubitSupport can be a tuple of ints or 'global'. 
For example:ℕn" + "QubitSupport(1,2,3) or QubitSupport('global')\n" + f"Found: {support}" + ) + return super(QubitSupport, cls).__new__(cls, support) # type: ignore[arg-type] + + def __add__(self, other: Any) -> QubitSupport: + if not isinstance(other, tuple): + raise ValueError(f"Cannot add type '{type(other)}' to QubitSupport.") + if self == other: + return self + elif self == ("global",): + return QubitSupport(*range(max(other) + 1)) if len(other) else QubitSupport("global") + elif other == ("global",): + return QubitSupport(*range(max(self) + 1)) if len(self) else QubitSupport("global") + else: + return QubitSupport(tuple({*self, *other})) + + def __radd__(self, other: Any) -> QubitSupport: + return self.__add__(other) + + @property + def is_global(self) -> bool: + return self == ("global",) + + def is_disjoint(self, other: Any) -> bool: + oth = QubitSupport(other) + if self.is_global or oth.is_global: + return False + else: + selfsup = set(self) + othersup = set(oth) + return selfsup.isdisjoint(othersup) diff --git a/qadence/register.py b/qadence/register.py new file mode 100644 index 000000000..ec0b2447f --- /dev/null +++ b/qadence/register.py @@ -0,0 +1,223 @@ +from __future__ import annotations + +from copy import deepcopy +from typing import Any + +import matplotlib.pyplot as plt +import networkx as nx +import numpy as np +from deepdiff import DeepDiff +from networkx.classes.reportviews import EdgeView, NodeView + +from qadence.types import LatticeTopology + +# Modules to be automatically added to the qadence namespace +__all__ = ["Register"] + + +def _scale_node_positions(graph: nx.Graph, scale: float) -> None: + scaled_nodes = {} + for k, node in graph.nodes.items(): + (x, y) = node["pos"] + scaled_nodes[k] = {"pos": (x * scale, y * scale)} + nx.set_node_attributes(graph, scaled_nodes) + + +class Register: + def __init__(self, support: nx.Graph | int): + """A 2D register of qubits which includes their coordinates (needed for e.g. analog + computing). 
The coordinates are ignored in backends that don't need them. The easiest + way to construct a register is via its classmethods like `Register.triangular_lattice`. + + Arguments: + support: A graph or number of qubits. Nodes can include a `"pos"` attribute + such that e.g.: `graph.nodes = {0: {"pos": (2,3)}, 1: {"pos": (0,0)}, ...}` which + will be used in backends that need qubit coordinates. + See the classmethods for simple construction of some predefined lattices if you + don't want to build a graph manually. + If you pass an integer the resulting register is the same as + `Register.all_to_all(n_qubits)`. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import Register + + reg = Register.honeycomb_lattice(2,3) + reg.draw() + ``` + """ + self.graph = support if isinstance(support, nx.Graph) else alltoall_graph(support) + + @property + def n_qubits(self) -> int: + return len(self.graph) + + @classmethod + def from_coordinates( + cls, coords: list[tuple], lattice: LatticeTopology | str = LatticeTopology.ARBITRARY + ) -> Register: + graph = nx.Graph() + for i, pos in enumerate(coords): + graph.add_node(i, pos=pos) + return cls(graph) + + @classmethod + def line(cls, n_qubits: int) -> Register: + return cls(line_graph(n_qubits)) + + @classmethod + def circle(cls, n_qubits: int, scale: float = 1.0) -> Register: + graph = nx.grid_2d_graph(n_qubits, 1, periodic=True) + graph = nx.relabel_nodes(graph, {(i, 0): i for i in range(n_qubits)}) + coords = nx.circular_layout(graph) + values = {i: {"pos": pos} for i, pos in coords.items()} + nx.set_node_attributes(graph, values) + _scale_node_positions(graph, scale) + return cls(graph) + + @classmethod + def square(cls, qubits_side: int, scale: float = 1.0) -> Register: + n_points = 4 * (qubits_side - 1) + + def gen_points() -> np.ndarray: + rotate_left = np.array([[0.0, -1.0], [1.0, 0.0]]) + increment = np.array([0.0, 1.0]) + + points = [np.array([0.0, 0.0])] + counter = 1 + while 
len(points) < n_points: + points.append(points[-1] + increment) + + counter = (counter + 1) % qubits_side + if counter == 0: + increment = rotate_left.dot(increment) + counter = 1 + points = np.array(points) # type: ignore[assignment] + points -= np.mean(points, axis=0) + + return points # type: ignore[return-value] + + graph = nx.grid_2d_graph(n_points, 1, periodic=True) + graph = nx.relabel_nodes(graph, {(i, 0): i for i in range(n_points)}) + values = {i: {"pos": point} for i, point in zip(graph.nodes, gen_points())} + nx.set_node_attributes(graph, values) + _scale_node_positions(graph, scale) + return cls(graph) + + @classmethod + def all_to_all(cls, n_qubits: int) -> Register: + return cls(alltoall_graph(n_qubits)) + + @classmethod + def rectangular_lattice( + cls, qubits_row: int, qubits_col: int, side_length: float = 1.0 + ) -> Register: + graph = nx.grid_2d_graph(qubits_col, qubits_row) + values = {i: {"pos": node} for (i, node) in enumerate(graph.nodes)} + graph = nx.relabel_nodes(graph, {(i, j): k for k, (i, j) in enumerate(graph.nodes)}) + nx.set_node_attributes(graph, values) + _scale_node_positions(graph, side_length) + return cls(graph) + + @classmethod + def triangular_lattice( + cls, n_cells_row: int, n_cells_col: int, side_length: float = 1.0 + ) -> Register: + return cls(triangular_lattice_graph(n_cells_row, n_cells_col, side_length)) + + @classmethod + def honeycomb_lattice(cls, n_cells_row: int, n_cells_col: int, scale: float = 1.0) -> Register: + graph = nx.hexagonal_lattice_graph(n_cells_row, n_cells_col) + graph = nx.relabel_nodes(graph, {(i, j): k for k, (i, j) in enumerate(graph.nodes)}) + _scale_node_positions(graph, scale) + return cls(graph) + + @classmethod + def lattice(cls, topology: LatticeTopology | str, *args: Any, **kwargs: Any) -> Register: + return getattr(cls, topology)(*args, **kwargs) # type: ignore[no-any-return] + + def draw(self, show: bool = True) -> None: + coords = {i: n["pos"] for i, n in self.graph.nodes.items()} + 
nx.draw(self.graph, with_labels=True, pos=coords) + if show: + plt.gcf().show() + + def __getitem__(self, item: int) -> Any: + return self.graph.nodes[item] + + @property + def support(self) -> set: + return set(self.graph.nodes) + + @property + def coords(self) -> dict: + return {i: tuple(node.get("pos", ())) for i, node in self.graph.nodes.items()} + + @property + def edges(self) -> EdgeView: + return self.graph.edges + + @property + def nodes(self) -> NodeView: + return self.graph.nodes + + def _scale_positions(self, scale: float) -> Register: + g = deepcopy(self.graph) + _scale_node_positions(g, scale) + return Register(g) + + def _to_dict(self) -> dict: + return {"graph": nx.node_link_data(self.graph)} + + @classmethod + def _from_dict(cls, d: dict) -> Register: + return cls(nx.node_link_graph(d["graph"])) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Register): + return False + return ( + DeepDiff(self.coords, other.coords, ignore_order=False) == {} + and nx.is_isomorphic(self.graph, other.graph) + and self.n_qubits == other.n_qubits + ) + + +def line_graph(n_qubits: int, spacing: float = 1.0) -> nx.Graph: + """Create graph representing linear lattice. 
+ + Args: + n_qubits (int): number of nodes in the graph + + Returns: + graph instance + """ + graph = nx.Graph() + for i in range(n_qubits): + graph.add_node(i, pos=(i * spacing, 0.0)) + for i, j in zip(range(n_qubits - 1), range(1, n_qubits)): + graph.add_edge(i, j) + return graph + + +def triangular_lattice_graph( + n_cells_row: int, n_cells_col: int, side_length: float = 1.0 +) -> nx.Graph: + graph = nx.triangular_lattice_graph(n_cells_row, n_cells_col) + graph = nx.relabel_nodes(graph, {(i, j): k for k, (i, j) in enumerate(graph.nodes)}) + _scale_node_positions(graph, side_length) + return graph + + +def alltoall_graph(n_qubits: int) -> nx.Graph: + if n_qubits == 2: + return line_graph(2) + elif n_qubits == 3: + return triangular_lattice_graph(1, 1) + + graph = nx.complete_graph(n_qubits) + # set seed to make sure the produced graphs are reproducible + coords = nx.spring_layout(graph, seed=0) + for i, pos in coords.items(): + graph.nodes[i]["pos"] = tuple(pos) + return graph diff --git a/qadence/serialization.py b/qadence/serialization.py new file mode 100644 index 000000000..2a53cdb54 --- /dev/null +++ b/qadence/serialization.py @@ -0,0 +1,352 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path +from typing import Any, get_args +from typing import Union as TypingUnion + +import torch +from sympy import * +from sympy import Basic, Expr, srepr + +from qadence import QuantumCircuit, operations +from qadence import blocks as qadenceblocks +from qadence.blocks import AbstractBlock +from qadence.blocks.utils import tag +from qadence.logger import get_logger +from qadence.ml_tools.models import TransformedModule +from qadence.models import QNN, QuantumModel +from qadence.parameters import Parameter +from qadence.register import Register +from qadence.types import SerializationFormat + +# Modules to be automatically added to the qadence namespace +__all__ = ["deserialize", "load", "save", "serialize"] + + +logger = 
get_logger(__name__) + + +def file_extension(file: Path | str) -> str: + FORMAT = "" + if isinstance(file, str): + _, extension = os.path.splitext(file) + FORMAT = extension[1:].upper() + elif isinstance(file, os.PathLike): + _, extension = os.path.splitext(str(file)) + FORMAT = extension[1:].upper() + return FORMAT + + +SUPPORTED_OBJECTS = [ + AbstractBlock, + QuantumCircuit, + QuantumModel, + QNN, + TransformedModule, + Register, + Basic, + torch.nn.Module, +] +SUPPORTED_TYPES = TypingUnion[ + AbstractBlock, + QuantumCircuit, + QuantumModel, + QNN, + TransformedModule, + Register, + Basic, + torch.nn.Module, +] + + +ALL_BLOCK_NAMES = [ + n for n in dir(qadenceblocks) if not (n.startswith("__") and n.endswith("__")) +] + [n for n in dir(operations) if not (n.startswith("__") and n.endswith("__"))] + + +def save_pt(d: dict, file_path: str | Path) -> None: + torch.save(d, file_path) + + +def save_json(d: dict, file_path: str | Path) -> None: + with open(file_path, "w") as file: + file.write(json.dumps(d)) + + +def load_pt(file_path: str | Path, map_location: str) -> Any: + return torch.load(file_path, map_location=map_location) + + +def load_json(file_path: str | Path, map_location: str) -> Any: + with open(file_path, "r") as file: + return json.load(file) + + +FORMAT_DICT = { + SerializationFormat.PT: (".pt", save_pt, load_pt, True), + SerializationFormat.JSON: (".json", save_json, load_json, False), +} + + +def serialize(obj: SUPPORTED_TYPES, save_params: bool = False) -> dict: + """ + Supported Types: + AbstractBlock | QuantumCircuit | QuantumModel | TransformedModule | Register | Module + Serializes a qadence object to a dictionary. + + Arguments: + obj (AbstractBlock | QuantumCircuit | QuantumModel | Register | Module): + Returns: + A dict. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + import torch + from qadence import serialize, deserialize, hea, hamiltonian_factory, Z + from qadence import QuantumCircuit, QuantumModel + + n_qubits = 2 + myblock = hea(n_qubits=n_qubits, depth=1) + block_dict = serialize(myblock) + print(block_dict) + + ## Lets use myblock in a QuantumCircuit and serialize it. + + qc = QuantumCircuit(n_qubits, myblock) + qc_dict = serialize(qc) + qc_deserialized = deserialize(qc_dict) + assert qc == qc_deserialized + + ## Finally, let's wrap it in a QuantumModel + obs = hamiltonian_factory(n_qubits, detuning = Z) + qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad') + + qm_dict = serialize(qm) + qm_deserialized = deserialize(qm_dict) + # Lets check if the loaded QuantumModel returns the same expectation + assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({})) + ``` + """ + if not isinstance(obj, get_args(SUPPORTED_TYPES)): + logger.error(TypeError(f"Serialization of object type {type(obj)} not supported.")) + d: dict = {} + try: + if isinstance(obj, Expr): + symb_dict = {} + expr_dict = {"name": str(obj), "expression": srepr(obj)} + symbs: set[Parameter | Basic] = obj.free_symbols + if symbs: + symb_dict = {"symbols": {str(s): s._to_dict() for s in symbs}} + d = {**expr_dict, **symb_dict} + elif isinstance(obj, (QuantumModel, QNN, TransformedModule)): + d = obj._to_dict(save_params) + elif isinstance(obj, torch.nn.Module): + d = {type(obj).__name__: obj.state_dict()} + else: + d = obj._to_dict() + except Exception as e: + logger.error(f"Serialization of object {obj} failed due to {e}") + return d + + +def deserialize(d: dict, as_torch: bool = False) -> SUPPORTED_TYPES: + """ + Supported Types: + AbstractBlock | QuantumCircuit | QuantumModel | TransformedModule | Register | Module + Deserializes a dict to one of the supported types. + + Arguments: + d (dict): A dict containing a serialized object. 
+ Returns: + AbstractBlock, QuantumCircuit, QuantumModel, TransformedModule, Register, Module. + + Examples: + ```python exec="on" source="material-block" result="json" + import torch + from qadence import serialize, deserialize, hea, hamiltonian_factory, Z + from qadence import QuantumCircuit, QuantumModel + + n_qubits = 2 + myblock = hea(n_qubits=n_qubits, depth=1) + block_dict = serialize(myblock) + print(block_dict) + + ## Lets use myblock in a QuantumCircuit and serialize it. + + qc = QuantumCircuit(n_qubits, myblock) + qc_dict = serialize(qc) + qc_deserialized = deserialize(qc_dict) + assert qc == qc_deserialized + + ## Finally, let's wrap it in a QuantumModel + obs = hamiltonian_factory(n_qubits, detuning = Z) + qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad') + + qm_dict = serialize(qm) + qm_deserialized = deserialize(qm_dict) + # Lets check if the loaded QuantumModel returns the same expectation + assert torch.isclose(qm.expectation({}), qm_deserialized.expectation({})) + ``` + """ + obj: Any + if d.get("expression"): + expr = eval(d["expression"]) + if hasattr(expr, "free_symbols"): + for symb in expr.free_symbols: + symb.value = float(d["symbols"][symb.name]["value"]) + obj = expr + elif d.get("QuantumModel"): + obj = QuantumModel._from_dict(d, as_torch) + elif d.get("QNN"): + obj = QNN._from_dict(d, as_torch) + elif d.get("TransformedModule"): + obj = TransformedModule._from_dict(d, as_torch) + elif d.get("block") and d.get("register"): + obj = QuantumCircuit._from_dict(d) + elif d.get("graph"): + obj = Register._from_dict(d) + elif d.get("type"): + if d["type"] in ALL_BLOCK_NAMES: + block: AbstractBlock = ( + getattr(operations, d["type"])._from_dict(d) + if hasattr(operations, d["type"]) + else getattr(qadenceblocks, d["type"])._from_dict(d) + ) + if d["tag"] is not None: + block = tag(block, d["tag"]) + obj = block + else: + import warnings + + msg = warnings.warn( + "In order to load a custom torch.nn.Module, make sure its imported in 
the namespace." + ) + try: + module_name = list(d.keys())[0] + obj = getattr(globals(), module_name) + obj.load_state_dict(d[module_name]) + except Exception as e: + logger.error( + TypeError( + f"{msg}. Unable to deserialize object due to {e}.\ + Supported objects are: {SUPPORTED_OBJECTS}" + ) + ) + return obj + + +def save( + obj: SUPPORTED_TYPES, + folder: str | Path, + file_name: str = "", + format: SerializationFormat = SerializationFormat.JSON, +) -> None: + """ + Same as serialize/deserialize but for storing/loading files. + Supported types: + AbstractBlock | QuantumCircuit | QuantumModel | TransformedModule | Register | torch.nn.Module + Saves a qadence object to a json/.pt. + + Arguments: + obj (AbstractBlock | QuantumCircuit | QuantumModel | Register): + Either AbstractBlock, QuantumCircuit, QuantumModel, TransformedModule, Register. + file_name (str): The name of the file. + format (str): The type of file to save. + Returns: + None. + + Examples: + ```python exec="on" source="material-block" result="json" + import torch + from pathlib import Path + import os + + from qadence import save, load, hea, hamiltonian_factory, Z + from qadence import QuantumCircuit, QuantumModel + + n_qubits = 2 + myblock = hea(n_qubits=n_qubits, depth=1) + qc = QuantumCircuit(n_qubits, myblock) + # Lets store the circuit in a json file + save(qc, '.', 'circ') + loaded_qc = load(Path('circ.json')) + qc == loaded_qc + os.remove('circ.json') + ## Let's wrap it in a QuantumModel and store that + obs = hamiltonian_factory(n_qubits, detuning = Z) + qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad') + save(qm, folder= '.',file_name= 'quantum_model') + qm_loaded = load('quantum_model.json') + os.remove('quantum_model.json') + ``` + """ + if not isinstance(obj, get_args(SUPPORTED_TYPES)): + logger.error(f"Serialization of object type {type(obj)} not supported.") + folder = Path(folder) + if not folder.is_dir(): + logger.error(NotADirectoryError) + if file_name == "": + 
file_name = type(obj).__name__ + try: + suffix, save_fn, _, save_params = FORMAT_DICT[format] + d = serialize(obj, save_params) + file_path = folder / Path(file_name + suffix) + save_fn(d, file_path) + logger.debug(f"Successfully saved {obj} from to {folder}.") + except Exception as e: + logger.error(f"Unable to write {type(obj)} to disk due to {e}") + + +def load(file_path: str | Path, map_location: str = "cpu") -> SUPPORTED_TYPES: + """ + Same as serialize/deserialize but for storing/loading files. + Supported types: AbstractBlock | QuantumCircuit | QuantumModel | TransformedModule | Register + Loads a .json or .pt file to one of the supported types. + + Arguments: + file_path (str): The name of the file. + map_location (str): In case of a .pt file, on which device to load the object (cpu,cuda). + Returns: + A object of type AbstractBlock, QuantumCircuit, QuantumModel, TransformedModule, Register. + + Examples: + ```python exec="on" source="material-block" result="json" + import torch + from pathlib import Path + import os + + from qadence import save, load, hea, hamiltonian_factory, Z + from qadence import QuantumCircuit, QuantumModel + + n_qubits = 2 + myblock = hea(n_qubits=n_qubits, depth=1) + qc = QuantumCircuit(n_qubits, myblock) + # Lets store the circuit in a json file + save(qc, '.', 'circ') + loaded_qc = load(Path('circ.json')) + qc == loaded_qc + os.remove('circ.json') + ## Let's wrap it in a QuantumModel and store that + obs = hamiltonian_factory(n_qubits, detuning = Z) + qm = QuantumModel(qc, obs, backend='pyqtorch', diff_mode='ad') + save(qm, folder= '.',file_name= 'quantum_model') + qm_loaded = load('quantum_model.json') + os.remove('quantum_model.json') + ``` + """ + d = {} + if isinstance(file_path, str): + file_path = Path(file_path) + if not os.path.exists(file_path): + logger.error(f"File {file_path} not found.") + raise FileNotFoundError + FORMAT = file_extension(file_path) + _, _, load_fn, _ = FORMAT_DICT[FORMAT] # type: ignore[index] + try: 
+ d = load_fn(file_path, map_location) + logger.debug(f"Successfully loaded {d} from {file_path}.") + except Exception as e: + logger.error(f"Unable to load Object from {file_path} due to {e}") + return deserialize(d) diff --git a/qadence/states.py b/qadence/states.py new file mode 100644 index 000000000..b7ee2eb34 --- /dev/null +++ b/qadence/states.py @@ -0,0 +1,557 @@ +from __future__ import annotations + +import random +from functools import singledispatch +from typing import List + +import torch +from torch import Tensor, concat +from torch.distributions import Categorical, Distribution + +from qadence import BackendName +from qadence.backends.api import backend_factory +from qadence.blocks import ChainBlock, KronBlock, PrimitiveBlock, chain, kron +from qadence.circuit import QuantumCircuit +from qadence.operations import CNOT, RX, RY, RZ, H, I, X +from qadence.overlap import fidelity +from qadence.types import Endianness, StateGeneratorType +from qadence.utils import basis_to_int + +# Modules to be automatically added to the qadence namespace +__all__ = [ + "uniform_state", + "zero_state", + "one_state", + "product_state", + "rand_product_state", + "ghz_state", + "random_state", + "uniform_block", + "one_block", + "zero_block", + "product_block", + "rand_product_block", + "ghz_block", + "pmf", + "normalize", + "is_normalized", + "rand_bitstring", + "equivalent_state", +] + +ATOL_64 = 1e-14 # 64 bit precision +NORMALIZATION_ATOL = ATOL_64 +DTYPE = torch.cdouble + +parametric_single_qubit_gates: List = [RX, RY, RZ] + +# PRIVATE + + +def _rand_haar_fast(n_qubits: int) -> Tensor: + # inspired by https://qiskit.org/documentation/_modules/qiskit/quantum_info/states/random.html#random_statevector + N = 2**n_qubits + x = -torch.log(torch.rand(N)) + sumx = torch.sum(x) + phases = torch.rand(N) * 2.0 * torch.pi + return (torch.sqrt(x / sumx) * torch.exp(1j * phases)).reshape(1, N) + + +def _rand_haar_slow(n_qubits: int) -> Tensor: + """ + Detailed in 
https://arxiv.org/pdf/math-ph/0609050.pdf + + Textbook implementation, but very expensive. For 12 qubits it takes several seconds. + For 1 qubit it seems to produce the same distribution as the measure above. + """ + N = 2**n_qubits + A = torch.zeros(N, N, dtype=DTYPE).normal_(0, 1) + B = torch.zeros(N, N, dtype=DTYPE).normal_(0, 1) + Z = A + 1.0j * B + Q, R = torch.linalg.qr(Z) + Lambda = torch.diag(torch.diag(R) / torch.diag(R).abs()) + haar_unitary = torch.matmul(Q, Lambda) + return torch.matmul(haar_unitary, zero_state(n_qubits).squeeze(0)).unsqueeze(0) + + +@singledispatch +def _run_state(circ: QuantumCircuit, backend: str) -> Tensor: + if backend != BackendName.PYQTORCH: + raise ValueError("Only pyqtorch supports custom states.") + bknd = backend_factory(backend=backend, diff_mode="ad") + conv = bknd.convert(circ) + return bknd.run(conv.circuit, conv.embedding_fn(conv.params, {})) + + +@_run_state.register +def _(circs: list, backend: str) -> Tensor: # type: ignore[misc] + bknd = backend_factory(backend=backend, diff_mode="ad") + results = () + for c in circs: + conv = bknd.convert(c) + results += ( + bknd.run(conv.circuit, conv.embedding_fn(conv.params, {})), + ) # type:ignore[assignment] + return concat(results, dim=0) + + +def _from_op(op: type[PrimitiveBlock], n_qubits: int) -> KronBlock: + return kron(op(i) for i in range(n_qubits)) # type: ignore[arg-type] + + +def _block_from_bitstring(bitstring: str) -> KronBlock: + n_qubits = len(bitstring) + gates = [] + for i, b in zip(range(n_qubits), bitstring): + gates.append(X(i)) if b == "1" else gates.append(I(i)) # type: ignore[arg-type] + return kron(*gates) + + +def _state_from_bitstring( + bitstring: str, batch_size: int, endianness: Endianness = Endianness.BIG +) -> Tensor: + n_qubits = len(bitstring) + wf_batch = torch.zeros(batch_size, 2**n_qubits, dtype=DTYPE) + k = basis_to_int(basis=bitstring, endianness=endianness) + wf_batch[:, k] = torch.tensor(1.0 + 0j, dtype=DTYPE) + return wf_batch + + +def 
_abstract_random_state( + n_qubits: int, batch_size: int = 1 +) -> QuantumCircuit | list[QuantumCircuit]: + qc_list = [] + for i in range(batch_size): + gates_list = [] + for i in range(n_qubits): + gate = parametric_single_qubit_gates[ + random.randrange(len(parametric_single_qubit_gates)) + ] + angle = random.uniform(-2, 2) + gates_list.append(gate(i, angle)) + qc_list.append(QuantumCircuit(n_qubits, chain(*gates_list))) + return qc_list[0] if batch_size == 1 else qc_list + + +# STATES + + +def uniform_state(n_qubits: int, batch_size: int = 1) -> Tensor: + """ + Generates the uniform state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + batch_size (int): The batch size. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import uniform_state + + state = uniform_state(n_qubits=2) + print(state) + ``` + """ + norm = 1 / torch.sqrt(torch.tensor(2**n_qubits)) + return norm * torch.ones(batch_size, 2**n_qubits, dtype=DTYPE) + + +def zero_state(n_qubits: int, batch_size: int = 1) -> Tensor: + """ + Generates the zero state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits for which the zero state is to be generated. + batch_size (int): The batch size for the zero state. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import zero_state + + state = zero_state(n_qubits=2) + print(state) + ``` + """ + bitstring = "0" * n_qubits + return _state_from_bitstring(bitstring, batch_size) + + +def one_state(n_qubits: int, batch_size: int = 1) -> Tensor: + """ + Generates the one state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + batch_size (int): The batch size. + + Returns: + A torch.Tensor. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import one_state + + state = one_state(n_qubits=2) + print(state) + ``` + """ + bitstring = "1" * n_qubits + return _state_from_bitstring(bitstring, batch_size) + + +@singledispatch +def product_state( + bitstring: str, batch_size: int = 1, endianness: Endianness = Endianness.BIG +) -> Tensor: + """ + Creates a product state from a bitstring. + + Arguments: + bitstring (str): A bitstring. + batch_size (int) : Batch size. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import product_state + + print(product_state("1100")) + ``` + """ + return _state_from_bitstring(bitstring, batch_size, endianness=endianness) + + +@product_state.register +def _(bitstrings: list) -> Tensor: # type: ignore + return concat(tuple(product_state(b) for b in bitstrings), dim=0) + + +def rand_product_state(n_qubits: int, batch_size: int = 1) -> Tensor: + """ + Creates a random product state. + + Arguments: + n_qubits (int): The number of qubits. + batch_size (int): How many bitstrings to use. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import rand_product_state + + print(rand_product_state(n_qubits=2, batch_size=2)) + ``` + """ + wf_batch = torch.zeros(batch_size, 2**n_qubits, dtype=DTYPE) + rand_pos = torch.randint(0, 2**n_qubits, (batch_size,)) + wf_batch[torch.arange(batch_size), rand_pos] = torch.tensor(1.0 + 0j, dtype=DTYPE) + return wf_batch + + +def ghz_state(n_qubits: int, batch_size: int = 1) -> Tensor: + """ + Creates a GHZ state. + + Arguments: + n_qubits (int): The number of qubits. + batch_size (int): How many bitstrings to use. + + Returns: + A torch.Tensor. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import ghz_state + + print(ghz_state(n_qubits=2, batch_size=2)) + ``` + """ + norm = 1 / torch.sqrt(torch.tensor(2)) + return norm * (zero_state(n_qubits, batch_size) + one_state(n_qubits, batch_size)) + + +def random_state( + n_qubits: int, + batch_size: int = 1, + backend: str = BackendName.PYQTORCH, + type: StateGeneratorType = StateGeneratorType.HAAR_MEASURE_FAST, +) -> Tensor: + """ + Generates a random state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + backend (str): The backend to use. + batch_size (int): The batch size. + type : StateGeneratorType. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import random_state, StateGeneratorType + from qadence.states import random_state, is_normalized, pmf + from qadence.backend import BackendName + from torch.distributions import Distribution + + ### We have the following options: + print([g.value for g in StateGeneratorType]) + + + n_qubits = 2 + # The default is StateGeneratorType.HAAR_MEASURE_FAST + state = random_state(n_qubits=n_qubits) + print(state) + + ### Lets initialize a state using random rotations, i.e., StateGeneratorType.RANDOM_ROTATIONS. 
+ random = random_state(n_qubits=n_qubits, type=StateGeneratorType.RANDOM_ROTATIONS) + print(random) + ``` + """ + + if type == StateGeneratorType.HAAR_MEASURE_FAST: + state = concat(tuple(_rand_haar_fast(n_qubits) for _ in range(batch_size)), dim=0) + elif type == StateGeneratorType.HAAR_MEASURE_SLOW: + state = concat(tuple(_rand_haar_slow(n_qubits) for _ in range(batch_size)), dim=0) + elif type == StateGeneratorType.RANDOM_ROTATIONS: + state = _run_state(_abstract_random_state(n_qubits, batch_size), backend) # type: ignore + assert all(list(map(is_normalized, state))) + return state + + +# BLOCKS + + +def uniform_block(n_qubits: int) -> KronBlock: + """ + Generates the abstract uniform state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + + Returns: + A KronBlock representing the uniform state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import uniform_block + + block = uniform_block(n_qubits=2) + print(block) + ``` + """ + return _from_op(H, n_qubits=n_qubits) + + +def one_block(n_qubits: int) -> KronBlock: + """ + Generates the abstract one state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + + Returns: + A KronBlock representing the one state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import one_block + + block = one_block(n_qubits=2) + print(block) + ``` + """ + return _from_op(X, n_qubits=n_qubits) + + +def zero_block(n_qubits: int) -> KronBlock: + """ + Generates the abstract zero state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + + Returns: + A KronBlock representing the zero state. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import zero_block + + block = zero_block(n_qubits=2) + print(block) + ``` + """ + return _from_op(I, n_qubits=n_qubits) + + +def product_block(bitstring: str) -> KronBlock: + """ + Creates an abstract product state from a bitstring. + + Arguments: + bitstring (str): A bitstring. + + Returns: + A KronBlock representing the product state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import product_block + + print(product_block("1100")) + ``` + """ + return _block_from_bitstring(bitstring) + + +def rand_product_block(n_qubits: int) -> KronBlock: + """ + Creates a block representing a random abstract product state. + + Arguments: + n_qubits (int): The number of qubits. + + Returns: + A KronBlock representing the product state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import rand_product_block + + print(rand_product_block(n_qubits=2)) + ``` + """ + return product_block(rand_bitstring(n_qubits)) + + +def ghz_block(n_qubits: int) -> ChainBlock: + """ + Generates the abstract ghz state for a specified number of qubits. + + Arguments: + n_qubits (int): The number of qubits. + + Returns: + A ChainBlock representing the GHZ state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import ghz_block + + block = ghz_block(n_qubits=2) + print(block) + ``` + """ + cnots = chain(CNOT(i - 1, i) for i in range(1, n_qubits)) + return chain(H(0), cnots) + + +# UTILITIES + + +def pmf(wf: Tensor) -> Distribution: + """ + Converts a wave function into a torch Distribution. + + Arguments: + wf (torch.Tensor): The wave function as a torch tensor. + + Returns: + A torch.distributions.Distribution. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import uniform_state, pmf + + print(pmf(uniform_state(2)).probs) + ``` + """ + return Categorical(torch.abs(torch.pow(wf, 2))) + + +def normalize(wf: Tensor) -> Tensor: + """ + Normalizes a wavefunction or batch of wave functions. + + Arguments: + wf (torch.Tensor): Normalized wavefunctions. + + Returns: + A torch.Tensor. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import uniform_state, normalize + + print(normalize(uniform_state(2, 2))) + ``` + """ + if wf.dim() == 1: + return wf / torch.sqrt((wf.abs() ** 2).sum()) + else: + return wf / torch.sqrt((wf.abs() ** 2).sum(1)).unsqueeze(1) + + +def is_normalized(wf: Tensor, atol: float = NORMALIZATION_ATOL) -> bool: + """ + Checks if a wave function is normalized. + + Arguments: + wf (torch.Tensor): The wave function as a torch tensor. + atol (float) : The tolerance. + + Returns: + A bool. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import uniform_state, is_normalized + + print(is_normalized(uniform_state(2))) + ``` + """ + if wf.dim() == 1: + wf = wf.unsqueeze(0) + sum_probs: Tensor = (wf.abs() ** 2).sum(dim=1) + ones = torch.ones_like(sum_probs) + return torch.allclose(sum_probs, ones, rtol=0.0, atol=atol) # type: ignore[no-any-return] + + +def rand_bitstring(N: int) -> str: + """ + Creates a random bistring. + + Arguments: + N (int): The length of the bitstring. + + Returns: + A string. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.states import rand_bitstring + + print(rand_bitstring(N=8)) + ``` + """ + return "".join(str(random.randint(0, 1)) for _ in range(N)) + + +def equivalent_state( + s0: torch.Tensor, s1: torch.Tensor, rtol: float = 0.0, atol: float = NORMALIZATION_ATOL +) -> bool: + fid = fidelity(s0, s1) + expected = torch.ones_like(fid) + return torch.allclose(fid, expected, rtol=rtol, atol=atol) # type: ignore[no-any-return] diff --git a/qadence/transpile/__init__.py b/qadence/transpile/__init__.py new file mode 100644 index 000000000..a50aa6bb2 --- /dev/null +++ b/qadence/transpile/__init__.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +from .block import ( + chain_single_qubit_ops, + flatten, + repeat, + scale_primitive_blocks_only, + set_trainable, + validate, +) +from .circuit import fill_identities +from .digitalize import digitalize +from .emulate import add_interaction +from .invert import invert_endianness, reassign +from .transpile import blockfn_to_circfn, transpile + +__all__ = ["add_interaction", "set_trainable", "invert_endianness"] diff --git a/qadence/transpile/block.py b/qadence/transpile/block.py new file mode 100644 index 000000000..548cefa3e --- /dev/null +++ b/qadence/transpile/block.py @@ -0,0 +1,430 @@ +from __future__ import annotations + +from copy import deepcopy +from functools import reduce, singledispatch +from typing import Callable, Generator, Iterable, Type + +import sympy + +from qadence import SWAP, I +from qadence.blocks import ( + AbstractBlock, + AddBlock, + AnalogBlock, + ChainBlock, + CompositeBlock, + ControlBlock, + KronBlock, + PrimitiveBlock, + PutBlock, + ScaleBlock, + add, + chain, + kron, +) +from qadence.blocks.utils import ( + TPrimitiveBlock, + _construct, + parameters, +) +from qadence.logger import get_logger +from qadence.parameters import Parameter + +logger = get_logger(__name__) + + +def _flat_blocks(block: AbstractBlock, T: 
Type) -> Generator: + """Constructs a generator that flattens nested `CompositeBlock`s of type `T`. + + Example: + ```python exec="on" source="material-block" result="json" + from qadence.transpile.block import _flat_blocks + from qadence.blocks import ChainBlock + from qadence import chain, X + + x = chain(chain(chain(X(0)), X(0))) + assert tuple(_flat_blocks(x, ChainBlock)) == (X(0), X(0)) + ``` + """ + if isinstance(block, T): + # here we do the flattening + for b in block.blocks: + if isinstance(b, T): + yield from _flat_blocks(b, T) + else: + yield flatten(b, [T]) + elif isinstance(block, CompositeBlock): + # here we make sure that we don't get stuck at e.g. `KronBlock`s if we + # want to flatten `ChainBlock`s + yield from (flatten(b, [T]) for b in block.blocks) + elif isinstance(block, ScaleBlock): + blk = deepcopy(block) + blk.block = flatten(block.block, [T]) + yield blk + else: + yield block + + +def flatten(block: AbstractBlock, types: list = [ChainBlock, KronBlock, AddBlock]) -> AbstractBlock: + """Flattens the given types of `CompositeBlock`s if possible. 
+ + Example: + ```python exec="on" source="material-block" result="json" + from qadence import chain, kron, X + from qadence.transpile import flatten + from qadence.blocks import ChainBlock, KronBlock, AddBlock + + x = chain(chain(chain(X(0))), kron(kron(X(0)))) + + # flatten only `ChainBlock`s + assert flatten(x, [ChainBlock]) == chain(X(0), kron(kron(X(0)))) + + # flatten `ChainBlock`s and `KronBlock`s + assert flatten(x, [ChainBlock, KronBlock]) == chain(X(0), kron(X(0))) + + # flatten `AddBlock`s (does nothing in this case) + assert flatten(x, [AddBlock]) == x + ``` + """ + if isinstance(block, CompositeBlock): + + def fn(b: AbstractBlock, T: Type) -> AbstractBlock: + return _construct(type(block), tuple(_flat_blocks(b, T))) + + return reduce(fn, types, block) # type: ignore[arg-type] + elif isinstance(block, ScaleBlock): + blk = deepcopy(block) + blk.block = flatten(block.block, types=types) + return blk + else: + return block + + +def repeat( + Block: Type[TPrimitiveBlock], support: Iterable[int], parameter: str | Parameter | None = None +) -> KronBlock: + if parameter is None: + return kron(Block(i) for i in support) # type: ignore [arg-type] + return kron(Block(i, parameter) for i in support) # type: ignore [call-arg, arg-type] + + +def set_trainable( + blocks: AbstractBlock | list[AbstractBlock], value: bool = True, inplace: bool = True +) -> AbstractBlock | list[AbstractBlock]: + """Set the trainability of all parameters in a block to a given value + + Args: + blocks (AbstractBlock | list[AbstractBlock]): Block or list of blocks for which + to set the trainable attribute + value (bool, optional): The value of the trainable attribute to assign to the input blocks + inplace (bool, optional): Whether to modify the block(s) in place or not. 
Currently, only + + Raises: + NotImplementedError: if the `inplace` argument is set to False, the function will + raise this exception + + Returns: + AbstractBlock | list[AbstractBlock]: the input block or list of blocks with the trainable + attribute set to the given value + """ + + if isinstance(blocks, AbstractBlock): + blocks = [blocks] + + if inplace: + for block in blocks: + params: list[sympy.Basic] = parameters(block) + for p in params: + if not p.is_number: + p.trainable = value + else: + raise NotImplementedError("Not inplace set_trainable is not yet available") + + return blocks if len(blocks) > 1 else blocks[0] + + +def validate(block: AbstractBlock) -> AbstractBlock: + """Moves a block from global to local qubit numbers by adding PutBlocks and reassigning + qubit locations approriately. + + # Example + ```python exec="on" source="above" result="json" + from qadence.blocks import chain + from qadence.operations import X + from qadence.transpile import validate + + x = chain(chain(X(0)), chain(X(1))) + print(x) + print(validate(x)) + ``` + """ + vblock: AbstractBlock + from qadence.transpile import reassign + + if isinstance(block, ControlBlock): + vblock = deepcopy(block) + b: AbstractBlock + (b,) = block.blocks + b = reassign(b, {i: i - min(b.qubit_support) for i in b.qubit_support}) + b = validate(b) + vblock.blocks = (b,) # type: ignore[assignment] + + elif isinstance(block, CompositeBlock): + blocks = [] + for b in block.blocks: + mi, ma = min(b.qubit_support), max(b.qubit_support) + nb = reassign(b, {i: i - min(b.qubit_support) for i in b.qubit_support}) + nb = validate(nb) + nb = PutBlock(nb, tuple(range(mi, ma + 1))) + blocks.append(nb) + try: + vblock = _construct(type(block), tuple(blocks)) + except AssertionError as e: + if str(e) == "Make sure blocks act on distinct qubits!": + vblock = chain(*blocks) + else: + raise e + + elif isinstance(block, PrimitiveBlock): + vblock = deepcopy(block) + + else: + raise NotImplementedError + + vblock.tag = 
block.tag + return vblock + + +@singledispatch +def scale_primitive_blocks_only(block: AbstractBlock, scale: sympy.Basic = None) -> AbstractBlock: + """When given a scaled CompositeBlock consisting of several PrimitiveBlocks, + move the scale all the way down into the leaves of the block tree. + + Arguments: + block: The block to be transpiled. + scale: An optional scale parameter. Only to be used for recursive calls internally. + + Returns: + AbstractBlock: A block of the same type where the scales have been moved into the subblocks. + + Examples: + + There are two different cases: + `ChainBlock`s/`KronBlock`s: Only the first subblock needs to be scaled because chains/krons + represent multiplications. + ```python exec="on" source="above" result="json" + from qadence import chain, X, RX + from qadence.transpile import scale_primitive_blocks_only + b = 2 * chain(X(0), RX(0, "theta")) + print(b) + # After applying scale_primitive_blocks_only + print(scale_primitive_blocks_only(b)) + ``` + + `AddBlock`s: Consider 2 * add(X(0), RX(0, "theta")). The scale needs to be added to all + subblocks. We get add(2 * X(0), 2 * RX(0, "theta")). 
+ ```python exec="on" source="above" result="json" + from qadence import add, X, RX + from qadence.transpile import scale_primitive_blocks_only + b = 2 * add(X(0), RX(0, "theta")) + print(b) + # After applying scale_primitive_blocks_only + print(scale_primitive_blocks_only(b)) + ``` + """ + raise NotImplementedError(f"scale_primitive_blocks_only is not implemented for {type(block)}") + + +@scale_primitive_blocks_only.register +def _(block: ScaleBlock, scale: sympy.Basic = None) -> AbstractBlock: + (scale2,) = block.parameters.expressions() + s = scale2 if scale is None else scale * scale2 + blk = scale_primitive_blocks_only(block.block, s) + blk.tag = block.tag + return blk + + +@scale_primitive_blocks_only.register +def _(block: ChainBlock, scale: sympy.Basic = None) -> CompositeBlock: + blk = scale_only_first_block(chain, block, scale) + blk.tag = block.tag + return blk + + +@scale_primitive_blocks_only.register +def _(block: KronBlock, scale: sympy.Basic = None) -> CompositeBlock: + blk = scale_only_first_block(kron, block, scale) + blk.tag = block.tag + return blk + + +@scale_primitive_blocks_only.register +def _(block: AddBlock, scale: sympy.Basic = None) -> CompositeBlock: + blk = add(scale_primitive_blocks_only(b, scale) for b in block.blocks) + blk.tag = block.tag + return blk + + +@scale_primitive_blocks_only.register +def _(block: PrimitiveBlock, scale: sympy.Basic = None) -> AbstractBlock: + if scale is None: + return block + b: ScaleBlock = block * scale + return b + + +@scale_primitive_blocks_only.register +def _(block: AnalogBlock, scale: sympy.Basic = None) -> AbstractBlock: + if scale is not None: + raise NotImplementedError("Cannot scale `AnalogBlock`s!") + return block + + +def scale_only_first_block( + fn: Callable, block: CompositeBlock, scale: sympy.Basic = None +) -> CompositeBlock: + if len(block.blocks): + first, rest = block.blocks[0], block.blocks[1:] + firstscaled = scale_primitive_blocks_only(first, scale) + + blk: CompositeBlock + blk = 
fn(firstscaled, *[scale_primitive_blocks_only(b, None) for b in rest]) + return blk + else: + return block + + +@singledispatch +def fill_identities(block: AbstractBlock, start: int, stop: int) -> AbstractBlock: + return block + + +@fill_identities.register +def _(block: PrimitiveBlock, start: int, stop: int) -> AbstractBlock: + if (start == min(block.qubit_support)) and (stop == max(block.qubit_support) + 1): + return block + tag = block.tag + block.tag = None + bs = [block] + [I(i) for i in (set(range(start, stop)) - set(block.qubit_support))] + b = kron(*sorted(bs, key=lambda x: x.qubit_support)) + b.tag = tag + return b + + +@fill_identities.register +def _(block: SWAP, start: int, stop: int) -> AbstractBlock: + if (start == min(block.qubit_support)) and (stop == max(block.qubit_support) + 1): + return block + tag = block.tag + block.tag = None + bs = [block] + [chain(I(i), I(i)) for i in (set(range(start, stop)) - set(block.qubit_support))] + b = kron(*sorted(bs, key=lambda x: x.qubit_support)) + b.tag = tag + return b + + +@fill_identities.register +def _(block: ChainBlock, start: int, stop: int) -> AbstractBlock: + b = chain(fill_identities(b, start, stop) for b in block) + b.tag = block.tag + return b + + +def _fill_kron(block: KronBlock, start: int, stop: int) -> list[AbstractBlock]: + def length(b: AbstractBlock | list) -> int: + if isinstance(b, list): + return max(map(length, b)) + elif isinstance(b, ChainBlock): + return len(b) + elif isinstance(b, SWAP): + return 2 + else: + return 1 + + def append_ids(block: AbstractBlock, total: int) -> AbstractBlock: + qs = block.qubit_support + ids = [I(i) for i in range(min(qs), max(qs) + 1) for _ in range(length(block), total)] + bs = [block] + ids + return chain(*bs) + + def id_chain(i: int, max_len: int) -> AbstractBlock: + return chain(I(i) for _ in range(max_len)) + + bs = [fill_identities(b, min(b.qubit_support), max(b.qubit_support) + 1) for b in block] + max_len = length(bs) + bs = [append_ids(b, max_len) 
for b in bs] + bs += [id_chain(i, max_len) for i in (set(range(start, stop)) - set(block.qubit_support))] + return sorted(bs, key=lambda x: x.qubit_support) + + +@fill_identities.register +def _(block: KronBlock, start: int, stop: int) -> AbstractBlock: + b = kron(*_fill_kron(block, start, stop)) # type: ignore[misc] + b.tag = block.tag + return b + + +@fill_identities.register +def _(block: AddBlock, start: int, stop: int) -> AbstractBlock: + b = add(fill_identities(b, start, stop) for b in block) + b.tag = block.tag + return b + + +def is_kron_of_primitives(block: AbstractBlock) -> bool: + return isinstance(block, (KronBlock)) and all( + [isinstance(b, PrimitiveBlock) and b.n_supports == 1 for b in block.blocks] + ) + + +def is_chain_of_primitivekrons(block: AbstractBlock) -> bool: + return ( + isinstance(block, ChainBlock) + and len(block) > 1 + and all( + [ + is_kron_of_primitives(b) and b.qubit_support == block.qubit_support + for b in block.blocks + ] + ) + ) + + +def chain_single_qubit_ops(block: AbstractBlock) -> AbstractBlock: + """Transpile a chain of krons into a kron of chains of single qubit operations. + + Examples: + ```python exec="on" source="above" result="json" + from qadence import hea + from qadence.transpile.block import chain_single_qubit_ops + + # Consider a single HEA layer + block = hea(2,1) + print(block) + + # After applying chain_single_qubit_ops, we get: + print(chain_single_qubit_ops(block)) + ``` + """ + if is_chain_of_primitivekrons(block): + kronblocks = block.blocks # type: ignore[attr-defined] + n_blocks = len(kronblocks) + chains = [] + for qb_idx in range(block.n_qubits): + prim_gates = [] + for kron_idx in range(n_blocks): + prim_gates.append(kronblocks[kron_idx][qb_idx]) # type: ignore[index] + chains.append(chain(*prim_gates)) + try: + return kron(*chains) + except Exception as e: + logger.debug( + f"Unable to transpile {block} using chain_single_qubit_ops\ + due to {e}. Returning original circuit." 
+ ) + return block + + elif isinstance(block, CompositeBlock): + return _construct(type(block), tuple(chain_single_qubit_ops(b) for b in block.blocks)) + else: + return block diff --git a/qadence/transpile/circuit.py b/qadence/transpile/circuit.py new file mode 100644 index 000000000..1d555df53 --- /dev/null +++ b/qadence/transpile/circuit.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from typing import List + +from qadence.blocks.utils import chain +from qadence.circuit import QuantumCircuit +from qadence.operations import I + + +def reverse_program(circuit: QuantumCircuit, inplace: bool = True) -> QuantumCircuit: + raise (NotImplementedError("Must also implement at Block level")) + + +def parametric_shift_rule(circuit: QuantumCircuit) -> List[QuantumCircuit]: + raise NotImplementedError + + +def fill_identities(circ: QuantumCircuit) -> QuantumCircuit: + empty_wires = set(range(circ.n_qubits)) - set(circ.block.qubit_support) + if len(empty_wires) > 0: + ids = chain(I(i) for i in empty_wires) + return QuantumCircuit(circ.n_qubits, chain(circ.block, ids)) + return circ diff --git a/qadence/transpile/digitalize.py b/qadence/transpile/digitalize.py new file mode 100644 index 000000000..6f0bc17c1 --- /dev/null +++ b/qadence/transpile/digitalize.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from functools import singledispatch +from typing import overload + +from qadence import HamEvo, QuantumCircuit +from qadence.blocks import AbstractBlock, CompositeBlock +from qadence.blocks.utils import _construct +from qadence.operations import U +from qadence.types import LTSOrder + + +@overload +def digitalize(circuit: QuantumCircuit, approximation: LTSOrder = LTSOrder.BASIC) -> QuantumCircuit: + ... + + +@overload +def digitalize(block: AbstractBlock, approximation: LTSOrder = LTSOrder.BASIC) -> AbstractBlock: + ... 
+ + +@singledispatch +def digitalize( + circ_or_block: AbstractBlock | QuantumCircuit, approximation: LTSOrder +) -> AbstractBlock | QuantumCircuit: + raise NotImplementedError(f"digitalize is not implemented for {type(circ_or_block)}") + + +@digitalize.register # type: ignore[attr-defined] +def _(block: AbstractBlock, approximation: LTSOrder = LTSOrder.BASIC) -> AbstractBlock: + if isinstance(block, CompositeBlock): + return _construct(type(block), tuple(digitalize(b, approximation) for b in block.blocks)) + elif isinstance(block, HamEvo): + return block.digital_decomposition(approximation=approximation) + elif isinstance(block, U): + return block.digital_decomposition() + else: + return block + + +@digitalize.register # type: ignore[attr-defined] +def _(circuit: QuantumCircuit, approximation: LTSOrder = LTSOrder.BASIC) -> QuantumCircuit: + return QuantumCircuit(circuit.n_qubits, digitalize(circuit.block, approximation)) diff --git a/qadence/transpile/emulate.py b/qadence/transpile/emulate.py new file mode 100644 index 000000000..9d54393f1 --- /dev/null +++ b/qadence/transpile/emulate.py @@ -0,0 +1,297 @@ +from __future__ import annotations + +from copy import deepcopy +from functools import singledispatch +from itertools import product +from math import dist as euclidean_distance +from typing import Any, Callable, Union, overload + +import torch +from sympy import cos, sin + +from qadence.blocks.abstract import AbstractBlock +from qadence.blocks.analog import ( + AnalogBlock, + AnalogChain, + AnalogKron, + ConstantAnalogRotation, + Interaction, + WaitBlock, +) +from qadence.blocks.composite import CompositeBlock +from qadence.blocks.primitive import PrimitiveBlock, ScaleBlock +from qadence.blocks.utils import _construct +from qadence.circuit import QuantumCircuit +from qadence.operations import HamEvo, I, N, X, Y, add, chain, kron, wait +from qadence.qubit_support import QubitSupport +from qadence.register import Register +from qadence.transpile.transpile import 
blockfn_to_circfn + +C6_DICT = { + 50: 96120.72, + 51: 122241.6, + 52: 154693.02, + 53: 194740.36, + 54: 243973.91, + 55: 304495.01, + 56: 378305.98, + 57: 468027.05, + 58: 576714.85, + 59: 707911.38, + 60: 865723.02, + 61: 1054903.11, + 62: 1281042.11, + 63: 1550531.15, + 64: 1870621.31, + 65: 2249728.57, + 66: 2697498.69, + 67: 3224987.51, + 68: 3844734.37, + 69: 4571053.32, + 70: 5420158.53, + 71: 6410399.4, + 72: 7562637.31, + 73: 8900342.14, + 74: 10449989.62, + 75: 12241414.53, + 76: 14308028.03, + 77: 16687329.94, + 78: 19421333.62, + 79: 22557029.94, + 80: 26146720.74, + 81: 30248886.65, + 82: 34928448.69, + 83: 40257623.67, + 84: 46316557.88, + 85: 53194043.52, + 86: 60988354.64, + 87: 69808179.15, + 88: 79773468.88, + 89: 91016513.07, + 90: 103677784.57, + 91: 117933293.96, + 92: 133943541.9, + 93: 151907135.94, + 94: 172036137.34, + 95: 194562889.89, + 96: 219741590.56, + 97: 247850178.91, + 98: 279192193.77, + 99: 314098829.39, + 100: 352931119.11, +} + + +def _qubitposition(register: Register, i: int) -> tuple[int, int]: + (x, y) = list(register.coords.values())[i] + return (x, y) + + +def ising_interaction( + register: Register, pairs: list[tuple[int, int]], rydberg_level: int = 60 +) -> AbstractBlock: + c6 = C6_DICT[rydberg_level] + + def term(i: int, j: int) -> AbstractBlock: + qi, qj = _qubitposition(register, i), _qubitposition(register, j) + rij = euclidean_distance(qi, qj) + return (c6 / rij**6) * kron(N(i), N(j)) + + return add(term(i, j) for (i, j) in pairs) + + +def xy_interaction( + register: Register, pairs: list[tuple[int, int]], c3: float = 3700.0 +) -> AbstractBlock: + def term(i: int, j: int) -> AbstractBlock: + qi, qj = _qubitposition(register, i), _qubitposition(register, j) + rij = euclidean_distance(qi, qj) + return (c3 / rij**3) * (kron(X(i), X(j)) + kron(Y(i), Y(j))) + + return add(term(i, j) for (i, j) in pairs) + + +INTERACTIONS = {Interaction.NN: ising_interaction, Interaction.XY: xy_interaction} + + +@overload +def 
add_interaction(circuit: QuantumCircuit, **kwargs: Any) -> QuantumCircuit: + ... + + +@overload +def add_interaction(block: AbstractBlock, **kwargs: Any) -> AbstractBlock: + ... + + +@overload +def add_interaction(register: Register, block: AbstractBlock, **kwargs: Any) -> AbstractBlock: + ... + + +@singledispatch +def add_interaction( + x: Register | QuantumCircuit | AbstractBlock, + *args: Any, + interaction: Interaction | Callable = Interaction.NN, + spacing: float = 1.0, +) -> QuantumCircuit | AbstractBlock: + """Turns blocks or circuits into (a chain of) `HamEvo` blocks including a + chosen interaction term. + + This is a `@singledipatch`ed function which can be called in three ways: + + * With a `QuantumCircuit` which contains all necessary information: `add_interaction(circuit)` + * With a `Register` and an `AbstractBlock`: `add_interaction(reg, block)` + * With an `AbstractBlock` only: `add_interaction(block)` + + See the section about [analog blocks](/digital_analog_qc/analog-basics.md) for + detailed information about how which types of blocks are translated. + + Arguments: + x: Circuit or block to be emulated. See the examples on which argument + combinations are accepted. + interaction: Type of interaction that is added. Can also be a function that accepts a + register and a list of edges that define which qubits interact (see the examples). + spacing: All qubit coordinates are multiplied by `spacing`. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence import QuantumCircuit, AnalogRX, add_interaction + + c = QuantumCircuit(2, AnalogRX(2.0)) + e = add_interaction(c) + print(str(e.block.generator)) # markdown-exec: hide + ``` + You can also use `add_interaction` directly on a block, but you have to provide either + the `Register` or define a non-global qubit support. 
+ ```python exec="on" source="material-block" result="json" + from qadence import AnalogRX, Register, add_interaction + + b = AnalogRX(2.0) + r = Register(1) + e = add_interaction(r, b) + print(e.generator) # markdown-exec: hide + + # or provide only the block with local qubit support + # in this case the register is created via `Register(b.n_qubits)` + e = add_interaction(AnalogRX(2.0, qubit_support=(0,))) + print(e.generator) + ``` + You can specify a custom `interaction` function which has to accept a `Register` and a list + of `edges: list[tuple[int, int]]`: + ```python exec="on" source="material-block" result="json" + from qadence import AnalogRX, Register, add_interaction + from qadence.transpile.emulate import ising_interaction + + def int_fn(r: Register, pairs: list[tuple[int, int]]) -> AbstractBlock: + # do either something completely custom + # ... + # or e.g. change the default kwargs to `ising_interaction` + return ising_interaction(r, pairs, rydberg_level=70) + + b = AnalogRX(2.0) + r = Register(1) + e = add_interaction(r, b, interaction=int_fn) + ``` + """ + raise ValueError(f"`add_interaction` is not implemented for {type(x)}") + + +@add_interaction.register # type: ignore[attr-defined] +def _(circuit: QuantumCircuit, **kwargs: Any) -> QuantumCircuit: + reg = circuit.register + return blockfn_to_circfn(lambda b: add_interaction(reg, b, **kwargs))(circuit) + + +@add_interaction.register # type: ignore[attr-defined] +def _(block: AbstractBlock, **kwargs: Any) -> AbstractBlock: + return add_interaction(Register(block.n_qubits), block, **kwargs) + + +@add_interaction.register # type: ignore[attr-defined] +def _( + register: Register, + block: AbstractBlock, + interaction: Union[Interaction, Callable] = Interaction.NN, + spacing: float = 1.0, +) -> AbstractBlock: + try: + fn = interaction if callable(interaction) else INTERACTIONS[Interaction(interaction)] + except KeyError: + raise KeyError( + "Function `add_interaction` only supports NN and XY, or a 
custom callable function." + ) + reg = register._scale_positions(spacing) + return _add_interaction(block, reg, fn) # type: ignore[arg-type] + + +@singledispatch +def _add_interaction(b: AbstractBlock, r: Register, interaction: Callable) -> AbstractBlock: + raise NotImplementedError(f"Cannot emulate {type(b)}") + + +@_add_interaction.register +def _(b: CompositeBlock, r: Register, i: Callable) -> AbstractBlock: + return _construct(type(b), tuple(map(lambda b: _add_interaction(b, r, i), b.blocks))) + + +@_add_interaction.register +def _(block: ScaleBlock, register: Register, interaction: Callable) -> AbstractBlock: + if isinstance(block.block, AnalogBlock): + raise NotImplementedError("Scaling emulated analog blocks is not implemented.") + return block + + +@_add_interaction.register +def _(block: PrimitiveBlock, register: Register, interaction: Callable) -> AbstractBlock: + return block + + +@_add_interaction.register +def _(block: WaitBlock, register: Register, interaction: Callable) -> AbstractBlock: + duration = block.parameters.duration + + support = tuple(range(register.n_qubits)) + assert support == block.qubit_support if not block.qubit_support.is_global else True + pairs = list(filter(lambda x: x[0] < x[1], product(support, support))) + + return HamEvo(interaction(register, pairs), duration / 1000) if len(pairs) else I(0) + + +def rot_generator(block: ConstantAnalogRotation) -> AbstractBlock: + omega = block.parameters.omega + delta = block.parameters.delta + phase = block.parameters.phase + support = block.qubit_support + + x_terms = (omega / 2) * add(cos(phase) * X(i) - sin(phase) * Y(i) for i in support) + z_terms = delta * add(N(i) for i in support) + return x_terms - z_terms # type: ignore[no-any-return] + + +@_add_interaction.register +def _(block: ConstantAnalogRotation, register: Register, interaction: Callable) -> AbstractBlock: + # convert "global" to indexed qubit suppport so that we can re-use `kron` dispatched function + b = deepcopy(block) + 
b.qubit_support = QubitSupport(*range(register.n_qubits)) + return _add_interaction(kron(b), register, interaction) + + +@_add_interaction.register +def _(block: AnalogKron, register: Register, interaction: Callable) -> AbstractBlock: + from qadence import block_to_tensor + + w_block = wait(duration=block.duration, qubit_support=block.qubit_support) + i_terms = add_interaction(register, w_block, interaction=interaction) + + generator = add(rot_generator(b) for b in block.blocks if isinstance(b, ConstantAnalogRotation)) + generator = generator if i_terms == I(0) else generator + i_terms.generator # type: ignore[attr-defined] # noqa: E501 + + norm = torch.norm(block_to_tensor(generator)).item() + return HamEvo(generator / norm, norm * block.duration / 1000) + + +@_add_interaction.register +def _(block: AnalogChain, register: Register, interaction: Callable) -> AbstractBlock: + return chain(add_interaction(register, b, interaction=interaction) for b in block.blocks) diff --git a/qadence/transpile/invert.py b/qadence/transpile/invert.py new file mode 100644 index 000000000..20140dcc8 --- /dev/null +++ b/qadence/transpile/invert.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +from collections import Counter +from copy import deepcopy +from functools import singledispatch +from typing import Any, overload + +import numpy as np +from torch import Tensor, tensor + +from qadence import QuantumCircuit +from qadence.blocks import AbstractBlock + + +def reassign(block: AbstractBlock, qubit_map: dict[int, int]) -> AbstractBlock: + """Update the support of a given block + + Args: + block (AbstractBlock): _description_ + qubit_map (dict[int, int]): _description_ + + """ + from qadence.blocks import CompositeBlock, ControlBlock, ParametricControlBlock, ScaleBlock + from qadence.blocks.utils import _construct + + def _block_with_updated_support(block: AbstractBlock) -> AbstractBlock: + if isinstance(block, ControlBlock) or isinstance(block, ParametricControlBlock): + 
old_qs = block.qubit_support + new_control_block = deepcopy(block) + new_control_block._qubit_support = tuple(qubit_map[i] for i in old_qs) + (subblock,) = block.blocks + new_control_block.blocks = (reassign(subblock, qubit_map),) # type: ignore [assignment] + return new_control_block + elif isinstance(block, CompositeBlock): + subblocks = tuple(_block_with_updated_support(b) for b in block.blocks) + blk = _construct(type(block), subblocks) + blk.tag = block.tag + return blk + elif isinstance(block, ScaleBlock): + blk = deepcopy(block) # type: ignore [assignment] + blk.block = _block_with_updated_support(block.block) # type: ignore [attr-defined] + return blk + else: + blk = deepcopy(block) # type: ignore [assignment] + qs = tuple(qubit_map[i] for i in block.qubit_support) + blk._qubit_support = qs # type: ignore[attr-defined] + return blk + + return _block_with_updated_support(block) + + +@overload +def invert_endianness(wf: Tensor) -> Tensor: + ... + + +@overload +def invert_endianness(arr: np.ndarray) -> np.ndarray: + ... + + +@overload +def invert_endianness(cntr: Counter) -> Counter: + ... + + +@overload +def invert_endianness(cntrs: list) -> list: + ... + + +@overload +def invert_endianness(circuit: QuantumCircuit, n_qubits: int) -> QuantumCircuit: + ... + + +@overload +def invert_endianness(block: AbstractBlock, n_qubits: int, in_place: bool) -> AbstractBlock: + ... 
+ + +@singledispatch +def invert_endianness( + x: QuantumCircuit | AbstractBlock | Tensor | Counter | np.ndarray, *args: Any +) -> QuantumCircuit | AbstractBlock | Tensor | Counter | np.ndarray: + """Invert the endianness of a QuantumCircuit, AbstractBlock, wave function or Counter.""" + raise NotImplementedError(f"Unable to invert endianness of object {type(x)}.") + + +@invert_endianness.register(AbstractBlock) # type: ignore[attr-defined] +def _(block: AbstractBlock, n_qubits: int = None, in_place: bool = False) -> AbstractBlock: + if n_qubits is None: + n_qubits = block.n_qubits + """Flips endianness of the block""" + if in_place: + raise NotImplementedError + bits = list(range(n_qubits)) + qubit_map = {i: j for (i, j) in zip(bits, reversed(bits))} + return reassign(block, qubit_map=qubit_map) + + +@invert_endianness.register(Tensor) # type: ignore[attr-defined] +def _(wf: Tensor) -> Tensor: + """ + Inverts the endianness of a wave function. + + Args: + wf (Tensor): the target wf as a torch Tensor of shape batch_size X 2**n_qubits + + Returns: + The inverted wave function. 
+ """ + n_qubits = int(np.log2(wf.shape[1])) + ls = list(range(2**n_qubits)) + permute_ind = tensor([int(f"{num:0{n_qubits}b}"[::-1], 2) for num in ls]) + return wf[:, permute_ind] + + +@invert_endianness.register(np.ndarray) # type: ignore[attr-defined] +def _(arr: np.ndarray) -> np.ndarray: + return invert_endianness(tensor(arr)).numpy() + + +@invert_endianness.register(Counter) # type: ignore[attr-defined] +def _(cntr: Counter) -> Counter: + return Counter( + { + format(int(bstring[::-1], 2), "0{}b".format(len(bstring))): count + for bstring, count in cntr.items() + } + ) + + +@invert_endianness.register(list) # type: ignore[attr-defined] +def _(cntrs: list) -> list: + return list(map(invert_endianness, cntrs)) + + +@invert_endianness.register(QuantumCircuit) # type: ignore[attr-defined] +def _(circuit: QuantumCircuit) -> QuantumCircuit: + """This method inverts a circuit "vertically" + + All gates are same but qubit indices are ordered inversely, + such that bitstrings 00111 become 11100 when measured. Handy for + big-endian <> little-endian conversion + + Returns: + QuantumCircuit with endianess switched + """ + return QuantumCircuit( + circuit.n_qubits, invert_endianness(circuit.block, circuit.n_qubits, False) + ) diff --git a/qadence/transpile/transpile.py b/qadence/transpile/transpile.py new file mode 100644 index 000000000..b41652650 --- /dev/null +++ b/qadence/transpile/transpile.py @@ -0,0 +1,86 @@ +from __future__ import annotations + +from functools import reduce +from typing import Callable, TypeVar, overload + +from qadence.blocks import AbstractBlock +from qadence.circuit import QuantumCircuit + +BlockOrCirc = TypeVar("BlockOrCirc", AbstractBlock, QuantumCircuit) + + +@overload +def transpile( + *fs: Callable[[AbstractBlock], AbstractBlock] +) -> Callable[[AbstractBlock], AbstractBlock]: + ... + + +@overload +def transpile( + *fs: Callable[[QuantumCircuit], QuantumCircuit] +) -> Callable[[QuantumCircuit], QuantumCircuit]: + ... 
+ + +def transpile(*fs: Callable) -> Callable: + """`AbstractBlock` or `QuantumCircuit` transpilation. Compose functions that + accept a circuit/block and returns a circuit/block. + + Arguments: + *fs: composable functions that either map blocks to blocks + (`Callable[[AbstractBlock], AbstractBlock]`) + or circuits to circuits (`Callable[[QuantumCircuit], QuantumCircuit]`). + + Returns: + Composed function. + + + Examples: + + Flatten a block of nested chains and krons: + ```python exec="on" source="material-block" result="json" + from qadence import * + from qadence.transpile import transpile, flatten, scale_primitive_blocks_only + + b = chain(2 * chain(chain(X(0), Y(0))), kron(kron(X(0), X(1)))) + print(b) + print() # markdown-exec: hide + + # both flatten and scale_primitive_blocks_only are functions that accept and + # return a block + t = transpile(flatten, scale_primitive_blocks_only)(b) + print(t) + ``` + + We also proved a decorator to easily turn a function `Callable[[AbstractBlock], AbstractBlock]` + into a `Callable[[QuantumCircuit], QuantumCircuit]` to be used in circuit transpilation. + ```python exec="on" source="material-block" result="json" + from qadence import * + from qadence.transpile import transpile, blockfn_to_circfn, flatten + + # We want to pass this circuit to `transpile` instead of a block, + # so we need functions that map from a circuit to a circuit. 
+ circ = QuantumCircuit(2, chain(chain(X(0), chain(X(1))))) + + @blockfn_to_circfn + def fn(block): + # un-decorated function accepts a block and returns a block + return block * block + + transp = transpile( + # the decorated function accepts a circuit and returns a circuit + fn, + # already existing functions can also be decorated + blockfn_to_circfn(flatten) + ) + print(transp(circ)) + ``` + """ + return lambda x: reduce(lambda acc, f: f(acc), reversed(fs), x) + + +def blockfn_to_circfn( + fn: Callable[[AbstractBlock], AbstractBlock] +) -> Callable[[QuantumCircuit], QuantumCircuit]: + return lambda circ: QuantumCircuit(circ.register, fn(circ.block)) diff --git a/qadence/types.py b/qadence/types.py new file mode 100644 index 000000000..3e9c0657b --- /dev/null +++ b/qadence/types.py @@ -0,0 +1,347 @@ +from __future__ import annotations + +import importlib +from enum import Enum +from typing import Iterable, Tuple, Union + +import numpy as np +import sympy +import torch + +TNumber = Union[int, float, complex] +"""Union of python number types.""" + +TDrawColor = Tuple[float, float, float, float] + +TParameter = Union[TNumber, torch.Tensor, sympy.Basic, str] +"""Union of numbers, tensors, and parameter types.""" + +TArray = Union[Iterable, torch.Tensor, np.ndarray] +"""Union of common array types.""" + +TGenerator = Union[torch.Tensor, sympy.Array, sympy.Basic] +"""Union of torch tensors and numpy arrays.""" + +PI = torch.pi + +# Modules to be automatically added to the qadence namespace +__all__ = [ + "Endianness", + "Strategy", + "ResultType", + "ParameterType", + "BackendName", + "StateGeneratorType", + "LTSOrder", + "TensorType", + "DiffMode", + "BackendName", + "Interaction", + "OverlapMethod", + "AlgoHEvo", + "SerializationFormat", +] # type: ignore + + +class StrEnum(str, Enum): + def __str__(self) -> str: + """Used when dumping enum fields in a schema.""" + ret: str = self.value + return ret + + @classmethod + def list(cls) -> list[str]: + return 
list(map(lambda c: c.value, cls)) # type: ignore + + +class Strategy(StrEnum): + """Computing paradigm.""" + + DIGITAL = "Digital" + """Use the digital paradigm.""" + ANALOG = "Analog" + """Use the analog paradigm.""" + SDAQC = "sDAQC" + """Use the step-wise digital-analog QC paradigm.""" + BDAQC = "bDAQC" + """Use the banged digital-analog QC paradigm.""" + + +class Endianness(StrEnum): + """The endianness convention to use.""" + + BIG = "Big" + """Use Big endianness.""" + LITTLE = "Little" + """Use little endianness.""" + + +class ResultType(StrEnum): + """Available data types for generating certain results.""" + + STRING = "String" + """String Type.""" + TORCH = "Torch" + """Torch Tensor Type.""" + NUMPY = "Numpy" + """Numpy Array Type.""" + + +class ParameterType(StrEnum): + """Parameter types available in qadence.""" + + FEATURE = "Feature" + """FeatureParameters act as input and are not trainable.""" + VARIATIONAL = "Variational" + """VariationalParameters are trainable.""" + FIXED = "Fixed" + """Fixed/ constant parameters are neither trainable nor act as input.""" + + +class TensorType(StrEnum): + """Tensor Types for converting blocks to tensors.""" + + SPARSEDIAGONAL = "SparseDiagonal" + """Convert a diagonal observable block to a sparse diagonal if possible.""" + DENSE = "Dense" + """Convert a block to a dense tensor.""" + SPARSE = "Sparse" + """Convert a observable block to a sparse tensor.""" + + +class LTSOrder(StrEnum): + """ + Lie-Trotter-Suzuki approximation order. 
+ """ + + BASIC = "BASIC" + """Basic.""" + ST2 = "ST2" + """ST2.""" + ST4 = "ST4" + """ST4.""" + + +class _DiffMode(StrEnum): + """Differentiation modes to choose from.""" + + GPSR = "gpsr" + """Basic generalized parameter shift rule.""" + AD = "ad" + """Automatic Differentiation.""" + + +class QubitSupportType(StrEnum): + """Qubit support types.""" + + GLOBAL = "global" + """Use global qubit support.""" + + +class Interaction(StrEnum): + """Interaction types used in + - [`add_interaction`][qadence.transpile.emulate.add_interaction]. + - [`hamiltonian_factory`][qadence.constructors.hamiltonians.hamiltonian_factory]. + """ + + ZZ = "ZZ" + """ZZ-Ising Interaction""" + NN = "NN" + """NN-Ising Interaction, N=(I-Z)/2""" + XY = "XY" + """XY Interaction""" + XYZ = "XYZ" + """XYZ Interaction""" + + +class _BackendName(StrEnum): + """The available backends for running circuits.""" + + PYQTORCH = "pyqtorch" + """The Pyqtorch backend.""" + BRAKET = "braket" + """The Braket backend.""" + PULSER = "pulser" + """The Pulser backend.""" + + +# If proprietary qadence_extensions is available, import the +# right function since more backends are supported. 
+try: + module = importlib.import_module("qadence_extensions.types") + BackendName = getattr(module, "BackendName") + DiffMode = getattr(module, "DiffMode") +except ModuleNotFoundError: + BackendName = _BackendName + DiffMode = _DiffMode + + +class StateGeneratorType(StrEnum): + """Methods to generate random states.""" + + RANDOM_ROTATIONS = "RandomRotations" + """Random Rotations.""" + HAAR_MEASURE_FAST = "HaarMeasureFast" + """HaarMeasure.""" + HAAR_MEASURE_SLOW = "HaarMeasureSlow" + """HaarMeasure non-optimized version.""" + + +class SerializationFormat(StrEnum): + """Available serialization formats for circuits.""" + + PT = "PT" + """The PT format used by Torch.""" + JSON = "JSON" + """The Json format.""" + + +class OverlapMethod(StrEnum): + """Overlap Methods to choose from.""" + + EXACT = "exact" + """Exact.""" + JENSEN_SHANNON = "jensen_shannon" + """Jensen-shannon.""" + COMPUTE_UNCOMPUTE = "compute_uncompute" + """Compute-uncompute.""" + SWAP_TEST = "swap_test" + """Swap-test.""" + HADAMARD_TEST = "hadamard_test" + """Hadamard-test.""" + + +class FigFormat(StrEnum): + """Available output formats for exporting visualized circuits to a file.""" + + PNG = "PNG" + """PNG format.""" + PDF = "PDF" + """PDF format.""" + SVG = "SVG" + """SVG format.""" + + +class AlgoHEvo(StrEnum): + """Hamiltonian Evolution algorithms that can be used by the backend.""" + + RK4 = "RK4" + """4th order Runge-Kutta approximation.""" + EIG = "EIG" + """Using Hamiltonian diagonalization.""" + EXP = "EXP" + """Using torch.matrix_exp on the generator matrix.""" + + +class LatticeTopology(StrEnum): + """Lattice topologies to choose from for the register.""" + + LINE = "line" + """Line-format lattice.""" + SQUARE = "square" + """Square lattice.""" + CIRCLE = "circle" + """Circular lattice.""" + ALL_TO_ALL = "all_to_all" + """All to all- connected lattice.""" + RECTANGULAR_LATTICE = "rectangular_lattice" + """Rectangular-shaped lattice.""" + TRIANGULAR_LATTICE = "triangular_lattice" + 
"""Triangular-shaped shape.""" + HONEYCOMB_LATTICE = "honeycomb_lattice" + """Honeycomb-shaped lattice.""" + ARBITRARY = "arbitrary" + """Arbitrarily-shaped lattice.""" + + +class GenDAQC(StrEnum): + """The type of interaction for the DAQC transform.""" + + ZZ = "ZZ" + """ZZ""" + NN = "NN" + """NN""" + + +class OpName(StrEnum): + """A list of all available of digital-analog operations.""" + + # Digital operations + X = "X" + """The X gate.""" + Y = "Y" + """The Y gate.""" + Z = "Z" + """The Z gate.""" + N = "N" + """The N = (1/2)(I-Z) operator""" + H = "H" + """The Hadamard gate.""" + I = "I" # noqa + """The Identity gate.""" + ZERO = "Zero" + """The zero gate.""" + RX = "RX" + """The RX gate.""" + RY = "RY" + """The RY gate.""" + RZ = "RZ" + """The RZ gate.""" + U = "U" + """The U gate.""" + CNOT = "CNOT" + """The CNOT gate.""" + CZ = "CZ" + """The CZ gate.""" + MCZ = "MCZ" + """The Multicontrol CZ gate.""" + HAMEVO = "HamEvo" + """The Hamiltonian Evolution operation.""" + CRX = "CRX" + """The Control RX gate.""" + MCRX = "MCRX" + """The Multicontrol RX gate.""" + CRY = "CRY" + """The Controlled RY gate.""" + MCRY = "MCRY" + """The Multicontrol RY gate.""" + CRZ = "CRZ" + """The Control RZ gate.""" + MCRZ = "MCRZ" + """The Multicontrol RZ gate.""" + CSWAP = "CSWAP" + """The Control SWAP gate.""" + T = "T" + """The T gate.""" + # FIXME: Tdagger is not currently supported by any backend + TDAGGER = "TDagger" + """The T dagger gate.""" + S = "S" + """The S gate.""" + SDAGGER = "SDagger" + """The S dagger gate.""" + SWAP = "SWAP" + """The SWAP gate.""" + PHASE = "PHASE" + """The PHASE gate.""" + CPHASE = "CPHASE" + """The controlled PHASE gate.""" + MCPHASE = "MCPHASE" + """The Multicontrol PHASE gate.""" + TOFFOLI = "Toffoli" + """The Toffoli gate.""" + # Analog operations + ANALOGENTANG = "AnalogEntanglement" + """The analog entanglement operation.""" + ANALOGRX = "AnalogRX" + """The analog RX operation.""" + ANALOGRY = "AnalogRY" + """The analog RY operation.""" + 
ANALOGRZ = "AnalogRZ" + """The analog RZ operation.""" + ANALOGSWAP = "AnalogSWAP" + """The analog SWAP operation.""" + ENTANG = "entangle" + """The entanglement operation.""" + WAIT = "wait" + """The wait operation.""" diff --git a/qadence/utils.py b/qadence/utils.py new file mode 100644 index 000000000..1b43a0a9f --- /dev/null +++ b/qadence/utils.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +import math +import warnings +from collections import Counter +from typing import Any + +import numpy as np +import sympy +import torch +from scipy.sparse.linalg import eigs +from torch.linalg import eigvals + +from qadence.logger import get_logger +from qadence.types import Endianness, ResultType, TNumber + +# Modules to be automatically added to the qadence namespace +__all__ = [] # type: ignore + + +logger = get_logger(__name__) + + +def bitstring_to_int(bstring: str, endianness: Endianness = Endianness.BIG) -> int: + # FIXME: Remove in v1.0.0 + warnings.warn("Deprecated function bitstring_to_int. Please use basis_to_int.", FutureWarning) + return basis_to_int(bstring, endianness) + + +def basis_to_int(basis: str, endianness: Endianness = Endianness.BIG) -> int: + """ + Converts a computational basis state to an int. + + - `endianness = "Big"` reads the most significant bit in qubit 0 (leftmost). + - `endianness = "Little"` reads the least significant bit in qubit 0 (leftmost). + + Arguments: + basis (str): A computational basis state. + endianness (Endianness): The Endianness when reading the basis state. + + Returns: + The corresponding integer. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.utils import basis_to_int, Endianness + + k = basis_to_int(basis="10", endianness=Endianness.BIG) + print(k) + ``` + """ + if endianness == Endianness.BIG: + return int(basis, 2) + else: + return int(basis[::-1], 2) + + +def int_to_basis( + k: int, n_qubits: int | None = None, endianness: Endianness = Endianness.BIG +) -> str: + """ + Converts an integer to its corresponding basis state. + + - `endianness = "Big"` stores the most significant bit in qubit 0 (leftmost). + - `endianness = "Little"` stores the least significant bit in qubit 0 (leftmost). + + Arguments: + k (int): The int to convert. + n_qubits (int): The total number of qubits in the basis state. + endianness (Endianness): The Endianness of the resulting basis state. + + Returns: + A computational basis state. + + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.utils import int_to_basis, Endianness + + bs = int_to_basis(k=1, n_qubits=2, endianness=Endianness.BIG) + print(bs) + ``` + """ + if n_qubits is None: + n_qubits = int(math.log(k + 0.6) / math.log(2)) + 1 + assert k <= 2**n_qubits - 1, "k can not be larger than 2**n_qubits-1." + basis = format(k, "0{}b".format(n_qubits)) + if endianness == Endianness.BIG: + return basis + else: + return basis[::-1] + + +def nqubits_to_basis( + n_qubits: int, + result_type: ResultType = ResultType.STRING, + endianness: Endianness = Endianness.BIG, +) -> list[str] | torch.Tensor | np.array: + """ + Creates all basis states for a given number of qubits, endianness and format. + + Arguments: + n_qubits: The total number of qubits. + result_type: The data type of the resulting states. + endianness: The Endianness of the resulting states. + + Returns: + The full computational basis for n_qubits. 
+ + Examples: + ```python exec="on" source="material-block" result="json" + from qadence.utils import nqubits_to_basis, Endianness, ResultType + basis_type = ResultType.Torch + bs = nqubits_to_basis(n_qubits=2, result_type= basis_type, endianness=Endianness.BIG) + print(bs) + ``` + """ + basis_strings = [int_to_basis(k, n_qubits, endianness) for k in range(0, 2**n_qubits)] + if result_type == ResultType.STRING: + return basis_strings + else: + basis_list = [list(map(int, tuple(basis))) for basis in basis_strings] + if result_type == ResultType.TORCH: + return torch.stack([torch.tensor(basis) for basis in basis_list]) + elif result_type == ResultType.NUMPY: + return np.stack([np.array(basis) for basis in basis_list]) + + +def samples_to_integers(samples: Counter, endianness: Endianness = Endianness.BIG) -> Counter: + """ + Converts a Counter of basis state samples to integer values + + Args: + samples (Counter({bits: counts})): basis state sample counter. + endianness (Endianness): endianness to use for conversion. 
+ + Returns: + Counter({ints: counts}): samples converted + """ + + return Counter({basis_to_int(k, endianness): v for k, v in samples.items()}) + + +def format_number(x: float | complex, num_digits: int = 3) -> str: + if isinstance(x, int): + return f"{x}" + elif isinstance(x, float): + return f"{x:.{num_digits}f}" + elif isinstance(x, complex): + re = "" if np.isclose(x.real, 0) else f"{x.real:.{num_digits}f}" + im = "" if np.isclose(x.imag, 0) else f"{x.imag:.{num_digits}f}" + if len(re) > 0 and len(im) > 0: + return f"{re}+{im}j" + elif len(re) > 0 and len(im) == 0: + return re + elif len(re) == 0 and len(im) > 0: + return f"{im}j" + else: + return "0" + else: + raise ValueError(f"Unknown number type: {type(x)}") + + +def format_parameter(p: sympy.Basic) -> str: + def round_expr(expr: sympy.Basic, num_digits: int) -> sympy.Basic: + return expr.xreplace({n: round(n, num_digits) for n in expr.atoms(sympy.Number)}) + + return str(round_expr(p, 3)) + + +def print_sympy_expr(expr: sympy.Expr, num_digits: int = 3) -> str: + """ + Converts all numerical values in a sympy expression to + something with fewer digits for better readability. 
+ """ + from qadence.parameters import sympy_to_numeric + + round_dict = {sympy_to_numeric(n): round(n, num_digits) for n in expr.atoms(sympy.Number)} + return str(expr.xreplace(round_dict)) + + +def isclose( + x: TNumber | Any, y: TNumber | Any, rel_tol: float = 1e-5, abs_tol: float = 1e-07 +) -> bool: + if isinstance(x, complex) or isinstance(y, complex): + return abs(x - y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol) # type: ignore + + return math.isclose(x, y, rel_tol=rel_tol, abs_tol=abs_tol) + + +def eigenvalues( + x: torch.Tensor, max_num_evals: int | None = None, max_num_gaps: int | None = None +) -> torch.Tensor: + if max_num_evals and not max_num_gaps: + # get specified number of eigenvalues of generator + eigenvals, _ = eigs(x.squeeze(0).numpy(), k=max_num_evals, which="LM") + elif max_num_gaps and not max_num_evals: + # get eigenvalues of generator corresponding to specified number of spectral gaps + k = int(np.ceil(0.5 * (1 + np.sqrt(1 + 8 * max_num_gaps)))) + eigenvals, _ = eigs(x.squeeze(0).numpy(), k=k, which="LM") + else: + # get all eigenvalues of generator + eigenvals = eigvals(x) + return eigenvals + + +def _round_complex(t: torch.Tensor, decimals: int = 4) -> torch.Tensor: + def _round(_t: torch.Tensor) -> torch.Tensor: + r = _t.real.round(decimals=decimals) + i = _t.imag.round(decimals=decimals) + return torch.complex(r, i) + + fn = torch.vmap(_round) + return fn(t) diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..d7e4d7d8c --- /dev/null +++ b/setup.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python +from __future__ import annotations + +from setuptools import setup + +if __name__ == "__main__": + setup() diff --git a/tests/backends/braket/test_conversion.py b/tests/backends/braket/test_conversion.py new file mode 100644 index 000000000..957a7562a --- /dev/null +++ b/tests/backends/braket/test_conversion.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import pytest +from braket.circuits.gates import ( + CNot, + 
CPhaseShift, + Rx, + Ry, + Rz, + Swap, +) +from braket.circuits.gates import ( + H as Braket_H, +) +from braket.circuits.gates import ( + S as Braket_S, +) +from braket.circuits.gates import ( + T as Braket_T, +) +from braket.circuits.gates import ( + X as Braket_X, +) +from braket.circuits.gates import ( + Y as Braket_Y, +) +from braket.circuits.gates import ( + Z as Braket_Z, +) +from braket.circuits.instruction import Instruction + +from qadence.backends.braket.convert_ops import convert_block +from qadence.blocks import AbstractBlock +from qadence.operations import CNOT, CPHASE, RX, RY, RZ, SWAP, H, S, T, X, Y, Z + + +@pytest.mark.parametrize( + "Qadence_op, braket_op", + [ + (CNOT(0, 1), CNot.cnot(0, 1)), + (CPHASE(0, 1, 0.5), CPhaseShift.cphaseshift(0, 1, 0.5)), + (H(0), Braket_H.h(0)), + (S(0), Braket_S.s(0)), + (SWAP(0, 1), Swap.swap(0, 1)), + (T(0), Braket_T.t(0)), + (X(0), Braket_X.x(0)), + (Y(0), Braket_Y.y(0)), + (Z(0), Braket_Z.z(0)), + (RX(0, 0.5), Rx.rx(0, 0.5)), + (RY(0, 0.5), Ry.ry(0, 0.5)), + (RZ(0, 0.5), Rz.rz(0, 0.5)), + ], +) +def test_block_conversion(Qadence_op: AbstractBlock, braket_op: Instruction) -> None: + assert convert_block(Qadence_op)[0] == braket_op diff --git a/tests/backends/braket/test_quantum_braket.py b/tests/backends/braket/test_quantum_braket.py new file mode 100644 index 000000000..8a49e96e8 --- /dev/null +++ b/tests/backends/braket/test_quantum_braket.py @@ -0,0 +1,249 @@ +from __future__ import annotations + +from collections import Counter + +import numpy as np +import numpy.typing as npt +import pytest +import torch +from braket.circuits import Circuit +from torch import Tensor + +from qadence.backends import backend_factory +from qadence.backends.braket import Backend +from qadence.blocks import AbstractBlock, PrimitiveBlock +from qadence.circuit import QuantumCircuit +from qadence.constructors import ising_hamiltonian, single_z, total_magnetization +from qadence.operations import CNOT, CPHASE, RX, RY, RZ, SWAP, H, I, 
S, T, U, X, Y, Z, chain + + +def custom_obs() -> AbstractBlock: + return X(0) * 2.0 + X(1) * 3.0 + Z(0) + Z(1) + Y(2) * 1.5 + Y(3) * 2.5 + + +def test_register_circuit(parametric_circuit: QuantumCircuit) -> None: + backend = Backend() + conv_circ = backend.circuit(parametric_circuit) + assert isinstance(conv_circ.native, Circuit) + + +@pytest.mark.parametrize( + "observable", + [ + total_magnetization(4), + single_z(0), + single_z(1) * 3.0, + ising_hamiltonian(4, x_terms=np.array([0.1, 0.2, 0.3, 0.4])), + custom_obs(), + ], +) +def test_expectation_value(parametric_circuit: QuantumCircuit, observable: AbstractBlock) -> None: + batch_size = 1 + values = {"x": 0.5} + + bkd = backend_factory(backend="braket", diff_mode=None) + bra_circ, bra_obs, embed, params = bkd.convert(parametric_circuit, observable) + expval = bkd.expectation(bra_circ, bra_obs, embed(params, values)) + assert len(expval) == batch_size + + +def test_expectation_value_list_of_obs(parametric_circuit: QuantumCircuit) -> None: + batch_size = 1 + values = {"x": 0.5} # torch.rand(batch_size)} + observables = [ising_hamiltonian(4), total_magnetization(4), single_z(0)] + n_obs = len(observables) + + bkd = backend_factory(backend="braket", diff_mode=None) + bra_circ, bra_obs, embed, params = bkd.convert(parametric_circuit, observables) + expval = bkd.expectation(bra_circ, bra_obs, embed(params, values)) + + assert isinstance(expval, torch.Tensor) + assert expval.shape == (batch_size, n_obs) + + +@pytest.mark.parametrize( + "observable, result", + [ + ([total_magnetization(4) for _ in range(4)], torch.tensor([4.0 for _ in range(4)])), + ([Z(k) for k in range(4)], torch.tensor([1.0 for _ in range(4)])), + ], +) +def test_list_observables(observable: AbstractBlock, result: Tensor) -> None: + circuit = QuantumCircuit(4, chain(Z(k) for k in range(4))) + values = {"x": 0.5} + + bkd = backend_factory(backend="braket", diff_mode=None) + bra_circ, bra_obs, embed, params = bkd.convert(circuit, observable) + expval = 
bkd.expectation(bra_circ, bra_obs, embed(params, values)) + assert torch.allclose(expval, result) + + +@pytest.mark.parametrize( + "gate, state", + [ + (X(0), np.array([[0.0 + 0.0j, 1.0 + 0.0j]])), + (Y(0), np.array([[0.0 + 0.0j, 0.0 + 1.0j]])), + (Z(0), np.array([[1.0 + 0.0j, 0.0 + 0.0j]])), + (T(0), np.array([[1.0 + 0.0j, 0.0 + 0.0j]])), + (S(0), np.array([[1.0 + 0.0j, 0.0 + 0.0j]])), + (H(0), 1.0 / np.sqrt(2) * np.array([1.0, 1.0])), + (I(0), np.array([[1.0 + 0.0j, 0.0 + 0.0j]])), + ], +) +def test_run_with_nonparametric_single_qubit_gates( + gate: PrimitiveBlock, state: npt.NDArray +) -> None: + circuit = QuantumCircuit(1, gate) + backend = Backend() + wf = backend.run(backend.circuit(circuit)) + assert np.allclose(wf, state) + + +@pytest.mark.parametrize( + "parametric_gate, state", + [ + ( + RX(0, 0.5), + np.array( + [[0.9689124217106447 + 0.0j, 0.0 - 0.24740395925452294j]], dtype=np.complex128 + ), + ), + ( + RY(0, 0.5), + np.array( + [[0.9689124217106447 + 0.0j, 0.24740395925452294 + 0.0j]], dtype=np.complex128 + ), + ), + ( + RZ(0, 0.5), + np.array( + [[0.9689124217106447 - 0.24740395925452294j, 0.0 + 0.0j]], dtype=np.complex128 + ), + ), + ( + U(0, 0.25, 0.5, 0.75), + np.array( + [[0.850300645292233 - 0.464521359638928j, 0.239712769302101 + 0.061208719054814j]], + dtype=np.complex128, + ), + ), + ], +) +def test_run_with_parametric_single_qubit_gates( + parametric_gate: PrimitiveBlock, state: npt.NDArray +) -> None: + circuit = QuantumCircuit(1, parametric_gate) + backend = Backend() + wf = backend.run(backend.circuit(circuit)) + assert np.allclose(wf, state) + + +@pytest.mark.parametrize( + "parametric_gate, state", + [ + ( + CNOT(0, 1), + np.array( + [[1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]], + dtype=np.complex128, + ), + ), + ( + X(0) * CNOT(0, 1), + np.array( + [[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j]], + dtype=np.complex128, + ), + ), + ( + H(0) * SWAP(0, 1), + np.array( + [[0.70710678 + 0.0j, 0.70710678 + 0.0j, 0.0 + 0.0j, 0.0 + 
0.0j]], + dtype=np.complex128, + ), + ), + ], +) +def test_run_with_nonparametric_two_qubit_gates( + parametric_gate: PrimitiveBlock, state: npt.NDArray +) -> None: + circuit = QuantumCircuit(2, parametric_gate) + backend = Backend() + wf = backend.run(backend.circuit(circuit)) + assert np.allclose(wf, state) + + +@pytest.mark.parametrize( + "parametric_gate, state", + [ + ( + (X(0) @ X(1)) * CPHASE(0, 1, 0.5), + np.array( + [[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.87758256 + 0.47942554j]], + dtype=np.complex128, + ), + ), + ], +) +def test_run_with_parametric_two_qubit_gates( + parametric_gate: PrimitiveBlock, state: npt.NDArray +) -> None: + circuit = QuantumCircuit(2, parametric_gate) + backend = Backend() + wf = backend.run(backend.circuit(circuit)) + assert np.allclose(wf, state) + + +@pytest.mark.parametrize( + "gate, state", + [ + ( + H(0), + np.array([0.0], dtype=np.float64), + ), + ( + X(0), + np.array([-1.0], dtype=np.float64), + ), + ( + Y(0), + np.array([-1.0], dtype=np.float64), + ), + ( + Z(0), + np.array([1.0], dtype=np.float64), + ), + ], +) +def test_expectation_with_pauli_gates(gate: PrimitiveBlock, state: npt.NDArray) -> None: + circuit = QuantumCircuit(1, gate) + observable = Z(0) + backend = Backend() + bra_circ, bra_obs, _, _ = backend.convert(circuit, observable) + expectation_value = backend.expectation(bra_circ, bra_obs) + assert np.isclose(expectation_value, state) + + +@pytest.mark.flaky(max_runs=5) +def test_sample_with_hadamard_gate() -> None: + gate = H(0) + circuit = QuantumCircuit(1, gate) + backend = Backend() + sample = backend.sample(backend.circuit(circuit), n_shots=10)[0] + assert 4 <= sample["0"] <= 6 + assert 4 <= sample["1"] <= 6 + + +@pytest.mark.parametrize( + "gate, state", + [ + (X(0), np.array([[["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"]]])), + (Y(0), np.array([[["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"], ["1"]]])), + (Z(0), np.array([[["0"], ["0"], ["0"], ["0"], ["0"], ["0"], 
["0"], ["0"], ["0"], ["0"]]])), + ], +) +def test_sample_with_pauli_gates(gate: PrimitiveBlock, state: npt.NDArray) -> None: + circuit = QuantumCircuit(1, gate) + backend = Backend() + sample = backend.sample(backend.circuit(circuit), n_shots=10)[0] + assert sample == Counter(state.flatten()) diff --git a/tests/backends/pulser_basic/test_configuration.py b/tests/backends/pulser_basic/test_configuration.py new file mode 100644 index 000000000..a526dac06 --- /dev/null +++ b/tests/backends/pulser_basic/test_configuration.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch +from pulser_simulation.simconfig import SimConfig + +from qadence import QuantumCircuit +from qadence.backends.pulser import Backend +from qadence.blocks import chain +from qadence.divergences import js_divergence +from qadence.operations import RY, entangle +from qadence.register import Register + +SEED = 42 + + +def test_configuration() -> None: + torch.manual_seed(SEED) + np.random.seed(SEED) + + blocks = chain(entangle(892, qubit_support=(0, 1)), RY(0, torch.pi / 2)) + register = Register(2) + circuit = QuantumCircuit(register, blocks) + + # first try the standard execution with default configuration + backend1 = Backend() + seq1 = backend1.circuit(circuit) + sample1 = backend1.sample(seq1, n_shots=500)[0] + + # then add some noise and a different sampling rate + sim_config = SimConfig(noise=("SPAM",), runs=10, eta=0.5) + sampling_rate = 0.1 + + # standard configuration method using default configuration class + conf = Backend.default_configuration() + conf.sim_config = sim_config + conf.sampling_rate = sampling_rate + backend2 = Backend(config=conf) + seq2 = backend2.circuit(circuit) + sample2 = backend2.sample(seq2, n_shots=500)[0] + + div = js_divergence(sample1, sample2) + assert not np.isclose(div, 0.0, rtol=1e-2, atol=1e-2) + + +def test_configuration_as_dict() -> None: + torch.manual_seed(SEED) + np.random.seed(SEED) + + blocks = 
chain(entangle(892, qubit_support=(0, 1)), RY(0, torch.pi / 2)) + register = Register(2) + circuit = QuantumCircuit(register, blocks) + + # first try the standard execution with default configuration + backend1 = Backend() + seq1 = backend1.circuit(circuit) + sample1 = backend1.sample(seq1, n_shots=500)[0] + + # then add some noise and a different sampling rate + sim_config = SimConfig(noise=("SPAM",), runs=10, eta=0.5) + sampling_rate = 0.1 + + conf = {"sim_config": sim_config, "sampling_rate": sampling_rate} + backend2 = Backend(config=conf) # type: ignore[arg-type] + seq2 = backend2.circuit(circuit) + sample2 = backend2.sample(seq2, n_shots=500)[0] + + div = js_divergence(sample1, sample2) + assert not np.isclose(div, 0.0, rtol=1e-2, atol=1e-2) + + wrong_conf = {"wrong": "value"} + with pytest.raises(ValueError): + backend3 = Backend(config=wrong_conf) # type: ignore[arg-type] diff --git a/tests/backends/pulser_basic/test_differentiation.py b/tests/backends/pulser_basic/test_differentiation.py new file mode 100644 index 000000000..b44546406 --- /dev/null +++ b/tests/backends/pulser_basic/test_differentiation.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch +from metrics import PULSER_GPSR_ACCEPTANCE + +from qadence import DifferentiableBackend, DiffMode, Parameter, QuantumCircuit, add_interaction +from qadence.backends.pulser import Backend as PulserBackend +from qadence.backends.pyqtorch import Backend as PyQBackend +from qadence.blocks import chain +from qadence.constructors import total_magnetization +from qadence.operations import RX, RY, AnalogRot, AnalogRX, wait + + +def circuit(circ_id: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + + if circ_id == 1: + block = chain(RX(0, x), RY(1, x)) + elif circ_id == 2: + block = chain(AnalogRot(duration=1000 * x / 3.0, omega=3.0)) + if circ_id == 3: + block = chain(AnalogRX(x)) + elif 
circ_id == 4: + block = chain( + AnalogRX(np.pi / 2), + AnalogRot(duration=1000 * x / 3.0, omega=4.0, delta=3.0), + wait(500), + AnalogRX(np.pi / 2), + ) + + circ = QuantumCircuit(2, block) + + return circ + + +@pytest.mark.slow +@pytest.mark.parametrize( + "circ_id", + [1, 2, 3, 4], +) +def test_pulser_gpsr(circ_id: int) -> None: + torch.manual_seed(42) + np.random.seed(42) + + if circ_id == 1: + spacing = 30.0 + else: + spacing = 8.0 + + # define circuits + circ = circuit(circ_id) + circ_pyq = add_interaction(circ, spacing=spacing) + + # create input values + xs = torch.linspace(1, 2 * np.pi, 30, requires_grad=True) + values = {"x": xs} + + obs = total_magnetization(2) + + # run with pyq backend + pyq_backend = PyQBackend() + conv = pyq_backend.convert(circ_pyq, obs) + pyq_circ, pyq_obs, embedding_fn, params = conv + diff_backend = DifferentiableBackend(pyq_backend, diff_mode=DiffMode.AD) + expval_pyq = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) + dexpval_x_pyq = torch.autograd.grad( + expval_pyq, values["x"], torch.ones_like(expval_pyq), create_graph=True + )[0] + + # run with pulser backend + pulser_backend = PulserBackend(config={"spacing": spacing}) # type: ignore[arg-type] + conv = pulser_backend.convert(circ, obs) + pulser_circ, pulser_obs, embedding_fn, params = conv + diff_backend = DifferentiableBackend(pulser_backend, diff_mode=DiffMode.GPSR, shift_prefac=0.2) + expval_pulser = diff_backend.expectation(pulser_circ, pulser_obs, embedding_fn(params, values)) + dexpval_x_pulser = torch.autograd.grad( + expval_pulser, values["x"], torch.ones_like(expval_pulser), create_graph=True + )[0] + + # acceptance is checked by calculating mean absolute deviation between every derivative value + # obtained with pyq and pulser backends + assert ( + torch.mean(torch.abs(dexpval_x_pyq - dexpval_x_pulser)).item() < PULSER_GPSR_ACCEPTANCE + ), "df/dx not equal." 
diff --git a/tests/backends/pulser_basic/test_entanglement.py b/tests/backends/pulser_basic/test_entanglement.py new file mode 100644 index 000000000..d955a04fd --- /dev/null +++ b/tests/backends/pulser_basic/test_entanglement.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from collections import Counter + +import pytest +import torch +from metrics import JS_ACCEPTANCE + +from qadence import sample +from qadence.backend import BackendName +from qadence.backends.pulser import Device +from qadence.blocks import AbstractBlock, chain +from qadence.divergences import js_divergence +from qadence.operations import RY, entangle +from qadence.register import Register + + +@pytest.mark.parametrize( + "blocks,register,goal", + [ + # Bell state + ( + chain(entangle(1000, qubit_support=(0, 1)), RY(0, 3 * torch.pi / 2)), + Register(2), + Counter({"00": 250, "11": 250}), + ), + ], +) +def test_entanglement(blocks: AbstractBlock, register: Register, goal: Counter) -> None: + config = {"device_type": Device.REALISTIC} + res = sample(register, blocks, backend=BackendName.PULSER, n_shots=500, configuration=config)[0] + assert js_divergence(res, goal) < JS_ACCEPTANCE diff --git a/tests/backends/pulser_basic/test_pulser_conversion.py b/tests/backends/pulser_basic/test_pulser_conversion.py new file mode 100644 index 000000000..18d3c94a8 --- /dev/null +++ b/tests/backends/pulser_basic/test_pulser_conversion.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +from typing import Callable + +import numpy as np +import pytest +from metrics import JS_ACCEPTANCE +from pulser.register.register import Register as PulserRegister +from pulser.sequence.sequence import Sequence +from pulser_simulation.simulation import QutipEmulator + +from qadence.backends.pulser.backend import make_sequence +from qadence.backends.pulser.config import Configuration +from qadence.backends.pulser.devices import Device, RealisticDevice +from qadence.backends.pulser.pulses import digital_rot_pulse, 
entangle_pulse +from qadence.blocks import AbstractBlock +from qadence.blocks.analog import Interaction +from qadence.circuit import QuantumCircuit +from qadence.divergences import js_divergence +from qadence.operations import RX, RY, entangle +from qadence.register import Register as QadenceRegister + + +@pytest.mark.parametrize( + "Qadence_op, func", + [ + (RX(0, 1.5), lambda ch: digital_rot_pulse(1.5, 0, ch)), + (RY(1, 1.5), lambda ch: digital_rot_pulse(1.5, np.pi / 2, ch)), + ], +) +def test_single_qubit_block_conversion(Qadence_op: AbstractBlock, func: Callable) -> None: + spacing = 10 + n_qubits = 2 + reg = QadenceRegister(n_qubits) + circ = QuantumCircuit(reg, Qadence_op) + config = Configuration(spacing=spacing, device_type=Device.REALISTIC) + + seq1 = make_sequence(circ, config) + sim1 = QutipEmulator.from_sequence(seq1) + res1 = sim1.run() + sample1 = res1.sample_final_state(500) + + reg = PulserRegister.rectangle(1, n_qubits, spacing=spacing) + seq2 = Sequence(reg, RealisticDevice) + seq2.declare_channel("local", "rydberg_local") + seq2.target(Qadence_op.qubit_support, "local") + pulse = func(seq2.device.channels["rydberg_local"]) + seq2.add(pulse, "local") + sim2 = QutipEmulator.from_sequence(seq2) + res2 = sim2.run() + sample2 = res2.sample_final_state(500) + assert js_divergence(sample1, sample2) < JS_ACCEPTANCE + + +@pytest.mark.parametrize( + "Qadence_op, func", + [ + (entangle(500), lambda ch: entangle_pulse(500, ch)), + ], +) +def test_multiple_qubit_block_conversion(Qadence_op: AbstractBlock, func: Callable) -> None: + spacing = 10 + reg = QadenceRegister(2) + circ = QuantumCircuit(reg, Qadence_op) + config = Configuration(spacing=spacing) + + seq1 = make_sequence(circ, config) + sim1 = QutipEmulator.from_sequence(seq1) + res1 = sim1.run() + sample1 = res1.sample_final_state(500) + + reg = PulserRegister.rectangle(1, 2, spacing=spacing) + seq2 = Sequence(reg, RealisticDevice) + seq2.declare_channel("global", "rydberg_global") + 
seq2.add(func(seq2.device.channels["rydberg_global"]), "global") + sim2 = QutipEmulator.from_sequence(seq2) + res2 = sim2.run() + sample2 = res2.sample_final_state(500) + + assert js_divergence(sample1, sample2) < JS_ACCEPTANCE + + +def test_interaction() -> None: + with pytest.raises(ValueError, match="Pulser does not support other interactions than 'NN'"): + reg = QadenceRegister(2) + circ = QuantumCircuit(reg, entangle(100)) + config = Configuration(spacing=10, interaction=Interaction.XY) + make_sequence(circ, config) diff --git a/tests/backends/pulser_basic/test_quantum_pulser.py b/tests/backends/pulser_basic/test_quantum_pulser.py new file mode 100644 index 000000000..aa6ed5cc1 --- /dev/null +++ b/tests/backends/pulser_basic/test_quantum_pulser.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import pytest +import torch + +from qadence import ( + RX, + BackendName, + FeatureParameter, + QuantumCircuit, + VariationalParameter, + backend_factory, + kron, + total_magnetization, +) + + +@pytest.fixture +def batched_circuit() -> QuantumCircuit: + n_qubits = 3 + phi = FeatureParameter("phi") + theta = VariationalParameter("theta") + + block = kron(RX(0, phi), RX(1, theta), RX(2, torch.pi)) + return QuantumCircuit(n_qubits, block) + + +def test_expectation_batched(batched_circuit: QuantumCircuit) -> None: + batch_size = 3 + values = {"phi": torch.tensor([torch.pi / 5, torch.pi / 4, torch.pi / 3])} + observables = [ + total_magnetization(batched_circuit.n_qubits), + 2 * total_magnetization(batched_circuit.n_qubits), + ] + + backend = backend_factory(backend=BackendName.PULSER, diff_mode=None) + circ, obs, embed, params = backend.convert(batched_circuit, observable=observables) + expval = backend.expectation(circ, observable=obs, param_values=embed(params, values)) + assert expval.shape == (batch_size, len(observables)) + + +def test_run_batched(batched_circuit: QuantumCircuit) -> None: + batch_size = 3 + values = {"phi": torch.tensor([torch.pi / 5, torch.pi 
/ 4, torch.pi / 3])} + + backend = backend_factory(backend=BackendName.PULSER, diff_mode=None) + circ, _, embed, params = backend.convert(batched_circuit) + wf = backend.run(circ, param_values=embed(params, values)) + + assert wf.shape == (batch_size, 2**batched_circuit.n_qubits) diff --git a/tests/backends/pulser_basic/test_rotations.py b/tests/backends/pulser_basic/test_rotations.py new file mode 100644 index 000000000..05dc13d83 --- /dev/null +++ b/tests/backends/pulser_basic/test_rotations.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from collections import Counter + +import pytest +import torch +from metrics import JS_ACCEPTANCE + +from qadence import BackendName, QuantumCircuit, QuantumModel +from qadence.blocks import AbstractBlock, chain +from qadence.divergences import js_divergence +from qadence.operations import RX, RY, AnalogRX, AnalogRY +from qadence.register import Register +from qadence.types import DiffMode + + +@pytest.mark.parametrize( + "block,goal", + [ + (RY(0, -torch.pi / 2), Counter({"00": 260, "10": 240})), + (RY(1, -torch.pi / 2), Counter({"00": 260, "01": 240})), + (RX(0, -torch.pi / 2), Counter({"00": 260, "10": 240})), + (RX(1, -torch.pi / 2), Counter({"00": 260, "01": 240})), + ], +) +def test_single_rotation(block: AbstractBlock, goal: Counter) -> None: + register = Register.from_coordinates([(-0.5, 0), (0.5, 0)], lattice="line") + circuit = QuantumCircuit(register, block) + model_pulser = QuantumModel( + circuit=circuit, backend=BackendName.PULSER, diff_mode=DiffMode.GPSR + ) + sample_pulser = model_pulser.sample(n_shots=500)[0] + + assert js_divergence(sample_pulser, goal) < JS_ACCEPTANCE + + +@pytest.mark.parametrize( + "single_rotation,global_rotation", + [ + (chain(RY(0, -torch.pi / 2), RY(1, -torch.pi / 2)), AnalogRY(-torch.pi / 2)), + (chain(RX(0, -torch.pi / 2), RX(1, -torch.pi / 2)), AnalogRX(-torch.pi / 2)), + ], +) +def test_single_rotation_multiple_qubits( + single_rotation: AbstractBlock, global_rotation: 
AbstractBlock +) -> None: + register = Register.from_coordinates([(-0.5, 0), (0.5, 0)], lattice="line") + + circuit1 = QuantumCircuit(register, single_rotation) + model_pulser1 = QuantumModel( + circuit=circuit1, backend=BackendName.PULSER, diff_mode=DiffMode.GPSR + ) + sample1 = model_pulser1.sample(n_shots=500)[0] + + circuit2 = QuantumCircuit(register, global_rotation) + model_pulser2 = QuantumModel( + circuit=circuit2, backend=BackendName.PULSER, diff_mode=DiffMode.GPSR + ) + sample2 = model_pulser2.sample(n_shots=500)[0] + + assert js_divergence(sample1, sample2) < JS_ACCEPTANCE diff --git a/tests/backends/pyq/test_analog_emulation.py b/tests/backends/pyq/test_analog_emulation.py new file mode 100644 index 000000000..67611b717 --- /dev/null +++ b/tests/backends/pyq/test_analog_emulation.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +from collections import Counter +from typing import Any, Callable + +import pytest +import torch +from metrics import JS_ACCEPTANCE + +from qadence import QuantumCircuit, QuantumModel, run, sample +from qadence.blocks.abstract import AbstractBlock +from qadence.blocks.analog import AnalogBlock +from qadence.operations import ( + RX, + RY, + RZ, + AnalogRot, + AnalogRX, + AnalogRY, + AnalogRZ, + HamEvo, + I, + chain, + kron, + wait, +) +from qadence.overlap import js_divergence +from qadence.register import Register +from qadence.states import equivalent_state, random_state +from qadence.transpile import add_interaction + + +def layer(Op: Any, n_qubits: int, angle: float) -> AbstractBlock: + return kron(Op(i, angle) for i in range(n_qubits)) + + +d = 3.75 + + +@pytest.mark.parametrize( + "analog, digital_fn", + [ + # FIXME: I commented this test because it was still running + # and failing despite the pytest.mark.xfail. 
+ # pytest.param( # enable with next pulser release + # wait(duration=1), lambda n: I(n), marks=pytest.mark.xfail + # ), + (AnalogRX(angle=torch.pi), lambda n: layer(RX, n, torch.pi)), + (AnalogRY(angle=torch.pi), lambda n: layer(RY, n, torch.pi)), + (AnalogRZ(angle=torch.pi), lambda n: layer(RZ, n, torch.pi)), + ], +) +@pytest.mark.parametrize( + "register", + [ + Register.from_coordinates([(0, 0)]), + Register.from_coordinates([(-d, 0), (d, 0)]), + Register.from_coordinates([(-d, 0), (d, 0), (0, d)]), + Register.from_coordinates([(-d, 0), (d, 0), (0, d), (0, -d)]), + Register.from_coordinates([(-d, 0), (d, 0), (0, d), (0, -d), (0, 0)]), + Register.from_coordinates([(-d, 0), (d, 0), (0, d), (0, -d), (0, 0), (d, d)]), + ], +) +def test_far_add_interaction(analog: AnalogBlock, digital_fn: Callable, register: Register) -> None: + emu_block = add_interaction(register, analog, spacing=8.0) + emu_samples = sample(register, emu_block, backend="pyqtorch")[0] # type: ignore[arg-type] + pulser_samples = sample(register, analog, backend="pulser")[0] # type: ignore[arg-type] + assert js_divergence(pulser_samples, emu_samples) < JS_ACCEPTANCE + + wf = random_state(register.n_qubits) + digital = digital_fn(register.n_qubits) + emu_state = run(register, emu_block, state=wf) + dig_state = run(register, digital, state=wf) + assert equivalent_state(emu_state, dig_state, atol=1e-3) + + +@pytest.mark.parametrize( + "block", + [ + AnalogRX(angle=torch.pi), + AnalogRY(angle=torch.pi), + chain(wait(duration=2000), AnalogRX(angle=torch.pi)), + chain( + AnalogRot(duration=1000, omega=1.0, delta=0.0, phase=0), + AnalogRot(duration=1000, omega=0.0, delta=1.0, phase=0), + ), + kron(AnalogRX(torch.pi, qubit_support=(0, 1)), wait(1000, qubit_support=(2, 3))), + ], +) +@pytest.mark.parametrize("register", [Register.from_coordinates([(0, 5), (5, 5), (5, 0), (0, 0)])]) +@pytest.mark.flaky(max_runs=5) +def test_close_add_interaction(block: AnalogBlock, register: Register) -> None: + pulser_samples 
= sample(register, block, backend="pulser", n_shots=1000)[0] # type: ignore[arg-type] # noqa: E501 + emu_block = add_interaction(register, block, spacing=8.0) + pyqtorch_samples = sample(register, emu_block, backend="pyqtorch", n_shots=1000)[0] # type: ignore[arg-type] # noqa: E501 + assert js_divergence(pulser_samples, pyqtorch_samples) < JS_ACCEPTANCE + + +def test_mixing_digital_analog() -> None: + from qadence import X, chain, kron + + b = chain(kron(X(0), X(1)), AnalogRX(torch.pi)) + r = Register.from_coordinates([(0, 10), (0, -10)]) + + assert js_divergence(sample(r, b)[0], Counter({"00": 100})) < JS_ACCEPTANCE + + +def test_custom_interaction_function() -> None: + circuit = QuantumCircuit(2, wait(duration=100)) + emulated = add_interaction(circuit, interaction=lambda reg, pairs: I(0)) + assert emulated.block == HamEvo(I(0), 100 / 1000) + + m = QuantumModel(circuit, configuration={"interaction": lambda reg, pairs: I(0)}) + assert m._circuit.abstract.block == HamEvo(I(0), 100 / 1000) diff --git a/tests/backends/pyq/test_quantum_pyq.py b/tests/backends/pyq/test_quantum_pyq.py new file mode 100644 index 000000000..752815d52 --- /dev/null +++ b/tests/backends/pyq/test_quantum_pyq.py @@ -0,0 +1,846 @@ +from __future__ import annotations + +import random +from collections import Counter +from itertools import count, product +from typing import Any, Callable + +import numpy as np +import pytest +import strategies as st +import torch +from hypothesis import given, settings +from pyqtorch.core.circuit import QuantumCircuit as PyQQuantumCircuit +from sympy import acos +from torch import Tensor + +from qadence import BackendName, DiffMode +from qadence.backends import backend_factory +from qadence.backends.pyqtorch.backend import Backend +from qadence.backends.pyqtorch.config import Configuration as PyqConfig +from qadence.blocks import ( + AbstractBlock, + PrimitiveBlock, + chain, + kron, +) +from qadence.circuit import QuantumCircuit +from qadence.constructors import 
( + chebyshev_feature_map, + hea, + ising_hamiltonian, + total_magnetization, + zz_hamiltonian, +) +from qadence.ml_tools import TrainConfig, train_with_grad +from qadence.models import QuantumModel +from qadence.operations import ( + CNOT, + CPHASE, + CRX, + CRY, + CRZ, + RX, + RY, + RZ, + SWAP, + AnalogSWAP, + H, + HamEvo, + I, + S, + T, + U, + X, + Y, + Z, +) +from qadence.parameters import FeatureParameter, Parameter +from qadence.transpile import set_trainable + + +def custom_obs() -> AbstractBlock: + return X(0) * 2.0 + X(1) * 3.0 + Z(0) + Z(1) + Y(2) * 1.5 + Y(3) * 2.5 + + +def test_register_circuit(parametric_circuit: QuantumCircuit) -> None: + backend = Backend() + conv_circ = backend.circuit(parametric_circuit) + assert len(conv_circ.native.operations) > 0 + + +def wf_is_normalized(wf: torch.Tensor) -> torch.Tensor: + return torch.isclose(sum(torch.flatten(torch.abs(wf) ** 2)), torch.tensor(1.00)) + + +@pytest.mark.parametrize( + "observable", + [ + total_magnetization(4), + # single_z(0), # FIXME: enable those again + # single_z(1) * 3.0, + ising_hamiltonian(4, x_terms=np.array([0.1, 0.2, 0.3, 0.4])), + custom_obs(), + ], +) +def test_expectation_value(parametric_circuit: QuantumCircuit, observable: AbstractBlock) -> None: + # TODO: refactor parametric_circuit fixture + circuit = QuantumCircuit(parametric_circuit.n_qubits, parametric_circuit.block) + + batch_size = 10 + values = {"x": torch.rand(batch_size)} + + bkd = Backend() + pyqtorch_circ, pyqtorch_obs, embed, params = bkd.convert(parametric_circuit, observable) + expval = bkd.expectation(pyqtorch_circ, pyqtorch_obs, embed(params, values)) + assert len(expval) == batch_size + + +@pytest.mark.parametrize( + "observable, result", + [ + ([total_magnetization(4) for _ in range(4)], torch.tensor([4.0 for _ in range(4)])), + ([Z(k) for k in range(4)], torch.tensor([1.0 for _ in range(4)])), + ], +) +def test_list_observables(observable: AbstractBlock, result: Tensor) -> None: + circuit = QuantumCircuit(4, 
chain(Z(k) for k in range(4))) + + bkd = backend_factory(backend="pyqtorch", diff_mode=None) + bra_circ, bra_obs, embed, params = bkd.convert(circuit, observable) + expval = bkd.expectation(bra_circ, bra_obs, embed(params, {})) + assert torch.allclose(expval, result) + + +@pytest.mark.parametrize("n_obs, loop_expectation", product([1, 2, 3], [True, False])) +def test_list_observables_with_batches(n_obs: int, loop_expectation: bool) -> None: + n_qubits = 4 + x = FeatureParameter("x") + circuit = QuantumCircuit(n_qubits, kron(RX(k, x) for k in range(n_qubits))) + + observables = [] + for i in range(3): + o = float(i + 1) * ising_hamiltonian(4) + observables.append(o) + + observables = observables[:n_obs] + batch_size = 10 + values = {"x": torch.rand(batch_size)} + + model = QuantumModel(circuit, observables, configuration={"loop_expectation": loop_expectation}) + expval = model.expectation(values) + + assert len(expval.shape) == 2 and expval.shape[0] == batch_size and expval.shape[1] == n_obs + factors = torch.linspace(1, n_obs, n_obs) + for i, e in enumerate(expval): + tmp = torch.div(e, factors * e[0]) + assert torch.allclose(tmp, torch.ones(n_obs)) + + +@pytest.mark.parametrize("n_shots", [5, 10, 100, 1000, 10000]) +def test_sample(parametric_circuit: QuantumCircuit, n_shots: int) -> None: + batch_size = 10 + values = {"x": torch.rand(batch_size)} + + bkd = Backend() + pyqtorch_circ, _, embed, params = bkd.convert(parametric_circuit) + samples = bkd.sample(pyqtorch_circ, embed(params, values), n_shots=n_shots) + assert len(samples) == batch_size # type: ignore[arg-type] + assert all(sum(s.values()) == n_shots for s in samples) + + +@pytest.mark.xfail(reason="Removed params from native_circuit") +def test_native_circuit(parametric_circuit: QuantumCircuit) -> None: + backend = Backend() + conv_circ = backend.circuit(parametric_circuit) + assert isinstance(conv_circ.native, PyQQuantumCircuit) + assert len([p for p in conv_circ.native.parameters()]) == 
parametric_circuit.num_parameters + + +def test_raise_error_for_ill_dimensioned_initial_state() -> None: + circuit = QuantumCircuit(2, X(0) @ X(1)) + backend = Backend() + initial_state = torch.tensor([1.0, 0.0], dtype=torch.complex128) + with pytest.raises(ValueError): + backend.run(backend.circuit(circuit), state=initial_state) + + +@pytest.mark.parametrize( + "gate, state", + [ + (X(0), torch.tensor([0.0 + 0.0j, 1.0 + 0.0j], dtype=torch.complex128)), + (Y(0), torch.tensor([0.0 + 0.0j, 0.0 + 1.0j], dtype=torch.complex128)), + (Z(0), torch.tensor([1.0 + 0.0j, 0.0 + 0.0j], dtype=torch.complex128)), + (T(0), torch.tensor([1.0 + 0.0j, 0.0 + 0.0j], dtype=torch.complex128)), + (S(0), torch.tensor([1.0 + 0.0j, 0.0 + 0.0j], dtype=torch.complex128)), + (H(0), torch.tensor(1.0 / np.sqrt(2) * np.array([1.0, 1.0]), dtype=torch.complex128)), + ], +) +def test_run_with_nonparametric_single_qubit_gates( + gate: PrimitiveBlock, state: torch.Tensor +) -> None: + circuit = QuantumCircuit(1, gate) + backend = Backend() + pyqtorch_circ = backend.circuit(circuit) + wf = backend.run(pyqtorch_circ) + assert torch.allclose(wf, state) + # Same test by passing explicitly the initial state. 
+ initial_state = torch.tensor([[1.0, 0.0]], dtype=torch.complex128) + wf = backend.run(pyqtorch_circ, state=initial_state) + assert torch.allclose(wf, state) + + +@pytest.mark.parametrize( + "gate, matrix", + [ + ( + X(0), + torch.tensor( + [[0.0 + 0.0j, 1.0 + 0.0j], [1.0 + 0.0j, 0.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + Y(0), + torch.tensor( + [[0.0 + 0.0j, 0.0 - 1.0j], [0.0 + 1.0j, 0.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + Z(0), + torch.tensor( + [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -1.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + T(0), + torch.tensor( + [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, np.exp((np.pi / 4.0) * 1j)]], + dtype=torch.complex128, + ), + ), + ( + S(0), + torch.tensor( + [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 0.0 + 1.0j]], dtype=torch.complex128 + ), + ), + ( + H(0), + torch.tensor( + 1.0 / np.sqrt(2) * np.array([[1.0, 1.0], [1.0, -1.0]]), dtype=torch.complex128 + ), + ), + ], +) +def test_run_with_nonparametric_single_qubit_gates_and_random_initial_state( + gate: PrimitiveBlock, matrix: torch.Tensor +) -> None: + circuit = QuantumCircuit(1, gate) + backend = Backend() + theta1 = random.uniform(0.0, 2.0 * np.pi) + complex1 = complex(np.cos(theta1), np.sin(theta1)) + theta2 = random.uniform(0.0, 2.0 * np.pi) + complex2 = complex(np.cos(theta2), np.sin(theta2)) + initial_state = torch.tensor([[complex1, complex2]], dtype=torch.complex128) + wf = backend.run(backend.circuit(circuit), state=initial_state) + expected_state = torch.matmul(matrix, initial_state[0]) + assert torch.allclose(wf, expected_state) + + +@pytest.mark.parametrize( + "parametric_gate, state", + [ + ( + RX(0, 0.5), + torch.tensor( + [[0.9689124217106447 + 0.0j, 0.0 - 0.24740395925452294j]], dtype=torch.complex128 + ), + ), + ( + RY(0, 0.5), + torch.tensor( + [[0.9689124217106447 + 0.0j, 0.24740395925452294 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + RZ(0, 0.5), + torch.tensor( + [[0.9689124217106447 - 0.24740395925452294j, 0.0 + 0.0j]], 
dtype=torch.complex128 + ), + ), + ( + U(0, 0.25, 0.5, 0.75), + torch.tensor( + [[0.850300645292233 - 0.464521359638928j, 0.239712769302101 + 0.061208719054814j]], + dtype=torch.complex128, + ), + ), + ], +) +def test_run_with_parametric_single_qubit_gates( + parametric_gate: PrimitiveBlock, state: torch.Tensor +) -> None: + circuit = QuantumCircuit(1, parametric_gate) + backend = Backend() + pyqtorch_circ, _, embed, params = backend.convert(circuit) + wf = backend.run(pyqtorch_circ, embed(params, {})) + assert torch.allclose(wf, state) + + +def test_ugate_pure_pyqtorch() -> None: + import pyqtorch.modules as pyqtorch + + thetas = torch.rand(3) + state = pyqtorch.zero_state(n_qubits=1, dtype=torch.complex128) + pyqtorch_u = pyqtorch.U(qubits=[0], n_qubits=1) + Qadence_u = U(0, phi=thetas[0], theta=thetas[1], omega=thetas[2]) + circ = QuantumCircuit(1, Qadence_u) + backend = Backend() + convert = backend.convert(circ) + Qadence_state = backend.run(convert.circuit, convert.embedding_fn(convert.params, {})) + f_state = torch.reshape(pyqtorch_u(state, thetas), (1, 2)) + assert torch.allclose(f_state, Qadence_state) + + +theta = 0.5 +theta_half = theta / 2.0 + + +@pytest.mark.parametrize( + "parametric_gate, matrix", + [ + ( + RX(0, theta), + torch.tensor( + [ + [np.cos(theta_half), -np.sin(theta_half) * 1j], + [-np.sin(theta_half) * 1j, np.cos(theta_half)], + ], + dtype=torch.complex128, + ), + ), + ( + RY(0, theta), + torch.tensor( + [ + [np.cos(theta_half), -np.sin(theta_half)], + [np.sin(theta_half), np.cos(theta_half)], + ], + dtype=torch.complex128, + ), + ), + ( + RZ(0, 0.5), + torch.tensor( + [[np.exp(-1j * theta_half), 0.0], [0.0, np.exp(1j * theta_half)]], + dtype=torch.complex128, + ), + ), + ], +) +def test_run_with_parametric_single_qubit_gates_and_random_initial_state( + parametric_gate: PrimitiveBlock, matrix: torch.Tensor +) -> None: + circuit = QuantumCircuit(1, parametric_gate) + backend = Backend() + pyqtorch_circ, _, embed, params = 
backend.convert(circuit) + theta1 = random.uniform(0.0, 2.0 * np.pi) + complex1 = complex(np.cos(theta1), np.sin(theta1)) + theta2 = random.uniform(0.0, 2.0 * np.pi) + complex2 = complex(np.cos(theta2), np.sin(theta2)) + initial_state = torch.tensor([[complex1, complex2]], dtype=torch.complex128) + wf = backend.run(pyqtorch_circ, embed(params, {}), state=initial_state) + expected_state = torch.matmul(matrix, initial_state[0]) + assert torch.allclose(wf, expected_state) + + +@pytest.mark.parametrize( + "parametric_gate, state", + [ + ( + CRX(0, 1, 0.5), + torch.tensor( + [[0.0 + 0.0j, 0.0 + 0.0j, 0.0 - 0.24740395925452294j, 0.9689124217106447 + 0.0j]], + dtype=torch.complex128, + ), + ), + ( + CRY(0, 1, 0.5), + torch.tensor( + [[0.0 + 0.0j, 0.0 + 0.0j, -0.24740395925452294 + 0.0j, 0.9689124217106447 + 0.0j]], + dtype=torch.complex128, + ), + ), + ( + CRZ(0, 1, 0.5), + torch.tensor( + [[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.9689124217106447 + 0.24740395925452294j]], + dtype=torch.complex128, + ), + ), + ], +) +def test_run_with_parametric_two_qubit_gates( + parametric_gate: PrimitiveBlock, state: torch.Tensor +) -> None: + circuit = QuantumCircuit(2, parametric_gate) + backend = Backend() + pyqtorch_circ, _, embed, params = backend.convert(circuit) + # Initialising the state to |11> to produce non-trivial outputs. 
+ initial_state = torch.tensor( + [[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 1.0 + 0.0j]], dtype=torch.complex128 + ) + wf = backend.run(pyqtorch_circ, embed(params, {}), state=initial_state) + assert torch.allclose(wf, state) + + +@pytest.mark.parametrize( + "parametric_gate, matrix", + [ + ( + CRX(0, 1, theta), + torch.tensor( + [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, np.cos(theta_half), -np.sin(theta_half) * 1j], + [0.0, 0.0, -np.sin(theta_half) * 1j, np.cos(theta_half)], + ], + dtype=torch.complex128, + ), + ), + ( + CRY(0, 1, 0.5), + torch.tensor( + [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, np.cos(theta_half), -np.sin(theta_half)], + [0.0, 0.0, np.sin(theta_half), np.cos(theta_half)], + ], + dtype=torch.complex128, + ), + ), + ( + CRZ(0, 1, 0.5), + torch.tensor( + [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, np.exp(-1j * theta_half), 0.0], + [0.0, 0.0, 0.0, np.exp(1j * theta_half)], + ], + dtype=torch.complex128, + ), + ), + ], +) +def test_run_with_parametric_two_qubit_gates_and_random_state( + parametric_gate: PrimitiveBlock, matrix: torch.Tensor +) -> None: + circuit = QuantumCircuit(2, parametric_gate) + backend = Backend() + + pyqtorch_circ, _, embed, params = backend.convert(circuit) + + # Initialising random state to produce non-trivial outputs. 
+ random_coefs = np.array([random.uniform(0, 1) for _ in range(8)]) + random_coefs = random_coefs / np.sqrt(np.sum(np.square(random_coefs))) + initial_state = torch.tensor( + [ + [ + random_coefs[0] + random_coefs[1] * 1j, + random_coefs[2] + random_coefs[3] * 1j, + random_coefs[4] + random_coefs[5] * 1j, + random_coefs[6] + random_coefs[7] * 1j, + ], + ], + dtype=torch.complex128, + ) + wf = backend.run(pyqtorch_circ, embed(params, {}), state=initial_state) + expected_state = torch.matmul(matrix, initial_state[0]) + assert torch.allclose(wf, expected_state) + + +@pytest.mark.parametrize( + "gate, state", + [ + ( + H(0), + torch.tensor([[0.0]]), + ), + ( + X(0), + torch.tensor([[-1.0]]), + ), + ( + Y(0), + torch.tensor([[-1.0]]), + ), + ( + Z(0), + torch.tensor([[1.0]]), + ), + ], +) +def test_expectation_with_pauli_gates(gate: PrimitiveBlock, state: torch.Tensor) -> None: + circuit = QuantumCircuit(1, gate) + observable = Z(0) + backend = Backend() + pyqtorch_circ, pyqtorch_obs, embed, params = backend.convert(circuit, observable) + expectation_value = backend.expectation(pyqtorch_circ, pyqtorch_obs, embed(params, {})) + assert expectation_value == state + + +@pytest.mark.parametrize( + "gate, matrix", + [ + ( + H(0), + torch.tensor( + 1.0 / np.sqrt(2) * np.array([[1.0, 1.0], [1.0, -1.0]]), dtype=torch.complex128 + ), + ), + ( + X(0), + torch.tensor( + [[0.0 + 0.0j, 1.0 + 0.0j], [1.0 + 0.0j, 0.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + Y(0), + torch.tensor( + [[0.0 + 0.0j, 0.0 - 1.0j], [0.0 + 1.0j, 0.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ( + Z(0), + torch.tensor( + [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -1.0 + 0.0j]], dtype=torch.complex128 + ), + ), + ], +) +def test_expectation_with_pauli_gates_and_random_state( + gate: PrimitiveBlock, matrix: torch.Tensor +) -> None: + circuit = QuantumCircuit(1, gate) + observable = Z(0) + backend = Backend() + pyqtorch_circ, pyqtorch_obs, embed, params = backend.convert(circuit, observable) + + theta1 = 
random.uniform(0.0, 2.0 * np.pi) + complex1 = complex(np.cos(theta1), np.sin(theta1)) + theta2 = random.uniform(0.0, 2.0 * np.pi) + complex2 = complex(np.cos(theta2), np.sin(theta2)) + initial_state = torch.tensor([[complex1, complex2]], dtype=torch.complex128) + expectation_value = backend.expectation( + pyqtorch_circ, pyqtorch_obs, embed(params, {}), state=initial_state + ) + Z_matrix = torch.tensor( + [[1.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -1.0 + 0.0j]], dtype=torch.complex128 + ) + final_state = torch.matmul(Z_matrix, torch.matmul(matrix, initial_state[0])) + probas = torch.square(torch.abs(final_state)) + expected_value = probas[0] - probas[1] + assert torch.allclose(expectation_value, expected_value) + + +@pytest.mark.flaky(max_runs=5) +def test_sample_with_hadamard_gate() -> None: + gate = H(0) + circuit = QuantumCircuit(1, gate) + backend = Backend() + conv = backend.convert(circuit) + samples = backend.sample(conv.circuit, n_shots=100) + assert len(samples) == 1 + sample = samples[0] + assert 40 <= sample["0"] <= 60 + assert 40 <= sample["1"] <= 60 + + +@pytest.mark.parametrize( + "gate, counter", + [ + (X(0), [Counter({"1": 10})]), + (Y(0), [Counter({"1": 10})]), + (Z(0), [Counter({"0": 10})]), + ], +) +def test_sample_with_pauli_gates(gate: PrimitiveBlock, counter: Counter) -> None: + circuit = QuantumCircuit(1, gate) + backend = Backend() + sample = backend.sample(backend.circuit(circuit), n_shots=10) + assert sample == counter + + +def test_controlled_rotation_gates_with_heterogeneous_parameters() -> None: + from qadence.parameters import FeatureParameter + + block = CRX(0, 1, 0.5) * CRY(1, 2, FeatureParameter("x")) * CRZ(2, 3, "y") + circ = QuantumCircuit(4, block) + backend = Backend() + conv = backend.convert(circ) + + values = {"x": torch.rand(2)} + wf = backend.run(conv.circuit, conv.embedding_fn(conv.params, values)) + assert wf.size() == (2, 2**4) + + +@pytest.mark.parametrize( + "block", + [ + X(0), + RZ(1, 0.5), + # CRY(0,1,0.2) write proper 
test for this + ], +) +def test_scaled_operation(block: AbstractBlock) -> None: + backend = Backend() + state = torch.rand(1, 4, dtype=torch.cdouble) + + circ = QuantumCircuit(2, block) + pyqtorch_circ, _, embed, params = backend.convert(circ) + wf = backend.run(pyqtorch_circ, embed(params, {}), state=state) + + circ = QuantumCircuit(2, block * 2) + pyqtorch_circ, _, embed, params = backend.convert(circ) + wf2 = backend.run(pyqtorch_circ, embed(params, {}), state=state) + + assert torch.allclose(wf * 2, wf2) + + +@pytest.mark.parametrize("batch_size", [i for i in range(10)]) +def test_scaled_featureparam_batching(batch_size: int) -> None: + backend = Backend() + block = FeatureParameter("x") * X(0) + circ = QuantumCircuit(1, block) + pyqtorch_circ, _, embed, params = backend.convert(circ) + rand_vals = torch.rand(batch_size) + param_values = embed(params, {"x": rand_vals}) + wf = backend.run(pyqtorch_circ, param_values) + wf2 = backend.run(pyqtorch_circ, embed(params, {"x": torch.ones(batch_size)})) + assert torch.allclose(wf, wf2 * rand_vals.unsqueeze(1)) + + +@pytest.mark.parametrize( + "block", + [ + X(0), + Y(0), + Z(0), + # S(0), # TODO implement SDagger in PyQ + # T(0), # TODO implement TDagger in PyQ + CNOT(0, 1), + # CZ(0, 1), # TODO implement CZ in PyQ? + SWAP(0, 1), + H(0), + I(0), + # Zero(), # TODO what to test here? 
+ chain(X(0), Y(0), Z(0), Y(0)), + kron(X(1), Y(3), Z(4), Y(2)), + chain(kron(X(0), Y(1)), kron(Z(3), H(1))), + chain(CNOT(0, 1), CNOT(1, 0)), + ], +) +def test_dagger_returning_fixed_gates(block: AbstractBlock) -> None: + nqubits = block.n_qubits + circ = QuantumCircuit(nqubits, block, block.dagger()) + backend = Backend() + conv = backend.convert(circ) + initial_state = torch.rand((1, 2**nqubits), dtype=torch.cdouble) + 1j * torch.rand( + (1, 2**nqubits), dtype=torch.cdouble + ) + initial_state = initial_state / torch.sqrt(sum(abs(initial_state) ** 2)) + wf = backend.run(conv.circuit, state=initial_state) + assert torch.allclose(wf, initial_state) + + +@pytest.mark.parametrize( + "block_class", + [ + RX, + RY, + RZ, + CRX, + CRY, + CRZ, + CPHASE, + AnalogSWAP, + ], +) +@pytest.mark.parametrize("p_type", [0.52, "x", Parameter("x"), acos(Parameter("x"))]) +@pytest.mark.parametrize("trainable", [True, False]) +def test_dagger_returning_parametric_gates( + block_class: AbstractBlock, p_type: float | str | Parameter, trainable: bool +) -> None: + n_qubits = 2 if block_class not in [RX, RY, RZ] else 1 + block = block_class(*tuple(range(n_qubits)), p_type) # type: ignore[operator] + set_trainable(block, trainable) + circ = QuantumCircuit(n_qubits, block, block.dagger()) + backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + (pyqtorch_circ, _, embed, params) = backend.convert(circ) + run_params = embed(params, {"x": torch.tensor([0.52])}) + initial_state = torch.rand((1, 2**n_qubits), dtype=torch.cdouble) + 1j * torch.rand( + (1, 2**n_qubits), dtype=torch.cdouble + ) + initial_state = initial_state / torch.sqrt(sum(abs(initial_state) ** 2)) + wf = backend.run(pyqtorch_circ, run_params, state=initial_state.clone()) + assert torch.allclose(wf, initial_state) + + +def test_dagger_returning_kernel() -> None: + generatorx = 3.1 * X(0) + 1.2 * Y(0) + 1.1 * Y(1) + 1.9 * X(1) + 2.4 * Z(0) * Z(1) + fmx = HamEvo(generatorx, 
parameter=acos(Parameter("x"))) + set_trainable(fmx, False) + fmy = HamEvo(generatorx, parameter=acos(Parameter("y"))) + set_trainable(fmy, False) + ansatz = hea(2, 2) + set_trainable(ansatz, True) + circ = QuantumCircuit(2, fmx, ansatz.dagger(), ansatz, fmy.dagger()) + backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + (pyqtorch_circ, _, embed, params) = backend.convert(circ) + + initial_state = torch.rand((1, 2**2), dtype=torch.cdouble) + 1j * torch.rand( + (1, 2**2), dtype=torch.cdouble + ) + initial_state = initial_state / torch.sqrt(4 * sum(abs(initial_state) ** 2)) + + run_params = embed(params, {"x": torch.tensor([0.52]), "y": torch.tensor(0.52)}) + wf = backend.run(pyqtorch_circ, run_params, state=initial_state.clone()) + assert wf_is_normalized(wf) + assert torch.allclose(wf, initial_state) + + run_params = embed(params, {"x": torch.tensor([0.38]), "y": torch.tensor(0.92)}) + wf = backend.run(pyqtorch_circ, run_params, state=initial_state.clone()) + assert not torch.allclose(wf, initial_state) + + +def test_scaled_blocks() -> None: + circuit = QuantumCircuit(1, 3.1 * (X(0) + X(0))) + model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + wf = model.run({}) + assert isinstance(wf, torch.Tensor) + + circuit = QuantumCircuit(2, 2 * (X(0) @ X(1))) + model = QuantumModel(circuit, diff_mode=DiffMode.AD) + wf = model.run({}) + assert isinstance(wf, torch.Tensor) + + +def test_kron_chain_add_circuit() -> None: + p0 = I(0) * 0.5 + Z(0) * 0.5 + p1 = I(0) * 0.5 + Z(0) * (-0.5) + cnot = kron(p0, I(1)) + kron(p1, X(1)) + + backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=None) + + circ = QuantumCircuit(2, chain(X(0), X(1), cnot)) + (circ_conv, _, embedding_fn, params) = backend.convert(circ) + res_constructed = backend.run(circ_conv, embedding_fn(params, {})) + + circ = QuantumCircuit(2, chain(X(0), X(1), CNOT(0, 1))) + (circ_conv, _, embedding_fn, params) = backend.convert(circ) + res_native 
= backend.run(circ_conv, embedding_fn(params, {})) + + assert torch.allclose(res_constructed, res_native) + + +def test_swap_equivalences() -> None: + block = AnalogSWAP(0, 1) + ref_block = SWAP(0, 1) + circ = QuantumCircuit(2, block, ref_block) + state_r = torch.rand(2**2, dtype=torch.cdouble) - 0.5 + state_i = torch.rand(2**2, dtype=torch.cdouble) - 0.5 + norm = torch.linalg.vector_norm(state_r + 1j * state_i) + wf_init = torch.Tensor(((state_r + 1j * state_i) / norm).unsqueeze(0)) + backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=None) + (pyqtorch_circ, _, embed, params) = backend.convert(circ) + run_params = embed(params, {}) + wf = backend.run(pyqtorch_circ, run_params, state=wf_init.clone()) + + # check equivalence up to rotation + angle = torch.angle(wf_init[0, 0]).detach() + wf_init = wf_init * torch.exp(-1j * angle) + angle = torch.angle(wf[0, 0]).detach() + wf = wf * torch.exp(-1j * angle) + assert torch.allclose(wf, wf_init) + + +@given(st.batched_digital_circuits()) +@settings(deadline=None) +def test_batched_circuits( + circuit_and_inputs: tuple[QuantumCircuit, dict[str, torch.Tensor]] +) -> None: + circuit, inputs = circuit_and_inputs + bknd_pyqtorch = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + (circ_pyqtorch, _, embed_pyqtorch, params_pyqtorch) = bknd_pyqtorch.convert(circuit) + wf_pyqtorch = bknd_pyqtorch.run(circ_pyqtorch, embed_pyqtorch(params_pyqtorch, inputs)) + assert not torch.any(torch.isnan(wf_pyqtorch)) + + +@pytest.mark.parametrize("diff_mode", [DiffMode.GPSR, DiffMode.AD]) +@pytest.mark.parametrize("obs", [total_magnetization, zz_hamiltonian]) +@given(st.batched_digital_circuits()) +@settings(deadline=None) +def test_sparse_obs_expectation_value( + diff_mode: DiffMode, + obs: Callable, + circuit_and_inputs: tuple[QuantumCircuit, dict[str, torch.Tensor]], +) -> None: + non_sparse_cfg = PyqConfig() + non_sparse_cfg.use_sparse_observable = False + sparse_cfg = PyqConfig() + 
sparse_cfg.use_sparse_observable = True + circuit, inputs = circuit_and_inputs + observable = obs(circuit.n_qubits) + qm_nonsparse = QuantumModel( + circuit=circuit, + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=diff_mode, + configuration=non_sparse_cfg, + ) + qm_sparse = QuantumModel( + circuit=circuit, + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=diff_mode, + configuration=sparse_cfg, + ) + expval = qm_nonsparse.expectation(inputs) + expval_s = qm_sparse.expectation(inputs) + + assert torch.allclose(expval, expval_s) + + +@pytest.mark.parametrize("diff_mode", DiffMode.list()) +def test_gradient_checkpointing(diff_mode: DiffMode) -> None: + n_qubits = 2 + qc = QuantumCircuit(n_qubits, chain(chebyshev_feature_map(n_qubits), hea(n_qubits, n_qubits))) + qm = QuantumModel( + qc, + total_magnetization(n_qubits), + backend=BackendName.PYQTORCH, + diff_mode=diff_mode, + configuration=PyqConfig(use_gradient_checkpointing=True), + ) + inputs = {"phi": torch.rand(2, requires_grad=True)} + + opt = torch.optim.Adam(qm.parameters()) + + criterion = torch.nn.MSELoss() + cnt = count() + + wf = qm.run(inputs) + samples = qm.sample(inputs) + + def loss_fn(model: QuantumModel, xs: Any = None) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation(inputs) + loss = criterion(out, torch.tensor([0.0])) + return loss, {} + + train_with_grad( + model=qm, dataloader=None, optimizer=opt, config=TrainConfig(max_iter=2), loss_fn=loss_fn + ) diff --git a/tests/backends/test_backends.py b/tests/backends/test_backends.py new file mode 100644 index 000000000..3b038340a --- /dev/null +++ b/tests/backends/test_backends.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +from typing import Counter + +import numpy as np +import pytest +import strategies as st # type: ignore +import sympy +import torch +from hypothesis import given, settings +from metrics import ATOL_DICT, JS_ACCEPTANCE # type: ignore +from torch import Tensor + 
+from qadence import BackendName, DiffMode +from qadence.backend import BackendConfiguration +from qadence.backends.api import backend_factory +from qadence.blocks import AbstractBlock, chain, kron +from qadence.circuit import QuantumCircuit +from qadence.constructors import total_magnetization +from qadence.divergences import js_divergence +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models import QuantumModel +from qadence.operations import CPHASE, RX, RY, H, I, X +from qadence.parameters import FeatureParameter +from qadence.states import ( + equivalent_state, + product_state, + rand_product_state, + random_state, + zero_state, +) +from qadence.utils import nqubits_to_basis + +BACKENDS = BackendName.list() +BACKENDS.remove("pulser") + + +def flatten_counter(c: Counter | list[Counter]) -> Counter: + if isinstance(c, Counter): + sorted_counter = Counter(dict(sorted(c.items()))) + return sorted_counter + + elif isinstance(c, list): + flattened_counter: Counter = Counter() + for counter in c: + flattened_counter += counter + sorted_counter = Counter(dict(sorted(flattened_counter.items()))) + return sorted_counter + + else: + raise TypeError("Input must be either a Counter object or a list of Counter objects.") + + +@pytest.mark.parametrize("backend", BACKENDS) +@pytest.mark.parametrize( + "circuit", + [ + QuantumCircuit(1), + QuantumCircuit(2, chain(X(0), X(1))), + QuantumCircuit(3, chain(I(0), X(1), I(2))), + QuantumCircuit(3, chain(X(1))), + ], +) +def test_simple_circuits(backend: str, circuit: QuantumCircuit) -> None: + bknd = backend_factory(backend=backend) + wf = bknd.run(bknd.circuit(circuit)) + assert isinstance(wf, Tensor) + + +def test_expectation_value(parametric_circuit: QuantumCircuit) -> None: + observable = total_magnetization(parametric_circuit.n_qubits) + + values = {"x": torch.rand(1)} + wfs = [] + for b in BACKENDS: + bkd = backend_factory(backend=b, diff_mode=None) + conv = bkd.convert(parametric_circuit, observable) 
+ expval = bkd.expectation( + conv.circuit, conv.observable, conv.embedding_fn(conv.params, values) # type: ignore + ) + wf = bkd.run(conv.circuit, conv.embedding_fn(conv.params, values)) + wfs.append(wf.flatten().sum()) + + # normalize the type of the wavefunction + wfs_np = [] + for wf in wfs: + wfs_np.append(complex(wf)) + wfs_np = np.array(wfs_np) # type: ignore [assignment] + assert np.all(np.isclose(wfs_np, wfs_np[0])) + + +@pytest.mark.parametrize("backend", BACKENDS) +def test_qcl_loss(backend: str) -> None: + np.random.seed(42) + torch.manual_seed(42) + + def get_training_data(domain: tuple = (-0.99, 0.99), n_teacher: int = 30) -> tuple: + start, end = domain + x_rand_np = np.sort(np.random.uniform(low=start, high=end, size=n_teacher)) + y_rand_np = x_rand_np * x_rand_np + x_rand = torch.tensor(x_rand_np, requires_grad=True) + y_rand = torch.tensor(y_rand_np, requires_grad=True) + return x_rand, y_rand + + n_qubits = 2 # number of qubits in the circuit + + param = FeatureParameter("phi") + featuremap = kron(RY(qubit, sympy.asin(param)) for qubit in range(n_qubits)) + circuit = QuantumCircuit(n_qubits, featuremap) + observable = kron(X(i) for i in range(n_qubits)) + + x_train, y_train = get_training_data(n_teacher=1) + input_values = {"phi": x_train} + + # test expectation == 0 + model = QuantumModel(circuit, backend=BackendName(backend), diff_mode=DiffMode.GPSR) + native_observable = model.observable(observable, n_qubits) + e = model.expectation(input_values, native_observable) + mse_loss = torch.nn.MSELoss() + loss = mse_loss(e, y_train) + assert torch.allclose(loss, torch.tensor(0.0)) + + # test derivative of expectation == 2x + d = torch.autograd.grad(e, x_train, torch.ones_like(e))[0] + assert torch.allclose(d, 2 * x_train) + + +@pytest.mark.parametrize( + "backend", + [ + BackendName.PYQTORCH, + pytest.param( + BackendName.BRAKET, + marks=pytest.mark.xfail(reason="state-vector initial state not implemented in Braket"), + ), + ], +) +def 
test_custom_initial_state(backend: str) -> None: + circ = QuantumCircuit(2, chain(X(0), X(1))) + bkd = backend_factory(backend) + conv = bkd.convert(circ) + + # test single sample batch + for input_state, target_state in zip(["01", "10", "11"], ["10", "01", "00"]): + wf = product_state(input_state) # type: ignore[arg-type] + # need to use pyqtorch to construct 00 state + target_wf = product_state(target_state) # type: ignore[arg-type] + assert equivalent_state(bkd.run(conv.circuit, state=wf), target_wf) + + # test batch + wf = product_state(["01", "10", "11"]) # type: ignore[arg-type] + assert equivalent_state( + bkd.run(conv.circuit, state=wf), product_state(["10", "01", "00"]) # type: ignore[arg-type] + ) + + +@pytest.mark.parametrize( + "circ", [QuantumCircuit(2, chain(X(0), X(1))), QuantumCircuit(2, chain(H(0), H(1)))] +) +@pytest.mark.flaky(max_runs=5) +def test_backend_sampling(circ: QuantumCircuit) -> None: + bknd_pyqtorch = backend_factory(BackendName.PYQTORCH) + bknd_braket = backend_factory(BackendName.BRAKET) + + (circ_pyqtorch, _, _, _) = bknd_pyqtorch.convert(circ) + (circ_braket, _, embed, params) = bknd_braket.convert(circ) + + # braket doesnt support custom initial states so we use state=None for the zero state + pyqtorch_samples = bknd_pyqtorch.sample( + circ_pyqtorch, embed(params, {}), state=None, n_shots=100 + ) + braket_samples = bknd_braket.sample(circ_braket, embed(params, {}), state=None, n_shots=100) + + for pyqtorch_sample, braket_sample in zip(pyqtorch_samples, braket_samples): + assert js_divergence(pyqtorch_sample, braket_sample) < JS_ACCEPTANCE + + wf_braket = bknd_braket.run(circ_braket) + wf_pyqtorch = bknd_pyqtorch.run(circ_pyqtorch) + assert equivalent_state(wf_braket, wf_pyqtorch, atol=ATOL_DICT[BackendName.BRAKET]) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +def test_run_for_random_circuit(backend: BackendName, circuit: QuantumCircuit) -> None: + cfg = 
{"_use_gate_params": True} + bknd_pyqtorch = backend_factory(backend=BackendName.PYQTORCH, configuration=cfg) + bknd = backend_factory(backend=backend, configuration=cfg) + (circ_pyqtorch, _, embed_pyqtorch, params_pyqtorch) = bknd_pyqtorch.convert(circuit) + (circ, _, embed, params) = bknd.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + wf_pyqtorch = bknd_pyqtorch.run(circ_pyqtorch, embed_pyqtorch(params_pyqtorch, inputs)) + wf = bknd.run(circ, embed(params, inputs)) + assert equivalent_state(wf_pyqtorch, wf, atol=ATOL_DICT[backend]) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +@pytest.mark.flaky(max_runs=5) +def test_sample_for_random_circuit(backend: BackendName, circuit: QuantumCircuit) -> None: + cfg = {"_use_gate_params": True} + bknd_pyqtorch = backend_factory(backend=BackendName.PYQTORCH, configuration=cfg) + bknd = backend_factory(backend=backend, configuration=cfg) + (circ_pyqtorch, _, embed_pyqtorch, params_pyqtorch) = bknd_pyqtorch.convert(circuit) + (circ, _, embed, params) = bknd.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + pyqtorch_samples = bknd_pyqtorch.sample( + circ_pyqtorch, embed_pyqtorch(params_pyqtorch, inputs), n_shots=100 + ) + samples = bknd.sample(circ, embed(params, inputs), n_shots=100) + + for pyqtorch_sample, sample in zip(pyqtorch_samples, samples): + assert js_divergence(pyqtorch_sample, sample) < JS_ACCEPTANCE + ATOL_DICT[backend] + + +# TODO: include many observables +@given(st.restricted_circuits(), st.observables()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +def test_expectation_for_random_circuit( + backend: BackendName, circuit: QuantumCircuit, observable: AbstractBlock +) -> None: + if observable.n_qubits > circuit.n_qubits: + circuit = QuantumCircuit(observable.n_qubits, circuit.block) + cfg = {"_use_gate_params": True} + bknd_pyqtorch = backend_factory(backend=BackendName.PYQTORCH, 
configuration=cfg) + bknd = backend_factory(backend=backend, configuration=cfg) + (circ_pyqtorch, obs_pyqtorch, embed_pyqtorch, params_pyqtorch) = bknd_pyqtorch.convert( + circuit, observable + ) + (circ, obs, embed, params) = bknd.convert(circuit, observable) + inputs = rand_featureparameters(circuit, 1) + pyqtorch_expectation = bknd_pyqtorch.expectation( + circ_pyqtorch, obs_pyqtorch, embed_pyqtorch(params_pyqtorch, inputs) + )[0] + expectation = bknd.expectation(circ, obs, embed(params, inputs))[0] + assert torch.allclose(pyqtorch_expectation, expectation, atol=ATOL_DICT[backend]) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +@pytest.mark.flaky(max_runs=5) +def test_compare_run_to_sample(backend: BackendName, circuit: QuantumCircuit) -> None: + bknd = backend_factory(backend) + (conv_circ, _, embed, params) = bknd.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + samples = bknd.sample(conv_circ, embed(params, inputs), n_shots=1000) + wf = bknd.run(conv_circ, embed(params, inputs)) + probs = list(torch.abs(torch.pow(wf, 2)).flatten().detach().numpy()) + bitstrngs = nqubits_to_basis(circuit.n_qubits) + wf_counter = Counter( + {bitstring: prob for (bitstring, prob) in zip(bitstrngs, probs) if prob > 0.0} + ) + assert js_divergence(samples[0], wf_counter) < JS_ACCEPTANCE + ATOL_DICT[backend] + + +def test_default_configuration() -> None: + for b in BACKENDS + [BackendName.PULSER]: + bknd = backend_factory(b, diff_mode=None) + conf = bknd.default_configuration() + assert isinstance(conf, BackendConfiguration) + opts = conf.available_options() + assert isinstance(opts, str) + + +@given(st.digital_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize( + "backend", + [ + BackendName.PYQTORCH, + pytest.param( + BackendName.BRAKET, + marks=pytest.mark.xfail(reason="Braket doesnt support passing custom states"), + ), + ], +) +def test_run_for_random_state(backend: str, circuit: 
QuantumCircuit) -> None: + bknd_pyqtorch = backend_factory(backend) + pyqtorch_state = random_state(circuit.n_qubits, backend=backend) + rand_bit_state = rand_product_state(circuit.n_qubits) + (circ_pyqtorch, _, embed, params) = bknd_pyqtorch.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + embedded_params = embed(params, inputs) + wf_pyqtorch = bknd_pyqtorch.run(circ_pyqtorch, embedded_params, pyqtorch_state) + wf_randbit = bknd_pyqtorch.run(circ_pyqtorch, embedded_params, rand_bit_state) + assert not torch.any(torch.isnan(wf_pyqtorch)) + assert not torch.any(torch.isnan(wf_randbit)) + + +@pytest.mark.parametrize("bsize", [i for i in range(1, 10, 2)]) +def test_output_cphase_batching(bsize: int) -> None: + backend_list = [BackendName.BRAKET, BackendName.PYQTORCH] + + n_qubits = 4 + w = FeatureParameter("w") + + # Circuit is created here. + circuit = QuantumCircuit(n_qubits, chain(X(0), CPHASE(1, 0, w), CPHASE(2, 1, w), RX(1, "x"))) + values = {"w": torch.rand(bsize)} + exp_list = [] + wf_list = [] + for backend_name in backend_list: + backend = backend_factory(backend_name) + observable = [total_magnetization(n_qubits=circuit.n_qubits)] * 1 + (circ, obs, embed, params) = backend.convert(circuit, observable) + + val = embed(params, values) + wf = backend.run(circ, val) + wf_list.append(wf) + + expected = zero_state(n_qubits=4, batch_size=10) + expected[0] = 1.0 + + exp_list.append(backend.expectation(circ, obs, val)) + + assert torch.allclose(exp_list[0], exp_list[1]) + assert equivalent_state(wf_list[0], wf_list[1]) diff --git a/tests/backends/test_endianness.py b/tests/backends/test_endianness.py new file mode 100644 index 000000000..b299430f6 --- /dev/null +++ b/tests/backends/test_endianness.py @@ -0,0 +1,268 @@ +from __future__ import annotations + +from typing import Counter + +import pytest +import strategies as st # type: ignore +import torch +from hypothesis import given, settings +from metrics import ATOL_DICT, JS_ACCEPTANCE # type: ignore 
+from torch import Tensor, allclose, pi, tensor + +from qadence import QuantumCircuit, block_to_tensor, run, sample +from qadence.backend import BackendName +from qadence.backends.api import backend_factory +from qadence.blocks import AbstractBlock, MatrixBlock, chain, kron +from qadence.divergences import js_divergence +from qadence.ml_tools.utils import rand_featureparameters +from qadence.operations import CNOT, RX, RY, H, HamEvo, I, X, Z +from qadence.states import equivalent_state, product_state +from qadence.transpile import invert_endianness +from qadence.types import Endianness, ResultType +from qadence.utils import ( + basis_to_int, + nqubits_to_basis, +) + +BACKENDS = BackendName.list() +BACKENDS.remove("pulser") +N_SHOTS = 1000 + + +@pytest.mark.parametrize("backend", BACKENDS) +@pytest.mark.parametrize( + "block,n_qubits,samples", + [ + (I(0) @ X(1) @ I(2) @ X(3), 4, [Counter({"0101": N_SHOTS})]), + ( + chain(chain(chain(H(0), X(1), CNOT(0, 1)), CNOT(0, 2)), chain(CNOT(1, 3), CNOT(1, 2))), + 4, + [Counter({"0111": N_SHOTS / 2, "1010": N_SHOTS / 2})], + ), + ], +) +def test_endianness_equal_sample( + block: AbstractBlock, n_qubits: int, samples: list[Counter[str]], backend: BackendName +) -> None: + for endianness in Endianness: + if endianness == Endianness.LITTLE: + samples = [invert_endianness(samples[0])] + circ = QuantumCircuit(n_qubits, block) + circ_samples = sample(circ, {}, backend=backend, n_shots=N_SHOTS, endianness=endianness) + for circ_sample, smple in zip(circ_samples, samples): + assert js_divergence(circ_sample, smple) < JS_ACCEPTANCE + ATOL_DICT[backend] + + +@pytest.mark.parametrize("backend", [BackendName.PYQTORCH]) +def test_endianness_hamevo(backend: BackendName) -> None: + n_qubits = 2 + gen = -0.5 * kron(I(0) - Z(0), I(1) - X(1)) + evo = HamEvo(gen, tensor([pi / 2])) + circ = QuantumCircuit(n_qubits, evo) + cnotgate = CNOT(0, 1) + qc_cnot = QuantumCircuit(2, cnotgate) + bkd = backend_factory(backend=backend) + conv_cnot = 
bkd.convert(qc_cnot) + state_10 = product_state("10") + conv = bkd.convert(circ) + wf_cnot = bkd.run( + conv_cnot.circuit, conv_cnot.embedding_fn(conv_cnot.params, {}), state=state_10 + ) + wf_hamevo = bkd.run(conv.circuit, conv.embedding_fn(conv.params, {}), state=state_10) + assert allclose(wf_cnot, wf_hamevo) + # The first qubit is 1 and we do CNOT(0,1), so we expect "11" + expected_samples = [Counter({"11": N_SHOTS})] + hamevo_samples = bkd.sample( + conv.circuit, conv.embedding_fn(conv.params, {}), n_shots=N_SHOTS, state=state_10 + ) + cnot_samples = bkd.sample( + conv_cnot.circuit, + conv_cnot.embedding_fn(conv_cnot.params, {}), + n_shots=N_SHOTS, + state=state_10, + ) + + for hamevo_sample, expected_sample in zip(hamevo_samples, expected_samples): + assert js_divergence(hamevo_sample, expected_sample) < JS_ACCEPTANCE + ATOL_DICT[backend] + + for cnot_sample, expected_sample in zip(cnot_samples, expected_samples): + assert js_divergence(cnot_sample, expected_sample) < JS_ACCEPTANCE + ATOL_DICT[backend] + + +def test_state_endianness() -> None: + big_endian = nqubits_to_basis(2, ResultType.STRING, Endianness.BIG) + assert big_endian[1] == "01" + + little_endian = nqubits_to_basis(2, ResultType.STRING, Endianness.LITTLE) + assert little_endian[1] == "10" + + state_01 = tensor([[0.0 + 0.0j, 1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]]) + assert allclose(product_state(big_endian[1]), state_01) + + assert allclose(run(I(0) @ I(1), state=state_01), state_01) + + assert basis_to_int("01", Endianness.BIG) == 1 + + +@pytest.mark.parametrize( + "circ, truth", + [ + (QuantumCircuit(3), torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.cdouble)), + (QuantumCircuit(3, X(0)), torch.tensor([[0, 0, 0, 0, 1, 0, 0, 0]], dtype=torch.cdouble)), + (QuantumCircuit(3, X(1)), torch.tensor([[0, 0, 1, 0, 0, 0, 0, 0]], dtype=torch.cdouble)), + ( + QuantumCircuit(3, chain(X(0), X(1))), + torch.tensor([[0, 0, 0, 0, 0, 0, 1, 0]], dtype=torch.cdouble), + ), + (QuantumCircuit(3, X(2)), 
torch.tensor([[0, 1, 0, 0, 0, 0, 0, 0]], dtype=torch.cdouble)), + ( + QuantumCircuit(3, chain(X(0), X(2))), + torch.tensor([[0, 0, 0, 0, 0, 1, 0, 0]], dtype=torch.cdouble), + ), + ( + QuantumCircuit(3, chain(X(1), X(2))), + torch.tensor([[0, 0, 0, 1, 0, 0, 0, 0]], dtype=torch.cdouble), + ), + ( + QuantumCircuit(3, chain(X(0), X(1), X(2))), + torch.tensor([[0, 0, 0, 0, 0, 0, 0, 1]], dtype=torch.cdouble), + ), + (QuantumCircuit(2, RY(0, torch.pi)), torch.tensor([[0, 0, 1, 0]], dtype=torch.cdouble)), + (QuantumCircuit(2, RY(1, torch.pi)), torch.tensor([[0, 1, 0, 0]], dtype=torch.cdouble)), + ], +) +@pytest.mark.parametrize("backend", BACKENDS) +def test_backend_wf_endianness(circ: QuantumCircuit, truth: Tensor, backend: BackendName) -> None: + for endianness in Endianness: + wf = run(circ, {}, backend=backend, endianness=endianness) + if endianness == Endianness.LITTLE: + truth = invert_endianness(truth) + assert equivalent_state(wf, truth, atol=ATOL_DICT[backend]) + + +@pytest.mark.parametrize( + "circ, truth", + [ + (QuantumCircuit(3, RX(0, torch.pi)), Counter({"100": 100})), + (QuantumCircuit(3, RX(1, torch.pi)), Counter({"010": 100})), + (QuantumCircuit(3, RX(2, torch.pi)), Counter({"001": 100})), + ], +) +@pytest.mark.parametrize("backend", BACKENDS + [BackendName.PULSER]) +def test_backend_sample_endianness( + circ: QuantumCircuit, truth: Counter, backend: BackendName +) -> None: + for endianness in Endianness: + smple = sample(circ, {}, backend=backend, n_shots=100, endianness=endianness)[0] + if endianness == Endianness.LITTLE: + truth = invert_endianness(truth) + assert smple == truth + + +@pytest.mark.parametrize( + "circ", + [ + QuantumCircuit(3, RX(0, torch.pi)), + QuantumCircuit(3, RX(1, torch.pi)), + QuantumCircuit(3, RX(2, torch.pi)), + ], +) +def test_pulser_run_endianness( + circ: QuantumCircuit, +) -> None: + for endianness in Endianness: + wf_pyq = run(circ, {}, backend="pyqtorch", endianness=endianness) + wf_pulser = run(circ, {}, backend="pulser", 
endianness=endianness) + assert equivalent_state(wf_pyq, wf_pulser, atol=ATOL_DICT[BackendName.PULSER]) + + +@pytest.mark.parametrize( + "block,n_qubits,expected_mat, expected_samples", + [ + ( + X(0), + 2, + torch.tensor([[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]), + [Counter({"10": N_SHOTS})], + ), + ( + X(1), + 2, + torch.tensor([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]), + [Counter({"01": N_SHOTS})], + ), + ], +) +def test_matrix_endianness( + block: X, n_qubits: int, expected_mat: Tensor, expected_samples: list[Counter] +) -> None: + mat = block_to_tensor(block, {}, block.qubit_support) + + matblock = MatrixBlock(mat, block.qubit_support) + samples = sample(n_qubits, matblock, n_shots=N_SHOTS) + + assert torch.allclose( + block_to_tensor(block, {}, tuple([i for i in range(n_qubits)])).squeeze(0).to(dtype=int), + expected_mat, + ) + for smple, expected_sample in zip(samples, expected_samples): + assert js_divergence(smple, expected_sample) < JS_ACCEPTANCE + + +@pytest.mark.parametrize( + "endianness,expected_mat", + [ + ( + Endianness.BIG, + torch.tensor([[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]), + ), + ( + Endianness.LITTLE, + torch.tensor([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]), + ), + ], +) +def test_block_to_tensor_endianness( + endianness: Endianness, + expected_mat: Tensor, +) -> None: + block = X(0) + n_qubits = 2 + assert torch.allclose( + block_to_tensor( + block=block, + values={}, + qubit_support=tuple([i for i in range(n_qubits)]), + endianness=endianness, + ) + .squeeze(0) + .to(dtype=int), + expected_mat, + ) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +def test_sample_inversion_for_random_circuit(backend: str, circuit: QuantumCircuit) -> None: + bknd = backend_factory(backend=backend) + (circ, _, embed, params) = bknd.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + for endianness in Endianness: + 
samples = bknd.sample(circ, embed(params, inputs), n_shots=100, endianness=endianness) + for _sample in samples: + double_inv_wf = invert_endianness(invert_endianness(_sample)) + assert js_divergence(double_inv_wf, _sample) < JS_ACCEPTANCE + + +@given(st.restricted_circuits()) +@settings(deadline=None) +@pytest.mark.parametrize("backend", BACKENDS) +def test_wf_inversion_for_random_circuit(backend: str, circuit: QuantumCircuit) -> None: + bknd = backend_factory(backend=backend) + (circ, _, embed, params) = bknd.convert(circuit) + inputs = rand_featureparameters(circuit, 1) + for endianness in Endianness: + wf = bknd.run(circ, embed(params, inputs), endianness=endianness) + double_inv_wf = invert_endianness(invert_endianness(wf)) + assert equivalent_state(double_inv_wf, wf) diff --git a/tests/backends/test_gpsr.py b/tests/backends/test_gpsr.py new file mode 100644 index 000000000..f48a59b90 --- /dev/null +++ b/tests/backends/test_gpsr.py @@ -0,0 +1,237 @@ +from __future__ import annotations + +from typing import Callable + +import numpy as np +import pytest +import sympy +import torch +from metrics import GPSR_ACCEPTANCE, PSR_ACCEPTANCE + +from qadence import DifferentiableBackend, DiffMode, Parameter, QuantumCircuit +from qadence.backends.pyqtorch import Backend as PyQBackend +from qadence.blocks import add, chain +from qadence.constructors import total_magnetization +from qadence.operations import CNOT, CRX, CRY, RX, RY, ConstantAnalogRotation, HamEvo, X, Y, Z +from qadence.parameters import ParamMap +from qadence.register import Register +from qadence.transpile import add_interaction + + +def circuit_psr(n_qubits: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + theta = Parameter("theta") + + fm = chain(RX(0, 3 * x), RY(1, sympy.exp(x)), RX(0, theta), RY(1, np.pi / 2)) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +def 
circuit_gpsr(n_qubits: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + theta = Parameter("theta") + + fm = chain( + CRX(0, 1, 3 * x), + X(1), + CRY(1, 2, sympy.exp(x)), + CRX(1, 2, theta), + X(0), + CRY(0, 1, np.pi / 2), + ) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +def circuit_hamevo_tensor_gpsr(n_qubits: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + theta = Parameter("theta") + + h = torch.rand(2**n_qubits, 2**n_qubits) + ham = h + torch.conj(torch.transpose(h, 0, 1)) + ham = ham[None, :, :] + + fm = chain( + CRX(0, 1, 3 * x), + X(1), + CRY(1, 2, sympy.exp(x)), + HamEvo(ham, x, qubit_support=tuple(range(n_qubits))), + CRX(1, 2, theta), + X(0), + CRY(0, 1, np.pi / 2), + ) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +def circuit_hamevo_block_gpsr(n_qubits: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + theta = Parameter("theta") + + dim = np.random.randint(1, n_qubits + 1) + ops = [X, Y, Z] * 2 + qubit_supports = np.random.choice(list(range(dim)), len(ops), replace=True) + generator = chain( + add(*[op(q) for op, q in zip(ops, qubit_supports)]), # type: ignore [abstract] + *[op(q) for op, q in zip(ops, qubit_supports)], # type: ignore [abstract] + ) + generator = generator + generator.dagger() # type: ignore [assignment] + + fm = chain( + CRX(0, 1, 3 * x), + X(1), + CRY(1, 2, sympy.exp(x)), + HamEvo(generator, x, qubit_support=tuple(range(n_qubits))), + CRX(1, 2, theta), + X(0), + CRY(0, 1, np.pi / 2), + ) + ansatz = CNOT(0, 1) + block = chain(fm, ansatz) + + circ = QuantumCircuit(n_qubits, block) + + return circ + + +def circuit_analog_rotation_gpsr(n_qubits: int) -> QuantumCircuit: + d = 10 + omega1 = 6 * np.pi + 
omega2 = 3 * np.pi + coords = [(x_coord, 0) for x_coord in np.linspace(0, (n_qubits - 1) * d, n_qubits)] + register = Register.from_coordinates(coords) # type: ignore[arg-type] + + # circuit with builting primitives + x = Parameter("x", trainable=False) + theta = Parameter("theta") + analog_block = chain( + ConstantAnalogRotation( + parameters=ParamMap(duration=1000 * x / omega1, omega=omega1, delta=0, phase=0) + ), + ConstantAnalogRotation( + parameters=ParamMap(duration=1000 * theta / omega2, omega=omega2, delta=0, phase=0) + ), + ) + + block = add_interaction(register, analog_block) # type: ignore [arg-type] + circ = QuantumCircuit(n_qubits, block) + + return circ + + +@pytest.mark.parametrize( + ["n_qubits", "batch_size", "n_obs", "circuit_fn"], + [ + (2, 1, 2, circuit_psr), + (5, 10, 1, circuit_psr), + (3, 1, 4, circuit_gpsr), + (5, 10, 1, circuit_gpsr), + (3, 1, 1, circuit_hamevo_tensor_gpsr), + (3, 1, 1, circuit_hamevo_block_gpsr), + (3, 1, 1, circuit_analog_rotation_gpsr), + ], +) +def test_expectation_psr(n_qubits: int, batch_size: int, n_obs: int, circuit_fn: Callable) -> None: + torch.manual_seed(42) + np.random.seed(42) + + # Making circuit with AD + circ = circuit_fn(n_qubits) + obs = total_magnetization(n_qubits) + quantum_backend = PyQBackend() + conv = quantum_backend.convert(circ, [obs for _ in range(n_obs)]) + pyq_circ, pyq_obs, embedding_fn, params = conv + diff_backend = DifferentiableBackend(quantum_backend, diff_mode=DiffMode.AD) + + # Running for some inputs + values = {"x": torch.rand(batch_size, requires_grad=True)} + expval = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) + dexpval_x = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), create_graph=True + )[0] + + dexpval_xx = torch.autograd.grad( + dexpval_x, values["x"], torch.ones_like(dexpval_x), create_graph=True + )[0] + if circuit_fn not in [ + circuit_hamevo_tensor_gpsr, + circuit_hamevo_block_gpsr, + circuit_analog_rotation_gpsr, + ]: + 
dexpval_xxtheta = torch.autograd.grad( + dexpval_xx, + list(params.values())[0], + torch.ones_like(dexpval_xx), + retain_graph=True, + )[0] + dexpval_theta = torch.autograd.grad(expval, list(params.values())[0], torch.ones_like(expval))[ + 0 + ] + + # Now running stuff for (G)PSR + quantum_backend.config._use_gate_params = True + conv = quantum_backend.convert(circ, [obs for _ in range(n_obs)]) + pyq_circ, pyq_obs, embedding_fn, params = conv + if circuit_fn == circuit_analog_rotation_gpsr: + diff_backend = DifferentiableBackend( + quantum_backend, diff_mode=DiffMode.GPSR, shift_prefac=0.2 + ) + else: + diff_backend = DifferentiableBackend( + quantum_backend, diff_mode=DiffMode.GPSR, shift_prefac=0.2 + ) + expval = diff_backend.expectation(pyq_circ, pyq_obs, embedding_fn(params, values)) + dexpval_psr_x = torch.autograd.grad( + expval, values["x"], torch.ones_like(expval), create_graph=True + )[0] + + dexpval_psr_xx = torch.autograd.grad( + dexpval_psr_x, values["x"], torch.ones_like(dexpval_psr_x), create_graph=True + )[0] + if circuit_fn not in [ + circuit_hamevo_tensor_gpsr, + circuit_hamevo_block_gpsr, + circuit_analog_rotation_gpsr, + ]: + dexpval_psr_xxtheta = torch.autograd.grad( + dexpval_psr_xx, + list(params.values())[0], + torch.ones_like(dexpval_psr_xx), + retain_graph=True, + )[0] + dexpval_psr_theta = torch.autograd.grad( + expval, list(params.values())[0], torch.ones_like(expval) + )[0] + + atol = PSR_ACCEPTANCE if circuit_fn == circuit_psr else GPSR_ACCEPTANCE + assert torch.allclose(dexpval_x, dexpval_psr_x, atol=atol), "df/dx not equal." + assert torch.allclose(dexpval_xx, dexpval_psr_xx, atol=atol), " d2f/dx2 not equal." + assert torch.allclose(dexpval_theta, dexpval_psr_theta, atol=atol), "df/dtheta not equal." + if circuit_fn not in [ + circuit_hamevo_tensor_gpsr, + circuit_hamevo_block_gpsr, + circuit_analog_rotation_gpsr, + ]: + assert torch.allclose( + dexpval_xxtheta, dexpval_psr_xxtheta, atol=atol + ), "d3f/dx2dtheta not equal." 
diff --git a/tests/backends/test_pulser_pyq_compat.py b/tests/backends/test_pulser_pyq_compat.py new file mode 100644 index 000000000..de332ad65 --- /dev/null +++ b/tests/backends/test_pulser_pyq_compat.py @@ -0,0 +1,186 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch +from metrics import ATOL_DICT, JS_ACCEPTANCE, LARGE_SPACING, SMALL_SPACING # type: ignore + +from qadence import BackendName, Register, add_interaction +from qadence.backends.pulser.devices import Device +from qadence.blocks import AbstractBlock, chain, kron +from qadence.circuit import QuantumCircuit +from qadence.constructors import ising_hamiltonian, total_magnetization +from qadence.divergences import js_divergence +from qadence.models import QuantumModel +from qadence.operations import CNOT, RX, RY, AnalogRX, AnalogRY, H, X, Z, entangle +from qadence.parameters import FeatureParameter +from qadence.types import DiffMode + + +# "Compare" Pulser and PyQ +# NOTE: Since they are use different concepts, here only equivalent +# circuits/pulses are used. 
+@pytest.mark.parametrize( + "pyqtorch_circuit,pulser_circuit", + [ + # Bell state generation + ( + QuantumCircuit(2, chain(H(0), CNOT(0, 1))), + QuantumCircuit(2, chain(entangle(1000, qubit_support=(0, 1)), RY(0, 3 * torch.pi / 2))), + ) + ], +) +@pytest.mark.flaky(max_runs=5) +def test_compatibility_pyqtorch_pulser_entanglement( + pyqtorch_circuit: QuantumCircuit, pulser_circuit: QuantumCircuit +) -> None: + model_pyqtorch = QuantumModel( + pyqtorch_circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + config = {"device_type": Device.REALISTIC} + model_pulser = QuantumModel( + pulser_circuit, backend=BackendName.PULSER, diff_mode=DiffMode.GPSR, configuration=config + ) + pyqtorch_samples = model_pyqtorch.sample({}, n_shots=500) + pulser_samples = model_pulser.sample({}, n_shots=500) + for pyqtorch_sample, pulser_sample in zip(pyqtorch_samples, pulser_samples): + assert js_divergence(pyqtorch_sample, pulser_sample) < JS_ACCEPTANCE + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("obs", [Z(0), total_magnetization(2), X(0), ising_hamiltonian(2)]) +def test_compatibility_pyqtorch_pulser_digital_rot(obs: AbstractBlock) -> None: + phi = FeatureParameter("phi") + psi = FeatureParameter("psi") + + n_qubits = 2 + + block = chain( + kron(RX(0, phi), RX(1, phi)), + kron(RY(0, psi), RY(1, psi)), + ) + pyqtorch_circuit = QuantumCircuit(n_qubits, block) + + register = Register.line(n_qubits) + pulser_circuit = QuantumCircuit(register, block) + + model_pyqtorch = QuantumModel( + pyqtorch_circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, observable=obs + ) + conf = {"spacing": LARGE_SPACING, "amplitude_local": 2 * np.pi} + model_pulser = QuantumModel( + pulser_circuit, + backend=BackendName.PULSER, + observable=obs, + diff_mode=DiffMode.GPSR, + configuration=conf, + ) + + batch_size = 5 + values = { + "phi": torch.rand(batch_size), + "psi": torch.rand(batch_size), + } + + pyqtorch_expval = model_pyqtorch.expectation(values=values) + 
pulser_expval = model_pulser.expectation(values=values) + + assert torch.allclose(pyqtorch_expval, pulser_expval, atol=ATOL_DICT[BackendName.PULSER]) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize( + "obs", + [ + Z(0), + total_magnetization(2), + X(0), + ising_hamiltonian(2), + ], +) +def test_compatibility_pyqtorch_pulser_analog_rot(obs: AbstractBlock) -> None: + phi = FeatureParameter("phi") + psi = FeatureParameter("psi") + + n_qubits = 2 + + b_digital = chain( + kron(RX(0, phi), RX(1, phi)), + kron(RY(0, psi), RY(1, psi)), + ) + + b_analog = chain(AnalogRX(phi), AnalogRY(psi)) + pyqtorch_circuit = QuantumCircuit(n_qubits, b_digital) + + register = Register.line(n_qubits) + pulser_circuit = QuantumCircuit(register, b_analog) + + model_pyqtorch = QuantumModel( + pyqtorch_circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, observable=obs + ) + conf = {"spacing": LARGE_SPACING} + model_pulser = QuantumModel( + pulser_circuit, + backend=BackendName.PULSER, + observable=obs, + diff_mode=DiffMode.GPSR, + configuration=conf, + ) + + batch_size = 5 + values = { + "phi": torch.rand(batch_size), + "psi": torch.rand(batch_size), + } + + pyqtorch_expval = model_pyqtorch.expectation(values=values) + pulser_expval = model_pulser.expectation(values=values) + + assert torch.allclose(pyqtorch_expval, pulser_expval, atol=ATOL_DICT[BackendName.PULSER]) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize( + "obs", + [ + Z(0), + total_magnetization(2), + X(0), + ising_hamiltonian(2), + ], +) +def test_compatibility_pyqtorch_pulser_analog_rot_int(obs: AbstractBlock) -> None: + phi = FeatureParameter("phi") + psi = FeatureParameter("psi") + + n_qubits = 2 + register = Register.line(n_qubits) + + b_analog = chain(AnalogRX(phi), AnalogRY(psi)) + pyqtorch_circuit = QuantumCircuit(register, b_analog) + pyqtorch_circuit = add_interaction(pyqtorch_circuit, spacing=SMALL_SPACING) + + pulser_circuit = QuantumCircuit(register, b_analog) + + model_pyqtorch = 
QuantumModel( + pyqtorch_circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, observable=obs + ) + conf = {"spacing": SMALL_SPACING} + model_pulser = QuantumModel( + pulser_circuit, + backend=BackendName.PULSER, + diff_mode=DiffMode.GPSR, + observable=obs, + configuration=conf, + ) + + batch_size = 5 + values = { + "phi": torch.rand(batch_size), + "psi": torch.rand(batch_size), + } + + pyqtorch_expval = model_pyqtorch.expectation(values=values) + pulser_expval = model_pulser.expectation(values=values) + + assert torch.allclose(pyqtorch_expval, pulser_expval, atol=ATOL_DICT[BackendName.PULSER]) diff --git a/tests/backends/test_pytorch_wrapper.py b/tests/backends/test_pytorch_wrapper.py new file mode 100644 index 000000000..417808ca9 --- /dev/null +++ b/tests/backends/test_pytorch_wrapper.py @@ -0,0 +1,172 @@ +from __future__ import annotations + +from typing import Callable + +import numpy as np +import pytest +import sympy +import torch + +from qadence.backends.api import backend_factory +from qadence.blocks import AbstractBlock, add, chain, kron +from qadence.circuit import QuantumCircuit +from qadence.operations import CNOT, RX, RZ, Z +from qadence.parameters import Parameter, VariationalParameter + +torch.manual_seed(42) + +expected_pi = torch.tensor([[0.0, 0.0], [1.0, 0.0]]) +expected_pi2 = torch.tensor([[0.5, 0.0], [0.5, 0.0]]) + + +def parametric_circuit(n_qubits: int) -> QuantumCircuit: + """Helper function to make an example circuit""" + + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=False) + + fm = kron(RX(0, 3 * x), RZ(1, sympy.exp(y)), RX(2, 0.5), RZ(3, x)) + ansatz = kron(CNOT(0, 1), CNOT(2, 3)) + rotlayer1 = kron(RX(i, f"w_{i}") for i in range(n_qubits)) + + theta = VariationalParameter("theta") + rotlayer2 = kron(RX(i, 3.0 * theta) for i in range(n_qubits)) + + block = chain(fm, rotlayer1, ansatz, rotlayer2) + + return QuantumCircuit(n_qubits, block) + + +@pytest.mark.parametrize("diff_mode", ["ad", "gpsr"]) +def 
test_parametrized_rotation(diff_mode: str) -> None: + param = Parameter("theta", trainable=False) + nqubits = 2 + block1 = RX(0, param) + block2 = Z(1) + comp_block = chain(block1, block2) + circ = QuantumCircuit(nqubits, comp_block) + + backend = backend_factory("pyqtorch", diff_mode=diff_mode) + (pyqtorch_circ, _, embed, params) = backend.convert(circ) + + values = {param.name: torch.tensor([np.pi])} + wf = backend.run(pyqtorch_circ, embed(params, values))[0] + + wf_prob = torch.abs(torch.pow(wf, 2)) # type: ignore [arg-type] + assert torch.allclose(wf_prob.reshape(nqubits, nqubits), expected_pi) + + values = {param.name: torch.tensor([np.pi / 2])} + wf = backend.run(pyqtorch_circ, embed(params, values))[0] + wf_prob = torch.abs(torch.pow(wf, 2)) + assert torch.allclose(wf_prob.reshape(nqubits, nqubits), expected_pi2) + + +@pytest.mark.parametrize("diff_mode", ["ad", "gpsr"]) +def test_parametrized_rotation_with_expr(diff_mode: str) -> None: + param = Parameter("theta", trainable=False) + nqubits = 2 + block1 = RX(0, sympy.exp(5 * param)) + block2 = Z(1) + comp_block = chain(block1, block2) + circ = QuantumCircuit(nqubits, comp_block) + + backend = backend_factory("pyqtorch", diff_mode=diff_mode) + (pyqtorch_circ, _, embed, params) = backend.convert(circ) + + angle = np.log(np.pi) / 5 + values = {param.name: torch.tensor([angle])} + wf = backend.run(pyqtorch_circ, embed(params, values))[0] + wf_prob = torch.abs(torch.pow(wf, 2)) # type: ignore [arg-type] + assert torch.allclose(wf_prob.reshape(nqubits, nqubits), expected_pi) + + angle = np.log(np.pi / 2) / 5 + values = {param.name: torch.tensor([angle])} + wf = backend.run(pyqtorch_circ, embed(params, values))[0] + wf_prob = torch.abs(torch.pow(wf, 2)) # type: ignore [arg-type] + assert torch.allclose(wf_prob.reshape(nqubits, nqubits), expected_pi2) + + +def test_embeddings() -> None: + n_qubits = 4 + circ = parametric_circuit(n_qubits) + backend = backend_factory("pyqtorch", diff_mode="ad") + (_, _, embed, 
params) = backend.convert(circ) + + batch_size = 5 + + inputs = {"x": torch.ones(batch_size), "y": torch.rand(batch_size)} + low_level_params = embed(params, inputs) + + assert len(list(low_level_params.keys())) == 9 + + assert [v for k, v in low_level_params.items() if k.startswith("fix_")][0] == 0.5 + assert torch.allclose(low_level_params["3*x"], 3 * inputs["x"]) + assert torch.allclose(low_level_params["x"], inputs["x"]) + assert torch.allclose(low_level_params["exp(y)"], torch.exp(inputs["y"])) + + with pytest.raises(KeyError): + embed(params, {"x": torch.ones(batch_size)}) + + +@pytest.mark.parametrize( + "batch_size", + [ + 1, + pytest.param( + "2", + marks=pytest.mark.xfail( + reason="Batch_size and n_obs > 1 should be made consistent." # FIXME + ), + ), + ], +) +@pytest.mark.parametrize( + "diff_mode", + [ + "ad", + pytest.param( + "gpsr", + marks=pytest.mark.xfail(reason="PSR cannot be applied to parametric observable."), + ), + ], +) +def test_expval_differentiation(batch_size: int, diff_mode: str) -> None: + torch.manual_seed(42) + n_qubits = 4 + observable: list[AbstractBlock] = [add(Z(i) * Parameter(f"o_{i}") for i in range(n_qubits))] + n_obs = len(observable) + circ = parametric_circuit(n_qubits) + + ad_backend = backend_factory(backend="pyqtorch", diff_mode=diff_mode) + pyqtorch_circ, pyqtorch_obs, embeddings_fn, params = ad_backend.convert(circ, observable) + + inputs_x = torch.rand(batch_size, requires_grad=True) + inputs_y = torch.rand(batch_size, requires_grad=True) + param_w = torch.rand(1, requires_grad=True) + + def func(x: torch.Tensor, y: torch.Tensor, w: torch.Tensor) -> torch.Tensor: + # FIXME: add a parameter from a parametric observable + inputs = {"x": x, "y": y} + params["o_1"] = w + all_params = embeddings_fn(params, inputs) + return ad_backend.expectation(pyqtorch_circ, pyqtorch_obs, all_params) + + expval = func(inputs_x, inputs_y, param_w) + # if expval.numel() > 1: + # assert expval.shape == (batch_size, n_obs) + + # FIXME: 
higher order + torch.autograd.gradcheck(func, (inputs_x, inputs_y, param_w)) + + def finitediff(f: Callable, x: torch.Tensor, eps: float = 1e-4) -> torch.Tensor: + return (f(x + eps) - f(x - eps)) / (2 * eps) # type: ignore + + assert torch.allclose( + finitediff(lambda x: func(x, inputs_y, param_w), inputs_x), + torch.autograd.grad(expval, inputs_x, torch.ones_like(expval), create_graph=True)[0], + ) + + assert torch.allclose( + finitediff(lambda w: func(inputs_x, inputs_y, w), param_w), + torch.autograd.grad(expval, param_w, torch.ones_like(expval), create_graph=True)[0], + ) diff --git a/tests/backends/test_utils.py b/tests/backends/test_utils.py new file mode 100644 index 000000000..db966c0b0 --- /dev/null +++ b/tests/backends/test_utils.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from collections import Counter + +import pytest +import torch +from torch import Tensor + +from qadence.backends.utils import count_bitstrings + + +@pytest.mark.parametrize( + "sample, counter", + [ + ( + torch.tensor( + [[1, 1], [0, 0], [1, 1], [1, 0], [1, 1], [0, 1], [1, 1], [1, 0], [1, 0], [0, 1]] + ), + Counter({"11": 4, "01": 2, "10": 3, "00": 1}), + ) + ], +) +def test_count_bitstring(sample: Tensor, counter: Counter) -> None: + assert count_bitstrings(sample) == counter diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 000000000..6844ec835 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,210 @@ +from __future__ import annotations + +import networkx as nx +import numpy as np +import torch +import torch.nn as nn +from openfermion import QubitOperator +from pytest import fixture # type: ignore +from sympy import Expr + +from qadence import BackendName, DiffMode +from qadence.blocks import AbstractBlock, chain, kron +from qadence.blocks.utils import unroll_block_with_scaling +from qadence.circuit import QuantumCircuit +from qadence.constructors import feature_map, hea, total_magnetization +from qadence.ml_tools.models import 
TransformedModule +from qadence.models import QNN, QuantumModel +from qadence.operations import CNOT, RX, RY, X, Y, Z +from qadence.parameters import Parameter +from qadence.register import Register + +BASIC_NQUBITS = 4 +FM_NQUBITS = 2 + + +@fixture +def BasicFeatureMap() -> AbstractBlock: + return feature_map(BASIC_NQUBITS) + + +@fixture +def BasicAnsatz() -> AbstractBlock: + return hea(BASIC_NQUBITS, BASIC_NQUBITS) + + +@fixture +def BasicQuantumCircuit(BasicAnsatz: AbstractBlock) -> QuantumCircuit: + return QuantumCircuit(BASIC_NQUBITS, BasicAnsatz) + + +@fixture +def BasicFMQuantumCircuit() -> QuantumCircuit: + return QuantumCircuit(FM_NQUBITS, feature_map(FM_NQUBITS), hea(FM_NQUBITS, FM_NQUBITS * 4)) + + +@fixture +def BasicObservable() -> AbstractBlock: + return total_magnetization(BASIC_NQUBITS) + + +@fixture +def BasicRegister() -> Register: + n_qubits = 4 + graph = nx.Graph() + graph.add_nodes_from({i: (i, 0) for i in range(n_qubits)}) + graph.add_edge(0, 1) + return Register(graph) + + +@fixture +def BasicExpression() -> Expr: + return Parameter("x") + Parameter("y", trainable=False) * 2.0212 + + +class BasicNetwork(nn.Module): + def __init__(self, n_neurons: int = 5) -> None: + super().__init__() + network = [ + nn.Linear(1, n_neurons), + nn.ReLU(), + nn.Linear(n_neurons, n_neurons), + nn.ReLU(), + nn.Linear(n_neurons, 1), + ] + self.network = nn.Sequential(*network) + self.n_neurons = n_neurons + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.network(x) + + +class BasicNetworkNoInput(nn.Module): + def __init__(self) -> None: + super().__init__() + self.x = nn.Parameter(torch.tensor([1.0])) + self.scale = nn.Parameter(torch.tensor([1.0])) + + def forward(self) -> torch.Tensor: + res = self.scale * (self.x - 2.0) ** 2 + return res + + +@fixture +def parametric_circuit() -> QuantumCircuit: + nqubits = 4 + x = Parameter("x", trainable=False) + + block1 = RY(0, 3 * x) + block2 = RX(1, "theta1") + block3 = RX(2, "theta2") + block4 = RX(3, 
"theta3") + block5 = RY(0, np.pi) + block6 = RX(1, np.pi) + block7 = CNOT(2, 3) + + comp_block = chain( + *[ + kron(*[X(0), X(1), Z(2), Z(3)]), + kron(*[block1, block2, block3, block4]), + kron(*[block5, block6, block7]), + ] + ) + + return QuantumCircuit(nqubits, comp_block) + + +@fixture +def duplicate_expression_circuit() -> QuantumCircuit: + nqubits = BASIC_NQUBITS + x = Parameter("x", trainable=False) + + fm = chain(RY(i, 3 * x) for i in range(nqubits)) + expr = Parameter("theta_0") * Parameter("theta_1") + Parameter("theta_2") + rotblock = chain(RX(i, expr) for i in range(nqubits)) + + comp_block = chain( + *[ + chain(*[X(0), X(1), Z(2), Z(3)]), + chain(*[fm, rotblock]), + ] + ) + + return QuantumCircuit(nqubits, comp_block) + + +@fixture +def cost_operator() -> QubitOperator: + nqubits = BASIC_NQUBITS + operator = QubitOperator() + + for qubit in range(nqubits): + operator += QubitOperator(f"Z{qubit}", coefficient=1.0) + + return operator + + +@fixture +def Basic() -> nn.Module: + return BasicNetwork() + + +@fixture +def BasicNoInput() -> nn.Module: + return BasicNetworkNoInput() + + +@fixture +def simple_circuit() -> QuantumCircuit: + kron_block = kron(X(0), X(1)) + return QuantumCircuit(BASIC_NQUBITS, kron_block) + + +@fixture +def observable() -> AbstractBlock: + return kron(X(0), Z(2)) + 1.5 * kron(Y(1), Z(2)) + + +@fixture +def pauli_decomposition(observable: AbstractBlock) -> list: + return list(unroll_block_with_scaling(observable)) + + +@fixture +def expected_rotated_circuit() -> list[QuantumCircuit]: + layer = X(0) ^ X(1) + final_layer1 = chain(layer, RY(0, -np.pi / 2.0)) + final_layer2 = chain(layer, RX(1, np.pi / 2.0)) + return [QuantumCircuit(2, final_layer1), QuantumCircuit(2, final_layer2)] + + +@fixture +def BasicQuantumModel( + BasicQuantumCircuit: QuantumCircuit, BasicObservable: AbstractBlock +) -> QuantumModel: + return QuantumModel( + BasicQuantumCircuit, BasicObservable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + + 
+@fixture +def BasicQNN(BasicFMQuantumCircuit: QuantumCircuit, BasicObservable: AbstractBlock) -> QNN: + return QNN( + BasicFMQuantumCircuit, + total_magnetization(FM_NQUBITS), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + +@fixture +def BasicTransformedModule(BasicQNN: QNN) -> TransformedModule: + return TransformedModule( + BasicQNN, + None, + None, + input_scaling=1.0, + output_scaling=1.0, + input_shifting=0.0, + output_shifting=0.0, + ) diff --git a/tests/constructors/test_ansatz.py b/tests/constructors/test_ansatz.py new file mode 100644 index 000000000..af3b1d1db --- /dev/null +++ b/tests/constructors/test_ansatz.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import pytest +import torch + +from qadence import ( + CNOT, + CRX, + RX, + RZ, + Interaction, + QuantumCircuit, + QuantumModel, + VariationalParameter, + Z, + chain, + hamiltonian_factory, + hea, + kron, +) +from qadence.blocks import AbstractBlock, has_duplicate_vparams +from qadence.types import Strategy + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("entangler", [CNOT, CRX]) +def test_hea_duplicate_params(n_qubits: int, depth: int, entangler: AbstractBlock) -> None: + """Tests that HEAs are initialized with correct parameter namings.""" + common_params = { + "n_qubits": n_qubits, + "depth": depth, + "operations": [RZ, RX, RZ], + "entangler": entangler, + } + hea1 = hea(n_qubits=n_qubits, depth=depth, operations=[RZ, RX, RZ], entangler=entangler) + hea2 = hea(n_qubits=n_qubits, depth=depth, operations=[RZ, RX, RZ], entangler=entangler) + block1 = chain(hea1, hea2) + assert has_duplicate_vparams(block1) + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + param_prefix="0", + ) + hea2 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + param_prefix="1", + ) + block2 = chain(hea1, hea2) + assert not 
has_duplicate_vparams(block2) + + +@pytest.mark.parametrize("n_qubits", [2, 3]) +@pytest.mark.parametrize("depth", [2, 3]) +@pytest.mark.parametrize("hamiltonian", ["fixed_global", "parametric_local"]) +def test_hea_sDAQC(n_qubits: int, depth: int, hamiltonian: str) -> None: + if hamiltonian == "fixed_global": + entangler = hamiltonian_factory(n_qubits, interaction=Interaction.NN) + if hamiltonian == "parametric_local": + x = VariationalParameter("x") + entangler = x * kron(Z(0), Z(1)) + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + entangler=entangler, + strategy=Strategy.SDAQC, + ) + # Variational parameters in the digital-analog entangler + # are not created automatically by the hea function, but + # by passing them in the entangler. Thus for depth larger + # than 1 we do get duplicate vparams: + if hamiltonian == "fixed_global": + assert not has_duplicate_vparams(hea1) + if hamiltonian == "parametric_local": + assert has_duplicate_vparams(hea1) + + +@pytest.mark.parametrize("n_qubits", [2, 5]) +@pytest.mark.parametrize("depth", [2, 4]) +@pytest.mark.parametrize("strategy", [Strategy.DIGITAL, Strategy.SDAQC]) +def test_hea_forward(n_qubits: int, depth: int, strategy: Strategy) -> None: + hea1 = hea( + n_qubits=n_qubits, + depth=depth, + operations=[RZ, RX, RZ], + strategy=strategy, + ) + circuit = QuantumCircuit(n_qubits, hea1) + model = QuantumModel(circuit) + + wf = model.run({}) + assert wf.shape == torch.Size([1, 2**n_qubits]) diff --git a/tests/constructors/test_daqc.py b/tests/constructors/test_daqc.py new file mode 100644 index 000000000..b33e0a19e --- /dev/null +++ b/tests/constructors/test_daqc.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import pytest +from metrics import ATOL_64 + +from qadence import ( + HamEvo, + QuantumCircuit, + QuantumModel, + Register, + daqc_transform, + hamiltonian_factory, + random_state, +) +from qadence.states import equivalent_state +from qadence.types import Interaction + + 
+@pytest.mark.parametrize("n_qubits", [2, 3, 5]) +@pytest.mark.parametrize("t_f", [0.1, 10]) +@pytest.mark.parametrize( + "int_build, int_target", + [ + (Interaction.ZZ, Interaction.ZZ), + (Interaction.ZZ, Interaction.NN), + (Interaction.NN, Interaction.ZZ), + (Interaction.NN, Interaction.NN), + ], +) +def test_daqc_ising( + n_qubits: int, + t_f: float, + int_build: Interaction, + int_target: Interaction, +) -> None: + """ + Tests that the DAQC transformation works for + a random target and build hamiltonian. + """ + gen_build = hamiltonian_factory(n_qubits, interaction=int_build, random_strength=True) + gen_target = hamiltonian_factory(n_qubits, interaction=int_target, random_strength=True) + + transformed_circuit = daqc_transform( + n_qubits=n_qubits, + gen_target=gen_target, + t_f=t_f, + gen_build=gen_build, + ) + + circuit_daqc = QuantumCircuit(n_qubits, transformed_circuit) + circuit_digital_block = QuantumCircuit(n_qubits, HamEvo(gen_target, t_f)) + model_digital = QuantumModel(circuit_digital_block) + model_analog = QuantumModel(circuit_daqc) + + wf_init = random_state(n_qubits) + + wf_digital = model_digital.run(values={}, state=wf_init) + wf_analog = model_analog.run(values={}, state=wf_init) + + assert equivalent_state(wf_digital, wf_analog, atol=10 * t_f * ATOL_64) + + +@pytest.mark.parametrize("n_qubits", [2, 3, 5]) +@pytest.mark.parametrize("t_f", [0.1, 10]) +@pytest.mark.parametrize( + "int_build, int_target", + [ + (Interaction.ZZ, Interaction.ZZ), + (Interaction.ZZ, Interaction.NN), + (Interaction.NN, Interaction.ZZ), + (Interaction.NN, Interaction.NN), + ], +) +def test_daqc_local( + n_qubits: int, + t_f: float, + int_build: Interaction, + int_target: Interaction, +) -> None: + """ + Tests that the DAQC transformation works for a local + target hamiltonian using a global random one. 
+ """ + gen_build = hamiltonian_factory(n_qubits, interaction=int_build, random_strength=True) + register_target = Register.line(2) + gen_target = hamiltonian_factory(register_target, interaction=int_target, random_strength=True) + + transformed_circuit = daqc_transform( + n_qubits=n_qubits, + gen_target=gen_target, + t_f=t_f, + gen_build=gen_build, + ) + + circuit_daqc = QuantumCircuit(n_qubits, transformed_circuit) + circuit_digital_block = QuantumCircuit(n_qubits, HamEvo(gen_target, t_f)) + + model_digital = QuantumModel(circuit_digital_block) + model_analog = QuantumModel(circuit_daqc) + + wf_init = random_state(n_qubits) + wf_digital = model_digital.run(values={}, state=wf_init) + wf_analog = model_analog.run(values={}, state=wf_init) + + assert equivalent_state(wf_digital, wf_analog, atol=10 * t_f * ATOL_64) diff --git a/tests/constructors/test_hamiltonians.py b/tests/constructors/test_hamiltonians.py new file mode 100644 index 000000000..d34157352 --- /dev/null +++ b/tests/constructors/test_hamiltonians.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import networkx as nx +import pytest +import torch + +from qadence import ( + Interaction, + N, + Register, + X, + Y, + Z, + hamiltonian_factory, +) +from qadence.blocks.utils import block_is_qubit_hamiltonian + + +@pytest.mark.parametrize( + "interaction", [None, Interaction.ZZ, Interaction.NN, Interaction.XY, Interaction.XYZ] +) +@pytest.mark.parametrize("detuning", [None, X, Y, Z, N]) +@pytest.mark.parametrize("strength_type", ["none", "parameter", "numeric", "random"]) +def test_hamiltonian_factory_creation( + interaction: Interaction | None, + detuning: type[N] | type[X] | type[Z] | type[Y] | None, + strength_type: str, +) -> None: + n_qubits = 5 + + if (interaction is None) and (detuning is None): + pass + else: + detuning_strength = None + interaction_strength = None + random_strength = False + if strength_type == "parameter": + detuning_strength = "x" + interaction_strength = "y" + elif 
strength_type == "numeric": + detuning_strength = torch.rand(n_qubits) + interaction_strength = torch.rand(int(0.5 * n_qubits * (n_qubits - 1))) + elif strength_type == "random": + random_strength = True + + hamilt = hamiltonian_factory( + n_qubits, + interaction=interaction, + detuning=detuning, + detuning_strength=detuning_strength, + interaction_strength=interaction_strength, + random_strength=random_strength, + ) + + assert block_is_qubit_hamiltonian(hamilt) + + +@pytest.mark.parametrize( + "register", + [ + "graph", + Register(4), + Register.line(4), + Register.circle(8), + Register.square(4), + Register.rectangular_lattice(2, 3), + Register.triangular_lattice(1, 3), + Register.honeycomb_lattice(1, 3), + Register.from_coordinates([(0, 1), (0, 2), (0, 3), (1, 3)]), + ], +) +@pytest.mark.parametrize("interaction", [Interaction.NN, Interaction.XY]) +@pytest.mark.parametrize("detuning", [Y, Z]) +def test_hamiltonian_factory_register( + register: Register | str, + interaction: Interaction | None, + detuning: type[N] | type[X] | type[Z] | type[Y] | None, +) -> None: + if register == "graph": + graph = nx.Graph() + graph.add_edge(0, 1) + register = Register(graph) + + hamilt = hamiltonian_factory( + register, # type: ignore [arg-type] + interaction=interaction, + detuning=detuning, + random_strength=True, + ) + + assert block_is_qubit_hamiltonian(hamilt) diff --git a/tests/constructors/test_qft.py b/tests/constructors/test_qft.py new file mode 100644 index 000000000..2d9b9e15f --- /dev/null +++ b/tests/constructors/test_qft.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import pytest +import torch +from metrics import ATOL_64 + +from qadence import ( + BackendName, + Interaction, + QuantumCircuit, + QuantumModel, + hamiltonian_factory, + qft, + random_state, +) +from qadence.states import equivalent_state +from qadence.types import Strategy + + +def test_qft() -> None: + def qft_matrix(N: int) -> torch.Tensor: + """Textbook QFT unitary matrix to compare 
to the circuit solution""" + matrix = torch.zeros((N, N), dtype=torch.cdouble) + w = torch.exp(torch.tensor(2.0j * torch.pi / N, dtype=torch.cdouble)) + for i in range(N): + for j in range(N): + matrix[i, j] = (N ** (-1 / 2)) * w ** (i * j) + return matrix + + n_qubits = 2 + + # First tests that the qft_matrix function is correct for 2-qubits + qft_m_2q = (1 / 2) * torch.tensor( + [ + [1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j], + [1.0 + 0.0j, 0.0 + 1.0j, -1.0 + 0.0j, 0.0 - 1.0j], + [1.0 + 0.0j, -1.0 + 0.0j, 1.0 + 0.0j, -1.0 + 0.0j], + [1.0 + 0.0j, 0.0 - 1.0j, -1.0 + 0.0j, 0.0 + 1.0j], + ], + dtype=torch.cdouble, + ) + + assert torch.allclose(qft_m_2q, qft_matrix(n_qubits**2), rtol=0.0, atol=ATOL_64) + + # Now loads larger random initial state + n_qubits = 5 + + wf_init = random_state(n_qubits) + + # Runs QFT circuit with swaps to match standard QFT definition + qc_qft = QuantumCircuit(n_qubits, qft(n_qubits, swaps_out=True, strategy=Strategy.DIGITAL)) + model = QuantumModel(qc_qft, backend=BackendName.PYQTORCH) + wf_qft = model.run(values={}, state=wf_init) + + # Checks output with the textbook matrix + wf_textbook = torch.matmul(qft_matrix(2**n_qubits), wf_init[0]) + + assert equivalent_state(wf_qft, wf_textbook.unsqueeze(0), atol=10 * ATOL_64) + + +def test_qft_inverse() -> None: + """Tests that applying qft -> inverse qft returns the initial state.""" + n_qubits = 4 + wf_init = random_state(n_qubits) + qc_qft = QuantumCircuit(n_qubits, qft(n_qubits)) + qc_qft_inv = QuantumCircuit(n_qubits, qft(n_qubits, inverse=True)) + model = QuantumModel(qc_qft, backend=BackendName.PYQTORCH) + model_inv = QuantumModel(qc_qft_inv, backend=BackendName.PYQTORCH) + wf_1 = model.run(values={}, state=wf_init) + wf_2 = model_inv.run(values={}, state=wf_1) + assert equivalent_state(wf_2, wf_init, atol=ATOL_64) + + +@pytest.mark.parametrize( + "param_dict", + [ + {"inverse": False, "reverse_in": False, "swaps_out": False}, + {"inverse": True, "reverse_in": True, "swaps_out": 
True}, + ], +) +@pytest.mark.parametrize("n_qubits", [1, 2, 3]) +def test_qft_digital_analog(n_qubits: int, param_dict: dict) -> None: + """Tests that the digital and digital-analog qfts return the same result.""" + qc_qft_digital = QuantumCircuit( + n_qubits, qft(n_qubits, strategy=Strategy.DIGITAL, **param_dict) + ) + + qft_analog_block = hamiltonian_factory( + n_qubits, interaction=Interaction.NN, random_strength=True + ) + + qc_qft_digital_analog = QuantumCircuit( + n_qubits, + qft(n_qubits, strategy=Strategy.SDAQC, gen_build=qft_analog_block, **param_dict), + ) + model_digital = QuantumModel(qc_qft_digital) + model_analog = QuantumModel(qc_qft_digital_analog) + + wf_init = random_state(n_qubits) + wf_digital = model_digital.run(values={}, state=wf_init) + wf_analog = model_analog.run(values={}, state=wf_init) + + assert equivalent_state(wf_digital, wf_analog, atol=ATOL_64) diff --git a/tests/metrics.py b/tests/metrics.py new file mode 100644 index 000000000..b4b70e68e --- /dev/null +++ b/tests/metrics.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from qadence import BackendName + +ATOL_64 = 1e-14 # 64 bit precision +ATOL_32 = 1e-07 # 32 bit precision +ATOL_E6 = 1e-06 # some tests do not pass ATOL_32; to fix +LOW_ACCEPTANCE = 2.0e-2 +MIDDLE_ACCEPTANCE = 5.0e-2 +HIGH_ACCEPTANCE = 0.5 +JS_ACCEPTANCE = 7.5e-2 +PSR_ACCEPTANCE = 1e-5 +GPSR_ACCEPTANCE = 1e-1 +PULSER_GPSR_ACCEPTANCE = 6.0e-2 +ATOL_DICT = { + BackendName.PYQTORCH: 1e-06, + BackendName.PULSER: 1e-02, + BackendName.BRAKET: 1e-02, +} +MAX_COUNT_DIFF = 20 +SMALL_SPACING = 7.0 +LARGE_SPACING = 30.0 +DIGITAL_DECOMP_ACCEPTANCE_HIGH = 1e-2 +DIGITAL_DECOMP_ACCEPTANCE_LOW = 1e-3 diff --git a/tests/ml_tools/test_checkpointing.py b/tests/ml_tools/test_checkpointing.py new file mode 100644 index 000000000..00c2ba930 --- /dev/null +++ b/tests/ml_tools/test_checkpointing.py @@ -0,0 +1,211 @@ +from __future__ import annotations + +import os +from itertools import count +from pathlib import Path + +import 
torch +from torch.utils.data import DataLoader, TensorDataset + +from qadence.ml_tools import ( + TrainConfig, + load_checkpoint, + train_with_grad, + write_checkpoint, +) +from qadence.ml_tools.models import TransformedModule +from qadence.ml_tools.parameters import get_parameters, set_parameters +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models import QNN, QuantumModel + + +def dataloader() -> DataLoader: + batch_size = 25 + x = torch.linspace(0, 1, batch_size).reshape(-1, 1) + y = torch.cos(x) + + dataset = TensorDataset(x, y) + return DataLoader(dataset, batch_size=batch_size) + + +def test_basic_save_load_ckpts(Basic: torch.nn.Module, tmp_path: Path) -> None: + data = dataloader() + model = Basic + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=1, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + set_parameters(model, torch.ones(len(get_parameters(model)))) + write_checkpoint(tmp_path, model, optimizer, 1) + # check that saved model has ones + load_checkpoint(tmp_path, model, optimizer) + ps = get_parameters(model) + assert torch.allclose(ps, torch.ones(len(ps))) + + +def test_random_basicqQM_save_load_ckpts(BasicQuantumModel: QuantumModel, tmp_path: Path) -> None: + data = dataloader() + model = BasicQuantumModel + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation({}) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, 
write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + load_checkpoint(tmp_path, model, optimizer) + assert not torch.all(torch.isnan(model.expectation({}))) + loaded_model, optimizer, _ = load_checkpoint( + tmp_path, + BasicQuantumModel, + optimizer, + "model_QuantumModel_ckpt_009.pt", + "opt_Adam_ckpt_006.pt", + ) + assert torch.allclose(loaded_model.expectation({}), model.expectation({})) + + +def test_check_ckpts_exist(BasicQuantumModel: QuantumModel, tmp_path: Path) -> None: + data = dataloader() + model = BasicQuantumModel + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation({}) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + ckpts = [tmp_path / Path(f"model_QuantumModel_ckpt_00{i}.pt") for i in range(1, 9)] + assert all(os.path.isfile(ckpt) for ckpt in ckpts) + for ckpt in ckpts: + loaded_model, optimizer, _ = load_checkpoint( + tmp_path, BasicQuantumModel, optimizer, ckpt, "" + ) + assert torch.allclose(loaded_model.expectation({}), model.expectation({})) + + +def test_random_basicqQNN_save_load_ckpts(BasicQNN: QNN, tmp_path: Path) -> None: + data = dataloader() + model = BasicQNN + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + inputs = rand_featureparameters(model, 1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation(inputs) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + 
load_checkpoint(tmp_path, model, optimizer) + assert not torch.all(torch.isnan(model.expectation(inputs))) + loaded_model, optimizer, _ = load_checkpoint( + tmp_path, + BasicQNN, + optimizer, + "model_QNN_ckpt_009.pt", + "opt_Adam_ckpt_006.pt", + ) + assert torch.allclose(loaded_model.expectation(inputs), model.expectation(inputs)) + + +def test_check_QNN_ckpts_exist(BasicQNN: QNN, tmp_path: Path) -> None: + data = dataloader() + model = BasicQNN + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + inputs = rand_featureparameters(model, 1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation(inputs) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + ckpts = [tmp_path / Path(f"model_QNN_ckpt_00{i}.pt") for i in range(1, 9)] + assert all(os.path.isfile(ckpt) for ckpt in ckpts) + for ckpt in ckpts: + loaded_model, optimizer, _ = load_checkpoint(tmp_path, BasicQNN, optimizer, ckpt, "") + assert torch.allclose(loaded_model.expectation(inputs), model.expectation(inputs)) + + +def test_random_basicqtransformedmodule_save_load_ckpts( + BasicTransformedModule: TransformedModule, tmp_path: Path +) -> None: + data = dataloader() + model = BasicTransformedModule + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + inputs = rand_featureparameters(model, 1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation(inputs) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + load_checkpoint(tmp_path, model, 
optimizer) + assert not torch.all(torch.isnan(model.expectation(inputs))) + loaded_model, optimizer, _ = load_checkpoint( + tmp_path, + BasicTransformedModule, + optimizer, + "model_TransformedModule_ckpt_009.pt", + "opt_Adam_ckpt_006.pt", + ) + assert torch.allclose(loaded_model.expectation(inputs), model.expectation(inputs)) + + +def test_check_transformedmodule_ckpts_exist( + BasicTransformedModule: TransformedModule, tmp_path: Path +) -> None: + data = dataloader() + model = BasicTransformedModule + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + inputs = rand_featureparameters(model, 1) + + def loss_fn(model: QuantumModel, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model.expectation(inputs) + loss = criterion(out, torch.rand(1)) + return loss, {} + + config = TrainConfig(folder=tmp_path, max_iter=10, checkpoint_every=1, write_every=1) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + ckpts = [tmp_path / Path(f"model_TransformedModule_ckpt_00{i}.pt") for i in range(1, 9)] + assert all(os.path.isfile(ckpt) for ckpt in ckpts) + for ckpt in ckpts: + loaded_model, optimizer, _ = load_checkpoint( + tmp_path, BasicTransformedModule, optimizer, ckpt, "" + ) + assert torch.allclose(loaded_model.expectation(inputs), model.expectation(inputs)) diff --git a/tests/ml_tools/test_model_parameters.py b/tests/ml_tools/test_model_parameters.py new file mode 100644 index 000000000..f13475ec1 --- /dev/null +++ b/tests/ml_tools/test_model_parameters.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +import torch + +from qadence import BackendName, DiffMode, QuantumCircuit +from qadence.constructors import feature_map, hea, total_magnetization +from qadence.ml_tools.parameters import get_parameters, num_parameters, set_parameters +from qadence.models import QNN + + +def test_get_parameters(Basic: torch.nn.Module) -> None: + # verify that parameters have expected length 
+ model = Basic + ps = get_parameters(model) + assert len(ps) == model.n_neurons * 4 * 2 + 2 * 3 + assert len(ps) == num_parameters(model) + + +def test_get_parameters_qnn() -> None: + # verify that parameters have expected length (exluding fix/non-trainable params) + n_qubits, depth = 2, 4 + fm = feature_map(n_qubits) + ansatz = hea(n_qubits=n_qubits, depth=depth) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + obs = total_magnetization(n_qubits) + + # initialize and use the model + model = QNN(circuit, obs, diff_mode=DiffMode.AD, backend=BackendName.PYQTORCH) + ps = get_parameters(model) + assert len(ps) == 6 * 4 + + +def test_set_parameters_qnn() -> None: + # make sure that only variational parameters are set + n_qubits, depth = 2, 4 + fm = feature_map(n_qubits) + ansatz = hea(n_qubits=n_qubits, depth=depth) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + obs = total_magnetization(n_qubits) + + # initialize and use the model + model = QNN(circuit, obs, diff_mode=DiffMode.AD, backend=BackendName.PYQTORCH) + set_parameters(model, torch.rand(6 * 4)) diff --git a/tests/ml_tools/test_tensors.py b/tests/ml_tools/test_tensors.py new file mode 100644 index 000000000..bf91e3409 --- /dev/null +++ b/tests/ml_tools/test_tensors.py @@ -0,0 +1,47 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch + +from qadence.ml_tools import numpy_to_tensor, promote_to, promote_to_tensor + + +@pytest.mark.parametrize("requires_grad", [True, False]) +@pytest.mark.parametrize("dtype", [torch.float64, torch.complex128]) +def test_numpy_to_tensor(requires_grad: bool, dtype: torch.dtype) -> None: + array_np = np.random.random((10, 2)) + array_tc = numpy_to_tensor(array_np, dtype=dtype, requires_grad=requires_grad) + + assert array_tc.requires_grad == requires_grad + assert array_tc.dtype == dtype + assert np.allclose(array_np, array_tc.detach().numpy()) + + +@pytest.mark.parametrize("requires_grad", [True, False]) +def 
test_promote_to_tensor(requires_grad: bool) -> None: + array_np = np.linspace(0, 1, 100) + array_tc = promote_to_tensor(array_np, requires_grad=requires_grad) + + assert array_tc.requires_grad == requires_grad + assert np.allclose(array_np, array_tc.detach().numpy()) + + number = 1.2345 + number_tc = promote_to_tensor(number, requires_grad=requires_grad) + assert number_tc.requires_grad == requires_grad + assert number_tc.shape == (1, 1) + assert np.isclose(float(number_tc.flatten()), number) + + +def test_promote_to() -> None: + array_tc = torch.linspace(0, 1, 100) + array_np = promote_to(array_tc, np.ndarray) + assert np.allclose(array_np, array_tc.detach().numpy()) + + number_tc = torch.Tensor([1.2345]).reshape(-1, 1) + number = promote_to(number_tc, float) + assert np.isclose(float(number_tc.flatten()), number) + + array_tc = torch.rand(10, 2) + array_tc_prom = promote_to(array_tc, torch.Tensor) + assert torch.equal(array_tc, array_tc_prom) diff --git a/tests/ml_tools/test_train.py b/tests/ml_tools/test_train.py new file mode 100644 index 000000000..8ac8c37c5 --- /dev/null +++ b/tests/ml_tools/test_train.py @@ -0,0 +1,193 @@ +from __future__ import annotations + +from itertools import count +from pathlib import Path +from typing import Any + +import numpy as np +import pytest +import torch +from torch.utils.data import DataLoader, TensorDataset + +from qadence.ml_tools import DictDataLoader, TrainConfig, train_with_grad +from qadence.ml_tools.models import TransformedModule +from qadence.models import QNN + +torch.manual_seed(42) +np.random.seed(42) + + +def dataloader() -> DataLoader: + batch_size = 25 + x = torch.linspace(0, 1, batch_size).reshape(-1, 1) + y = torch.sin(x) + + dataset = TensorDataset(x, y) + return DataLoader(dataset, batch_size=batch_size) + + +def dictdataloader() -> DictDataLoader: + batch_size = 25 + + keys = ["y1", "y2"] + dls = {} + for k in keys: + x = torch.rand(batch_size, 1) + y = torch.sin(x) + dataset = TensorDataset(x, y) + 
dataloader = DataLoader(dataset, batch_size=batch_size) + dls[k] = dataloader + + return DictDataLoader(dls, has_automatic_iter=False) + + +def FMdictdataloader(param_name: str = "phi", n_qubits: int = 2) -> DictDataLoader: + batch_size = 1 + + dls = {} + x = torch.rand(batch_size, 1) + y = torch.sin(x) + dataset = TensorDataset(x, y) + dataloader = DataLoader(dataset, batch_size=batch_size) + dls[param_name] = dataloader + + return DictDataLoader(dls, has_automatic_iter=False) + + +@pytest.mark.flaky(max_runs=10) +def test_train_dataloader_default(tmp_path: Path, Basic: torch.nn.Module) -> None: + data = dataloader() + model = Basic + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + + n_epochs = 100 + config = TrainConfig(folder=tmp_path, max_iter=n_epochs, checkpoint_every=100, write_every=100) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + assert next(cnt) == n_epochs + + x = torch.rand(5, 1) + assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1) + + +def test_train_dataloader_no_data(tmp_path: Path, BasicNoInput: torch.nn.Module) -> None: + data = None + model = BasicNoInput + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=1.0) + + def loss_fn(model: torch.nn.Module, xs: Any = None) -> tuple[torch.Tensor, dict]: + next(cnt) + out = model() + loss = criterion(out, torch.tensor([0.0])) + return loss, {} + + n_epochs = 50 + config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + print_every=5, + checkpoint_every=100, + write_every=100, + ) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + assert next(cnt) == n_epochs + + out = model() + assert torch.allclose(out, torch.zeros(1), atol=1e-2, rtol=1e-2) 
+ + +@pytest.mark.flaky(max_runs=10) +def test_train_dictdataloader(tmp_path: Path, Basic: torch.nn.Module) -> None: + data = dictdataloader() + model = Basic + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x1, y1 = data["y1"][0], data["y1"][1] + x2, y2 = data["y2"][0], data["y2"][1] + l1 = criterion(model(x1), y1) + l2 = criterion(model(x2), y2) + return l1 + l2, {} + + n_epochs = 100 + config = TrainConfig( + folder=tmp_path, max_iter=n_epochs, print_every=10, checkpoint_every=100, write_every=100 + ) + train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + assert next(cnt) == n_epochs + + x = torch.rand(5, 1) + assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1) + + +@pytest.mark.slow +@pytest.mark.flaky(max_runs=10) +def test_modules_save_load(BasicQNN: QNN, BasicTransformedModule: TransformedModule) -> None: + data = FMdictdataloader() + for _m in [BasicQNN, BasicTransformedModule]: + model: torch.nn.Module = _m + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + x = torch.rand(1) + y = torch.sin(x) + l1 = criterion(model(x), y) + return l1, {} + + n_epochs = 200 + config = TrainConfig( + max_iter=n_epochs, print_every=10, checkpoint_every=500, write_every=500 + ) + model, optimizer = train_with_grad(model, data, optimizer, config, loss_fn=loss_fn) + x = torch.rand(1) + assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1) + + +@pytest.mark.flaky(max_runs=10) +def test_train_tensor_tuple(tmp_path: Path, Basic: torch.nn.Module) -> None: + model = Basic + batch_size = 25 + x = torch.linspace(0, 1, batch_size).reshape(-1, 1) + y = torch.sin(x) + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = 
torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + + n_epochs = 100 + config = TrainConfig( + folder=tmp_path, + max_iter=n_epochs, + checkpoint_every=100, + write_every=100, + batch_size=batch_size, + ) + train_with_grad(model, (x, y), optimizer, config, loss_fn=loss_fn) + assert next(cnt) == n_epochs + + x = torch.rand(5, 1) + assert torch.allclose(torch.sin(x), model(x), rtol=1e-1, atol=1e-1) diff --git a/tests/ml_tools/test_train_no_grad.py b/tests/ml_tools/test_train_no_grad.py new file mode 100644 index 000000000..03553542e --- /dev/null +++ b/tests/ml_tools/test_train_no_grad.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import random +from itertools import count +from pathlib import Path + +import nevergrad as ng +import numpy as np +import torch +from torch.utils.data import DataLoader, TensorDataset + +from qadence.ml_tools import TrainConfig, num_parameters, train_gradient_free + +# ensure reproducibility +SEED = 42 + +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) + + +def dataloader() -> DataLoader: + batch_size = 25 + x = torch.linspace(0, 1, batch_size).reshape(-1, 1) + y = torch.cos(x) + + dataset = TensorDataset(x, y) + return DataLoader(dataset, batch_size=batch_size) + + +def test_train_dataloader_default(tmp_path: Path, Basic: torch.nn.Module) -> None: + data = dataloader() + model = Basic + + cnt = count() + criterion = torch.nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + + def loss_fn(model: torch.nn.Module, data: torch.Tensor) -> tuple[torch.Tensor, dict]: + next(cnt) + x, y = data[0], data[1] + out = model(x) + loss = criterion(out, y) + return loss, {} + + n_epochs = 500 + config = TrainConfig(folder=tmp_path, max_iter=n_epochs, checkpoint_every=100, write_every=100) + + optimizer = 
ng.optimizers.NGOpt(budget=config.max_iter, parametrization=num_parameters(model)) + + train_gradient_free(model, data, optimizer, config, loss_fn=loss_fn) + assert next(cnt) == n_epochs + + x = torch.rand(5, 1) + assert torch.allclose(torch.cos(x), model(x), rtol=1e-1, atol=1e-1) diff --git a/tests/ml_tools/test_transformed_module.py b/tests/ml_tools/test_transformed_module.py new file mode 100644 index 000000000..441aa53d0 --- /dev/null +++ b/tests/ml_tools/test_transformed_module.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +from pathlib import Path + +import numpy as np +import pytest +import torch +from torch.nn import Parameter as TorchParam + +from qadence import ( + BackendName, + DiffMode, + Parameter, + QuantumCircuit, + deserialize, + load, + save, + serialize, +) +from qadence.blocks import chain, tag +from qadence.constructors import hea, total_magnetization +from qadence.ml_tools.models import TransformedModule +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models import QNN +from qadence.operations import RY +from qadence.serialization import SerializationFormat + +np.random.seed(42) +torch.manual_seed(42) + + +def quantum_circuit(n_qubits: int = 2, depth: int = 1) -> QuantumCircuit: + # Chebyshev feature map with input parameter defined as non trainable + phi = Parameter("phi", trainable=False) + fm = chain(*[RY(i, phi) for i in range(n_qubits)]) + tag(fm, "feature_map") + + ansatz = hea(n_qubits=n_qubits, depth=depth) + tag(ansatz, "ansatz") + + return QuantumCircuit(n_qubits, fm, ansatz) + + +def get_qnn(n_qubits: int, depth: int) -> QNN: + observable = total_magnetization(n_qubits) + circuit = quantum_circuit(n_qubits=n_qubits, depth=depth) + model = QNN(circuit, observable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + init_params = torch.rand(model.num_vparams) + model.reset_vparams(init_params) + return model + + +@pytest.mark.parametrize("n_qubits", [2, 4, 8]) +def 
test_transformed_module(n_qubits: int) -> None: + depth = 1 + model = get_qnn(n_qubits, depth) + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + assert not torch.isnan(pred) + + transformed_model = TransformedModule( + model, + None, + None, + TorchParam(torch.tensor(5.0)), + 2.0, + 1000.0, + TorchParam(torch.tensor(10.0)), + ) + pred_transformed = transformed_model(input_values) + assert not torch.isnan(pred_transformed) + + +@pytest.mark.parametrize("n_qubits", [2, 4, 8]) +def test_same_output(n_qubits: int) -> None: + depth = 1 + model = get_qnn(n_qubits, depth) + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + assert not torch.isnan(pred) + + transformed_model = TransformedModule( + model, + None, + None, + TorchParam(torch.tensor(1.0)), + 0.0, + 1.0, + TorchParam(torch.tensor(0.0)), + ) + pred_transformed = transformed_model(input_values) + assert torch.allclose(pred_transformed.real, pred) + assert pred.size() == pred_transformed.size() + + +@pytest.mark.parametrize("n_qubits", [2, 4, 8]) +def test_no_scaling_provided(n_qubits: int) -> None: + depth = 1 + model = get_qnn(n_qubits, depth) + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + assert not torch.isnan(pred) + + transformed_model = TransformedModule(model, None, None, None, 2.0, None, 100.0) + pred_transformed = transformed_model(input_values) + assert not torch.isnan(pred_transformed) + assert pred.size() == pred_transformed.size() + + +@pytest.mark.parametrize("n_qubits", [2, 4, 8]) +def test_no_args(n_qubits: int) -> None: + depth = 1 + model = get_qnn(n_qubits, depth) + batch_size = 1 + input_values = {"phi": torch.rand(batch_size, requires_grad=True)} + pred = model(input_values) + assert not torch.isnan(pred) + + transformed_model = TransformedModule(model) + pred_transformed = 
transformed_model(input_values) + assert torch.allclose(pred_transformed.real, pred) + assert pred.size() == pred_transformed.size() + + +def test_save_load_TM_pyq(tmp_path: Path, BasicTransformedModule: TransformedModule) -> None: + tm = BasicTransformedModule + # serialize deserialize + d = serialize(tm) + tm_ser = deserialize(d) # type: ignore[assignment] + inputs = rand_featureparameters(tm, 1) + y_p0 = tm(inputs)[0] + y_p1 = tm_ser(inputs)[0] # type: ignore[operator] + assert torch.allclose(y_p0, y_p1) + # save load + for _format, _suffix in zip( + [SerializationFormat.JSON, SerializationFormat.PT], [".json", ".pt"] + ): + base_name = "tm" + save(tm, tmp_path, base_name, _format) + tm_load = load(tmp_path / (base_name + _suffix)) # type: ignore[assignment] + y_px = tm_load.expectation(inputs)[0] # type: ignore[union-attr] + assert torch.allclose(y_p0, y_px) + + +def test_basic_save_load_ckpts(Basic: torch.nn.Module, tmp_path: Path) -> None: + model = Basic + in_feat = 1 + x = torch.rand(in_feat) + exp_no = model(x) + tm = TransformedModule( + model=model, + in_features=in_feat, + out_features=1, + input_scaling=torch.ones(in_feat), + input_shifting=torch.zeros(in_feat), + output_scaling=torch.ones(1), + output_shifting=torch.zeros(1), + ) + assert torch.allclose(exp_no, tm(x)) diff --git a/tests/models/test_qnn.py b/tests/models/test_qnn.py new file mode 100644 index 000000000..31a64d12b --- /dev/null +++ b/tests/models/test_qnn.py @@ -0,0 +1,176 @@ +from __future__ import annotations + +from collections import OrderedDict + +import numpy as np +import pytest +import torch + +from qadence import BackendName, DiffMode, FeatureParameter, QuantumCircuit +from qadence.blocks import ( + chain, + kron, + tag, +) +from qadence.constructors import hea, ising_hamiltonian, total_magnetization +from qadence.models import QNN +from qadence.operations import RX, RY +from qadence.parameters import Parameter +from qadence.states import uniform_state + + +def 
build_circuit(n_qubits_per_feature: int, n_features: int, depth: int = 2) -> QuantumCircuit: + n_qubits = n_qubits_per_feature * n_features + + idx_fms = [] + + for i in range(n_features): + start_qubit = i * n_qubits_per_feature + end_qubit = (i + 1) * n_qubits_per_feature + param = FeatureParameter(f"x{i}") + block = kron(*[RY(qubit, (qubit + 1) * param) for qubit in range(start_qubit, end_qubit)]) + idx_fm = tag(block, tag=f"FM{i}") + idx_fms.append(idx_fm) + + fm = kron(*idx_fms) + ansatz = hea(n_qubits, depth=depth) + + return QuantumCircuit(n_qubits, fm, ansatz) + + +def test_parameters(parametric_circuit: QuantumCircuit) -> None: + circ = parametric_circuit + model = QNN( + circ, + observable=total_magnetization(circ.n_qubits), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + vparams = model.vparams + assert isinstance(vparams, OrderedDict) + + trainables: list[Parameter] + trainables = [p for p in circ.parameters() if not p.is_number and p.trainable] # type: ignore + assert model.num_vparams == len(trainables) + + # init with torch + init_values_tc = torch.rand(model.num_vparams) + model.reset_vparams(init_values_tc) # type: ignore + assert torch.equal(init_values_tc, model.vals_vparams) + + # init with numpy + init_values_np = np.random.rand(model.num_vparams) + model.reset_vparams(init_values_np) # type: ignore + assert torch.equal(torch.tensor(init_values_np), model.vals_vparams) + + +@pytest.mark.parametrize("dim", [1, 2, 3]) +def test_input_nd(dim: int) -> None: + batch_size = 10 + n_qubits_per_feature = 2 + + observable = total_magnetization(n_qubits_per_feature * dim) + circuit = build_circuit(n_qubits_per_feature, dim) + a = torch.rand(batch_size, dim) + qnn = QNN(circuit, observable) + assert qnn.in_features == dim + + res: torch.Tensor = qnn(a) + assert qnn.out_features is not None and qnn.out_features == 1 + assert res.size()[1] == qnn.out_features + assert res.size()[0] == batch_size + + +def test_qnn_expectation(n_qubits: int = 
4) -> None: + theta0 = Parameter("theta0", trainable=True) + theta1 = Parameter("theta1", trainable=True) + + ry0 = RY(0, theta0) + ry1 = RY(1, theta1) + + fm = chain(ry0, ry1) + + ansatz = hea(2, 2, param_prefix="eps") + + block = chain(fm, ansatz) + + qc = QuantumCircuit(n_qubits, block) + uni_state = uniform_state(n_qubits) + obs = total_magnetization(n_qubits) + model = QNN(circuit=qc, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + + exp = model(values={}, state=uni_state) + assert not torch.any(torch.isnan(exp)) + + +def test_qnn_multiple_outputs(n_qubits: int = 4) -> None: + theta0 = Parameter("theta0", trainable=True) + theta1 = Parameter("theta1", trainable=True) + phi = Parameter("phi", trainable=False) + + ry_theta0 = RY(0, theta0) + ry_theta1 = RY(1, theta1) + + fm = chain(ry_theta0, ry_theta1, *[RX(i, phi) for i in range(n_qubits)]) + ansatz = hea(2, 2, param_prefix="eps") + block = chain(fm, ansatz) + + qc = QuantumCircuit(n_qubits, block) + uni_state = uniform_state(n_qubits) + + obs = [] + n_obs = 3 + for i in range(n_obs): + o = float(i + 1) * ising_hamiltonian(4) + obs.append(o) + + model = QNN(circuit=qc, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + assert model.out_features == n_obs + assert len(model._observable) == n_obs # type: ignore[arg-type] + + batch_size = 10 + values = {"phi": torch.rand(batch_size)} + exp = model(values=values, state=uni_state) + assert not torch.any(torch.isnan(exp)) + assert exp.shape[0] == batch_size and exp.shape[1] == n_obs + + factors = torch.linspace(1, n_obs, n_obs) + for i, e in enumerate(exp): + tmp = torch.div(e, factors * e[0]) + assert torch.allclose(tmp, torch.ones(n_obs)) + + +def test_multiparam_qnn_training() -> None: + backend = BackendName.PYQTORCH + n_qubits = 2 + n_epochs = 5 + + x = Parameter("x", trainable=False) + theta0 = Parameter("theta0", trainable=True) + theta1 = Parameter("theta1", trainable=True) + + ry0 = RY(0, theta0 * x) + ry1 = RY(1, 
theta1 * x) + + fm = chain(ry0, ry1) + + ansatz = hea(n_qubits, depth=2, param_prefix="eps") + + block = chain(fm, ansatz) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qnn = QNN(qc, observable=obs, diff_mode=DiffMode.AD, backend=backend) + + optimizer = torch.optim.Adam(qnn.parameters(), lr=1e-1) + + loss_fn = torch.nn.MSELoss() + for i in range(n_epochs): + optimizer.zero_grad() + exp = qnn(values={"x": 1.0}, state=None) + assert not torch.any(torch.isnan(exp)) + loss = loss_fn(exp, torch.tensor([np.random.rand()], requires_grad=False)) + assert not torch.any(torch.isnan(loss)) + loss.backward() + optimizer.step() + print(f"Epoch {i+1} modeling training - Loss: {loss.item()}") diff --git a/tests/models/test_quantum_model.py b/tests/models/test_quantum_model.py new file mode 100644 index 000000000..29186824e --- /dev/null +++ b/tests/models/test_quantum_model.py @@ -0,0 +1,336 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any + +import numpy as np +import pytest +import strategies as st # type: ignore +import sympy +import torch +from hypothesis import given, settings +from metrics import ATOL_DICT, JS_ACCEPTANCE # type: ignore + +from qadence import BackendName, DiffMode, FeatureParameter, QuantumCircuit, VariationalParameter +from qadence.blocks import AbstractBlock, chain, kron +from qadence.constructors import hea, total_magnetization +from qadence.divergences import js_divergence +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models.quantum_model import QuantumModel +from qadence.operations import MCRX, RX, HamEvo, I, Toffoli, X, Z +from qadence.states import equivalent_state +from qadence.transpile import invert_endianness + +np.random.seed(42) +torch.manual_seed(42) + + +def digital_analog_circ(n_qubits: int = 2, depth: int = 1) -> QuantumCircuit: + t_evo = VariationalParameter("tevo") + g_evo = FeatureParameter("gevo") + + feature_map = HamEvo(g_evo, t_evo, 
qubit_support=tuple(range(n_qubits))) + ansatz = hea(n_qubits=n_qubits, depth=depth) + + return QuantumCircuit(n_qubits, feature_map, ansatz) + + +def test_quantum_model_parameters(parametric_circuit: QuantumCircuit) -> None: + circ = parametric_circuit + assert len(circ.unique_parameters) == 4 + model_psr = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + model_ad = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + assert len([i for i in model_psr.parameters()]) == 4 + assert len([i for i in model_ad.parameters()]) == 4 + embedded_params_psr = model_psr.embedding_fn(model_psr._params, {"x": torch.rand(1)}) + embedded_params_ad = model_ad.embedding_fn(model_ad._params, {"x": torch.rand(1)}) + assert len(embedded_params_ad) == 5 + assert len(embedded_params_psr) == 6 + + +def test_quantum_model_duplicate_expr(duplicate_expression_circuit: QuantumCircuit) -> None: + circ = duplicate_expression_circuit + assert len(circ.unique_parameters) == 4 + model_psr = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + model_ad = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + assert len([i for i in model_psr.parameters()]) == 3 + assert len([i for i in model_ad.parameters()]) == 3 + embedded_params_psr = model_psr.embedding_fn(model_psr._params, {"x": torch.rand(1)}) + embedded_params_ad = model_ad.embedding_fn(model_ad._params, {"x": torch.rand(1)}) + assert len(embedded_params_ad) == 2 + assert len(embedded_params_psr) == 8 + + +def test_quantum_model_with_hevo() -> None: + n_qubits = 4 + batch_size = 10 + + # quantum circuit + circuit = digital_analog_circ(n_qubits=n_qubits, depth=1) + + # random Hamiltonian matrices + h = torch.rand(batch_size, 2**n_qubits, 2**n_qubits) + hams = h + torch.conj(torch.transpose(h, 1, 2)) + values = {"gevo": hams} + + model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + wf = model.run(values) + + assert 
wf.size()[0] == batch_size + + +@pytest.mark.parametrize("n_qubits", [3, 4, 6]) +def test_quantum_model_with_toffoli(n_qubits: int) -> None: + prep_block = kron(X(i) for i in range(n_qubits)) + block = chain(prep_block, Toffoli(tuple(range(n_qubits - 1)), n_qubits - 1)) + circuit = QuantumCircuit(n_qubits, block) + model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + wf = model.run({}) + assert wf[0][2 ** (n_qubits) - 2] == 1 + + +@pytest.mark.parametrize("n_qubits", [3, 4, 6]) +@pytest.mark.parametrize("gate", [MCRX, MCRX, MCRX]) +def test_quantum_model_with_multi_controlled_rotation(gate: Any, n_qubits: int) -> None: + prep_block = kron(X(i) for i in range(n_qubits)) + block = chain(prep_block, gate(tuple(range(n_qubits - 1)), n_qubits - 1, 2 * sympy.pi)) + circuit = QuantumCircuit(n_qubits, block) + model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + wf = model.run({}) + assert wf[0][-1] == -1 + + +@given(st.restricted_circuits()) +@settings(deadline=None) +def test_run_for_different_backends(circuit: QuantumCircuit) -> None: + pyq_model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode="ad") + braket_model = QuantumModel(circuit, backend=BackendName.BRAKET, diff_mode="gpsr") + inputs = rand_featureparameters(circuit, 1) + assert equivalent_state( + pyq_model.run(inputs), braket_model.run(inputs), atol=ATOL_DICT[BackendName.BRAKET] + ) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +def test_sample_for_different_backends(circuit: QuantumCircuit) -> None: + pyq_model = QuantumModel(circuit, backend=BackendName.PYQTORCH, diff_mode="ad") + braket_model = QuantumModel(circuit, backend=BackendName.BRAKET, diff_mode="gpsr") + inputs = rand_featureparameters(circuit, 1) + pyq_samples = pyq_model.sample(inputs, n_shots=100) + braket_samples = braket_model.sample(inputs, n_shots=100) + # Compare bitstring counts in pyq_samples with ones in braket_samples + # avoiding 
non-sampled ones. + for pyq_sample, sample in zip(pyq_samples, braket_samples): + assert js_divergence(pyq_sample, sample) < JS_ACCEPTANCE + ATOL_DICT[BackendName.BRAKET] + + +@given(st.restricted_circuits()) +@settings(deadline=None) +def test_expectation_for_different_backends(circuit: QuantumCircuit) -> None: + observable = [total_magnetization(circuit.n_qubits) for _ in range(np.random.randint(1, 5))] + pyq_model = QuantumModel( + circuit, observable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + braket_model = QuantumModel( + circuit, observable, backend=BackendName.BRAKET, diff_mode=DiffMode.GPSR + ) + inputs = rand_featureparameters(circuit, 1) + pyq_expectation = pyq_model.expectation(inputs) + braket_expectation = braket_model.expectation(inputs) + assert torch.allclose(pyq_expectation, braket_expectation) + + +def test_negative_scale_qm() -> None: + from qadence.blocks import kron + from qadence.circuit import QuantumCircuit + from qadence.models import QuantumModel + from qadence.operations import HamEvo, Z + + hamilt = kron(Z(0), Z(1)) - 10 * Z(0) + circ = QuantumCircuit(2, HamEvo(hamilt, 3)) + model = QuantumModel(circ, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + assert not torch.all(torch.isnan(model.run({}))) + + +def test_save_load_qm_pyq(BasicQuantumModel: QuantumModel, tmp_path: Path) -> None: + pyq_model = BasicQuantumModel + for save_params in [True, False]: + pyq_model.save(tmp_path, save_params=save_params) + pyq_model_loaded = QuantumModel.load(tmp_path, save_params) + pyq_expectation_orig = pyq_model.expectation({})[0] + pyq_expectation_loaded = pyq_model_loaded.expectation({})[0] + ser_qm = QuantumModel._from_dict(BasicQuantumModel._to_dict(save_params), save_params) + ser_exp = ser_qm.expectation({}) + assert torch.allclose(ser_exp, pyq_expectation_orig) + assert torch.allclose(pyq_expectation_orig, pyq_expectation_loaded) + + +def test_hamevo_qm() -> None: + from qadence.circuit import QuantumCircuit + from 
qadence.models import QuantumModel + from qadence.operations import HamEvo, X, Z + from qadence.parameters import VariationalParameter + + obs = [Z(0) for _ in range(np.random.randint(1, 4))] + block = HamEvo(VariationalParameter("theta") * X(1), 1, (0, 1)) + circ = QuantumCircuit(2, block) + model = QuantumModel(circ, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) # type: ignore # noqa + assert not torch.all(torch.isnan(model.expectation({}))) + + +@pytest.mark.parametrize( + "backend", + [ + BackendName.BRAKET, + pytest.param(BackendName.PULSER, marks=[pytest.mark.xfail]), + ], +) +def test_correct_order(backend: BackendName) -> None: + from qadence.circuit import QuantumCircuit + from qadence.models import QuantumModel + from qadence.operations import X, Z + + circ = QuantumCircuit(3, X(0)) + obs = [Z(0) for _ in range(np.random.randint(1, 5))] + n_obs = len(obs) + pyq_model = QuantumModel( + circ, observable=obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD # type: ignore + ) + other_model = QuantumModel( + circ, observable=obs, backend=backend, diff_mode=DiffMode.GPSR # type: ignore + ) + + pyq_exp = pyq_model.expectation({}) + other_exp = other_model.expectation({}) + + assert pyq_exp.size() == other_exp.size() + assert torch.all(torch.isclose(pyq_exp, other_exp, atol=ATOL_DICT[BackendName.BRAKET])) + + +def test_qc_obs_different_support_0() -> None: + model_sup1 = QuantumModel( + QuantumCircuit(1, RX(0, FeatureParameter("x"))), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + model_sup2 = QuantumModel( + QuantumCircuit(2, RX(0, FeatureParameter("x"))), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + query_dict = {"x": torch.tensor([1.57])} + assert torch.isclose(model_sup1.expectation(query_dict), model_sup2.expectation(query_dict)) + + +def test_qc_obs_different_support_1() -> None: + model_obs0_id_0 = QuantumModel( + QuantumCircuit(1, I(0)), + observable=Z(0), + 
backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + model_obs0_rot1 = QuantumModel( + QuantumCircuit(2, RX(1, FeatureParameter("x"))), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + model_obs01_rot1 = QuantumModel( + QuantumCircuit(2, RX(1, FeatureParameter("x"))), + observable=Z(0) + Z(1), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + model_obs1_rot1 = QuantumModel( + QuantumCircuit(2, RX(1, FeatureParameter("x"))), + observable=I(0) + Z(1), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + query_dict = {"x": torch.tensor([2.1])} + + assert torch.isclose(model_obs0_rot1.expectation(query_dict), model_obs0_id_0.expectation({})) + assert torch.isclose( + model_obs01_rot1.expectation(query_dict), model_obs1_rot1.expectation(query_dict) + ) + + +def test_distinct_obs_invert() -> None: + qc = QuantumCircuit(2, chain(RX(0, FeatureParameter("x")), RX(1, FeatureParameter("y")))) + obs = Z(0) + Z(1) + + qc_inv = invert_endianness(qc) + obs_inv = invert_endianness(obs) + + m_pyq = QuantumModel( + qc, + obs, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + m_braket = QuantumModel( + qc, + obs, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + m_pyq_inv = QuantumModel( + qc_inv, + obs_inv, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + m_braket_inv = QuantumModel( + qc_inv, + obs_inv, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + + query_dict = {"x": torch.tensor([2.1]), "y": torch.tensor([2.1])} + + assert torch.isclose(m_pyq.expectation(query_dict), m_braket.expectation(query_dict)) + assert torch.isclose(m_pyq_inv.expectation(query_dict), m_braket_inv.expectation(query_dict)) + + +def test_qm_obs_single_feature_param() -> None: + cost_v = VariationalParameter("x") * Z(0) + model_v = QuantumModel( + QuantumCircuit(1, I(0)), cost_v, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + model_v.reset_vparams([2.7]) + 
model_v_exp = model_v.expectation({}) + cost_f = FeatureParameter("x") * Z(0) + model_f = QuantumModel( + QuantumCircuit(1, I(0)), cost_f, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + assert torch.all(torch.isclose(model_f.expectation({"x": torch.tensor([2.7])}), model_v_exp)) + + +@pytest.mark.parametrize("batch_size", [1, 2]) +@pytest.mark.parametrize( + "observables", + [[FeatureParameter("x") * Z(0)], [FeatureParameter("x") * Z(0) for i in range(2)]], +) +def test_qm_obs_batch_feature_param(batch_size: int, observables: list[AbstractBlock]) -> None: + n_obs = len(observables) + random_batch = torch.rand(batch_size) + batch_query_dict = {"x": random_batch} + expected_output = random_batch.unsqueeze(1).repeat(1, n_obs) + assert expected_output.shape == (batch_size, n_obs) + model_f = QuantumModel( + QuantumCircuit(1, I(0)), observables, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD + ) + model_f_exp = model_f.expectation(batch_query_dict) + + assert torch.all(torch.isclose(model_f_exp, expected_output)) diff --git a/tests/qadence/test_analog.py b/tests/qadence/test_analog.py new file mode 100644 index 000000000..6c8c0eb60 --- /dev/null +++ b/tests/qadence/test_analog.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import pytest +import torch + +from qadence.blocks import CompositeBlock +from qadence.blocks.analog import ( + AnalogBlock, + ConstantAnalogRotation, + QubitSupport, + chain, + kron, +) +from qadence.operations import AnalogRX, X, wait +from qadence.parameters import ParamMap + + +def test_qubit_support() -> None: + assert QubitSupport("global").is_global + assert not QubitSupport(2, 3).is_global + + assert QubitSupport("global") + QubitSupport("global") == QubitSupport("global") + assert QubitSupport("global") + QubitSupport(1, 2) == QubitSupport(0, 1, 2) + assert QubitSupport(2, 3) + QubitSupport(1, 2) == QubitSupport(1, 2, 3) + + # local QubitSupport / mixing QubitSupport & tuple + assert QubitSupport(0, 1) + (2, 4) == 
QubitSupport(0, 1, 2, 4) + assert (0, 4) + QubitSupport(1, 2) == QubitSupport(0, 1, 2, 4) + assert (0, 4) + QubitSupport("global") == QubitSupport(0, 1, 2, 3, 4) + assert QubitSupport("global") + (0, 4) == QubitSupport(0, 1, 2, 3, 4) + assert QubitSupport("global") + () == QubitSupport("global") + assert () + QubitSupport("global") == QubitSupport("global") + assert () + QubitSupport(1, 2) == QubitSupport(1, 2) + assert QubitSupport() == () + + +def test_analog_block() -> None: + b: AnalogBlock + b = wait(duration=3, qubit_support=(1, 2)) + assert b.__repr__() == "WaitBlock(t=3.0, support=(1, 2))" + + c1 = chain( + ConstantAnalogRotation(parameters=ParamMap(duration=2000, omega=1, delta=0, phase=0)), + ConstantAnalogRotation(parameters=ParamMap(duration=3000, omega=1, delta=0, phase=0)), + ) + assert c1.duration == 5000 + assert c1.qubit_support == QubitSupport("global") + + c2 = kron( + AnalogRX(torch.pi, qubit_support=(0, 1)), + wait(duration=1000, qubit_support=(2, 3)), + ) + assert c2.duration == 1000 + assert c2.qubit_support == QubitSupport(0, 1, 2, 3) + + c3 = chain( + kron( + AnalogRX(torch.pi, qubit_support=(0, 1)), + wait(duration=1000, qubit_support=(2, 3)), + ), + kron( + wait(duration=1000, qubit_support=(0, 1)), + AnalogRX(torch.pi, qubit_support=(2, 3)), + ), + ) + assert c3.duration == 2000 + + with pytest.raises(ValueError, match="Only KronBlocks or global blocks can be chain'ed."): + chain(c3, wait(duration=10)) + + with pytest.raises(ValueError, match="Blocks with global support cannot be kron'ed."): + kron(AnalogRX(torch.pi, qubit_support=(0, 1)), wait(duration=1000)) + + with pytest.raises(ValueError, match="Make sure blocks act on distinct qubits!"): + kron( + AnalogRX(torch.pi, qubit_support=(0, 1)), + wait(duration=1000, qubit_support=(1, 2)), + ) + + with pytest.raises(ValueError, match="Kron'ed blocks have to have same duration."): + kron( + AnalogRX(1, qubit_support=(0, 1)), + wait(duration=10, qubit_support=(2, 3)), + ) + + 
+@pytest.mark.xfail +def test_mix_digital_analog() -> None: + from qadence import chain + + b = chain(X(0), AnalogRX(2.0)) + assert b.qubit_support == (0,) + + b = chain(X(0), wait(2.0), X(2)) + assert b.qubit_support == (0, 1, 2) + + b = chain(chain(X(0), wait(2.0, qubit_support="global"), X(2)), X(3)) + assert all([not isinstance(b, CompositeBlock) for b in b.blocks]) + assert b.qubit_support == (0, 1, 2, 3) diff --git a/tests/qadence/test_block_utils.py b/tests/qadence/test_block_utils.py new file mode 100644 index 000000000..710e8e4ff --- /dev/null +++ b/tests/qadence/test_block_utils.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import pytest +from sympy import cos, symbols + +from qadence.blocks import AbstractBlock +from qadence.blocks.utils import expression_to_uuids, uuid_to_block, uuid_to_expression +from qadence.operations import RX, X, chain + +(alpha, beta) = symbols("alpha beta") +gamma = cos(alpha + beta) + +blocks = [ + X(0), + RX(0, 0.5), + RX(0, "theta"), + RX(0, gamma), + chain(RX(0, "theta"), RX(1, "theta")), + chain(RX(0, gamma), RX(1, gamma * gamma)), + 2 * chain(RX(0, "theta"), RX(1, "theta")), +] + + +@pytest.mark.parametrize("block,length", zip(blocks, [0, 1, 1, 1, 2, 2, 3])) +def test_uuid_to_block(block: AbstractBlock, length: int) -> None: + assert len(uuid_to_block(block)) == length + + +@pytest.mark.parametrize("block,length", zip(blocks, [0, 1, 1, 1, 2, 2, 3])) +def test_uuid_to_expression(block: AbstractBlock, length: int) -> None: + assert len(uuid_to_expression(block)) == length + + +@pytest.mark.parametrize("block,length", zip(blocks, [0, 1, 1, 1, 1, 2, 2])) +def test_expression_to_uuids(block: AbstractBlock, length: int) -> None: + print(expression_to_uuids(block)) + assert len(expression_to_uuids(block)) == length diff --git a/tests/qadence/test_blocks.py b/tests/qadence/test_blocks.py new file mode 100644 index 000000000..30c4cecaa --- /dev/null +++ b/tests/qadence/test_blocks.py @@ -0,0 +1,709 @@ +from __future__ 
import annotations + +from uuid import uuid4 + +import numpy as np +import pytest +import sympy + +from qadence.blocks import ( + AddBlock, + ChainBlock, + KronBlock, + ParametricBlock, + ScaleBlock, + add, + block_is_qubit_hamiltonian, + chain, + has_duplicate_vparams, + kron, + put, + tag, +) +from qadence.blocks.abstract import AbstractBlock +from qadence.blocks.utils import ( + expressions, + get_blocks_by_expression, + get_pauli_blocks, + parameters, + primitive_blocks, +) +from qadence.constructors import ( + hea, + ising_hamiltonian, + single_z, + total_magnetization, + zz_hamiltonian, +) +from qadence.operations import CNOT, CRX, CRY, RX, RY, H, I, X, Y, Z, Zero +from qadence.parameters import Parameter, evaluate +from qadence.transpile import invert_endianness, reassign, set_trainable +from qadence.types import TNumber + + +def test_1qubit_blocks() -> None: + for B in [X, Y, Z]: + b1 = B(1) # type: ignore [abstract] + assert b1.n_qubits == 2 # type: ignore [attr-defined] + assert b1.qubit_support == (1,) + assert block_is_qubit_hamiltonian(b1) + + b2 = B(0) # type: ignore [abstract] + assert b2.n_qubits == 1 # type: ignore [attr-defined] + assert b2.qubit_support == (0,) + assert block_is_qubit_hamiltonian(b2) + + +def test_block_is_qubit_ham_constructors() -> None: + n_qubits = 4 + + assert block_is_qubit_hamiltonian(single_z(0)) + assert block_is_qubit_hamiltonian(total_magnetization(n_qubits)) + assert block_is_qubit_hamiltonian(zz_hamiltonian(n_qubits)) + assert block_is_qubit_hamiltonian(ising_hamiltonian(n_qubits)) + + +def test_chain_block_only() -> None: + block_tag = str(uuid4()) + block = chain(X(0), Z(0), Z(4), Y(4)) + tag(block, block_tag) + + assert block.qubit_support == (0, 4) + assert block.tag == block_tag + + pbs = primitive_blocks(block) + assert len(pbs) == 4 + + +def test_kron_block_only() -> None: + block_tag = str(uuid4()) + block = kron(X(0), Y(1), CNOT(2, 3)) + tag(block, block_tag) + + assert block.qubit_support == tuple(range(4)) 
+ assert block.tag == block_tag + + pbs = primitive_blocks(block) + assert len(pbs) == 3 + + with pytest.raises(AssertionError, match="Make sure blocks act on distinct qubits!"): + block = kron(X(0), Y(1), CNOT(1, 2)) + + with pytest.raises(AssertionError, match="Make sure blocks act on distinct qubits!"): + block = kron(X(0), Y(0), CNOT(1, 2)) + + +@pytest.mark.parametrize( + "block", + [ + # FIXME: Defining it in this way will yield a nested AddBlock + # this will break the tests and it is not exactly what we would like + # a possible solution would be to add a "simplify()" method for + # making sure no nested AddBlock are present + # X(0) + X(1) * 2.0 + Y(1) * 3.0 + Z(3), + add(X(0), X(1) * 2.0, Y(1) * 3.0, Z(3)) + ], +) +def test_add_block_only(block: AddBlock) -> None: + block_tag = str(uuid4()) + tag(block, block_tag) + + assert block.qubit_support == (0, 1, 3) + assert block.tag == block_tag + + b2 = block.blocks[1] + assert evaluate(b2.parameters.parameter) == 2.0 # type: ignore [attr-defined] + b3 = block.blocks[2] + assert evaluate(b3.parameters.parameter) == 3.0 # type: ignore [attr-defined] + + +def test_composition() -> None: + block_tag = str(uuid4()) + block = chain( + chain(X(0), X(1)), + chain(X(2), X(3)), + kron(CNOT(0, 1), Y(3), Y(4)), + chain(X(5), Y(5)), + add(Z(1), Z(2), Z(3) * 3.0), + ) + tag(block, block_tag) + + assert block.qubit_support == tuple(range(6)) + assert block.tag == block_tag + + pbs = primitive_blocks(block) + assert len(pbs) == 12 + + # test composition of references to blocks + block1 = X(0) + block2 = Y(1) + block3 = RX(2, Parameter("theta")) + block4 = CNOT(3, 4) + + assert isinstance(block1 + block2, AddBlock) + assert isinstance(block1 * block2 * block3, ChainBlock) + assert isinstance(block1 @ block2 @ block4, KronBlock) + + comp_block = chain( + (block1 @ block2 @ block3), + (block3 * block4), + block1 + block2, + ) + assert isinstance(comp_block[1], ChainBlock) + assert isinstance(comp_block[2][0], X) # type: ignore 
[index] + + all_blocks = primitive_blocks(comp_block) + assert len(all_blocks) == 7 + + +def test_precedence() -> None: + assert X(0) + Z(0) * Z(1) == add(X(0), chain(Z(0), Z(1))) + assert X(0) + Z(0) @ Z(1) == add(X(0), kron(Z(0), Z(1))) + assert X(0) * Z(0) @ Z(1) == kron(chain(X(0), Z(0)), Z(1)) + + assert 2 * Z(0) @ Z(1) == kron(2 * Z(0), Z(1)) + assert 2 * Z(0) * Z(1) == chain(2 * Z(0), Z(1)) + + assert 2 * (Z(0) @ Z(1)) == 2 * kron(Z(0), Z(1)) + assert 2 * (Z(0) * Z(1)) == 2 * chain(Z(0), Z(1)) + + +def test_reassign() -> None: + b = X(0) + c = reassign(b, {0: 2}) + assert c.qubit_support == (2,) + + b = chain(X(1), CNOT(2, 4)) # type: ignore [assignment] + c = reassign(b, {1: 0, 2: 1, 4: 3}) + assert c.qubit_support == (0, 1, 3) + assert c.blocks[0].qubit_support == (0,) # type: ignore + assert c.blocks[1].qubit_support == (1, 3) # type: ignore + + b = kron( # type: ignore [assignment] + chain(RX(3, 2.0), CNOT(3, 5)), CNOT(6, 7), kron(X(9), Z(8)) + ) + c = reassign(b, {i: i - 3 for i in b.qubit_support}) + assert c.qubit_support == (0, 2, 3, 4, 5, 6) + assert c.blocks[0].qubit_support == (0, 2) # type: ignore + assert c.blocks[0].blocks[1].qubit_support == (0, 2) # type: ignore + assert c.blocks[1].qubit_support == (3, 4) # type: ignore + + +def test_put_block() -> None: + b = chain(X(2), X(3), kron(X(3), X(4))) + + b = put(b, 3, 5) # type: ignore + assert b.qubit_support == (3, 4, 5) + assert b.blocks[0].qubit_support == (0, 1, 2) + assert b.n_qubits == 3 + + # with pytest.raises(AssertionError, match="You are trying to put a block with 3"): + # b = chain(X(2), X(3), kron(X(3), X(4))) # type: ignore + # b = put(b, 3, 4) # type: ignore [assignment] + + +# TODO: Update to the new interface +def test_repr() -> None: + assert X(1).__repr__() == "X(1)" + # assert RX(2, Parameter("theta")).__repr__() == "RX(2) [params: (theta(trainable=True),)]" + assert CNOT(3, 4).__repr__() == "CNOT(3,4)" + + +@pytest.mark.xfail +def test_ascii() -> None: + # FIXME: test ascii 
printing + from rich.console import Console + + console = Console() + + circ = chain(kron(X(0), X(2)), chain(X(3), X(2))) + circ.tag = "chain chain" + # print(circ) + + console.size = (40, 10) # type: ignore + # print(circ.__layout__().tree) + + assert False + + +def test_set_trainable() -> None: + block1 = X(0) + block2 = Y(1) + theta = Parameter("theta") + scale = Parameter("scale") + block3 = RX(2, theta) + block4 = CNOT(3, 4) + + comp_block = chain( + (block1 @ block2 @ block3), + scale * (block3 * block4), + block1 + block2, + CRY(2, 3, "rot_theta"), + ) + + params = parameters(comp_block) + assert len(params) == 3 + for p in params: + assert p.trainable + + non_trainable_b = set_trainable(comp_block, value=False) # type: ignore [assignment] + assert isinstance(non_trainable_b, ChainBlock) + assert len(non_trainable_b) == 4 + + for p in params: + assert not p.trainable + + +@pytest.mark.parametrize( + "parameter", + [3 * sympy.acos(Parameter("x", trainable=False)), Parameter("x", trainable=True), 1.0, "x"], +) +def test_parameterised_gates_syntax(parameter: Parameter | TNumber | sympy.Expr | str) -> None: + rx = RX(0, parameter) + + for p in rx.parameters.expressions(): + if p.is_number: + assert evaluate(p) == parameter + else: + assert isinstance(p, (sympy.Expr, Parameter)) # type: ignore + + if isinstance(parameter, str): + assert p.name == parameter + assert p.value > 0 # type: ignore [operator] + assert p.trainable + + if isinstance(parameter, Parameter): + assert p == parameter # type: ignore + + if isinstance(parameter, sympy.Expr): + assert p.free_symbols == parameter.free_symbols + assert [s.trainable for s in p.free_symbols] == [ + s.trainable for s in parameter.free_symbols + ] + + +def test_tag_blocks() -> None: + block1 = X(0) + block2 = Y(1) + block3 = RX(2, Parameter("theta")) + block4 = CNOT(3, 4) + + comp_block = chain( + tag(block1 @ block2 @ block3, "Feature Map"), + tag(block3 * block4, "Variational Ansatz"), + block1 + block2, + ) + tags = 
[block.tag for block in comp_block.blocks] + assert "Feature Map" in tags + + +def test_reverse() -> None: + block1 = X(0) + block2 = Y(1) + block3 = RX(2, Parameter("theta")) + block4 = CNOT(3, 4) + + inv_block1 = X(4) + inv_block2 = Y(3) + inv_block3 = RX(2, Parameter("theta")) + inv_block4 = CNOT(1, 0) + + comp_block = chain(block1, block2, block3, block4) + inverted = invert_endianness(comp_block, 5, False) + inv_block = chain(inv_block1, inv_block2, inv_block3, inv_block4) + for b1, b2 in zip(inverted.blocks, inv_block.blocks): # type: ignore [attr-defined] + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + + +@pytest.mark.parametrize( + "in_place", + [False, pytest.param(True, marks=pytest.mark.xfail(reason="Treacherous syntax needs fixing"))], +) +def test_duplicate_manipulation(in_place: bool) -> None: + # if done "in place" (without deepcopying each object) + # it fails due two block1 appearing twice and the inversion + # will happen twice + block1 = X(0) + block2 = Y(1) + block4 = CNOT(3, 4) + inv_block1 = X(4) + inv_block2 = Y(3) + inv_block4 = CNOT(1, 0) + + comp_block = chain(block1, block2, block1, block4) + inverted = invert_endianness(comp_block, n_qubits=5, in_place=in_place) + + inv_block = chain(inv_block1, inv_block2, inv_block1, inv_block4) + for b1, b2 in zip(inverted.blocks, inv_block.blocks): # type: ignore [attr-defined] + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + + +def test_control_gate_manipulation() -> None: + cry = CRY(0, 1, "theta") + inv_block = invert_endianness(cry) + cry_double_inverted = invert_endianness(inv_block) + assert cry_double_inverted.qubit_support == cry.qubit_support + + +def test_reassign_cnotchain() -> None: + myqubitmap = {1: 0, 0: 1, 3: 2, 2: 3} + orig_cnotchain = chain(CNOT(0, 1), CNOT(2, 3)) + target_cnotchain = chain(CNOT(1, 0), CNOT(3, 2)) + new_cnotchain = reassign(orig_cnotchain, myqubitmap) + for b1, b2 in zip(target_cnotchain.blocks, 
new_cnotchain.blocks): # type: ignore + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + + +def test_reassign_parametrized_controlgate_chain() -> None: + myqubitmap = {1: 0, 0: 1, 3: 2, 2: 3} + orig_chain = chain(CRY(0, 1, "theta_0"), CRX(2, 3, "theta_1")) + target_chain = chain(CRY(1, 0, "theta_0"), CRX(3, 2, "theta_1")) + new_chain = reassign(orig_chain, myqubitmap) + for b1, b2 in zip(target_chain.blocks, new_chain.blocks): # type: ignore + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + assert b1.parameters.parameter.name == b2.parameters.parameter.name # type: ignore [attr-defined] # noqa: E501 + assert b1.blocks[0].qubit_support[0] == b2.blocks[0].qubit_support[0] # type: ignore [attr-defined] # noqa: E501 + + +def test_reassign_identity() -> None: + identitymap = {0: 0, 1: 1, 2: 2, 3: 3} + orig_cnotchain = chain(CNOT(0, 1), CNOT(2, 3)) + new_cnotchain = reassign(orig_cnotchain, identitymap) + for b1, b2 in zip(orig_cnotchain.blocks, new_cnotchain.blocks): # type: ignore + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + + +def test_lookup_block_by_param() -> None: + x = Parameter("x", trainable=False) + block1 = RY(0, 3 * x) + block2 = RX(1, "theta1") + block3 = RX(2, "theta2") + block4 = RX(3, "theta3") + block5 = RY(0, np.pi) + block6 = RX(1, np.pi) + block7 = CNOT(2, 3) + + comp_block = chain( + *[ + kron(*[X(0), X(1), Z(2), Z(3)]), + kron(*[block1, block2, block3, block4]), + kron(*[block5, block6, block7]), + ] + ) + + exprs = expressions(comp_block) + assert exprs + for expr in exprs: + bs = get_blocks_by_expression(comp_block, expr) + for b in bs: + assert isinstance(b, ParametricBlock) + + +def test_addition_multiplication() -> None: + b = X(0) * X(1) + assert isinstance(b, ChainBlock) + + b = X(0) * 2.0 + assert evaluate(b.parameters.parameter) == 2.0 # type: ignore [attr-defined] + + b = 2.0 * X(0) + assert evaluate(b.parameters.parameter) == 2.0 # type: ignore 
[attr-defined] + + b = 2.0 * (2.0 * X(0)) + assert isinstance(b.block, X) # type: ignore[attr-defined] + assert evaluate(b.parameters.parameter) == 4.0 # type: ignore [attr-defined] + + phi = Parameter("phi") + b = 2 * (phi * X(0)) + assert b.parameters.parameter == 2 * phi # type: ignore[attr-defined] + + b = X(0) + X(1) + assert isinstance(b, AddBlock) + + b = X(0) - 2.3 * X(1) + assert isinstance(b, AddBlock) + assert evaluate(b.blocks[1].parameters.parameter) == -2.3 # type: ignore[attr-defined] + + b = -X(1) + assert isinstance(b, ScaleBlock) + assert evaluate(b.parameters.parameter) == -1.0 + + b = +X(1) + assert isinstance(b, X) + + b = (I(0) - Z(0)) / 2 + assert isinstance(b, ScaleBlock) + assert evaluate(b.parameters.parameter) == 0.5 + + with pytest.raises(TypeError, match="Can only add a block to another block."): + X(0) + 1.0 # type: ignore [operator] + + with pytest.raises(TypeError, match="Cannot divide block by another block."): + X(0) / X(1) + + b = X(0) @ X(1) + assert isinstance(b, KronBlock) + + block = X(0) ^ 3 + assert isinstance(block, KronBlock) + assert all(isinstance(b, X) for b in block.blocks) + + block = RX(2, "phi") ^ 2 + assert isinstance(block, KronBlock) + assert all(isinstance(b, RX) for b in block.blocks) + assert block.qubit_support == (2, 3) + + +def test_inplace_operations() -> None: + a = Zero() + a += add(X(0), Y(1), Z(2)) # type: ignore[misc] + assert isinstance(a, AddBlock) + assert len(a.blocks) == 3 + + a = add(X(0), Y(1)) + a += Z(2) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 3 + + a = X(0) + a += add(Y(1), Z(2)) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 3 + + a = X(0) + a += Y(1) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 2 + + a = Zero() + a -= add(X(0), Y(1), Z(2)) + assert isinstance(a, ScaleBlock) + assert evaluate(a.parameters.parameter) == -1.0 + + a = add(X(0), Y(1)) + a -= Z(2) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 3 + + a = X(0) + a -= 
add(Y(1), Z(2)) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 3 + + a = X(0) + a -= Y(1) + assert isinstance(a, AddBlock) + assert len(a.blocks) == 2 + + with pytest.raises(AssertionError, match="Make sure blocks act on distinct qubits!"): + a = I(0) + a @= X(0) + + a = kron(X(0), Y(1)) + a @= Z(2) + assert isinstance(a, KronBlock) + assert len(a.blocks) == 3 + + a = X(0) + a @= kron(X(1), X(2)) + assert isinstance(a, KronBlock) + assert len(a.blocks) == 3 + + a = I(0) + a *= X(0) + assert isinstance(a, X) + + a = sum(X(j) for j in range(3)) + assert isinstance(a, AddBlock) + + a = X(0) + a /= 4 + assert isinstance(a, ScaleBlock) + assert evaluate(a.parameters.parameter) == 1 / 4 + + a = Zero() + a **= 3 + assert isinstance(a, Zero) + + a = X(0) + a **= 3 + assert isinstance(a, ChainBlock) + assert len(a.blocks) == 3 + assert all(isinstance(block, X) for block in a.blocks) + + a = 2 * X(0) + a **= 3 + assert isinstance(a, ScaleBlock) + assert evaluate(a.parameters.parameter) == 8 + assert isinstance(a.block, ChainBlock) + assert len(a.block.blocks) == 3 + + +def test_duplicate_parameters() -> None: + n_qubits = 4 + depth = 2 + + hea1 = hea(n_qubits=n_qubits, depth=depth) + hea2 = hea(n_qubits=n_qubits, depth=depth) + + block1 = chain(hea1, hea2) + assert has_duplicate_vparams(block1) + + hea1 = hea(n_qubits=n_qubits, depth=depth, param_prefix="0") + hea2 = hea(n_qubits=n_qubits, depth=depth, param_prefix="1") + + block2 = chain(hea1, hea2) + assert not has_duplicate_vparams(block2) + + +def test_pauli_blocks() -> None: + b1 = 0.1 * kron(X(0), X(1)) + 0.2 * kron(Z(0), Z(1)) + 0.3 * kron(Y(2), Y(3)) + b2 = chain(Z(0) * Z(1), CNOT(0, 1)) + CNOT(2, 3) + + paulis = get_pauli_blocks(b1) + primitives = primitive_blocks(b1) + assert len(paulis) == len(primitives) + + paulis = get_pauli_blocks(b2) + primitives = primitive_blocks(b2) + assert len(paulis) != len(primitives) + + +def test_block_from_dict_primitive() -> None: + # Primitive + myx = X(0) + block_dict 
= myx._to_dict() + myx_copy = X._from_dict(block_dict) + assert myx == myx_copy + + +def test_block_from_dict_parametric() -> None: + # Parametric + myrx = RX(0, "theta") + block_dict = myrx._to_dict() + myrx_copy = RX._from_dict(block_dict) + assert myrx == myrx_copy + + +def test_block_from_dict_chain() -> None: + # Composite + from qadence.blocks import ChainBlock + + mychain = chain(RX(0, "theta"), RY(1, "epsilon")) + block_dict = mychain._to_dict() + mychain_copy = ChainBlock._from_dict(block_dict) + assert mychain == mychain_copy + + +@pytest.mark.parametrize("n_qubits", [2, 4, 6, 8]) +def test_block_from_dict_hea_qubits(n_qubits: int) -> None: + # hea + from qadence.blocks import ChainBlock + + depth = 2 + myhea = hea(n_qubits, depth) + block_dict = myhea._to_dict() + myhea_copy = ChainBlock._from_dict(block_dict) + assert myhea == myhea_copy + + +@pytest.mark.parametrize("depth", [2, 4, 6, 8]) +def test_block_from_dict_hea_depth(depth: int) -> None: + # hea + from qadence.blocks import ChainBlock + + n_qubits = 4 + myhea = hea(n_qubits, depth) + block_dict = myhea._to_dict() + myhea_copy = ChainBlock._from_dict(block_dict) + assert myhea == myhea_copy + + +def test_comp_contains_operator() -> None: + ry = RY(1, "epsilon") + mychain = chain(RX(0, "theta"), ry) + assert ry in mychain + + +def test_eq_kron_order() -> None: + block0 = kron(Z(0), Z(1)) + block1 = kron(Z(1), Z(0)) + assert block0 == block1 + + +def test_eq_scale_kron() -> None: + block0 = 0.9 * kron(Z(0), Z(1)) + block1 = 0.9 * kron(Z(1), Z(0)) + assert block0 == block1 + + +def test_eq_scale_add_kron() -> None: + block0 = kron(Z(0), Z(1)) + kron(X(0), Y(1)) + block1 = kron(Z(1), Z(0)) + kron(Y(1), X(0)) + assert block0 == block1 + + +def test_parametric_scale_eq() -> None: + from qadence.parameters import VariationalParameter + + p1 = VariationalParameter("p1") + p2 = VariationalParameter("p2") + rx0 = RX(1, "a") + rx1 = RY(2, "b") + b0 = ( + p1 * kron(Z(0), Z(1), X(2), X(3)) + + p2 * 
chain(kron(X(0), X(3)), kron(rx0, rx1)) + + 0.5 * X(0) + ) + b1 = ( + p1 * kron(Z(0), Z(1), X(2), X(3)) + + p2 * chain(kron(X(0), X(3)), kron(rx0, rx1)) + + 0.5 * X(0) + ) + + assert b0 == b1 + + +def test_kron_eq() -> None: + block1 = kron(X(0), Z(1), Y(2)) + block2 = kron(X(1), Z(2), Y(3)) + block3 = kron(X(0), Z(2), Y(3)) + + assert not block1 == block2 and not block1 == block3 + + +def test_kron_chain_eq() -> None: + assert kron(X(0), X(1)) != chain(X(0), X(1)) + assert kron(Z(0), Z(1)) != chain(Z(0), Z(1)) + + +@pytest.mark.parametrize( + "block", + [ + chain(I(n) for n in range(5)), + kron(I(n) for n in range(5)), + ], +) +def test_identity_predicate(block: AbstractBlock) -> None: + assert block.is_identity + + +def test_composite_containment() -> None: + kron_block = kron(X(0), Y(1), Z(2)) + assert X(0) in kron_block + assert Z in kron_block + add_block = add(X(0), Y(1), Z(2)) + assert X(0) in add_block + assert Z in add_block + chain_block = chain(X(0), Y(0), Z(0)) + assert X(0) in chain_block + assert Z in chain_block + # Test case for nested blocks. 
+ nested_block = add(kron(X(0), Y(1)) + kron(Z(0), H(1))) + assert X(0) in nested_block + assert Z in nested_block diff --git a/tests/qadence/test_circuit.py b/tests/qadence/test_circuit.py new file mode 100644 index 000000000..de0245e6b --- /dev/null +++ b/tests/qadence/test_circuit.py @@ -0,0 +1,147 @@ +from __future__ import annotations + +import json +import os +from pathlib import Path + +import pytest + +from qadence.blocks import chain, kron, primitive_blocks, tag +from qadence.circuit import QuantumCircuit +from qadence.constructors import hea +from qadence.draw import savefig +from qadence.operations import CNOT, RX, X, Y +from qadence.parameters import FeatureParameter, Parameter +from qadence.transpile import invert_endianness + + +def build_circuit(n_qubits: int, depth: int = 2) -> QuantumCircuit: + param = FeatureParameter("x") + block = kron(*[RX(qubit, (qubit + 1) * param) for qubit in range(n_qubits)]) + fm = tag(block, tag="FM") + + # this tags it as "HEA" + ansatz = hea(n_qubits, depth=depth) + + return QuantumCircuit(n_qubits, fm, ansatz) + + +def test_get_block_by_tag() -> None: + # standard circuit + circuit = build_circuit(n_qubits=4) + + ansatz = circuit.get_blocks_by_tag("HEA") + assert len(ansatz) == 1 + assert ansatz[0].tag == "HEA" + fm = circuit.get_blocks_by_tag("FM") + assert len(fm) == 1 + assert fm[0].tag == "FM" + + # multiple blocks + block = chain(RX(0, 0.5), kron(RX(1, 0.5), RX(2, 0.5)), hea(n_qubits=4), hea(n_qubits=4)) + circuit = QuantumCircuit(4, block) + + ansatz = circuit.get_blocks_by_tag("HEA") + assert len(ansatz) == 2 + + +def test_reverse() -> None: + block1 = X(0) + block2 = Y(1) + block3 = RX(2, Parameter("theta")) + block4 = CNOT(3, 4) + + comp_block = chain( + (block1 @ block2 @ block3), + (block3 * block4), + block1 + block2, + ) + + # comp_block = chain(block1, block2, block4) + orig = chain(*primitive_blocks(comp_block)) + circ = QuantumCircuit(5, comp_block) + circ2 = invert_endianness(circ) + new = 
primitive_blocks(circ2.block) + orig = invert_endianness(orig, 5, False) # type: ignore [assignment] + for b1, b2 in zip(orig, new): # type: ignore + assert b1.name == b2.name + assert b1.qubit_support == b2.qubit_support + + +def test_circuit_dict() -> None: + circ = build_circuit(4) + qc_dict = circ._to_dict() + qc_copy = QuantumCircuit._from_dict(qc_dict) + + assert circ == qc_copy + + +def test_circuit_from_dumps() -> None: + circ = build_circuit(4) + qc_dumped = circ._to_json() + loadedqcdict = json.loads(qc_dumped) + loaded_qc = QuantumCircuit._from_dict(loadedqcdict) + + assert circ == loaded_qc + + +def test_loaded_circuit_from_json() -> None: + circ = build_circuit(4) + from pathlib import Path + + file_name = Path("tmp.json") + circ._to_json(file_name) + qc_copy = QuantumCircuit._from_json(file_name) + os.remove(file_name) + + assert circ == qc_copy + + +@pytest.mark.parametrize( + "n_qubits", + [2, 4, 6, 8], +) +def test_underlying_hea(n_qubits: int) -> None: + from qadence.blocks import ChainBlock + + param = FeatureParameter("x") + block = kron(*[RX(qubit, (qubit + 1) * param) for qubit in range(n_qubits)]) + fm = tag(block, tag="FM") + + # this tags it as "HEA" + ansatz = hea(n_qubits=n_qubits, depth=2) + + mychain = chain(fm, ansatz) + d = mychain._to_dict() + mychain1 = ChainBlock._from_dict(d) + + assert mychain == mychain1 + + +def test_circ_operator() -> None: + x = Parameter("x", trainable=True, value=1.0) + myrx = RX(0, x) + qc = QuantumCircuit(1, myrx) + assert x in qc + assert myrx in qc + assert x in myrx + + +def test_hea_operators() -> None: + n_qubits = 4 + param = FeatureParameter("x") + block = kron(*[RX(qubit, (qubit + 1) * param) for qubit in range(n_qubits)]) + fm = tag(block, tag="FM") + # this tags it as "HEA" + ansatz = hea(n_qubits=n_qubits, depth=2) + mychain = chain(fm, ansatz) + assert param in mychain + + +@pytest.mark.parametrize("fname", ["circuit.png", "circuit.pdf", "circuit.png"]) +@pytest.mark.skip +def 
test_savefig_circuit(fname: str) -> None: + circuit = build_circuit(4, depth=2) + savefig(circuit, fname) + assert os.path.isfile(fname) + Path.unlink(Path(fname)) diff --git a/tests/qadence/test_dagger.py b/tests/qadence/test_dagger.py new file mode 100644 index 000000000..1a63b8761 --- /dev/null +++ b/tests/qadence/test_dagger.py @@ -0,0 +1,149 @@ +from __future__ import annotations + +from typing import Tuple + +import pytest +from sympy import acos + +from qadence import Parameter +from qadence.blocks import AbstractBlock, chain, kron +from qadence.blocks.utils import assert_same_block, put +from qadence.constructors import hea +from qadence.operations import ( + CNOT, + CPHASE, + CRX, + CRY, + CRZ, + CZ, + RX, + RY, + RZ, + SWAP, + # AnEntanglement, + # AnFreeEvo, + # AnRX, + # AnRY, + H, + HamEvo, + I, + S, + SDagger, + T, + TDagger, + X, + Y, + Z, + Zero, +) + + +@pytest.mark.parametrize( + "block", + [ + X(0), + Y(0), + Z(0), + S(0), + SDagger(0), + T(0), + TDagger(0), + CNOT(0, 1), + CZ(0, 1), + SWAP(0, 1), + H(0), + I(0), + Zero(), + ], +) +def test_all_fixed_primitive_blocks(block: AbstractBlock) -> None: + # testing all fixed primitive blocks, for which U=U'' + assert_same_block(block, block.dagger().dagger()) + + +@pytest.mark.parametrize( + "block", + [ + X(0), + Y(0), + Z(0), + I(0), + H(0), + CNOT(0, 1), + CZ(0, 1), + SWAP(0, 1), + Zero(), + ], +) +def test_self_adjoint_blocks(block: AbstractBlock) -> None: + # some cases are self-adjoint, which means the property U=U' + assert_same_block(block, block.dagger()) + + +def test_t_and_s_gates() -> None: + # testing those cases which are not self-adjoint, and require special backend implementations + assert_same_block(S(0), SDagger(0).dagger()) + assert_same_block(SDagger(0), S(0).dagger()) + assert_same_block(T(0), TDagger(0).dagger()) + assert_same_block(TDagger(0), T(0).dagger()) + + +def test_scale_dagger() -> None: + # testing scale blocks with numerical or parametric values + for scale in [2, 2.1, 
Parameter("x"), acos(Parameter("x"))]: + assert_same_block(scale * X(0), (-scale * X(0)).dagger()) + assert_same_block(scale * X(0), (scale * X(0)).dagger().dagger()) + + +@pytest.mark.parametrize( + "block", + [ + (1, RX), + (1, RY), + (1, RZ), + (2, CRX), + (2, CRY), + (2, CRZ), + (2, CPHASE), + # (0, AnEntanglement), + # (0, AnFreeEvo), + # (0, AnRX), + # (0, AnRY), + (-1, HamEvo), + ], +) +def test_all_self_adjoint_blocks(block: Tuple[int, AbstractBlock]) -> None: + n_qubits, block_class = block + for p_type in [1.42, "x", Parameter("x"), acos(Parameter("x"))]: + if n_qubits >= 0: + block = block_class(*tuple(range(n_qubits)), p_type) # type: ignore[operator] + else: + generator = X(0) + 3 * Y(1) * Z(1) + 2 * X(1) + block = HamEvo(generator, p_type) # type: ignore[assignment] + assert_same_block(block, block.dagger().dagger()) # type: ignore[arg-type,attr-defined] + if not isinstance(p_type, str): + block_dagger = ( + block_class(*tuple(range(n_qubits)), -p_type) # type: ignore[operator] + if n_qubits >= 0 + else HamEvo(generator, -p_type) + ) + assert_same_block(block, block_dagger.dagger()) # type: ignore[arg-type,attr-defined] + assert_same_block(block.dagger(), block_dagger) # type: ignore[arg-type,attr-defined] + + +@pytest.mark.parametrize( + "block", + [ + chain(X(0), Y(0), Z(0), Y(0)), + kron(X(1), Y(3), Z(4), Y(2)), + chain(kron(X(0), Y(1)), kron(Z(3), H(1))), + chain(CNOT(0, 1), CNOT(1, 0)), + X(0) + Y(1), + X(0) + 3.0 * Y(1), + hea(3, 2), + put(X(0), 1, 3), + # TODO add QFT here + ], +) +def test_composite_blocks_no_fails(block: AbstractBlock) -> None: + assert isinstance(block.dagger(), AbstractBlock) diff --git a/tests/qadence/test_decompose.py b/tests/qadence/test_decompose.py new file mode 100644 index 000000000..6b60d703b --- /dev/null +++ b/tests/qadence/test_decompose.py @@ -0,0 +1,291 @@ +from __future__ import annotations + +import os +from json import loads +from typing import no_type_check + +import numpy as np +import pytest +import torch 
+from metrics import ATOL_32, DIGITAL_DECOMP_ACCEPTANCE_HIGH, DIGITAL_DECOMP_ACCEPTANCE_LOW + +from qadence import BackendName, DiffMode +from qadence.blocks import ( + AbstractBlock, + add, + chain, + get_pauli_blocks, + kron, + primitive_blocks, +) +from qadence.circuit import QuantumCircuit +from qadence.constructors import ( + ising_hamiltonian, + total_magnetization, + zz_hamiltonian, +) +from qadence.models import QuantumModel +from qadence.operations import ( + CNOT, + RX, + RZ, + H, + HamEvo, + X, + Y, + Z, +) +from qadence.parameters import Parameter, VariationalParameter, evaluate +from qadence.serialization import deserialize +from qadence.types import LTSOrder + + +@no_type_check +def test_hamevo_digital_decompositon() -> None: + parameter = Parameter("p", trainable=True) + + # simple Pauli + generator = Z(0) + expected = chain(RZ(0, parameter=parameter)) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + for exp, blk in zip(primitive_blocks(expected), primitive_blocks(tevo_digital)): + assert type(exp) == type(blk) + assert exp.qubit_support == blk.qubit_support + + # commuting + generator = add(kron(Y(0), Y(1))) + expected = chain( + RX(0, parameter=1.5708), + RX(1, parameter=1.5708), + CNOT(0, 1), + RZ(1, parameter=parameter), + CNOT(0, 1), + RX(0, parameter=-1.5708), + RX(1, parameter=-1.5708), + ) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + for exp, blk in zip(primitive_blocks(expected), primitive_blocks(tevo_digital)): + assert type(exp) == type(blk) + if isinstance(blk, RX): + exp_p, blk_p = exp.parameters.parameter, blk.parameters.parameter + assert np.isclose(evaluate(exp_p), evaluate(blk_p)) + + # Trotter + generator = kron(Z(0), Z(1), Z(2)) + kron(X(0), Y(1), Z(2)) + expected = chain( + CNOT(0, 1), + CNOT(1, 2), + RZ(2, parameter=parameter), + CNOT(1, 2), + CNOT(0, 1), + H(0), + RX(1, parameter=-1.5708), + CNOT(0, 1), + CNOT(1, 2), + RZ(2, 
parameter=parameter), + CNOT(1, 2), + CNOT(0, 1), + H(0), + RX(1, parameter=-1.5708), + ) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + assert [type(b) for b in primitive_blocks(expected)] == [type(b) for b in primitive_blocks(tevo_digital)] + + +@no_type_check +def test_hamevo_digital_decompositon_multiparam_timeevo() -> None: + p0 = Parameter("p0", trainable=True) + p1 = Parameter("p1", trainable=True) + + parameter = p0 + p1 + + # simple Pauli + generator = Z(0) + expected = chain(RZ(0, parameter=parameter)) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + for exp, blk in zip(primitive_blocks(expected), primitive_blocks(tevo_digital)): + assert type(exp) == type(blk) + assert exp.qubit_support == blk.qubit_support + + # commuting + generator = add(kron(Y(0), Y(1))) + expected = chain( + RX(0, parameter=1.5708), + RX(1, parameter=1.5708), + CNOT(0, 1), + RZ(1, parameter=parameter), + CNOT(0, 1), + RX(0, parameter=-1.5708), + RX(1, parameter=-1.5708), + ) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + for exp, blk in zip(primitive_blocks(expected), primitive_blocks(tevo_digital)): + assert type(exp) == type(blk) + if isinstance(blk, RX): + exp_p, blk_p = exp.parameters.parameter, blk.parameters.parameter + assert np.isclose(evaluate(exp_p), evaluate(blk_p)) + + # Trotter + generator = kron(Z(0), Z(1), Z(2)) + kron(X(0), Y(1), Z(2)) + expected = chain( + CNOT(0, 1), + CNOT(1, 2), + RZ(2, parameter=parameter), + CNOT(1, 2), + CNOT(0, 1), + H(0), + RX(1, parameter=-1.5708), + CNOT(0, 1), + CNOT(1, 2), + RZ(2, parameter=parameter), + CNOT(1, 2), + CNOT(0, 1), + H(0), + RX(1, parameter=-1.5708), + ) + tevo_digital = HamEvo(generator, parameter).digital_decomposition(approximation=LTSOrder.BASIC) + assert [type(b) for b in primitive_blocks(expected)] == [type(b) for b in primitive_blocks(tevo_digital)] + + +@pytest.mark.parametrize( + "generator", + [ + X(0), + Y(0), + 
Z(0), + kron(X(0), X(1)), + kron(Z(0), Z(1), Z(2)) + kron(X(0), Y(1), Z(2)), + add(Z(0), Z(1), Z(2)), + 0.1 * kron(X(0), X(1)) + 0.2 * kron(Z(0), Z(1)) + 0.3 * kron(X(2), X(3)), + 0.5 * add(Z(0), Z(1), kron(X(2), X(3))) + 0.2 * add(X(2), X(3)), + add(0.1 * kron(Z(0), Z(1)), 0.2 * kron(X(2), X(3))), + total_magnetization(4), + 0.1 * kron(Z(0), Z(1)) + 2 * CNOT(0, 1), + ], +) +def test_check_with_hamevo_exact_fixed_generator(generator: AbstractBlock) -> None: + paulis = get_pauli_blocks(generator) + primitives = primitive_blocks(generator) + is_pauli = len(paulis) == len(primitives) + + n_qubits = generator.n_qubits + + tevo = 2.0 + b1 = HamEvo(generator, parameter=tevo) + if is_pauli: + b2 = HamEvo(generator, parameter=tevo).digital_decomposition() + else: + with pytest.raises(NotImplementedError): + _ = HamEvo(generator, parameter=tevo).digital_decomposition() + return + + c1 = QuantumCircuit(n_qubits, b1) + c2 = QuantumCircuit(n_qubits, b2) + + model1 = QuantumModel(c1, backend=BackendName.PYQTORCH) + model2 = QuantumModel(c2, backend=BackendName.PYQTORCH) + + wf1 = model1.run({}) + wf2 = model2.run({}) + + assert torch.allclose(wf1, wf2, atol=1.0e-7) + + +@pytest.mark.parametrize( + "generator", + [ + kron(X(0), X(1), X(2)), + chain(chain(chain(chain(X(0))))), + kron(kron(X(0), kron(X(1))), kron(X(2))), + chain(kron(X(0), kron(X(1))), kron(X(1))), + 2 * kron(kron(X(0), kron(X(1))), kron(X(2))), + ], +) +def test_composite_hamevo_edge_cases(generator: AbstractBlock) -> None: + n_qubits = generator.n_qubits + + tevo = 0.005 + b1 = HamEvo(generator, parameter=tevo) + b2 = HamEvo(generator, parameter=tevo).digital_decomposition() + + c1 = QuantumCircuit(n_qubits, b1) + c2 = QuantumCircuit(n_qubits, b2) + + model1 = QuantumModel(c1, backend=BackendName.PYQTORCH) + model2 = QuantumModel(c2, backend=BackendName.PYQTORCH) + + wf1 = model1.run({}) + wf2 = model2.run({}) + + assert torch.allclose(wf1, wf2, atol=1.0e-2) + + +def open_chem_obs() -> AbstractBlock: + """A tiny 
helper function""" + directory = os.getcwd() + with open(os.path.join(directory, "tests/test_files/h4.json"), "r") as js: + obs = loads(js.read()) + return deserialize(obs) # type: ignore[return-value] + + +@pytest.mark.parametrize( + "generator", + [ + kron(X(0), X(1), X(2), X(3)) + kron(Z(0), Z(1), Y(2), X(3)), + ising_hamiltonian(2), + ising_hamiltonian(4), + zz_hamiltonian(2), + zz_hamiltonian(4), + open_chem_obs(), # H4 + ], +) +def test_check_with_hamevo_approximate(generator: AbstractBlock) -> None: + def _run( + generator: AbstractBlock, tevo: float, approximation: LTSOrder = LTSOrder.BASIC + ) -> tuple[torch.Tensor, torch.Tensor]: + b1 = HamEvo(generator, parameter=tevo) + b2 = HamEvo(generator, parameter=tevo).digital_decomposition(approximation=approximation) + + c1 = QuantumCircuit(generator.n_qubits, b1) + c2 = QuantumCircuit(generator.n_qubits, b2) + + model1 = QuantumModel(c1, backend=BackendName.PYQTORCH) + model2 = QuantumModel(c2, backend=BackendName.PYQTORCH) + + wf1 = model1.run({}) + wf2 = model2.run({}) + + return wf1, wf2 + + # short time evolution still works + tevo_short = 0.005 + wf1, wf2 = _run(generator, tevo_short) + assert torch.allclose(wf1, wf2, atol=DIGITAL_DECOMP_ACCEPTANCE_HIGH) + + # short time evolution better approximation + tevo_short = 0.005 + wf1, wf2 = _run(generator, tevo_short, approximation=LTSOrder.ST4) + assert torch.allclose(wf1, wf2, atol=DIGITAL_DECOMP_ACCEPTANCE_LOW) + + +def test_check_with_hamevo_parametric_scaleblocks() -> None: + theta1 = VariationalParameter("theta1") + theta2 = VariationalParameter("theta2") + + generator = theta1 * kron(X(0), X(1)) + theta1 * theta2 * kron(Z(2), Z(3)) + n_qubits = generator.n_qubits + + tevo = 2.0 + b1 = HamEvo(generator, parameter=tevo) + b2 = HamEvo(generator, parameter=tevo).digital_decomposition() + + c1 = QuantumCircuit(n_qubits, b1) + c2 = QuantumCircuit(n_qubits, b2) + + model1 = QuantumModel(c1, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + model2 = 
QuantumModel(c2, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + + wf1 = model1.run({}) + wf2 = model2.run({}) + + assert torch.allclose(wf1, wf2, atol=ATOL_32) diff --git a/tests/qadence/test_manipulate.py b/tests/qadence/test_manipulate.py new file mode 100644 index 000000000..ee846c6ca --- /dev/null +++ b/tests/qadence/test_manipulate.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import pytest +import torch +from openfermion import QubitOperator + +from qadence.blocks import AbstractBlock, PutBlock, add, chain, kron +from qadence.blocks.manipulate import from_openfermion, to_openfermion +from qadence.circuit import QuantumCircuit +from qadence.constructors import total_magnetization +from qadence.models import QuantumModel +from qadence.operations import CNOT, CRX, RX, I, X, Y, Z +from qadence.parameters import FeatureParameter +from qadence.transpile import invert_endianness, scale_primitive_blocks_only, validate + + +@pytest.mark.parametrize( + "block_and_op", + [ + (Y(0), QubitOperator("Y0")), + (X(1), QubitOperator("X1")), + (add(X(0), X(1)), QubitOperator("X0") + QubitOperator("X1")), + ( + add(kron(X(0), X(1)), kron(Y(0), Y(1))) * 0.5, + (QubitOperator("X0 X1") + QubitOperator("Y0 Y1")) * 0.5, + ), + ( + chain(kron(X(0), X(1)), kron(Y(2), Y(3))) * 0.5, + QubitOperator("X0 X1") * QubitOperator("Y2 Y3") * 0.5, + ), + (add(X(0), I(1) * 2), QubitOperator("X0") + QubitOperator("", coefficient=2)), + (X(0) * 1.5j, 1.5j * QubitOperator("X0")), # type: ignore [operator] + ], +) +def test_to_openfermion_qubit_operator(block_and_op: tuple) -> None: + (b, op) = block_and_op + assert op == to_openfermion(b) + assert op == to_openfermion(from_openfermion(op)) + + +def test_validate() -> None: + x = chain(chain(X(0)), chain(X(1)), CRX(2, 3, "phi"), CNOT(2, 3)) + y = validate(x) + + p0, p1 = y.blocks[0], y.blocks[1] # type: ignore [attr-defined] + x0 = p0.blocks[0].blocks[0].blocks[0] + assert isinstance(p0, PutBlock) + assert p0.qubit_support == 
(0,) + assert isinstance(x0, X) + assert x0.qubit_support == (0,) + + x1 = p1.blocks[0].blocks[0].blocks[0] # type: ignore [attr-defined] + assert isinstance(p1, PutBlock) + assert p1.qubit_support == (1,) + assert isinstance(x1, X) + assert x1.qubit_support == (0,) + + x = chain(kron(CNOT(1, 2), CNOT(3, 4))) + y = validate(x) + assert y.blocks[0].blocks[0].blocks[1].qubit_support == (2, 3) # type: ignore[attr-defined] + assert y.blocks[0].blocks[0].blocks[1].blocks[0].qubit_support == (0, 1) # type: ignore[attr-defined] # noqa: E501 + + b = kron(CNOT(1, 2), CNOT(0, 3)) + y = validate(b) + assert y.blocks[0].qubit_support == (1, 2) # type: ignore[attr-defined] + assert y.blocks[1].qubit_support == (0, 1, 2, 3) # type: ignore[attr-defined] + assert y.blocks[0].blocks[0].qubit_support == (0, 1) # type: ignore[attr-defined] + assert y.blocks[1].blocks[0].qubit_support == (0, 3) # type: ignore[attr-defined] + + +def test_invert_single_scale() -> None: + b = Z(0) * 1.0 + assert invert_endianness(b, 2, False).qubit_support == (1,) + + +def test_invert_add_zs() -> None: + nqubits = 4 + b = add(Z(i) * c for (i, c) in enumerate([1.0] * nqubits)) + b1 = invert_endianness(b, nqubits, False) + assert b == invert_endianness(b1, nqubits, False) + + +def test_invert_observable() -> None: + nqubits = 4 + x = total_magnetization(nqubits) + x_prime = invert_endianness(x) + assert x == invert_endianness(x_prime) + + +def test_invert_nonsymmentrical_obs() -> None: + x = X(0) + Y(1) + Z(2) + I(3) + x_prime = invert_endianness(x) + assert x == invert_endianness(x_prime) + + +def test_match_inversions() -> None: + nqubits = 2 + qc = QuantumCircuit(nqubits, RX(1, FeatureParameter("x"))) + qc_rev = invert_endianness(qc) + assert qc_rev.block.qubit_support == (0,) + + zz = Z(0) + iz = Z(1) + zz_inv = invert_endianness(zz, nqubits, False) + iz_inv = invert_endianness(iz, nqubits, False) + assert zz_inv.qubit_support == (1,) + assert iz_inv.qubit_support == (0,) + + +@pytest.mark.parametrize( 
+ "block, truth", + [ + (2 * X(0), 2 * X(0)), + # scale only first block because we are multiplying + (2 * chain(X(0), RX(0, "theta")), chain(2 * X(0), RX(0, "theta"))), + (2 * kron(X(0), RX(1, "theta")), kron(2 * X(0), RX(1, "theta"))), + # scale all blocks because we are adding + (2 * add(X(0), RX(0, "theta")), add(2 * X(0), 2 * RX(0, "theta"))), + (add(2 * chain(X(0))), add(chain(2 * X(0)))), + ( + 2 * chain(add(X(2), 3 * X(3)), RX(0, "theta")), + chain(add(2 * X(2), 2 * 3 * X(3)), RX(0, "theta")), + ), + ( + add(3.0 * chain(0.5 * (I(0) - Z(0)), 0.5 * (I(1) - Z(1)))), + add(chain(1.5 * I(0) - 1.5 * Z(0), 0.5 * I(1) - 0.5 * Z(1))), + ), + ], +) +def test_scale_primitive_blocks_only(block: AbstractBlock, truth: AbstractBlock) -> None: + transformed = scale_primitive_blocks_only(block) + assert truth == transformed + + n = max(block.qubit_support) + 1 + vals = {"theta": torch.zeros(2)} + si = torch.ones(2, 2**n, dtype=torch.cdouble) + m1 = QuantumModel(QuantumCircuit(n, block)) + s1 = m1.run(vals, state=si) + m2 = QuantumModel(QuantumCircuit(n, transformed)) + s2 = m2.run(vals, state=si) + assert torch.allclose(s1, s2) diff --git a/tests/qadence/test_matrices.py b/tests/qadence/test_matrices.py new file mode 100644 index 000000000..7bf019939 --- /dev/null +++ b/tests/qadence/test_matrices.py @@ -0,0 +1,349 @@ +# FIXME: not all tests pass ATOL_32 (1e-7) + +from __future__ import annotations + +import numpy as np +import pytest +import strategies as st +import torch +from hypothesis import given, settings +from metrics import ATOL_32, ATOL_E6 + +from qadence import Parameter, QuantumCircuit, VariationalParameter, run +from qadence.blocks import ( + AbstractBlock, + AddBlock, + ParametricBlock, + ParametricControlBlock, + embedding, +) +from qadence.blocks.block_to_tensor import ( + TensorType, + _block_to_tensor_embedded, + block_to_tensor, +) +from qadence.blocks.utils import add, chain, kron +from qadence.constructors import ( + feature_map, + hamiltonian_factory, 
+ hea, + ising_hamiltonian, + qft, + total_magnetization, + zz_hamiltonian, +) +from qadence.operations import ( + CNOT, + CSWAP, + MCPHASE, + MCRX, + MCRY, + MCRZ, + RX, + RY, + RZ, + SWAP, + H, + HamEvo, + I, + N, + S, + T, + Toffoli, + U, + X, + Y, + Z, +) +from qadence.states import equivalent_state, random_state, zero_state +from qadence.types import Interaction + + +def _calc_mat_vec_wavefunction( + block: AbstractBlock, n_qubits: int, init_state: torch.Tensor, values: dict = {} +) -> torch.Tensor: + mat = block_to_tensor(block, values, tuple(range(n_qubits))) + return torch.einsum("bij,kj->bi", mat, init_state) + + +@given(st.batched_digital_circuits()) +@settings(deadline=None) +def test_embedded(circ_and_inputs: tuple[QuantumCircuit, dict[str, torch.Tensor]]) -> None: + circ, inputs = circ_and_inputs + ps, embed = embedding(circ.block, to_gate_params=False) + m = block_to_tensor(circ.block, inputs) + m_embedded = _block_to_tensor_embedded(circ.block, values=embed(ps, inputs)) + zro_state = zero_state(circ.n_qubits) + wf_run = run(circ, values=inputs) + wf_embedded = torch.einsum("bij,kj->bi", m_embedded, zro_state) + wf_nonembedded = torch.einsum("bij,kj->bi", m, zro_state) + assert torch.allclose(m, m_embedded) + assert equivalent_state(wf_run, wf_embedded, atol=ATOL_E6) + assert equivalent_state(wf_run, wf_nonembedded, atol=ATOL_E6) + + +@pytest.mark.parametrize("n_qubits", [3, 5, 7]) +@pytest.mark.parametrize("op0", [X, Y, Z]) +@pytest.mark.parametrize("op1", [X, Y, Z]) +def test_block_to_tensor_support(n_qubits: int, op0: X | Y | Z, op1: X | Y | Z) -> None: + mat0 = block_to_tensor(op0(0)) # type: ignore [operator] + mat1 = block_to_tensor(op1(0)) # type: ignore [operator] + IMAT = block_to_tensor(I(0)) + + possible_targets = list(range(n_qubits - 1)) + target = np.random.choice(possible_targets) + + qubit_support = [target, n_qubits - 1] + np.random.shuffle(qubit_support) + + block = kron(op0(qubit_support[0]), op1(qubit_support[1])) # type: ignore 
[operator] + + mat_small = block_to_tensor(block, use_full_support=False) + mat_large = block_to_tensor(block, use_full_support=True) + + if qubit_support[0] < qubit_support[1]: + exact_small = torch.kron(mat0, mat1).unsqueeze(0) + else: + exact_small = torch.kron(mat1, mat0).unsqueeze(0) + + kron_list = [IMAT for i in range(n_qubits)] + kron_list[qubit_support[0]] = mat0 + kron_list[qubit_support[1]] = mat1 + + exact_large = kron_list[0] + for i in range(n_qubits - 1): + exact_large = torch.kron(exact_large, kron_list[i + 1]) + + assert torch.allclose(mat_small, exact_small) + assert torch.allclose(mat_large, exact_large) + + +@pytest.mark.parametrize("gate", [I, X, Y, Z, H, T, S]) +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_single_qubit_gates(gate: AbstractBlock, n_qubits: int) -> None: + target = np.random.randint(0, n_qubits) + block = gate(target) # type: ignore[operator] + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("batch_size", [i for i in range(2, 10)]) +@pytest.mark.parametrize("gate", [RX, RY, RZ, U]) +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_rotation_gates(batch_size: int, gate: ParametricBlock, n_qubits: int) -> None: + param_names = [f"th{i}" for i in range(gate.num_parameters())] + + target = np.random.randint(0, n_qubits) + block = gate(target, *param_names) # type: ignore[operator] + init_state = random_state(n_qubits) + values = {k: torch.rand(batch_size) for k in param_names} + wf_pyq = run(n_qubits, block, values=values, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state, values=values) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + # test with fixed parameter + block = gate(target, *[np.random.rand()] * len(param_names)) # type: ignore[operator] + init_state = 
random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("gate", [MCRX, MCRY, MCRZ, MCPHASE]) +@pytest.mark.parametrize("n_qubits", [2, 4, 6]) +def test_controlled_parameterized_gates(gate: ParametricControlBlock, n_qubits: int) -> None: + qubits = np.random.choice(list(range(n_qubits)), size=n_qubits, replace=False).tolist() + control = tuple(qubits[:-1]) + target = qubits[-1] + q = np.random.choice([*control, target]) + block = chain(X(q), gate(control, target, "theta")) # type: ignore[operator] + values = {"theta": torch.rand(3)} + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, values=values, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state, values=values) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("gate", [CNOT, SWAP]) +@pytest.mark.parametrize("n_qubits", [2, 4, 6]) +def test_swap_cnot_gates(gate: AbstractBlock, n_qubits: int) -> None: + control, target = np.random.choice(list(range(n_qubits)), size=2, replace=False).tolist() + q = np.random.choice([control, target]) + block = chain(X(q), gate(control, target)) # type: ignore[operator] + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [3, 4, 6]) +def test_cswap_gate(n_qubits: int) -> None: + control, target1, target2 = np.random.choice( + list(range(n_qubits)), size=3, replace=False + ).tolist() + block = CSWAP(control, target1, target2) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + 
+ +@pytest.mark.parametrize("n_qubits", [3, 4, 6]) +def test_toffoli_gates(n_qubits: int) -> None: + init_state = random_state(n_qubits) + target = np.random.choice(list(range(n_qubits)), size=1, replace=False)[0] + control = tuple([qubit for qubit in range(n_qubits) if qubit != target]) + block = Toffoli(control, target) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [2, 4, 6]) +@pytest.mark.parametrize("generator_type", ["tensor", "block"]) +def test_hamevo_gate(n_qubits: int, generator_type: str) -> None: + dim = np.random.randint(1, n_qubits + 1) + if generator_type == "tensor": + h = torch.rand(2**dim, 2**dim) + generator = h + torch.conj(torch.transpose(h, 0, 1)) + generator = generator.unsqueeze(0) + elif generator_type == "block": + ops = [X, Y] * 2 + qubit_supports = np.random.choice(list(range(dim)), len(ops), replace=True) + generator = chain( + add(*[op(q) for op, q in zip(ops, qubit_supports)]), + *[op(q) for op, q in zip(ops, qubit_supports)], + ) + generator = generator + generator.dagger() + + x = Parameter("x", trainable=False) + qubit_support = list(range(dim)) + # FIXME: random shuffle temporarily commented due to issues with how + # qubit_support and block_to_tensor handle MatrixBlocks, to be fixed. 
+ # np.random.shuffle(qubit_support) + block = HamEvo(generator, x, qubit_support=tuple(qubit_support)) + values = {"x": torch.tensor(0.5)} + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, values=values, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state, values) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", list(range(2, 9, 2))) +def test_hea(n_qubits: int, depth: int = 1) -> None: + block = hea(n_qubits, depth) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_total_magnetization(n_qubits: int) -> None: + block = total_magnetization(n_qubits) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert torch.allclose(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +@pytest.mark.parametrize("fm_type", ["tower", "fourier", "chebyshev"]) +@pytest.mark.parametrize("op", [RX, RY, RZ]) +def test_feature_maps(n_qubits: int, fm_type: str, op: AbstractBlock) -> None: + x = Parameter("x", trainable=True) + block = feature_map(n_qubits, param=x, op=op, fm_type=fm_type) # type: ignore[arg-type] + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [2, 4]) +@pytest.mark.parametrize("interaction", [Interaction.ZZ, Interaction.NN, Interaction.XY]) +@pytest.mark.parametrize("detuning", [Z, N, X]) +def test_hamiltonians( + n_qubits: int, interaction: Interaction, detuning: type[N] | type[X] | type[Y] | type[Z] +) -> None: + 
block = hamiltonian_factory( + n_qubits, + interaction=interaction, + detuning=detuning, + random_strength=True, + ) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert torch.allclose(wf_pyq, wf_mat, atol=ATOL_32) + + +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_qft_block(n_qubits: int) -> None: + block = qft(n_qubits) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32 * 10) + + +@pytest.mark.parametrize("n_qubits", [2, 4, 6]) +def test_random_qubit_support(n_qubits: int) -> None: + dim = np.random.randint(1, n_qubits + 1) + ops = [X, Y, Z, S, T] * 2 + qubit_supports = np.random.choice(list(range(dim)), len(ops), replace=True) + block = chain( + *[op(q) for op, q in zip(ops, qubit_supports)], # type: ignore [abstract] + ) + init_state = random_state(n_qubits) + wf_pyq = run(n_qubits, block, state=init_state) + wf_mat = _calc_mat_vec_wavefunction(block, n_qubits, init_state) + assert equivalent_state(wf_pyq, wf_mat, atol=ATOL_32) + + +def variational_ising(n_qubits: int) -> AddBlock: + ops = [] + for i in range(n_qubits): + for j in range(i): + x = VariationalParameter(f"x_{i}{j}") + ops.append(x * kron(Z(j), Z(i))) + return add(*ops) + + +@pytest.mark.parametrize( + "block, is_diag_pauli", + [ + (ising_hamiltonian(2), False), + (total_magnetization(2), True), + (zz_hamiltonian(2), True), + (variational_ising(3), True), + (hea(4), False), + ], +) +def test_block_is_diag(block: AbstractBlock, is_diag_pauli: bool) -> None: + assert block._is_diag_pauli == is_diag_pauli + + +@pytest.mark.parametrize("n_qubits", [i for i in range(1, 5)]) +@pytest.mark.parametrize("obs", [total_magnetization, zz_hamiltonian]) +def test_sparse_obs_conversion(n_qubits: int, obs: AbstractBlock) -> None: + obs = 
obs(n_qubits) # type: ignore[operator] + sparse_diag = block_to_tensor(obs, tensor_type=TensorType.SPARSEDIAGONAL) + true_diag = torch.diag(block_to_tensor(obs, {}, tuple([i for i in range(n_qubits)])).squeeze(0)) + + assert torch.allclose( + sparse_diag.coalesce().values(), true_diag.to_sparse().coalesce().values() + ) + assert torch.allclose( + sparse_diag.coalesce().indices(), true_diag.to_sparse().coalesce().indices() + ) + + +def test_scaled_kron_hamevo_equal() -> None: + block = kron(I(0), I(1)) + assert torch.allclose( + block_to_tensor(HamEvo(block, 0.0)), block_to_tensor(HamEvo(1.0 * block, 0.0)) + ) diff --git a/tests/qadence/test_matrixblock.py b/tests/qadence/test_matrixblock.py new file mode 100644 index 000000000..3312116ee --- /dev/null +++ b/tests/qadence/test_matrixblock.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch + +from qadence import QuantumCircuit as QC +from qadence.backend import BackendName +from qadence.backends.api import DiffMode +from qadence.blocks import MatrixBlock, ParametricBlock, PrimitiveBlock, chain +from qadence.blocks.block_to_tensor import OPERATIONS_DICT, block_to_tensor +from qadence.constructors import hea +from qadence.execution import run +from qadence.models import QuantumModel as QM +from qadence.operations import CNOT, RX, RY, RZ, H, I, S, T, U, X, Y, Z +from qadence.states import random_state + + +@pytest.mark.parametrize("gate", [I, X, Y, Z, H, T, S]) +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_single_qubit_gates(gate: PrimitiveBlock, n_qubits: int) -> None: + target = np.random.randint(0, n_qubits) + block = gate(target) # type: ignore[operator] + mat = block_to_tensor(block, {}, tuple(range(n_qubits))) + matblock = MatrixBlock(block_to_tensor(block, {}, (target,)), (target,)) + init_state = random_state(n_qubits) + wf_pyq_mat = run(n_qubits, matblock, state=init_state) + wf_pyq_standard = run(n_qubits, block, state=init_state) + wf_mat = 
torch.einsum("bij,kj->ki", mat, init_state) + assert torch.all(torch.isclose(wf_pyq_mat, wf_mat)) and torch.all( + torch.isclose(wf_pyq_mat, wf_pyq_standard) + ) + + +@pytest.mark.parametrize("gate", [RX, RY, RZ, U]) +@pytest.mark.parametrize("n_qubits", [1, 2, 4]) +def test_rotation_gates(gate: ParametricBlock, n_qubits: int) -> None: + target = np.random.randint(0, n_qubits) + param = [np.random.rand()] * gate.num_parameters() + block = gate(target, *param) # type: ignore[operator] + init_state = random_state(n_qubits) + mat = block_to_tensor(block, {}, tuple(range(n_qubits))) + matblock = MatrixBlock(block_to_tensor(block, {}, (target,)), (target,)) + wf_pyq_mat = run(n_qubits, matblock, state=init_state) + wf_pyq_standard = run(n_qubits, block, state=init_state) + wf_mat = torch.einsum("bij,kj->ki", mat, init_state) + assert torch.allclose(wf_pyq_mat, wf_mat) and torch.allclose(wf_pyq_mat, wf_pyq_standard) + + +@pytest.mark.parametrize("gate", [X, Y, Z]) +def test_single_qubit_gates_eigenvals(gate: PrimitiveBlock) -> None: + matblock = MatrixBlock(OPERATIONS_DICT[gate.name], (0,)) + block = gate(0) # type: ignore[operator] + assert torch.allclose(matblock.eigenvalues, block.eigenvalues) + + +@pytest.mark.parametrize("gate", [RX, RY, RZ, U]) +@pytest.mark.parametrize("n_qubits", [2, 4]) +def test_parametric_circ_with_matblock(gate: ParametricBlock, n_qubits: int) -> None: + target = np.random.randint(0, n_qubits) + p = [np.random.rand()] * gate.num_parameters() + block = gate(target, *p) # type: ignore[operator] + s = random_state(n_qubits) + matblock = MatrixBlock(block_to_tensor(block, {}, (target,)), (target,)) + bb = chain(hea(n_qubits=n_qubits, depth=1), CNOT(0, 1)) + wf_pyq_mat = run(n_qubits, chain(matblock, bb), state=s) + wf_pyq_standard = run(n_qubits, chain(gate(target, *p), bb), state=s) # type: ignore[operator] + assert torch.all(torch.isclose(wf_pyq_mat, wf_pyq_standard)) + + +def test_qm_with_matblock() -> None: + n_qubits = 1 + XMAT = 
torch.tensor([[0, 1], [1, 0]], dtype=torch.cdouble) + state = random_state(n_qubits) + matblock = MatrixBlock(XMAT, (0,)) + + qm_mat = QM( + circuit=QC(n_qubits, matblock), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + qm = QM( + circuit=QC(n_qubits, X(0)), + observable=Z(0), + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.AD, + ) + wf_mat = qm_mat.run({}, state) + exp_mat = qm_mat.expectation({}) + wf = qm.run({}, state) + exp = qm.expectation({}) + + assert torch.all(torch.isclose(wf_mat, wf)) and torch.isclose(exp, exp_mat) diff --git a/tests/qadence/test_measurements/test_shadows.py b/tests/qadence/test_measurements/test_shadows.py new file mode 100644 index 000000000..5ef43152e --- /dev/null +++ b/tests/qadence/test_measurements/test_shadows.py @@ -0,0 +1,325 @@ +from __future__ import annotations + +import json +import os +from collections import Counter + +import pytest +import torch +from torch import Tensor + +from qadence import ( + BackendName, + DiffMode, + Parameter, + QuantumCircuit, + QuantumModel, + backend_factory, + expectation, +) +from qadence.blocks import AbstractBlock +from qadence.blocks.block_to_tensor import IMAT +from qadence.constructors import ising_hamiltonian, total_magnetization +from qadence.measurements import Measurements +from qadence.measurements.shadow import ( + PROJECTOR_MATRICES, + UNITARY_TENSOR, + _max_observable_weight, + classical_shadow, + estimations, + estimators, + local_shadow, + number_of_samples, +) +from qadence.operations import RX, RY, H, I, X, Y, Z, add, chain, kron +from qadence.serialization import deserialize + + +@pytest.mark.parametrize( + "observable, exp_weight", + [ + (X(0), 1), + (kron(*[X(0), Y(1), Z(2)]), 3), + (add(*[X(0), Y(0), Z(0)]), 1), + (kron(*[X(0), H(1), I(2), Z(3)]), 2), + (total_magnetization(5), 1), + (ising_hamiltonian(4), 2), + ], +) +def test_weight(observable: AbstractBlock, exp_weight: int) -> None: + qubit_weight = 
_max_observable_weight(observable) + assert qubit_weight == exp_weight + + +@pytest.mark.parametrize( + "observables, accuracy, confidence, exp_samples", + [([total_magnetization(2)], 0.1, 0.1, (10200, 6))], +) +def test_number_of_samples( + observables: list[AbstractBlock], accuracy: float, confidence: float, exp_samples: tuple +) -> None: + N, K = number_of_samples(observables=observables, accuracy=accuracy, confidence=confidence) + assert N == exp_samples[0] + assert K == exp_samples[1] + + +@pytest.mark.parametrize( + "sample, unitary_ids, exp_shadow", + [ + ( + Counter({"10": 1}), + [0, 2], + torch.kron( + 3 * (UNITARY_TENSOR[0].adjoint() @ PROJECTOR_MATRICES["1"] @ UNITARY_TENSOR[0]) + - IMAT, + 3 * (UNITARY_TENSOR[2].adjoint() @ PROJECTOR_MATRICES["0"] @ UNITARY_TENSOR[2]) + - IMAT, + ), + ), + ( + Counter({"0111": 1}), + [2, 0, 2, 2], + torch.kron( + torch.kron( + 3 * (UNITARY_TENSOR[2].adjoint() @ PROJECTOR_MATRICES["0"] @ UNITARY_TENSOR[2]) + - IMAT, + 3 * (UNITARY_TENSOR[0].adjoint() @ PROJECTOR_MATRICES["1"] @ UNITARY_TENSOR[0]) + - IMAT, + ), + torch.kron( + 3 * (UNITARY_TENSOR[2].adjoint() @ PROJECTOR_MATRICES["1"] @ UNITARY_TENSOR[2]) + - IMAT, + 3 * (UNITARY_TENSOR[2].adjoint() @ PROJECTOR_MATRICES["1"] @ UNITARY_TENSOR[2]) + - IMAT, + ), + ), + ), + ], +) +def test_local_shadow(sample: Counter, unitary_ids: list, exp_shadow: Tensor) -> None: + shadow = local_shadow(sample=sample, unitary_ids=unitary_ids) + assert torch.allclose(shadow, exp_shadow) + + +theta = Parameter("theta") + + +@pytest.mark.skip(reason="Can't fix the seed for deterministic outputs.") +@pytest.mark.parametrize( + "layer, param_values, exp_shadows", + [ + (X(0) @ X(2), {}, []) + # (kron(RX(0, theta), X(1)), {"theta": torch.tensor([0.5, 1.0, 1.5])}, []) + ], +) +def test_classical_shadow(layer: AbstractBlock, param_values: dict, exp_shadows: list) -> None: + circuit = QuantumCircuit(2, layer) + shadows = classical_shadow( + shadow_size=2, + circuit=circuit, + 
param_values=param_values, + ) + for shadow, exp_shadow in zip(shadows, exp_shadows): + for batch, exp_batch in zip(shadow, exp_shadow): + assert torch.allclose(batch, exp_batch, atol=1.0e-2) + + +@pytest.mark.parametrize( + "N, K, circuit, param_values, observable, exp_traces", + [ + (2, 1, QuantumCircuit(2, kron(X(0), Z(1))), {}, X(1), torch.tensor([0.0])), + ], +) +def test_estimators( + N: int, + K: int, + circuit: QuantumCircuit, + param_values: dict, + observable: AbstractBlock, + exp_traces: Tensor, +) -> None: + shadows = classical_shadow(shadow_size=N, circuit=circuit, param_values=param_values) + estimated_traces = estimators( + qubit_support=circuit.block.qubit_support, + N=N, + K=K, + shadow=shadows[0], + observable=observable, + ) + assert torch.allclose(estimated_traces, exp_traces) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize( + "circuit, observable, values", + [ + (QuantumCircuit(2, kron(X(0), X(1))), X(0) @ X(1), {}), + (QuantumCircuit(2, kron(X(0), X(1))), X(0) @ Y(1), {}), + (QuantumCircuit(2, kron(X(0), X(1))), Y(0) @ X(1), {}), + (QuantumCircuit(2, kron(X(0), X(1))), Y(0) @ Y(1), {}), + (QuantumCircuit(2, kron(Z(0), H(1))), X(0) @ Z(1), {}), + ( + QuantumCircuit(2, kron(RX(0, theta), X(1))), + kron(Z(0), Z(1)), + {"theta": torch.tensor([0.5, 1.0])}, + ), + (QuantumCircuit(2, kron(X(0), Z(1))), ising_hamiltonian(2), {}), + ], +) +def test_estimations_comparison_exact( + circuit: QuantumCircuit, observable: AbstractBlock, values: dict +) -> None: + backend = backend_factory(backend=BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, _, embed, params) = backend.convert(circuit=circuit, observable=observable) + param_values = embed(params, values) + + estimated_exp = estimations( + circuit=conv_circ.abstract, + observables=[observable], + param_values=param_values, + shadow_size=5000, + ) + exact_exp = expectation(circuit, observable, values=values) + assert torch.allclose(estimated_exp, exact_exp, atol=0.2) + + +theta1 = 
Parameter("theta1", trainable=False) +theta2 = Parameter("theta2", trainable=False) +theta3 = Parameter("theta3", trainable=False) +theta4 = Parameter("theta4", trainable=False) + + +blocks = chain( + kron(RX(0, theta1), RY(1, theta2)), + kron(RX(0, theta3), RY(1, theta4)), +) + +values = { + "theta1": torch.tensor([0.5]), + "theta2": torch.tensor([1.5]), + "theta3": torch.tensor([2.0]), + "theta4": torch.tensor([2.5]), +} + +values2 = { + "theta1": torch.tensor([0.5, 1.0]), + "theta2": torch.tensor([1.5, 2.0]), + "theta3": torch.tensor([2.0, 2.5]), + "theta4": torch.tensor([2.5, 3.0]), +} + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize( + "circuit, values", + [ + (QuantumCircuit(2, blocks), values), + (QuantumCircuit(2, blocks), values2), + ], +) +def test_estimations_comparison_tomo_forward_pass(circuit: QuantumCircuit, values: dict) -> None: + observable = Z(0) ^ circuit.n_qubits + + pyq_backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, conv_obs, embed, params) = pyq_backend.convert(circuit, observable) + pyq_exp_exact = pyq_backend.expectation(conv_circ, conv_obs, embed(params, values)) + model = QuantumModel( + circuit=circuit, + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + options = {"n_shots": 100000} + estimated_exp_tomo = model.expectation( + values=values, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=options), + ) + new_options = {"accuracy": 0.1, "confidence": 0.1} + estimated_exp_shadow = model.expectation( + values=values, + protocol=Measurements(protocol=Measurements.SHADOW, options=new_options), + ) # N = 54400. 
+ assert torch.allclose(estimated_exp_tomo, pyq_exp_exact, atol=1.0e-2) + assert torch.allclose(estimated_exp_shadow, pyq_exp_exact, atol=0.1) + assert torch.allclose(estimated_exp_shadow, pyq_exp_exact, atol=0.1) + + +@pytest.mark.flaky(max_runs=5) +def test_chemistry_hamiltonian_1() -> None: + from qadence import load + + circuit = load("./tests/test_files/chem_circ.json") + assert isinstance(circuit, QuantumCircuit) + hamiltonian = load("./tests/test_files/chem_ham.json") + assert isinstance(hamiltonian, AbstractBlock) + # Restrict shadow size for faster tests. + kwargs = {"accuracy": 0.1, "confidence": 0.1, "shadow_size": 1000} + param_values = {"theta_0": torch.tensor([1.0])} + + model = QuantumModel( + circuit=circuit, + observable=hamiltonian, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + exact = model.expectation(values=param_values) + estim = model.expectation( + values=param_values, + protocol=Measurements(protocol=Measurements.SHADOW, options=kwargs), + ) + assert torch.allclose(estim, exact, atol=0.3) + + +@pytest.mark.flaky(max_runs=5) +def test_chemistry_hamiltonian_2() -> None: + from qadence import load + + circuit = load("./tests/test_files/chem_circ.json") + assert isinstance(circuit, QuantumCircuit) + hamiltonian = ising_hamiltonian(2) + assert isinstance(hamiltonian, AbstractBlock) + # Restrict shadow size for faster tests. 
+ kwargs = {"accuracy": 0.1, "confidence": 0.1, "shadow_size": 1000} + param_values = {"theta_0": torch.tensor([1.0])} + + model = QuantumModel( + circuit=circuit, + observable=hamiltonian, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + exact = model.expectation(values=param_values) + estim = model.expectation( + values=param_values, + protocol=Measurements(protocol=Measurements.SHADOW, options=kwargs), + ) + assert torch.allclose(estim, exact, atol=0.2) + + +def open_chem_obs() -> AbstractBlock: + directory = os.getcwd() + with open(os.path.join(directory, "tests/test_files/h4.json"), "r") as js: + obs = json.loads(js.read()) + return deserialize(obs) # type: ignore[return-value] + + +@pytest.mark.flaky(max_runs=5) +def test_chemistry_hamiltonian_3() -> None: + circuit = QuantumCircuit(4, kron(Z(0), H(1), Z(2), X(3))) + hamiltonian = open_chem_obs() + param_values: dict = dict() + + kwargs = {"accuracy": 0.1, "confidence": 0.1, "shadow_size": 5000} + + model = QuantumModel( + circuit=circuit, + observable=hamiltonian, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + exact = model.expectation(values=param_values) + estim = model.expectation( + values=param_values, + protocol=Measurements(protocol=Measurements.SHADOW, options=kwargs), + ) + assert torch.allclose(estim, exact, atol=0.3) diff --git a/tests/qadence/test_measurements/test_tomography.py b/tests/qadence/test_measurements/test_tomography.py new file mode 100644 index 000000000..171aa1c3a --- /dev/null +++ b/tests/qadence/test_measurements/test_tomography.py @@ -0,0 +1,705 @@ +from __future__ import annotations + +from collections import Counter +from typing import List + +import pytest +import strategies as st # type: ignore +import torch +from hypothesis import given, settings +from metrics import HIGH_ACCEPTANCE, LOW_ACCEPTANCE, MIDDLE_ACCEPTANCE # type: ignore + +from qadence import BackendName, DiffMode +from qadence.backends import backend_factory +from qadence.blocks 
import ( + AbstractBlock, + add, + chain, + kron, +) +from qadence.blocks.utils import unroll_block_with_scaling +from qadence.circuit import QuantumCircuit +from qadence.constructors import ( + feature_map, + hea, + total_magnetization, + zz_hamiltonian, +) +from qadence.measurements import Measurements +from qadence.measurements.tomography import ( + compute_expectation as basic_tomography, +) +from qadence.measurements.tomography import ( + empirical_average, + get_counts, + get_qubit_indices_for_op, + iterate_pauli_decomposition, + rotate, +) +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models import QNN, QuantumModel +from qadence.operations import RX, RY, H, SDagger, X, Y, Z +from qadence.parameters import Parameter + +torch.manual_seed(1) + +BACKENDS = ["pyqtorch", "braket"] +DIFF_MODE = ["ad", "gpsr"] + + +@pytest.mark.parametrize( + "pauli_word, exp_indices_X, exp_indices_Y", + [ + (kron(X(0), X(1)), [[0, 1]], [[]]), + (kron(X(0), Y(1)), [[0]], [[1]]), + (kron(Y(0), Y(1)), [[]], [[0, 1]]), + (kron(Z(0), Z(1)), [[]], [[]]), + (add(X(0), X(1)), [[0], [1]], [[], []]), + (add(X(0), Y(1)), [[0], []], [[], [1]]), + (add(Y(0), Y(1)), [[], []], [[0], [1]]), + (add(Z(0), Z(1)), [[], []], [[], []]), + (add(kron(X(0), Z(2)), 1.5 * kron(Y(1), Z(2))), [[0], []], [[], [1]]), + ( + add( + 0.5 * kron(X(0), Y(1), X(2), Y(3)), + 1.5 * kron(Y(0), Z(1), Y(2), Z(3)), + 2.0 * kron(Z(0), X(1), Z(2), X(3)), + ), + [[0, 2], [], [1, 3]], + [[1, 3], [0, 2], []], + ), + ], +) +def test_get_qubit_indices_for_op( + pauli_word: tuple, exp_indices_X: list, exp_indices_Y: list +) -> None: + pauli_decomposition = unroll_block_with_scaling(pauli_word) + + indices_X = [] + indices_Y = [] + for index, pauli_term in enumerate(pauli_decomposition): + indices_x = get_qubit_indices_for_op(pauli_term, X(0)) + # if indices_x: + indices_X.append(indices_x) + indices_y = get_qubit_indices_for_op(pauli_term, Y(0)) + # if indices_y: + indices_Y.append(indices_y) + assert 
indices_X == exp_indices_X + assert indices_Y == exp_indices_Y + + +@pytest.mark.parametrize( + "circuit, observable, expected_circuit", + [ + ( + QuantumCircuit(2, kron(X(0), X(1))), + kron(X(0), Z(2)) + 1.5 * kron(Y(1), Z(2)), + [ + QuantumCircuit(2, chain(kron(X(0), X(1)), Z(0) * H(0))), + QuantumCircuit(2, chain(kron(X(0), X(1)), SDagger(1) * H(1))), + ], + ), + ( + QuantumCircuit(4, kron(X(0), X(1), X(2), X(3))), + add( + 0.5 * kron(X(0), Y(1), X(2), Y(3)), + 1.5 * kron(Y(0), Z(1), Y(2), Z(3)), + 2.0 * kron(Z(0), X(1), Z(2), X(3)), + ), + [ + QuantumCircuit( + 4, + chain( + kron(X(0), X(1), X(2), X(3)), + Z(0) * H(0), + Z(2) * H(2), + SDagger(1) * H(1), + SDagger(3) * H(3), + ), + ), + QuantumCircuit( + 4, + chain( + kron(X(0), X(1), X(2), X(3)), + SDagger(0) * H(0), + SDagger(2) * H(2), + ), + ), + QuantumCircuit( + 4, + chain( + kron(X(0), X(1), X(2), X(3)), + Z(1) * H(1), + Z(3) * H(3), + ), + ), + ], + ), + ], +) +def test_rotate( + circuit: QuantumCircuit, + observable: AbstractBlock, + expected_circuit: List[QuantumCircuit], +) -> None: + pauli_decomposition = unroll_block_with_scaling(observable) + for index, pauli_term in enumerate(pauli_decomposition): + rotated_circuit = rotate(circuit, pauli_term) + assert rotated_circuit == expected_circuit[index] + + +def test_raise_errors() -> None: + block = H(0) + observable = Z(0) + circuit = QuantumCircuit(1, block) + pyqtorch_backend = backend_factory(BackendName.PYQTORCH, diff_mode=None) + (conv_circ, _, _, _) = pyqtorch_backend.convert(circuit, observable) + options = {"n_shots": 10000} + with pytest.raises(TypeError): + basic_tomography( + circuit=conv_circ, + param_values={}, + observables=observable, # type: ignore[arg-type] + options=options, + ) + options = {"shots": 10000} + with pytest.raises(KeyError): + basic_tomography( + circuit=conv_circ, param_values={}, observables=[observable], options=options + ) + + +def test_get_counts() -> None: + samples = [Counter({"00": 10, "01": 50, "10": 20, "11": 
20})] + support = [0] + counts = get_counts(samples, support) + assert counts == [Counter({"0": 60, "1": 40})] + support = [1] + counts = get_counts(samples, support) + assert counts == [Counter({"0": 30, "1": 70})] + support = [0, 1] + counts = get_counts(samples, support) + assert counts == samples + + samples = [ + Counter( + { + "1111": 1653, + "0000": 1586, + "0001": 1463, + "0110": 1286, + "1110": 998, + "0101": 668, + "0111": 385, + "1000": 327, + "0011": 322, + "1100": 281, + "1001": 218, + "1010": 213, + "0100": 187, + "1101": 172, + "1011": 154, + "0010": 87, + } + ) + ] + support = [0, 1, 2, 3] + counts = get_counts(samples, support) + assert counts == samples + + +def test_empirical_average() -> None: + samples = [Counter({"00": 10, "01": 50, "10": 20, "11": 20})] + support = [0] + assert torch.allclose(empirical_average(samples, support), torch.tensor([0.2])) + support = [1] + assert torch.allclose(empirical_average(samples, support), torch.tensor([-0.4])) + support = [0, 1] + assert torch.allclose(empirical_average(samples, support), torch.tensor([-0.4])) + samples = [ + Counter( + { + "1111": 1653, + "0000": 1586, + "0001": 1463, + "0110": 1286, + "1110": 998, + "0101": 668, + "0111": 385, + "1000": 327, + "0011": 322, + "1100": 281, + "1001": 218, + "1010": 213, + "0100": 187, + "1101": 172, + "1011": 154, + "0010": 87, + } + ) + ] + support = [0, 1, 2, 3] + assert torch.allclose(empirical_average(samples, support), torch.tensor([0.2454])) + + +# Disable cases are passing at the expense of high (1 billion) nshots. +# To keep reasonable run time, less expensive cases are tested. +# Some observables also contain ScaleBlock for which PSR are not defined. 
+@pytest.mark.parametrize( + "circuit, values, observable", + [ + (QuantumCircuit(1, H(0)), {}, Z(0)), + (QuantumCircuit(2, kron(H(0), H(1))), {}, kron(X(0), X(1))), + ( + QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)), + {"phi": torch.rand(1)}, + total_magnetization(4), + ), + ( + QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)), + {"phi": torch.rand(1)}, + zz_hamiltonian(4), + ), + # ( + # QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)), + # {"phi": torch.rand(1)}, + # ising_hamiltonian(4), + # HIGH_ACCEPTANCE, + # ), + ( + QuantumCircuit(4, feature_map(4, fm_type="chebyshev"), hea(4, depth=2)), + {"phi": torch.rand(1)}, + add( + 0.5 * kron(X(0), Y(1), X(2), Y(3)), + 1.5 * kron(Y(0), Z(1), Y(2), Z(3)), + 2.0 * kron(Z(0), X(1), Z(2), X(3)), + ), + ), + ], +) +def test_iterate_pauli_decomposition( + circuit: QuantumCircuit, + values: dict, + observable: AbstractBlock, +) -> None: + pauli_decomposition = unroll_block_with_scaling(observable) + pyqtorch_backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, conv_obs, embed, params) = pyqtorch_backend.convert(circuit, observable) + param_values = embed(params, values) + pyqtorch_expectation = pyqtorch_backend.expectation(conv_circ, conv_obs, param_values)[0] + estimated_values = iterate_pauli_decomposition( + circuit=conv_circ.abstract, + param_values=param_values, + pauli_decomposition=pauli_decomposition, + n_shots=1000000, + ) + assert torch.allclose(estimated_values, pyqtorch_expectation, atol=LOW_ACCEPTANCE) + + +@given(st.digital_circuits()) +@settings(deadline=None) +def test_basic_tomography_direct_call(circuit: QuantumCircuit) -> None: + observable = Z(0) ^ circuit.n_qubits + pyqtorch_backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, conv_obs, embed, params) = pyqtorch_backend.convert(circuit, observable) + inputs = rand_featureparameters(circuit, 1) + kwargs = 
{"n_shots": 100000} + tomo_values = basic_tomography( + conv_circ.abstract, [c_o.abstract for c_o in conv_obs], embed(params, inputs), kwargs + )[0] + estimated_values = torch.flatten(tomo_values) + + pyqtorch_expectation = pyqtorch_backend.expectation(conv_circ, conv_obs, embed(params, inputs))[ + 0 + ] + assert torch.allclose(estimated_values, pyqtorch_expectation, atol=LOW_ACCEPTANCE) + + +@given(st.restricted_circuits()) +@settings(deadline=None) +def test_basic_tomography_for_backend_forward_pass(circuit: QuantumCircuit) -> None: + obs = Z(0) ^ circuit.n_qubits + kwargs = {"n_shots": 100000} + for backend in BACKENDS: + for diff_mode in [DiffMode.GPSR]: + inputs = rand_featureparameters(circuit, 1) + qm = QuantumModel(circuit=circuit, observable=obs, backend=backend, diff_mode=diff_mode) + exp_tomo = qm.expectation( + values=inputs, + protocol=Measurements( + protocol=Measurements.TOMOGRAPHY, + options=kwargs, + ), + )[0] + estimated_values = torch.flatten(exp_tomo) + expectation_values = qm.expectation(values=inputs)[0] + assert torch.allclose(estimated_values, expectation_values, atol=LOW_ACCEPTANCE) + + +@given(st.digital_circuits()) +@settings(deadline=None) +def test_basic_tomography_for_quantum_model(circuit: QuantumCircuit) -> None: + backend = BackendName.PYQTORCH + diff_mode = DiffMode.GPSR + observable = Z(0) ^ circuit.n_qubits + model = QuantumModel( + circuit=circuit, + observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + inputs = rand_featureparameters(circuit, 1) + kwargs = {"n_shots": 100000} + estimated_values = model.expectation( + inputs, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + pyqtorch_backend = backend_factory(backend=backend, diff_mode=diff_mode) + (conv_circ, conv_obs, embed, params) = pyqtorch_backend.convert(circuit, observable) + pyqtorch_expectation = pyqtorch_backend.expectation(conv_circ, conv_obs, embed(params, inputs))[ + 0 + ] + assert 
torch.allclose(estimated_values, pyqtorch_expectation, atol=LOW_ACCEPTANCE) + + +@given(st.digital_circuits()) +@settings(deadline=None) +def test_basic_list_observables_tomography_for_quantum_model(circuit: QuantumCircuit) -> None: + observable = [Z(n) for n in range(circuit.n_qubits)] + model = QuantumModel( + circuit=circuit, + observable=observable, # type: ignore + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + inputs = rand_featureparameters(circuit, 1) + kwargs = {"n_shots": 100000} + estimated_values = model.expectation( + inputs, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + pyqtorch_backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, conv_obs, embed, params) = pyqtorch_backend.convert( + circuit, observable # type: ignore [arg-type] + ) + pyqtorch_expectation = pyqtorch_backend.expectation(conv_circ, conv_obs, embed(params, inputs)) + assert torch.allclose(estimated_values, pyqtorch_expectation, atol=LOW_ACCEPTANCE) + + +theta1 = Parameter("theta1", trainable=False) +theta2 = Parameter("theta2", trainable=False) +theta3 = Parameter("theta3", trainable=False) +theta4 = Parameter("theta4", trainable=False) + +blocks = chain( + kron(RX(0, theta1), RY(1, theta2)), + kron(RX(0, theta3), RY(1, theta4)), +) + +values = { + "theta1": torch.tensor([0.5]), + "theta2": torch.tensor([1.5]), + "theta3": torch.tensor([2.0]), + "theta4": torch.tensor([2.5]), +} + +values2 = { + "theta1": torch.tensor([0.5, 1.0]), + "theta2": torch.tensor([1.5, 2.0]), + "theta3": torch.tensor([2.0, 2.5]), + "theta4": torch.tensor([2.5, 3.0]), +} + + +@pytest.mark.parametrize( + "circuit, values", + [ + ( + QuantumCircuit(2, blocks), + values, + ), + ( + QuantumCircuit(2, blocks), + values2, + ), + ], +) +def test_basic_tomography_for_parametric_circuit_forward_pass( + circuit: QuantumCircuit, values: dict +) -> None: + observable = Z(0) ^ circuit.n_qubits + model = QuantumModel( + circuit=circuit, + 
observable=observable, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + kwargs = {"n_shots": 100000} + estimated_values = model.expectation( + values=values, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + pyqtorch_backend = backend_factory(BackendName.PYQTORCH, diff_mode=DiffMode.GPSR) + (conv_circ, conv_obs, embed, params) = pyqtorch_backend.convert(circuit, observable) + pyqtorch_expectation = pyqtorch_backend.expectation(conv_circ, conv_obs, embed(params, values)) + assert torch.allclose(estimated_values, pyqtorch_expectation, atol=LOW_ACCEPTANCE) + + +# The ising hamiltonian constructor produces results that +# are far at variance. This is investigated separately. +@pytest.mark.slow +@pytest.mark.parametrize( + "observable, acceptance", + [ + (total_magnetization(4), MIDDLE_ACCEPTANCE), + (zz_hamiltonian(4), MIDDLE_ACCEPTANCE), + # (ising_hamiltonian(4), MIDDLE_ACCEPTANCE), + ( + add( + 0.5 * kron(X(0), X(1), X(2), X(3)), + 1.5 * kron(Y(0), Y(1), Y(2), Y(3)), + 2.0 * kron(Z(0), Z(1), Z(2), Z(3)), + ), + MIDDLE_ACCEPTANCE, + ), + ( + add( + 0.5 * kron(X(0), Y(1), X(2), Y(3)), + 1.5 * kron(Y(0), Z(1), Y(2), Z(3)), + 2.0 * kron(Z(0), X(1), Z(2), X(3)), + ), + MIDDLE_ACCEPTANCE, + ), + ], +) +def test_forward_and_backward_passes_with_qnn(observable: AbstractBlock, acceptance: float) -> None: + n_qubits = 4 + batch_size = 5 + kwargs = {"n_shots": 1000000} + + # fm = fourier_feature_map(n_qubits) + fm = feature_map(n_qubits, fm_type="chebyshev") + ansatz = hea(n_qubits, depth=2) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + values = {"phi": torch.rand(batch_size, requires_grad=True)} + + protocol = Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs) + + model_with_psr = QNN(circuit=circuit, observable=observable, diff_mode=DiffMode.GPSR) + model_with_psr_and_init = QNN( + circuit=circuit, observable=observable, diff_mode=DiffMode.GPSR, protocol=protocol + ) + model_with_psr.zero_grad() + 
expectation_tomo = model_with_psr.expectation( + values=values, + protocol=protocol, + ) + expectation_tomo_init = model_with_psr_and_init.expectation(values=values) + assert torch.allclose(expectation_tomo, expectation_tomo_init, atol=acceptance) + dexpval_tomo = torch.autograd.grad( + expectation_tomo, + values["phi"], + torch.ones_like(expectation_tomo), + )[0] + dexpval_tomo_init = torch.autograd.grad( + expectation_tomo_init, + values["phi"], + torch.ones_like(expectation_tomo_init), + )[0] + assert torch.allclose(dexpval_tomo, dexpval_tomo_init, atol=acceptance) + expectation_exact = model_with_psr.expectation(values=values) + dexpval_exact = torch.autograd.grad( + expectation_exact, + values["phi"], + torch.ones_like(expectation_exact), + )[0] + assert torch.allclose(expectation_tomo, expectation_exact, atol=acceptance) + assert torch.allclose(dexpval_tomo, dexpval_exact, atol=acceptance) + + +@pytest.mark.slow +@pytest.mark.parametrize( + "observable, acceptance", + [ + (total_magnetization(4), MIDDLE_ACCEPTANCE), + ], +) +def test_partial_derivatives_with_qnn(observable: AbstractBlock, acceptance: float) -> None: + n_qubits = 4 + batch_size = 5 + kwargs = {"n_shots": 100000} + + # fm = fourier_feature_map(n_qubits) + fm = feature_map(n_qubits, fm_type="chebyshev") + ansatz = hea(n_qubits, depth=2) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + values = {"phi": torch.rand(batch_size, requires_grad=True)} + + model_with_psr = QNN(circuit=circuit, observable=observable, diff_mode=DiffMode.GPSR) + params = {k: v for k, v in model_with_psr._params.items() if v.requires_grad} + model_with_psr.zero_grad() + expectation_tomo = model_with_psr.expectation( + values=values, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + dexpval_tomo_phi = torch.autograd.grad( + expectation_tomo, + values["phi"], + torch.ones_like(expectation_tomo), + create_graph=True, + )[0] + dexpval_tomo_theta = torch.autograd.grad( + expectation_tomo, + 
list(params.values()), + torch.ones_like(expectation_tomo), + create_graph=True, + )[0] + dexpval_tomo_phitheta = torch.autograd.grad( + dexpval_tomo_phi, + list(params.values()), + torch.ones_like(dexpval_tomo_phi), + create_graph=True, + )[0] + d2expval_tomo_phi2 = torch.autograd.grad( + dexpval_tomo_phi, + values["phi"], + torch.ones_like(dexpval_tomo_phi), + create_graph=True, + )[0] + d2expval_tomo_phi2theta = torch.autograd.grad( + d2expval_tomo_phi2, + list(params.values()), + torch.ones_like(d2expval_tomo_phi2), + create_graph=True, + )[0] + expectation_exact = model_with_psr.expectation(values=values) + dexpval_exact_phi = torch.autograd.grad( + expectation_exact, + values["phi"], + torch.ones_like(expectation_exact), + create_graph=True, + )[0] + dexpval_exact_theta = torch.autograd.grad( + expectation_exact, + list(params.values()), + torch.ones_like(expectation_exact), + create_graph=True, + )[0] + dexpval_exact_phitheta = torch.autograd.grad( + dexpval_exact_phi, + list(params.values()), + torch.ones_like(dexpval_exact_phi), + create_graph=True, + )[0] + d2expval_exact_phi2 = torch.autograd.grad( + dexpval_exact_phi, + values["phi"], + torch.ones_like(dexpval_exact_phi), + create_graph=True, + )[0] + d2expval_exact_phi2theta = torch.autograd.grad( + d2expval_exact_phi2, + list(params.values()), + torch.ones_like(d2expval_exact_phi2), + create_graph=True, + )[0] + assert torch.allclose(expectation_tomo, expectation_exact, atol=acceptance) + assert torch.allclose(dexpval_tomo_phi, dexpval_exact_phi, atol=acceptance) + assert torch.allclose(dexpval_tomo_theta, dexpval_exact_theta, atol=acceptance) + assert torch.allclose(dexpval_tomo_phitheta, dexpval_exact_phitheta, atol=acceptance) + assert torch.allclose(d2expval_tomo_phi2, d2expval_exact_phi2, atol=HIGH_ACCEPTANCE) + assert torch.allclose(d2expval_tomo_phi2theta, d2expval_exact_phi2theta, atol=HIGH_ACCEPTANCE) + + +@pytest.mark.skip( + reason="High-order derivatives takes a long time. 
Keeping them here for future reference." +) +@pytest.mark.parametrize( + "observable, acceptance", + [ + (total_magnetization(4), MIDDLE_ACCEPTANCE), + ], +) +def test_high_order_derivatives_with_qnn(observable: AbstractBlock, acceptance: float) -> None: + n_qubits = 4 + batch_size = 5 + kwargs = {"n_shots": 100000} + + # fm = fourier_feature_map(n_qubits) + fm = feature_map(n_qubits, fm_type="chebyshev") + ansatz = hea(n_qubits, depth=2) + circuit = QuantumCircuit(n_qubits, fm, ansatz) + values = {"phi": torch.rand(batch_size, requires_grad=True)} + + model_with_psr = QNN(circuit=circuit, observable=observable, diff_mode=DiffMode.GPSR) + params = {k: v for k, v in model_with_psr._params.items() if v.requires_grad} + model_with_psr.zero_grad() + expectation_tomo = model_with_psr.expectation( + values=values, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + dexpval_tomo_phi = torch.autograd.grad( + expectation_tomo, + values["phi"], + torch.ones_like(expectation_tomo), + create_graph=True, + )[0] + d2expval_tomo_phi2 = torch.autograd.grad( + dexpval_tomo_phi, + values["phi"], + torch.ones_like(dexpval_tomo_phi), + create_graph=True, + )[0] + d3expval_tomo_phi3 = torch.autograd.grad( + d2expval_tomo_phi2, + values["phi"], + torch.ones_like(d2expval_tomo_phi2), + create_graph=True, + )[0] + expectation_exact = model_with_psr.expectation(values=values) + dexpval_exact_phi = torch.autograd.grad( + expectation_exact, + values["phi"], + torch.ones_like(expectation_exact), + create_graph=True, + )[0] + d2expval_exact_phi2 = torch.autograd.grad( + dexpval_exact_phi, + values["phi"], + torch.ones_like(dexpval_exact_phi), + create_graph=True, + )[0] + d3expval_exact_phi3 = torch.autograd.grad( + d2expval_exact_phi2, + values["phi"], + torch.ones_like(d2expval_exact_phi2), + create_graph=True, + )[0] + assert torch.allclose(expectation_tomo, expectation_exact, atol=acceptance) + assert torch.allclose(dexpval_tomo_phi, dexpval_exact_phi, 
atol=acceptance) + assert torch.allclose(d2expval_tomo_phi2, d2expval_exact_phi2, atol=HIGH_ACCEPTANCE) + assert torch.allclose(d3expval_tomo_phi3, d3expval_exact_phi3, atol=HIGH_ACCEPTANCE) + + +def test_chemistry_hamiltonian() -> None: + from qadence import load + + circuit = load("./tests/test_files/chem_circ.json") + assert isinstance(circuit, QuantumCircuit) + hamiltonian = load("./tests/test_files/chem_ham.json") + assert isinstance(hamiltonian, AbstractBlock) + model = QuantumModel( + circuit=circuit, + observable=hamiltonian, + backend=BackendName.PYQTORCH, + diff_mode=DiffMode.GPSR, + ) + kwargs = {"n_shots": 1000000} + exact = model.expectation( + values={}, + ) + estim = model.expectation( + values={}, + protocol=Measurements(protocol=Measurements.TOMOGRAPHY, options=kwargs), + ) + assert torch.allclose(estim, exact, atol=LOW_ACCEPTANCE) diff --git a/tests/qadence/test_observable.py b/tests/qadence/test_observable.py new file mode 100644 index 000000000..2e8cb23a8 --- /dev/null +++ b/tests/qadence/test_observable.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import strategies as st # type: ignore +import torch +from hypothesis import given, settings + +from qadence import block_to_tensor, total_magnetization +from qadence.blocks import ( + AbstractBlock, + AddBlock, + ScaleBlock, + add, + kron, +) +from qadence.operations import X, Y, Z +from qadence.parameters import VariationalParameter +from qadence.serialization import deserialize + + +def test_to_tensor() -> None: + n_qubits = 2 + + theta1 = VariationalParameter("theta1", value=0.25) + theta2 = VariationalParameter("theta2", value=0.5) + theta3 = VariationalParameter("theta3", value=0.75) + + # following here the convention defined in + # the humar_readable_params() function + values = { + "theta1": 0.25, + "theta2": 0.5, + "theta3": 0.75, + } + + obs1 = add( + values["theta1"] * kron(X(0), X(1)), + values["theta2"] * kron(Y(0), Y(1)), + values["theta3"] * kron(Z(0), Z(1)), + ) + mat1 = 
block_to_tensor(obs1) + + g2 = add(theta1 * kron(X(0), X(1)), theta2 * kron(Y(0), Y(1)), theta3 * kron(Z(0), Z(1))) + mat2 = block_to_tensor(g2) + assert torch.allclose(mat1, mat2) + + +def test_scaled_observable_serialization() -> None: + theta1 = VariationalParameter("theta1") + theta2 = VariationalParameter("theta2") + theta3 = VariationalParameter("theta3") + + # following here the convention defined in + # the humar_readable_params() function + values = {"theta1_#[0]": 0.25, "theta2_#[0]": 0.5, "theta3_#[0]": 0.75} + + obs1 = add( + values["theta1_#[0]"] * kron(X(0), X(1)), + values["theta2_#[0]"] * kron(Y(0), Y(1)), + values["theta3_#[0]"] * kron(Z(0), Z(1)), + ) + + obs2 = add(theta1 * kron(X(0), X(1)), theta2 * kron(Y(0), Y(1)), theta3 * kron(Z(0), Z(1))) + + d2 = obs2._to_dict() + obs2_0 = deserialize(d2) + assert obs2 == obs2_0 + + d1 = obs1._to_dict() + obs1_0 = deserialize(d1) + assert obs1 == obs1_0 + + +def test_totalmagn_serialization() -> None: + obs = total_magnetization(2) + d2 = obs._to_dict() + obs2_0 = deserialize(d2) + assert obs == obs2_0 + + +def test_scaled_totalmagn_serialization() -> None: + theta1 = VariationalParameter("theta1") + obs = theta1 * total_magnetization(2) + d2 = obs._to_dict() + obs2_0 = deserialize(d2) + assert obs == obs2_0 + + +@given(st.observables()) +@settings(deadline=None) +def test_observable_strategy(block: AbstractBlock) -> None: + assert isinstance(block, (ScaleBlock)) + for block in block.block.blocks: # type: ignore[attr-defined] + assert isinstance(block, (ScaleBlock, AddBlock)) diff --git a/tests/qadence/test_operators.py b/tests/qadence/test_operators.py new file mode 100644 index 000000000..a73569705 --- /dev/null +++ b/tests/qadence/test_operators.py @@ -0,0 +1,225 @@ +from __future__ import annotations + +import numpy as np +import pytest +import torch +from openfermion import QubitOperator, get_sparse_operator +from torch.linalg import eigvals + +from qadence import block_to_tensor +from qadence.blocks 
import ( + AbstractBlock, + AddBlock, + add, + block_is_commuting_hamiltonian, + chain, + from_openfermion, + kron, + to_openfermion, +) +from qadence.operations import ( + CNOT, + CPHASE, + CRX, + CRY, + CRZ, + MCPHASE, + MCRX, + MCRY, + MCRZ, + RX, + RY, + RZ, + SWAP, + AnalogSWAP, + H, + HamEvo, + I, + N, + S, + T, + Toffoli, + X, + Y, + Z, + Zero, +) + + +def hamevo_generator_tensor() -> torch.Tensor: + n_qubits = 4 + h = torch.rand(2**n_qubits, 2**n_qubits) + ham = h + torch.conj(torch.transpose(h, 0, 1)) + return ham + + +def hamevo_generator_block() -> AbstractBlock: + n_qubits = 4 + ops = [X, Y] * 2 + qubit_supports = np.random.choice(list(range(n_qubits)), len(ops), replace=True) + ham = chain( + add(*[op(q) for op, q in zip(ops, qubit_supports)]), + *[op(q) for op, q in zip(ops, qubit_supports)], + ) + ham = ham + ham.dagger() # type: ignore [assignment] + return ham + + +def hamevo_eigenvalues(p: float, generator: torch.Tensor) -> torch.Tensor: + eigenvals = eigvals(generator).real + return torch.exp(-1j * p * eigenvals) + + +def eigenval(p: float) -> torch.Tensor: + return torch.exp(torch.tensor([-1j]) * p / 2.0) + + +def rxyz_eigenvals(p: float) -> torch.Tensor: + return torch.cat((eigenval(p), eigenval(p).conj())) + + +def crxy_eigenvals(p: float, n_qubits: int = 2) -> torch.Tensor: + return torch.cat((torch.ones(2**n_qubits - 2), eigenval(p), eigenval(p).conj())) + + +def crz_eigenvals(p: float, n_qubits: int = 2) -> torch.Tensor: + return torch.cat((torch.ones(2**n_qubits - 2), eigenval(p), eigenval(p).conj())) + + +def cphase_eigenvals(p: float, n_qubits: int = 2) -> torch.Tensor: + return torch.cat((torch.ones(2**n_qubits - 1), eigenval(2.0 * p).conj())) + + +@pytest.mark.parametrize( + "gate, eigenvalues", + [ + (X(0), (-1, 1)), + (Y(0), (-1, 1)), + (Z(0), (-1, 1)), + (N(0), (0, 1)), + (H(0), (-1, 1)), + (I(0), (1, 1)), + (Zero(), (0, 0)), + (RX(0, 0.5), rxyz_eigenvals(0.5)), + (RY(0, 0.5), rxyz_eigenvals(0.5)), + (RZ(0, 0.5), 
rxyz_eigenvals(0.5)), + (CNOT(0, 1), (-1, 1, 1, 1)), + (Toffoli((0, 1), 2), (-1, 1, 1, 1, 1, 1, 1, 1)), + (Toffoli((0, 1, 2), 3), (-1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), + (HamEvo(hamevo_generator_tensor(), 0.5, tuple(range(4))), ()), + (HamEvo(hamevo_generator_block(), 0.5, tuple(range(4))), ()), + (CRX(0, 1, 0.5), crxy_eigenvals(0.5)), + (CRY(0, 1, 0.5), crxy_eigenvals(0.5)), + (CRZ(0, 1, 0.5), crz_eigenvals(0.5)), + (MCRX((0, 1), 2, 0.5), crxy_eigenvals(0.5, 3)), + (MCRY((0, 1), 2, 0.5), crxy_eigenvals(0.5, 3)), + (MCRZ((0, 1), 2, 0.5), crz_eigenvals(0.5, 3)), + (T(0), (1, np.sqrt(1j))), + (S(0), (1, 1j)), + (SWAP(0, 1), (-1, 1, 1, 1)), + (AnalogSWAP(0, 1), (-1, -1, -1, 1)), # global phase difference with SWAP + (CPHASE(0, 1, 0.5), cphase_eigenvals(0.5)), + (MCPHASE((0, 1), 2, 0.5), cphase_eigenvals(0.5, 3)), + ], +) +def test_gate_instantiation(gate: AbstractBlock, eigenvalues: torch.Tensor) -> None: + if not isinstance(eigenvalues, torch.Tensor): + eigenvalues = torch.tensor(eigenvalues, dtype=torch.cdouble) + + assert gate.qubit_support == tuple(range(gate.n_qubits)) + + if isinstance(gate, HamEvo) and not isinstance(gate, AnalogSWAP): + if isinstance(gate.generator, AbstractBlock): + generator = block_to_tensor(gate.generator) + elif isinstance(gate.generator, torch.Tensor): + generator = gate.generator + + evs = hamevo_eigenvalues(0.5, generator) + # cope with machine precision on the Gitlab runner instance + assert torch.allclose(gate.eigenvalues, evs, atol=1e-9, rtol=1e-9) + else: + # cope with machine precision on the Gitlab runner instance + assert torch.allclose(gate.eigenvalues, eigenvalues, atol=1e-9, rtol=1e-9) + + +def test_creation() -> None: + from qadence.parameters import evaluate + + block1 = from_openfermion(0.52 * QubitOperator("X0 Y5") + QubitOperator("Z0")) + block2 = from_openfermion(0.52 * QubitOperator("Z0 X5") + QubitOperator("Y0")) + block3 = block1 + block2 + + assert len(block1) == 2 + assert 
evaluate(block1.blocks[0].parameters.parameter) == 0.52 # type: ignore + assert evaluate(block1.blocks[1].parameters.parameter) == 1.0 # type: ignore + + assert isinstance(block3, AddBlock) + + +def test_commutation() -> None: + block1 = X(0) + block2 = from_openfermion(0.52 * QubitOperator("X0 Y5") + QubitOperator("Z1")) + block3 = from_openfermion(0.52 * QubitOperator("Z0 X5") + QubitOperator("Y0")) + block4 = block2 + block3 + block5 = block3 + block4 + + assert block_is_commuting_hamiltonian(block1) + assert block_is_commuting_hamiltonian(block2) + assert not block_is_commuting_hamiltonian(block3) + assert not block_is_commuting_hamiltonian(block4) + assert not block_is_commuting_hamiltonian(block5) + + +@pytest.mark.parametrize( + "block_and_mat", + [ + (X(0), np.array([[0.0, 1.0], [1.0, 0.0]])), + (Y(0), np.array([[0.0 + 0.0j, 0.0 - 1.0j], [0.0 + 1.0j, 0.0 - 0.0j]])), + (Z(0), np.array([[1.0, 0.0], [0.0, -1.0]])), + ( + add(kron(X(0), X(1)), kron(Y(0), Y(1))) * 0.5, + np.array( + [ + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + ] + ), + ), + ], +) +def test_to_matrix(block_and_mat: tuple[AbstractBlock, np.ndarray]) -> None: + block = block_and_mat[0] + expected = block_and_mat[1] + mat = block_to_tensor(block).squeeze().numpy() + assert np.array_equal(mat, expected) + + +@pytest.mark.parametrize( + "qubit_op", + [ + # various + # QubitOperator(""), + QubitOperator("") + QubitOperator("X0") + QubitOperator("X0 X1"), + QubitOperator("X0 X1") + QubitOperator("Y0 Y1") + QubitOperator("Z0 Z1"), + QubitOperator("X0 X1 X2 X3"), + 0.52 * QubitOperator("X0 Y5") + QubitOperator("Z0"), + 0.52 * QubitOperator("Z0 X5") + QubitOperator("Y0"), + # total magnetization + QubitOperator("Z0") + QubitOperator("Z1") + QubitOperator("Z2") + QubitOperator("Z3"), + # ising-like + QubitOperator("Z0 Z1") + + QubitOperator("X0") + + QubitOperator("X1") + + QubitOperator("Z0") + + QubitOperator("Z1"), + ], +) +def 
test_from_openfermion(qubit_op: QubitOperator) -> None: + obs = from_openfermion(qubit_op) + expected_mat = get_sparse_operator(qubit_op).toarray() + np_mat = block_to_tensor(obs).squeeze().numpy() + assert np.array_equal(np_mat, expected_mat) + op = to_openfermion(obs) + assert op == qubit_op diff --git a/tests/qadence/test_overlap.py b/tests/qadence/test_overlap.py new file mode 100644 index 000000000..7d47544fd --- /dev/null +++ b/tests/qadence/test_overlap.py @@ -0,0 +1,392 @@ +from __future__ import annotations + +from timeit import timeit + +import numpy as np +import pytest +import torch +from metrics import LOW_ACCEPTANCE + +from qadence import BackendName, Overlap, OverlapMethod, QuantumCircuit, backend_factory +from qadence.blocks import chain, kron, tag +from qadence.blocks.primitive import PrimitiveBlock +from qadence.operations import RX, RY, H, I, S, T, Z +from qadence.parameters import FeatureParameter, VariationalParameter +from qadence.types import DiffMode + +torch.manual_seed(42) + + +def _create_test_circuits(n_qubits: int) -> tuple[QuantumCircuit, QuantumCircuit]: + # prepare circuit for bras + param_bra = FeatureParameter("phi") + block_bra = kron(*[RX(qubit, param_bra) for qubit in range(n_qubits)]) + fm_bra = tag(block_bra, tag="feature-map-bra") + circuit_bra = QuantumCircuit(n_qubits, fm_bra) + + # prepare circuit for kets + param_ket = FeatureParameter("psi") + block_ket = kron(*[RX(qubit, param_ket) for qubit in range(n_qubits)]) + fm_ket = tag(block_ket, tag="feature-map-ket") + circuit_ket = QuantumCircuit(n_qubits, fm_ket) + + return circuit_bra, circuit_ket + + +def _get_theoretical_result(n_qubits: int, values_bra: dict, values_ket: dict) -> torch.Tensor: + # get theoretical result + ovrlp_theor = torch.zeros((2, 2)) + for i in range(2): + for j in range(2): + ovrlp_theor[i, j] = np.cos((values_bra["phi"][i] - values_ket["psi"][j]) / 2) ** ( + 2 * n_qubits + ) + return ovrlp_theor + + +def _generate_parameter_values() -> tuple[dict, 
dict]: + values_bra = {"phi": 2 * np.pi * torch.rand(2)} + values_ket = {"psi": 2 * np.pi * torch.rand(2)} + return values_bra, values_ket + + +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH, BackendName.BRAKET]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_exact(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.EXACT, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_exact = ovrlp(values_bra, values_ket) + + assert torch.all(torch.isclose(ovrlp_exact, ovrlp_theor, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH, BackendName.BRAKET]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_jensen_shannon(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra = {"phi": torch.Tensor([np.pi / 2, np.pi])} + values_ket = {"psi": torch.Tensor([np.pi / 2, np.pi])} + + # get theoretical result + if n_qubits == 1: + ovrlp_theor = torch.tensor([[1.0, 0.78], [0.78, 1.0]]) + elif n_qubits == 2: + ovrlp_theor = torch.tensor([[1.0, 0.61], [0.61, 1.0]]) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.JENSEN_SHANNON, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_js = ovrlp(values_bra, values_ket, n_shots=10000) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_js, 
atol=LOW_ACCEPTANCE)) + + +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH, BackendName.BRAKET]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_comp_uncomp_exact(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.COMPUTE_UNCOMPUTE, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_cu = ovrlp(values_bra, values_ket) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_cu, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH, BackendName.BRAKET]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_comp_uncomp_shots(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.COMPUTE_UNCOMPUTE, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_cu = ovrlp(values_bra, values_ket, n_shots=10000) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_cu, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_swap_test_exact(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + 
circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.SWAP_TEST, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_st = ovrlp(values_bra, values_ket) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_st, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_swap_test_shots(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.SWAP_TEST, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_st = ovrlp(values_bra, values_ket, n_shots=10000) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_st, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_hadamard_test_exact(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + 
circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.HADAMARD_TEST, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_ht = ovrlp(values_bra, values_ket) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_ht, atol=LOW_ACCEPTANCE)) + + +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH]) +@pytest.mark.parametrize("n_qubits", [1, 2]) +def test_overlap_hadamard_test_shots(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuits + circuit_bra, circuit_ket = _create_test_circuits(n_qubits) + + # values for circuits + values_bra, values_ket = _generate_parameter_values() + + # get theoretical result + ovrlp_theor = _get_theoretical_result(n_qubits, values_bra, values_ket) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_ket, + backend=backend_name, + method=OverlapMethod.HADAMARD_TEST, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + ovrlp_ht = ovrlp(values_bra, values_ket, n_shots=10000) + + assert torch.all(torch.isclose(ovrlp_theor, ovrlp_ht, atol=LOW_ACCEPTANCE)) + + +# TODO: investigate why braket overlap.EXACT gives slower results that fails +# TODO: move the test below in the future to https://gitlab.pasqal.com/pqs/benchmarks +@pytest.mark.flaky(max_runs=5) +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH, BackendName.BRAKET]) +@pytest.mark.parametrize("n_qubits", [1, 2, 4, 10, 12]) +def test_overlap_exact_speed(backend_name: BackendName, n_qubits: int) -> None: + # prepare circuit for bras + param_bra = FeatureParameter("phi") + block_bra = kron(*[RX(qubit, param_bra) for qubit in range(n_qubits)]) + fm_bra = tag(block_bra, tag="feature-map-bra") + circuit_bra = QuantumCircuit(n_qubits, fm_bra) + + # values for circuits + values_bra = {"phi": torch.Tensor([np.pi / 2])} + + # create backend for calculating expectation value + obs = tag(kron(*[I(i) for 
i in range(n_qubits)]), "observable") + backend = backend_factory(backend=backend_name, diff_mode=None) + (conv_circ, conv_obs, embed, params) = backend.convert(circuit_bra, obs) + t_exp = timeit( + lambda: backend.expectation(conv_circ, conv_obs, embed(params, values_bra)), number=100 + ) + + # get result from overlap class + ovrlp = Overlap( + circuit_bra, + circuit_bra, + backend=backend_name, + method=OverlapMethod.EXACT, + diff_mode=DiffMode.AD if backend_name == BackendName.PYQTORCH else DiffMode.GPSR, + ) + t_ovrlp = timeit(lambda: ovrlp(values_bra, values_bra), number=100) + + assert np.round(t_ovrlp / t_exp, decimals=0) <= 2.0 + + +@pytest.mark.parametrize("backend_name", [BackendName.PYQTORCH]) +@pytest.mark.parametrize("gate", [Z, S, T, H]) +def test_overlap_training(backend_name: BackendName, gate: PrimitiveBlock) -> None: + # define training parameters + phi = VariationalParameter("phi") + theta = VariationalParameter("theta") + + # define training and target quantum circuits + circuit_bra = QuantumCircuit(1, chain(RX(0, phi), RY(0, theta))) + circuit_ket = QuantumCircuit(1, gate(0)) # type: ignore [operator] + + # define overlap model + model = Overlap(circuit_bra, circuit_ket, backend=backend_name, method=OverlapMethod.EXACT) + + # prepare for training + optimizer = torch.optim.Adam(model.parameters(), lr=0.25) + loss_criterion = torch.nn.MSELoss() + n_epochs = 1000 + loss_save = [] + + # train model + for _ in range(n_epochs): + optimizer.zero_grad() + out = model() + loss = loss_criterion(torch.tensor(1.0).reshape((1, 1)), out) + loss.backward() + optimizer.step() + loss_save.append(loss.item()) + + # get final results + wf_exact = model.ket_model.run({}).detach() + wf_overlap = model.run({}).detach() + + assert torch.all(torch.isclose(wf_exact, wf_overlap, atol=LOW_ACCEPTANCE)) + + +def test_output_shape() -> None: + # define feature params + param_bra = FeatureParameter("phi") + param_ket = FeatureParameter("psi") + + # prepare circuit for bras + 
block_bra = kron(*[RX(qubit, param_bra) for qubit in range(2)]) + fm_bra = tag(block_bra, tag="feature-map-bra") + circuit_bra = QuantumCircuit(2, fm_bra) + + # prepare circuit for kets + block_ket = kron(*[RX(qubit, param_ket) for qubit in range(2)]) + fm_ket = tag(block_ket, tag="feature-map-ket") + circuit_ket = QuantumCircuit(2, fm_ket) + + # values for circuits + values_bra = {"phi": 2 * np.pi * torch.rand(2)} + values_ket = {"psi": 2 * np.pi * torch.rand(3)} + + # get result from overlap class - distinct feature params for bra/ket + ovrlp = Overlap( + circuit_bra, circuit_ket, backend=BackendName.PYQTORCH, method=OverlapMethod.EXACT + ) + ovrlp = ovrlp(values_bra, values_ket) + assert ovrlp.shape == (2, 3) + + # prepare circuit for bras + block_bra = kron(*[RX(qubit, param_bra) for qubit in range(2)]) + fm_bra = tag(block_bra, tag="feature-map-bra") + circuit_bra = QuantumCircuit(2, fm_bra) + + # prepare circuit for kets + block_ket = kron(*[RX(qubit, param_bra) for qubit in range(2)]) + fm_ket = tag(block_ket, tag="feature-map-ket") + circuit_ket = QuantumCircuit(2, fm_ket) + + # values for circuits + values_bra = {"phi": 2 * np.pi * torch.rand(4)} + + # get result from overlap class - shared feature param for bra/ket + ovrlp = Overlap( + circuit_bra, circuit_ket, backend=BackendName.PYQTORCH, method=OverlapMethod.EXACT + ) + ovrlp = ovrlp(values_bra, values_bra) + assert ovrlp.shape == (4, 1) + + # prepare circuit for kets + block_ket = kron(*[RX(qubit, np.pi / 2) for qubit in range(2)]) + fm_ket = tag(block_ket, tag="feature-map-ket") + circuit_ket = QuantumCircuit(2, fm_ket) + + # values for circuits + values_bra = {"phi": 2 * np.pi * torch.rand(4)} + + # get result from overlap class - bra has feature param, ket doesn't + ovrlp = Overlap( + circuit_bra, circuit_ket, backend=BackendName.PYQTORCH, method=OverlapMethod.EXACT + ) + ovrlp = ovrlp(values_bra) + assert ovrlp.shape == (4, 1) diff --git a/tests/qadence/test_parameters.py 
b/tests/qadence/test_parameters.py new file mode 100644 index 000000000..e5c779bc0 --- /dev/null +++ b/tests/qadence/test_parameters.py @@ -0,0 +1,361 @@ +from __future__ import annotations + +import numpy as np +import pytest +import sympy +import torch +from torch import allclose + +from qadence import BackendName, DiffMode +from qadence.backends.pyqtorch import Backend as PyQBackend +from qadence.blocks import ParametricBlock, chain +from qadence.blocks.utils import expressions +from qadence.circuit import QuantumCircuit +from qadence.constructors import hea, total_magnetization +from qadence.models import QuantumModel +from qadence.operations import CNOT, RX, RY, RZ +from qadence.parameters import ( + FeatureParameter, + Parameter, + evaluate, + stringify, +) +from qadence.serialization import deserialize, serialize +from qadence.states import one_state, uniform_state, zero_state + + +def test_param_initialization(parametric_circuit: QuantumCircuit) -> None: + circ = parametric_circuit + + # check general configuration + assert len(circ.unique_parameters) == 4 + # the additional 4 parameters are the four fixed scale parameters of the observable + assert len(circ.parameters()) == 6 + + # unique parameters are returned as sympy symbols + for p in circ.unique_parameters: + if p is not None: + assert isinstance(p, sympy.Symbol) + + params: list[Parameter] + params = circ.parameters() # type: ignore [assignment] + assert all([isinstance(p, sympy.Basic) for p in params]) + + # check symbol assignation + non_number = [p for p in params if not p.is_number] + expected = ["x", "theta1", "theta2", "theta3"] + # symbols = unique_symbols(params) + assert len(non_number) == len(expected) + assert all([a in expected for a in non_number]) + + # check numerical valued parameter + for q in params[:6]: + if q.is_number: + assert evaluate(q) == np.pi + for q in params[6:]: + assert evaluate(q) == 1.0 + + # check parameter with expression + exprs = expressions(circ.block) + for 
expr in exprs: + if not expr.is_number and "x" in stringify(expr): + assert stringify(expr) == "3*x" + + +@pytest.mark.parametrize( + "n_qubits", + [1, 2, 4, 6, 8], +) +def test_multiparam_expressions(n_qubits: int) -> None: + w = Parameter("w", trainable=True) + x = Parameter("x", trainable=True) + y = Parameter("y", trainable=True) + z = Parameter("z", trainable=True) + block = RX(np.random.randint(n_qubits), w * x) + block1 = RZ(np.random.randint(n_qubits), y + z) + qc = QuantumCircuit(n_qubits, chain(block, block1)) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, BackendName.PYQTORCH, DiffMode.AD) + uni_state = uniform_state(n_qubits) + wf = qm.run( + { + "w": torch.rand(1) * np.pi, + "x": torch.rand(1) * np.pi, + "y": torch.rand(1) * np.pi, + "z": torch.rand(1) * np.pi, + }, + uni_state, + ) + assert wf is not None + + +def test_multiparam_no_rx_rotation(n_qubits: int = 1) -> None: + w = Parameter("w", trainable=True, value=0.0) + x = Parameter("x", trainable=True, value=0.0) + y = Parameter("y", trainable=True, value=0.0) + block = RX(np.random.randint(n_qubits), x + y * w) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, BackendName.PYQTORCH, DiffMode.AD) + uni_state = uniform_state(n_qubits) + wf = qm.run( + {}, + uni_state, + ) + + assert allclose(wf, uni_state) + + +def test_multiparam_pi_ry_rotation_trainable(n_qubits: int = 1) -> None: + x = Parameter("x", trainable=True, value=torch.tensor([np.pi / 2], dtype=torch.cdouble)) + y = Parameter("y", trainable=True, value=torch.tensor([np.pi / 2], dtype=torch.cdouble)) + block = RY(0, x + y) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + o_state = one_state(n_qubits) + wf = qm.run({}, z_state) + assert torch.allclose(wf, o_state) + + +def 
test_multiparam_pi_ry_rotation_nontrainable(n_qubits: int = 1) -> None: + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=False) + block = RY(0, x + y) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + o_state = one_state(n_qubits) + wf = qm.run( + { + "x": torch.tensor([np.pi / 2], dtype=torch.cdouble), + "y": torch.tensor([np.pi / 2], dtype=torch.cdouble), + }, + z_state, + ) + assert torch.allclose(wf, o_state) + + +def test_mixed_single_trainable(n_qubits: int = 1) -> None: + x = Parameter("x", trainable=False) + y = Parameter("y", trainable=True, value=torch.tensor([np.pi / 2], dtype=torch.cdouble)) + ry0 = RY(0, x) + ry1 = RY(0, y) + block = chain(ry0, ry1) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + o_state = one_state(n_qubits) + wf = qm.run( + { + "x": torch.tensor([np.pi / 2], dtype=torch.cdouble), + }, + z_state, + ) + assert torch.allclose(wf, o_state) + + +def test_multiple_trainable_multiple_untrainable(n_qubits: int = 1) -> None: + w = Parameter("w", trainable=True) + x = Parameter("x", trainable=True) + y = Parameter("y", trainable=True) + rx = RY(0, x + y * w) + + a = Parameter("a", trainable=False) + b = Parameter("b", trainable=False) + c = Parameter("c", trainable=False) + rz = RZ(0, a - b - c) + + block = chain(rx, rz) + + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, BackendName.PYQTORCH, DiffMode.AD) + uni_state = uniform_state(n_qubits) + wf = qm.run( + {param: np.random.rand() for param in ["a", "b", "c"]}, + uni_state, + ) + + assert not torch.any(torch.isnan(wf)) + + +def test_multparam_grads(n_qubits: int = 2) -> None: + batch_size = 5 + theta0 = Parameter("theta0", 
trainable=True) + theta1 = Parameter("theta1", trainable=True) + phi = Parameter("phi", trainable=False) + + variational = RY(1, theta0 * theta1) + fm = RX(0, phi) + block = chain(fm, variational, CNOT(0, 1)) + + circ = QuantumCircuit(n_qubits, block) + + # Making circuit with AD + observable = total_magnetization(n_qubits=n_qubits) + quantum_backend = PyQBackend() + (pyq_circ, pyq_obs, embed, params) = quantum_backend.convert(circ, observable) + + batch_size = 5 + values = { + "phi": torch.rand(batch_size, requires_grad=False), + } + + wf = quantum_backend.run(pyq_circ, embed(params, values)) + expval = quantum_backend.expectation(pyq_circ, pyq_obs, embed(params, values)) + dexpval_x = torch.autograd.grad( + expval, params["theta0"], torch.ones_like(expval), retain_graph=True + )[0] + dexpval_y = torch.autograd.grad( + expval, params["theta1"], torch.ones_like(expval), retain_graph=True + )[0] + assert ( + not torch.isnan(wf).any().item() + and not torch.isnan(dexpval_x).any().item() + and not torch.isnan(dexpval_y).any().item() + ) + + +def test_non_trainable_trainable_gate(n_qubits: int = 1) -> None: + x = Parameter("x", trainable=True, value=torch.tensor([1.0], dtype=torch.cdouble)) + y = Parameter("y", trainable=False) + z = Parameter( + "z", + trainable=True, + value=torch.tensor([np.pi / 2], dtype=torch.cdouble), + ) + block = RY(0, x * y + z) + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + o_state = one_state(n_qubits) + wf = qm.run( + { + "y": torch.tensor([np.pi / 2], dtype=torch.cdouble), + }, + z_state, + ) + assert torch.allclose(wf, o_state) + + +def test_trainable_untrainable_fm(n_qubits: int = 2) -> None: + x = Parameter("x", trainable=False) + theta0 = Parameter("theta0", trainable=True) + theta1 = Parameter("theta1", trainable=True) + + ry0 = RY(0, theta0 * x) + ry1 = RY(1, theta1 * x) + + fm = 
chain(ry0, ry1) + + ansatz = hea(2, 2, param_prefix="eps") + + block = chain(fm, ansatz) + + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + wf = qm.run( + { + "x": torch.tensor([1.0], dtype=torch.cdouble), + "theta0": torch.tensor([np.pi / 2], dtype=torch.cdouble), + "theta1": torch.tensor([np.pi / 2], dtype=torch.cdouble), + }, + z_state, + ) + assert wf is not None + + +def test_hetereogenous_multiparam_expr(n_qubits: int = 2) -> None: + x = Parameter("x", trainable=False) + theta0 = Parameter("theta0", trainable=True) + theta1 = Parameter("theta1", trainable=True) + myconstant = 2.0 + + ry0 = RY(0, theta0 * x + myconstant) + ry1 = RY(1, theta1 * x - myconstant) + + fm = chain(ry0, ry1) + + ansatz = hea(2, 2, param_prefix="eps") + + block = chain(fm, ansatz) + + qc = QuantumCircuit(n_qubits, block) + obs = total_magnetization(n_qubits) + qm = QuantumModel(qc, obs, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD) + z_state = zero_state(n_qubits) + wf = qm.run( + { + "x": torch.tensor([1.0], dtype=torch.cdouble), + "theta0": torch.tensor([np.pi / 2], dtype=torch.cdouble), + "theta1": torch.tensor([np.pi / 2], dtype=torch.cdouble), + }, + z_state, + ) + assert wf is not None + + +def test_single_param_serialization() -> None: + x0 = Parameter("x", trainable=True, value=1.0) + d0 = x0._to_dict() + x1 = Parameter._from_dict(d0) + assert x0 == x1 + + y = Parameter("y", trainable=True) + d2 = y._to_dict() + y1 = Parameter._from_dict(d2) + assert y == y1 + + +@pytest.mark.parametrize( + "gate", + [ + RX(0, "theta"), + RY(0, Parameter("theta", trainable=False)), + RZ(0, Parameter("theta", trainable=True, value=5.0)), + ], +) +def test_serialize_singleparam_gate(gate: ParametricBlock) -> None: + d = serialize(gate.parameters.parameter) + op = deserialize(d) + assert gate.parameters.parameter == op + + +def 
test_multiparam_serialization() -> None: + x = Parameter("x", trainable=True, value=1.0) + y = Parameter("y", trainable=True, value=2.0) + expr = x + y + myrx = RX(0, expr) + d_block = myrx._to_dict() + nb = RX._from_dict(d_block) + assert nb == myrx + + +def test_multiparam_eval_serialization() -> None: + x = Parameter("x", trainable=True, value=1.0) + y = Parameter("y", trainable=True, value=2.0) + expr = x + y + myrx = RX(0, expr) + d = serialize(myrx.parameters.parameter) + loaded_expr = deserialize(d) + assert loaded_expr == expr + eval_orig = evaluate(myrx.parameters.parameter) + eval_copy = evaluate(loaded_expr) + assert eval_orig == eval_copy + + +def test_sympy_modules() -> None: + x = FeatureParameter("x") + y = FeatureParameter("y") + expr = 2 * sympy.acos(x) + (sympy.cos(y) + sympy.asinh(y)) + d = serialize(expr) + loaded_expr = deserialize(d) + assert loaded_expr == expr + assert evaluate(expr) == evaluate(loaded_expr) diff --git a/tests/qadence/test_register.py b/tests/qadence/test_register.py new file mode 100644 index 000000000..74997eb89 --- /dev/null +++ b/tests/qadence/test_register.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import json +import os + +import networkx as nx +import numpy as np +from pytest import approx + +from qadence import Register + + +def calc_dist(graph: nx.Graph) -> np.ndarray: + coords = {i: node["pos"] for i, node in graph.nodes.items()} + coords_np = np.array(list(coords.values())) + center = np.mean(coords_np, axis=0) + distances = np.array(np.sqrt(np.sum((coords_np - center) ** 2, axis=1))) + return distances + + +def test_register() -> None: + # create register with number of qubits only + reg = Register(4) + assert reg.n_qubits == 4 + + # create register from arbitrary graph + graph = nx.Graph() + graph.add_edge(0, 1) + reg = Register(graph) + assert reg.n_qubits == 2 + + # test linear lattice node number + r = Register.line(4) + assert len(r.graph) == 4 + assert r == Register.lattice("line", 4) + + # 
test circular lattice node number + r = Register.circle(8) + assert len(r.graph) == 8 + assert r == Register.lattice("circle", 8) + + # test shape of circular lattice + distances = calc_dist(r.graph) + assert distances == approx(np.ones(len(distances)) * distances[0]) + + # test square loop lattice node number + r = Register.square(4) + assert len(r.graph) == 12 + assert r == Register.lattice("square", 4) + + # test rectangular lattice node number + r = Register.rectangular_lattice(2, 3) + assert len(r.graph) == 6 + assert r == Register.lattice("rectangular_lattice", 2, 3) + + # test shape of rectangular lattice + r = Register.rectangular_lattice(2, 2) + distances = calc_dist(r.graph) + assert distances == approx(np.ones(len(distances)) * distances[0]) + + # test triangular lattice node number + r = Register.triangular_lattice(1, 3) + assert len(r.graph) == 5 + assert r == Register.lattice("triangular_lattice", 1, 3) + + # test shape of triangular lattice + r = Register.triangular_lattice(1, 1) + distances = calc_dist(r.graph) + assert distances == approx(np.ones(len(distances)) * distances[0]) + + # test honeycomb lattice node number + r = Register.honeycomb_lattice(1, 3) + assert len(r.graph) == 14 + assert r == Register.lattice("honeycomb_lattice", 1, 3) + + # test shape of honeycomb lattice + r = Register.honeycomb_lattice(1, 1) + distances = calc_dist(r.graph) + assert distances == approx(np.ones(len(distances)) * distances[0]) + + # test arbitrary lattice node number + r = Register.from_coordinates([(0, 1), (0, 2), (0, 3), (1, 3)]) + assert len(r.graph) == 4 + + +def test_register_to_dict(BasicRegister: Register) -> None: + reg = BasicRegister + reg_dict = reg._to_dict() + reg_from_dict = Register._from_dict(reg_dict) + assert reg == reg_from_dict + + +def test_json_dump_load_register_to_dict(BasicRegister: Register) -> None: + reg = BasicRegister + reg_dict = reg._to_dict() + dumpedregdict = json.dumps(reg_dict) + file_name = "tmp.json" + with 
open(file_name, "w") as file: + file.write(dumpedregdict) + with open(file_name, "r") as file: + loaded_dict = json.load(file) + + os.remove(file_name) + reg_from_loaded_dict = Register._from_dict(loaded_dict) + assert reg == reg_from_loaded_dict diff --git a/tests/qadence/test_states.py b/tests/qadence/test_states.py new file mode 100644 index 000000000..fd6a7a976 --- /dev/null +++ b/tests/qadence/test_states.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from typing import Callable + +import pytest +import torch + +from qadence.circuit import QuantumCircuit +from qadence.states import ( + _run_state, + ghz_block, + ghz_state, + is_normalized, + one_block, + one_state, + product_block, + product_state, + rand_bitstring, + uniform_block, + uniform_state, + zero_block, + zero_state, +) + + +@pytest.mark.parametrize( + "n_qubits", + [2, 4, 6], +) +@pytest.mark.parametrize( + "state_generators", + [ + (one_state, one_block), + (zero_state, zero_block), + (uniform_state, uniform_block), + (ghz_state, ghz_block), + ], +) +def test_base_states(n_qubits: int, state_generators: tuple[Callable, Callable]) -> None: + state_func, block_func = state_generators + state_direct = state_func(n_qubits) + block = block_func(n_qubits) + state_block = _run_state(QuantumCircuit(n_qubits, block), "pyqtorch") + assert is_normalized(state_direct) + assert is_normalized(state_block) + assert torch.allclose(state_direct, state_block) + + +@pytest.mark.parametrize( + "n_qubits", + [2, 4, 6], +) +def test_product_state(n_qubits: int) -> None: + bitstring = rand_bitstring(n_qubits) + state_direct = product_state(bitstring) + block = product_block(bitstring) + state_block = _run_state(QuantumCircuit(n_qubits, block), "pyqtorch") + assert is_normalized(state_direct) + assert is_normalized(state_block) + assert torch.allclose(state_direct, state_block) diff --git a/tests/qadence/test_transpile.py b/tests/qadence/test_transpile.py new file mode 100644 index 000000000..fdcd3388e --- 
/dev/null +++ b/tests/qadence/test_transpile.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from qadence import RX, RZ, H, HamEvo, X, chain, kron +from qadence.blocks import AbstractBlock, AddBlock, ChainBlock, KronBlock +from qadence.transpile import digitalize, flatten +from qadence.types import LTSOrder + + +def test_flatten() -> None: + from qadence.transpile.block import _flat_blocks + + x: AbstractBlock + + # make sure we get the identity when flattening non-existent blocks + x = kron(X(0), X(1), X(2)) + assert tuple(_flat_blocks(x, ChainBlock)) == (X(0), X(1), X(2)) + assert flatten(x, [ChainBlock]) == kron(X(0), X(1), X(2)) + + x = chain(chain(chain(chain(X(0))))) + assert flatten(x, [ChainBlock]) == chain(X(0)) + + x = kron(kron(X(0), kron(X(1))), kron(X(2))) + assert tuple(_flat_blocks(x, KronBlock)) == (X(0), X(1), X(2)) + assert flatten(x, [KronBlock]) == kron(X(0), X(1), X(2)) + assert flatten(x, [KronBlock, ChainBlock]) == kron(X(0), X(1), X(2)) + + x = chain(kron(X(0), kron(X(1))), kron(X(1))) + assert flatten(x) == chain(kron(X(0), X(1)), kron(X(1))) + + x = 2 * kron(kron(X(0), kron(X(1))), kron(X(2))) + assert flatten(x) == 2 * kron(X(0), X(1), X(2)) + + x = kron(kron(X(0), 2 * kron(X(1))), kron(X(2))) + # note that the innermost `KronBlock` behind the `ScaleBlock` stays + assert flatten(x) == kron(X(0), 2 * kron(X(1)), X(2)) + + x = chain(chain(chain(X(0))), kron(kron(X(0)))) + assert flatten(x, [ChainBlock]) == chain(X(0), kron(kron(X(0)))) + assert flatten(x, [KronBlock]) == chain(chain(chain(X(0))), kron(X(0))) + assert flatten(x, [ChainBlock, KronBlock]) == chain(X(0), kron(X(0))) + assert flatten(x, [AddBlock]) == x + + x = chain(kron(chain(chain(X(0), X(0))))) + assert flatten(x, [ChainBlock]) == chain(kron(chain(X(0), X(0)))) + + x = chain(chain(X(0), HamEvo(X(0), 2), RX(0, 2))) + assert digitalize(x, LTSOrder.BASIC) == chain( + chain(X(0), chain(H(0), RZ(0, 4.0), H(0)), RX(0, 2.0)) + ) diff --git a/tests/strategies.py 
b/tests/strategies.py new file mode 100644 index 000000000..d3f856437 --- /dev/null +++ b/tests/strategies.py @@ -0,0 +1,291 @@ +from __future__ import annotations + +import random +import string +from functools import reduce +from typing import Any, Callable, Set + +import hypothesis.strategies as st +from hypothesis.strategies._internal import SearchStrategy +from numpy import pi +from sympy import Basic, Expr, acos, asin, atan, cos, sin, tan +from torch import Tensor + +from qadence.backend import BackendName +from qadence.blocks import ( + AbstractBlock, + ParametricBlock, + add, + chain, + kron, +) +from qadence.circuit import QuantumCircuit +from qadence.extensions import supported_gates +from qadence.ml_tools.utils import rand_featureparameters +from qadence.operations import ( + analog_gateset, + multi_qubit_gateset, + non_unitary_gateset, + pauli_gateset, + single_qubit_gateset, + three_qubit_gateset, + two_qubit_gateset, +) +from qadence.parameters import FeatureParameter, Parameter, VariationalParameter +from qadence.types import ParameterType, TNumber + +PARAM_NAME_LENGTH = 1 +MIN_SYMBOLS = 1 +MAX_SYMBOLS = 3 +FEAT_PARAM_MIN = -1.0 +FEAT_PARAM_MAX = 1.0 + +VAR_PARAM_MIN = -2 * pi +VAR_PARAM_MAX = 2 * pi + +TRIG_FNS = [cos, sin, tan, acos, asin, atan] + +PARAM_RANGES = { + "Feature": (FEAT_PARAM_MIN, FEAT_PARAM_MAX), + "Variational": (VAR_PARAM_MIN, VAR_PARAM_MAX), + "Fixed": (VAR_PARAM_MIN, VAR_PARAM_MAX), +} + +OPS_DICT = {"+": lambda x, y: x + y, "-": lambda x, y: x - y, "*": lambda x, y: x * y} + +supported_gates_map: dict = {k: supported_gates(k) for k in BackendName.list()} +supported_gates_list: list[Set] = [ + set(supported_gates(name)) for name in BackendName.list() if name != BackendName.PULSER +] + +full_gateset = list( + reduce(lambda fs, s: fs.union(s), supported_gates_list) # type: ignore[attr-defined] +) +minimal_gateset = list( + reduce(lambda fs, s: fs.intersection(s), supported_gates_list) # type: ignore[attr-defined] +) 
+digital_gateset = list(set(full_gateset) - set(analog_gateset) - set(non_unitary_gateset)) + +MIN_N_QUBITS = 1 +MAX_N_QUBITS = 4 +MIN_CIRCUIT_DEPTH = 1 +MAX_CIRCUIT_DEPTH = 4 +MIN_BATCH_SIZE = 1 +MAX_BATCH_SIZE = 4 + +N_QUBITS_STRATEGY: SearchStrategy[int] = st.integers(min_value=MIN_N_QUBITS, max_value=MAX_N_QUBITS) +CIRCUIT_DEPTH_STRATEGY: SearchStrategy[int] = st.integers( + min_value=MIN_CIRCUIT_DEPTH, max_value=MAX_CIRCUIT_DEPTH +) +BATCH_SIZE_STRATEGY: SearchStrategy[int] = st.integers( + min_value=MIN_BATCH_SIZE, max_value=MAX_BATCH_SIZE +) + + +def get_param( + draw: Callable[[SearchStrategy[Any]], Any], + param_type: ParameterType, + name_len: int, + value: TNumber, +) -> Basic: + def rand_name(length: int) -> str: + letters = string.ascii_letters + result_str = "".join(random.choice(letters) for i in range(length)) + return result_str + + p: Basic + if param_type == ParameterType.FEATURE: + p = FeatureParameter(rand_name(name_len), value=value) + with_trig: SearchStrategy[bool] = st.booleans() + if draw(with_trig): + p = draw(st.sampled_from(TRIG_FNS))(p) + elif param_type == ParameterType.VARIATIONAL: + p = VariationalParameter(rand_name(name_len), value=value) + else: + p = Parameter(value) + return p + + +# A strategy to generate random parameters. +def rand_parameter(draw: Callable[[SearchStrategy[Any]], Any]) -> Basic: + param_type = draw(st.sampled_from([p for p in ParameterType])) + min_v, max_v = PARAM_RANGES[param_type] + value = draw(st.floats(min_value=min_v, max_value=max_v)) + name_len = draw(st.integers(min_value=1, max_value=PARAM_NAME_LENGTH)) + return get_param(draw, param_type=param_type, name_len=name_len, value=value) + + +# A strategy to generate random expressions. 
+def rand_expression(draw: Callable[[SearchStrategy[Any]], Any]) -> Expr: + n_symbols: SearchStrategy[int] = st.integers(min_value=MIN_SYMBOLS, max_value=MAX_SYMBOLS) + N = draw(n_symbols) + expr = rand_parameter(draw) + if N > 1: + for _ in range(N - 1): + other = rand_parameter(draw) + op = draw(st.sampled_from([op for op in OPS_DICT.keys()])) + expr = OPS_DICT[op](expr, other) + return expr + + +# A strategy to generate random blocks. +def rand_digital_blocks(gate_list: list[AbstractBlock]) -> Callable: + @st.composite + def blocks( + # ops_pool: list[AbstractBlock] TO BE ADDED + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = st.integers(min_value=1, max_value=4), + depth: SearchStrategy[int] = st.integers(min_value=1, max_value=8), + ) -> AbstractBlock: + total_qubits = draw(n_qubits) + gates_list = [] + qubit_indices = {0} + + pool_1q = [gate for gate in single_qubit_gateset if gate in gate_list] + pool_1q_fixed = [gate for gate in pool_1q if not issubclass(gate, ParametricBlock)] + pool_1q_param = list(set(pool_1q) - set(pool_1q_fixed)) + pool_2q = [gate for gate in two_qubit_gateset if gate in gate_list] + pool_2q_fixed = [ + gate for gate in two_qubit_gateset if not issubclass(gate, ParametricBlock) + ] + pool_2q_param = list(set(pool_2q) - set(pool_2q_fixed)) + pool_3q = [gate for gate in three_qubit_gateset if gate in gate_list] + pool_nq = [gate for gate in multi_qubit_gateset if gate in gate_list] + pool_nq_fixed = [ + gate for gate in multi_qubit_gateset if not issubclass(gate, ParametricBlock) + ] + pool_nq_param = list(set(pool_nq) - set(pool_nq_fixed)) + + for _ in range(draw(depth)): + if total_qubits == 1: + gate = draw(st.sampled_from(pool_1q)) + elif total_qubits >= 2: + gate = draw(st.sampled_from(gate_list)) + + qubit = draw(st.integers(min_value=0, max_value=total_qubits - 1)) + qubit_indices = qubit_indices.union({qubit}) + + if gate in pool_1q: + if gate in pool_1q_fixed: + gates_list.append(gate(qubit)) + 
elif gate in pool_1q_param: + angles = [rand_expression(draw) for _ in range(gate.num_parameters())] + gates_list.append(gate(qubit, *angles)) + + elif gate in pool_2q: + target = draw( + st.integers(min_value=0, max_value=total_qubits - 1).filter( + lambda x: x != qubit + ) + ) + qubit_indices = qubit_indices.union({target}) + if gate in pool_2q_fixed: + gates_list.append(gate(qubit, target)) + elif gate in pool_2q_param: + gates_list.append(gate(qubit, target, rand_expression(draw))) + + elif gate in pool_3q: + target1 = draw( + st.integers(min_value=0, max_value=total_qubits - 1).filter( + lambda x: x != qubit + ) + ) + target2 = draw( + st.integers(min_value=0, max_value=total_qubits - 1).filter( + lambda x: x != qubit and x != target1 + ) + ) + gates_list.append(gate(qubit, target1, target2)) + + elif gate in pool_nq: + target1 = draw( + st.integers(min_value=0, max_value=total_qubits - 1).filter( + lambda x: x != qubit + ) + ) + target2 = draw( + st.integers(min_value=0, max_value=total_qubits - 1).filter( + lambda x: x != qubit and x != target1 + ) + ) + if gate in pool_nq_fixed: + gates_list.append(gate((qubit, target1), target2)) + elif gate in pool_nq_param: + gates_list.append(gate((qubit, target1), target2, rand_expression(draw))) + + return chain(*gates_list) + + return blocks # type: ignore[no-any-return] + + +@st.composite +def digital_circuits( + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = N_QUBITS_STRATEGY, + depth: SearchStrategy[int] = CIRCUIT_DEPTH_STRATEGY, +) -> QuantumCircuit: + block = draw(rand_digital_blocks(digital_gateset)(n_qubits, depth)) + total_qubits = max(block.qubit_support) + 1 + return QuantumCircuit(total_qubits, block) + + +@st.composite +def restricted_circuits( + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = N_QUBITS_STRATEGY, + depth: SearchStrategy[int] = CIRCUIT_DEPTH_STRATEGY, +) -> QuantumCircuit: + block = 
draw(rand_digital_blocks(minimal_gateset)(n_qubits, depth)) + total_qubits = max(block.qubit_support) + 1 + return QuantumCircuit(total_qubits, block) + + +# A strategy to generate both a circuit and a batch of values for each FeatureParameter. +@st.composite +def batched_digital_circuits( + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = N_QUBITS_STRATEGY, + depth: SearchStrategy[int] = CIRCUIT_DEPTH_STRATEGY, + batch_size: SearchStrategy[int] = BATCH_SIZE_STRATEGY, +) -> tuple[QuantumCircuit, dict[str, Tensor]]: + circuit = draw(digital_circuits(n_qubits, depth)) + b_size = draw(batch_size) + inputs = rand_featureparameters(circuit, b_size) + return circuit, inputs + + +@st.composite +def restricted_batched_circuits( + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = N_QUBITS_STRATEGY, + depth: SearchStrategy[int] = CIRCUIT_DEPTH_STRATEGY, + batch_size: SearchStrategy[int] = BATCH_SIZE_STRATEGY, +) -> tuple[QuantumCircuit, dict[str, Tensor]]: + circuit = draw(restricted_circuits(n_qubits, depth)) + b_size = draw(batch_size) + inputs = rand_featureparameters(circuit, b_size) + return circuit, inputs + + +# A strategy to generate random observables under the form +# of an add block of numerically scaled kron blocks. 
+@st.composite +def observables( + draw: Callable[[SearchStrategy[Any]], Any], + n_qubits: SearchStrategy[int] = N_QUBITS_STRATEGY, + depth: SearchStrategy[int] = CIRCUIT_DEPTH_STRATEGY, +) -> AbstractBlock: + total_qubits = draw(n_qubits) + add_layer = [] + qubit_indices = {0} + for _ in range(draw(depth)): + kron_layer = [] + for qubit in range(draw(st.integers(min_value=1, max_value=total_qubits))): + gate = draw(st.sampled_from(pauli_gateset)) + kron_layer.append(gate(qubit)) + scale = draw(st.floats(min_value=-10.0, max_value=10.0)) + kron_block = scale * kron(*kron_layer) + add_layer.append(kron_block) + scale_add: float = draw(st.floats(min_value=-10.0, max_value=10.0)) + add_block = scale_add * add(*add_layer) + return add_block diff --git a/tests/test_divergences.py b/tests/test_divergences.py new file mode 100644 index 000000000..a3b66cd33 --- /dev/null +++ b/tests/test_divergences.py @@ -0,0 +1,23 @@ +from __future__ import annotations + +from collections import Counter + +import numpy as np +import pytest + +from qadence.divergences import js_divergence + + +@pytest.mark.parametrize( + "counter_p, counter_q, exp_js", + [ + ( + Counter({"00": 10, "01": 50, "10": 70, "11": 30}), + Counter({"00": 10, "01": 50, "10": 70, "11": 30}), + 0.0, + ), + (Counter({"00": 10, "01": 50}), Counter({"10": 70, "11": 30}), np.log(2.0)), + ], +) +def test_js_divergence_fixture(counter_p: Counter, counter_q: Counter, exp_js: float) -> None: + assert np.isclose(js_divergence(counter_p, counter_q), exp_js) diff --git a/tests/test_examples.py b/tests/test_examples.py new file mode 100644 index 000000000..d088fa777 --- /dev/null +++ b/tests/test_examples.py @@ -0,0 +1,53 @@ +"""Test examples scripts.""" +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path +from typing import Iterable + +import pytest + +expected_fail: dict = {} + + +def get_py_files(dir: Path) -> Iterable[Path]: + files = [] + + for it in dir.iterdir(): + if 
it.suffix == ".py": + files.append(it) + elif it.is_dir(): + files.extend(get_py_files(it)) + return files + + +examples_dir = Path(__file__).parent.parent.joinpath("examples").resolve() +assert examples_dir.exists() +examples = get_py_files(examples_dir) +example_names = [f"{example.relative_to(examples_dir)}" for example in examples] +for example, reason in expected_fail.items(): + try: + examples[example_names.index(example)] = pytest.param( # type: ignore + example, marks=pytest.mark.xfail(reason=reason) + ) + except ValueError: + pass + + +@pytest.mark.parametrize("example", examples, ids=example_names) +def test_example(example: Path) -> None: + """Execute and example as a test, passes if it returns 0.""" + cmd = [sys.executable, example] + with subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={**os.environ} # type: ignore + ) as run_example: + stdout, stderr = run_example.communicate() + error_string = ( + f"Example {example.name} failed\n" + f"stdout:{stdout.decode()}\n" + f"stderr: {stderr.decode()}" + ) + if run_example.returncode != 0: + raise Exception(error_string) diff --git a/tests/test_execution.py b/tests/test_execution.py new file mode 100644 index 000000000..7fa898c3a --- /dev/null +++ b/tests/test_execution.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +from collections import Counter + +import pytest +import strategies as st # type: ignore +from hypothesis import given, settings +from metrics import JS_ACCEPTANCE # type: ignore +from torch import Tensor, allclose, rand + +from qadence import RX, QuantumCircuit, Z, expectation, run, sample, total_magnetization +from qadence.backend import BackendName +from qadence.blocks import AbstractBlock +from qadence.divergences import js_divergence +from qadence.register import Register +from qadence.states import equivalent_state +from qadence.types import DiffMode + +BACKENDS = [BackendName.PYQTORCH, BackendName.BRAKET] + + +@pytest.mark.parametrize("backend", 
list(BACKENDS)) +@given(st.restricted_batched_circuits()) +@settings(deadline=None) +def test_run(backend: BackendName, circ_and_vals: tuple[QuantumCircuit, dict[str, Tensor]]) -> None: + circ, inputs = circ_and_vals + reg = Register(circ.n_qubits) + wf = run(circ, values=inputs, backend=backend) # type: ignore[arg-type] + wf = run(reg, circ.block, values=inputs, backend=backend) # type: ignore[arg-type] + wf = run(circ.block, values=inputs, backend=backend) # type: ignore[arg-type] + assert isinstance(wf, Tensor) + + +@pytest.mark.parametrize("backend", list(BACKENDS)) +@given(st.restricted_batched_circuits()) +@settings(deadline=None) +def test_sample( + backend: BackendName, circ_and_vals: tuple[QuantumCircuit, dict[str, Tensor]] +) -> None: + circ, inputs = circ_and_vals + reg = Register(circ.n_qubits) + samples = sample(circ, values=inputs, backend=backend) + samples = sample(reg, circ.block, values=inputs, backend=backend) + samples = sample(circ.block, values=inputs, backend=backend) + assert all([isinstance(s, Counter) for s in samples]) + + +@pytest.mark.parametrize("diff_mode", list(DiffMode) + [None]) +@pytest.mark.parametrize("backend", list(BACKENDS)) +@given(st.restricted_batched_circuits()) +@settings(deadline=None) +def test_expectation( + diff_mode: DiffMode, + backend: BackendName, + circ_and_vals: tuple[QuantumCircuit, dict[str, Tensor]], +) -> None: + if diff_mode == "ad" and backend != "pyqtorch": + pytest.skip(f"Backend {backend} doesnt support diff_mode={diff_mode}.") + circ, inputs = circ_and_vals + reg = Register(circ.n_qubits) + obs = total_magnetization(reg.n_qubits) + x = expectation( + circ, obs, values=inputs, backend=backend, diff_mode=diff_mode + ) # type: ignore[call-arg] + x = expectation( + reg, circ.block, obs, values=inputs, backend=backend, diff_mode=diff_mode # type: ignore + ) + x = expectation( + circ.block, obs, values=inputs, backend=backend, diff_mode=diff_mode + ) # type: ignore[call-arg] + if inputs: + assert x.size(0) 
== len(inputs[list(inputs.keys())[0]]) + else: + assert x.size(0) == 1 + + +@pytest.mark.parametrize("backend", BACKENDS) +def test_single_qubit_block( + backend: BackendName, block: AbstractBlock = RX(2, rand(1).item()) +) -> None: + run(block, values={}, backend=backend) # type: ignore[arg-type] + sample(block, values={}, backend=backend) # type: ignore[arg-type] + expectation(block, Z(0), values={}, backend=backend) # type: ignore[arg-type] + + +@given(st.batched_digital_circuits()) +def test_singlequbit_comp(circ_and_vals: tuple[QuantumCircuit, dict[str, Tensor]]) -> None: + circ, inputs = circ_and_vals + wf_0 = run(circ, values=inputs) # type: ignore[arg-type] + samples_0 = sample(circ, values=inputs) # type: ignore[arg-type] + expectation_0 = expectation(circ, Z(0), values=inputs) # type: ignore[arg-type] + + # diffmode = "ad" makes pyq compose single qubit ops if possible + + wf_1 = run(circ, values=inputs) # type: ignore[arg-type] + samples_1 = sample(circ, values=inputs) # type: ignore[arg-type] + expectation_1 = expectation(circ, Z(0), values=inputs, diff_mode="ad") # type: ignore[arg-type] + + assert equivalent_state(wf_0, wf_1) + assert allclose(expectation_0, expectation_1) + + for sample0, sample1 in zip(samples_0, samples_1): + assert js_divergence(sample0, sample1) < JS_ACCEPTANCE diff --git a/tests/test_files/chem_circ.json b/tests/test_files/chem_circ.json new file mode 100644 index 000000000..cf83d7157 --- /dev/null +++ b/tests/test_files/chem_circ.json @@ -0,0 +1 @@ 
+{"block":{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"KronBlock","qubit_support":[0],"tag":null,"blocks":[{"type":"X","qubit_support":[0],"tag":null}]},{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"RZ","qubit_support":[1],"tag":null,"parameters":{"_name_dict":{"parameter":["9eedc746-5f67-41e7-b559-3e755ace54f2",{"name":"-1.57079632679490","expression":"Float('-1.5707963267948966',precision=53)"}]}}},{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"H","qubit_support":[0],"tag":null},{"type":"H","qubit_support":[1],"tag":null},{"type":"CNOT","qubit_support":[0,1],"tag":null,"blocks":[{"type":"X","qubit_support":[1],"tag":null}]},{"type":"RZ","qubit_support":[1],"tag":null,"parameters":{"_name_dict":{"parameter":["62b9d643-abc9-434f-8da2-6a8e1691bbd1",{"name":"2.0*theta_0","expression":"Mul(Float('2.0',precision=53),Parameter('theta_0'))","symbols":{"theta_0":{"name":"theta_0","trainable":"True","value":"0.5186768347707205"}}}]}}},{"type":"CNOT","qubit_support":[0,1],"tag":null,"blocks":[{"type":"X","qubit_support":[1],"tag":null}]},{"type":"H","qubit_support":[1],"tag":null},{"type":"H","qubit_support":[0],"tag":null}]},{"type":"ChainBlock","qubit_support":[0,1],"tag":null,"blocks":[{"type":"RX","qubit_support":[0],"tag":null,"parameters":{"_name_dict":{"parameter":["ccfbc8a4-ac54-4c6b-a3eb-f2859d164565",{"name":"1.57079632679490","expression":"Float('1.5707963267948966',precision=53)"}]}}},{"type":"RX","qubit_support":[1],"tag":null,"parameters":{"_name_dict":{"parameter":["407a3a68-62fd-42a6-ad5a-857c771b6583",{"name":"1.57079632679490","expression":"Float('1.5707963267948966',precision=53)"}]}}},{"type":"CNOT","qubit_support":[0,1],"tag":null,"blocks":[{"type":"X","qubit_support":[1],"tag":null}]},{"type":"RZ","qubit_support":[1],"tag":null,"parameters":{"_name
_dict":{"parameter":["4a8eff69-a55c-42fd-b4f6-d2c4bdee2620",{"name":"2.0*theta_0","expression":"Mul(Float('2.0',precision=53),Parameter('theta_0'))","symbols":{"theta_0":{"name":"theta_0","trainable":"True","value":"0.5186768347707205"}}}]}}},{"type":"CNOT","qubit_support":[0,1],"tag":null,"blocks":[{"type":"X","qubit_support":[1],"tag":null}]},{"type":"RX","qubit_support":[1],"tag":null,"parameters":{"_name_dict":{"parameter":["ab2f5fc8-4183-4cdf-ba88-480b80163d57",{"name":"-1.57079632679490","expression":"Float('-1.5707963267948966',precision=53)"}]}}},{"type":"RX","qubit_support":[0],"tag":null,"parameters":{"_name_dict":{"parameter":["e613fff8-e449-4dc1-9da8-7cb549a1604e",{"name":"-1.57079632679490","expression":"Float('-1.5707963267948966',precision=53)"}]}}}]},{"type":"RZ","qubit_support":[1],"tag":null,"parameters":{"_name_dict":{"parameter":["4d400191-c462-4acb-9ab9-9500676dd2cc",{"name":"1.57079632679490","expression":"Float('1.5707963267948966',precision=53)"}]}}}]},{"type":"SWAP","qubit_support":[0,1],"tag":null}]}]}]},"register":{"graph":{"directed":false,"multigraph":false,"graph":{},"nodes":[{"pos":[0.8627152252841357,0.9999999999999998],"id":0},{"pos":[-0.8627152252841359,-1],"id":1}],"links":[{"source":0,"target":1}]}}} diff --git a/tests/test_files/chem_ham.json b/tests/test_files/chem_ham.json new file mode 100644 index 000000000..7784b1ae0 --- /dev/null +++ b/tests/test_files/chem_ham.json @@ -0,0 +1 @@ +{"type": "AddBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["9f80ec01-88c0-4521-a3ce-38f930258063", {"name": "0.304794814853385", "expression": "Float('0.30479481485338472', precision=53)"}]}}, "block": {"type": "I", "qubit_support": [1], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["ea9956b2-3e5a-4d43-9fa1-25a4efe07711", {"name": "0.355425749302799", "expression": "Float('0.35542574930279891', 
precision=53)"}]}}, "block": {"type": "Z", "qubit_support": [0], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["fe4a4024-34c9-4dc9-8635-4ba00afd1161", {"name": "-0.485485610262810", "expression": "Float('-0.48548561026281017', precision=53)"}]}}, "block": {"type": "Z", "qubit_support": [1], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["8d1738b5-d8ac-4b69-bb1d-002ed829bd86", {"name": "0.0895002880307033", "expression": "Float('0.089500288030703312', precision=53)"}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "X", "qubit_support": [0], "tag": null}, {"type": "X", "qubit_support": [1], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["790e2dba-35a6-42f9-90a1-be355825f4ad", {"name": "0.0895002880307033", "expression": "Float('0.089500288030703312', precision=53)"}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "Y", "qubit_support": [0], "tag": null}, {"type": "Y", "qubit_support": [1], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["b537d0ce-1e4d-4c9f-9175-3f80f1cdedd2", {"name": "0.581232490278055", "expression": "Float('0.58123249027805524', precision=53)"}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "Z", "qubit_support": [0], "tag": null}, {"type": "Z", "qubit_support": [1], "tag": null}]}}]} diff --git a/tests/test_files/h4.json b/tests/test_files/h4.json new file mode 100644 index 000000000..8d908ca76 --- /dev/null +++ b/tests/test_files/h4.json @@ -0,0 +1 @@ +{"type": "AddBlock", "qubit_support": [0, 1, 2, 3], "tag": null, "blocks": [{"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["90afc190-ba10-4fab-8227-9341b2548962", {"name": "-0.332610304343429", "expression": "Float('-0.33261030434342942', 
precision=53)", "symbols": {}}]}}, "block": {"type": "I", "qubit_support": [3], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["7b70b37a-6695-4be0-a1a1-778e9badc422", {"name": "0.280951381024097", "expression": "Float('0.28095138102409667', precision=53)", "symbols": {}}]}}, "block": {"type": "Z", "qubit_support": [0], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["d2fefea5-8c5e-4a82-b121-23062f82c119", {"name": "0.0879669847048241", "expression": "Float('0.087966984704824114', precision=53)", "symbols": {}}]}}, "block": {"type": "Z", "qubit_support": [1], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["d43a89e8-405c-470e-99e5-e45707867feb", {"name": "0.0581275320456030", "expression": "Float('0.058127532045602975', precision=53)", "symbols": {}}]}}, "block": {"type": "Z", "qubit_support": [2], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["f170eca4-d488-40de-ba0a-820b7fe5b148", {"name": "-0.296294546523882", "expression": "Float('-0.29629454652388221', precision=53)", "symbols": {}}]}}, "block": {"type": "Z", "qubit_support": [3], "tag": null}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["a7eb8872-776b-433b-bd60-7b5d6f61d145", {"name": "0.0155100358591431", "expression": "Float('0.015510035859143123', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "X", "qubit_support": [0], "tag": null}, {"type": "X", "qubit_support": [1], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["1f7e5eae-f3c6-40fc-8bd8-9ccdbe375d94", {"name": "0.0155100358591431", "expression": "Float('0.015510035859143123', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "Y", 
"qubit_support": [0], "tag": null}, {"type": "Y", "qubit_support": [1], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["dedf0229-0771-4654-a45b-d61e1c4354aa", {"name": "0.0171189323127432", "expression": "Float('0.017118932312743168', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 2], "tag": null, "blocks": [{"type": "X", "qubit_support": [0], "tag": null}, {"type": "X", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["988b6183-45d4-41a3-a88a-a64a7e0304d0", {"name": "0.0171189323127432", "expression": "Float('0.017118932312743168', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 2], "tag": null, "blocks": [{"type": "Y", "qubit_support": [0], "tag": null}, {"type": "Y", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["41f66c3f-39ae-447c-8b5a-55c84cacc8c4", {"name": "0.0872203221441241", "expression": "Float('0.087220322144124091', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 3], "tag": null, "blocks": [{"type": "X", "qubit_support": [0], "tag": null}, {"type": "X", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["2ff67502-e0e7-4839-9033-e7bb6301cfc1", {"name": "0.0872203221441241", "expression": "Float('0.087220322144124091', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 3], "tag": null, "blocks": [{"type": "Y", "qubit_support": [0], "tag": null}, {"type": "Y", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["c09c42a5-5bb0-4a83-8eff-fa001b8e0933", {"name": "0.230929426685301", "expression": "Float('0.23092942668530098', precision=53)", "symbols": {}}]}}, "block": {"type": 
"KronBlock", "qubit_support": [0, 1], "tag": null, "blocks": [{"type": "Z", "qubit_support": [0], "tag": null}, {"type": "Z", "qubit_support": [1], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["630c4df8-2890-4582-88bc-c2a343514d20", {"name": "0.233236271858933", "expression": "Float('0.23323627185893336', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 2], "tag": null, "blocks": [{"type": "Z", "qubit_support": [0], "tag": null}, {"type": "Z", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["8321c0d3-5673-4469-bedd-4fc9e406ca4d", {"name": "0.496824905495945", "expression": "Float('0.49682490549594505', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [0, 3], "tag": null, "blocks": [{"type": "Z", "qubit_support": [0], "tag": null}, {"type": "Z", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["edc165d8-91a3-48c7-83bf-88cb3b7413e7", {"name": "0.147781314033260", "expression": "Float('0.14778131403326014', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 2], "tag": null, "blocks": [{"type": "X", "qubit_support": [1], "tag": null}, {"type": "X", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["e70373b9-edbd-4c24-9e67-fc5341ec5ee7", {"name": "0.147781314033260", "expression": "Float('0.14778131403326014', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 2], "tag": null, "blocks": [{"type": "Y", "qubit_support": [1], "tag": null}, {"type": "Y", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["b6607105-3378-47c9-a875-123cce95bf36", {"name": "0.0182705886877766", "expression": 
"Float('0.018270588687776628', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 3], "tag": null, "blocks": [{"type": "X", "qubit_support": [1], "tag": null}, {"type": "X", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["57d69802-9ab9-4634-aec9-eda158e6a374", {"name": "0.0182705886877766", "expression": "Float('0.018270588687776628', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 3], "tag": null, "blocks": [{"type": "Y", "qubit_support": [1], "tag": null}, {"type": "Y", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["7646e1de-9b79-4b8c-90ef-407bfa7024fc", {"name": "0.271080192734930", "expression": "Float('0.27108019273492961', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 2], "tag": null, "blocks": [{"type": "Z", "qubit_support": [1], "tag": null}, {"type": "Z", "qubit_support": [2], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["8b45b9b4-4c1d-48f2-a6a6-41689e377c62", {"name": "0.242446525517855", "expression": "Float('0.24244652551785489', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [1, 3], "tag": null, "blocks": [{"type": "Z", "qubit_support": [1], "tag": null}, {"type": "Z", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["dadafe90-215d-4de4-8164-08c30e501426", {"name": "0.0182859747722903", "expression": "Float('0.018285974772290287', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [2, 3], "tag": null, "blocks": [{"type": "X", "qubit_support": [2], "tag": null}, {"type": "X", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": 
["4d8f5aea-f10f-45e3-9fb1-67dbd49b20b9", {"name": "0.0182859747722903", "expression": "Float('0.018285974772290287', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [2, 3], "tag": null, "blocks": [{"type": "Y", "qubit_support": [2], "tag": null}, {"type": "Y", "qubit_support": [3], "tag": null}]}}, {"type": "ScaleBlock", "tag": null, "parameters": {"_name_dict": {"parameter": ["ed5b3270-a372-4a9c-971b-929ac7397a00", {"name": "0.244316302103824", "expression": "Float('0.24431630210382418', precision=53)", "symbols": {}}]}}, "block": {"type": "KronBlock", "qubit_support": [2, 3], "tag": null, "blocks": [{"type": "Z", "qubit_support": [2], "tag": null}, {"type": "Z", "qubit_support": [3], "tag": null}]}}]} diff --git a/tests/test_notebooks.py b/tests/test_notebooks.py new file mode 100644 index 000000000..9e84a5aeb --- /dev/null +++ b/tests/test_notebooks.py @@ -0,0 +1,56 @@ +"""Test examples scripts.""" +from __future__ import annotations + +import os +import subprocess +import sys +from glob import glob +from pathlib import Path +from typing import List + +import pytest + +expected_fail = {} # type: ignore + + +def get_ipynb_files(dir: Path) -> List[Path]: + files = [] + + for it in dir.iterdir(): + if it.suffix == ".ipynb": + files.append(it) + elif it.is_dir(): + files.extend(get_ipynb_files(it)) + return files + + +# FIXME: refactor choice of notebooks folders +docsdir = Path(__file__).parent.parent.joinpath("docs") +notebooks = [ + Path(nb).relative_to(docsdir.parent) for nb in glob(str(docsdir / "**/*.ipynb"), recursive=True) +] +for example, reason in expected_fail.items(): + try: + notebooks[notebooks.index(Path(example))] = pytest.param( # type: ignore + example, marks=pytest.mark.xfail(reason=reason) + ) + except ValueError: + pass + + +@pytest.mark.parametrize("notebook", notebooks, ids=map(str, notebooks)) +def test_notebooks(notebook: Path) -> None: + """Execute docs notebooks as a test, passes if it returns 0.""" + 
jupyter_cmd = ["-m", "jupyter", "nbconvert", "--to", "python", "--execute"] + path = str(notebook) + cmd = [sys.executable, *jupyter_cmd, path] + with subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={**os.environ} # type: ignore + ) as run_example: + stdout, stderr = run_example.communicate() + error_string = ( + f"Notebook {path} failed\n" f"stdout:{stdout.decode()}\n" f"stderr: {stderr.decode()}" + ) + + if run_example.returncode != 0: + raise Exception(error_string) diff --git a/tests/test_serialize.py b/tests/test_serialize.py new file mode 100644 index 000000000..1a87c471d --- /dev/null +++ b/tests/test_serialize.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from pathlib import Path + +import torch +from sympy import Expr + +from qadence import QuantumCircuit +from qadence.blocks import AbstractBlock, KronBlock +from qadence.ml_tools.models import TransformedModule +from qadence.ml_tools.utils import rand_featureparameters +from qadence.models import QNN, QuantumModel +from qadence.register import Register +from qadence.serialization import ( + FORMAT_DICT, + SerializationFormat, + deserialize, + load, + save, + serialize, +) + + +def test_non_module_serialization( + tmp_path: Path, + BasicQuantumCircuit: QuantumCircuit, + BasicExpression: Expr, + BasicRegister: Register, + BasicFeatureMap: KronBlock, + BasicObservable: AbstractBlock, +) -> None: + for obj in [ + BasicQuantumCircuit, + BasicFeatureMap, + BasicExpression, + BasicRegister, + BasicObservable, + ]: + assert obj == deserialize(serialize(obj)) + save(obj, tmp_path, "obj") + loaded_obj = load(tmp_path / Path("obj.json")) + assert obj == loaded_obj + + +def test_qm_serialization(tmp_path: Path, BasicQuantumModel: QuantumModel) -> None: + _m = BasicQuantumModel + inputs = rand_featureparameters(_m, 1) + for save_params in [True, False]: + exp = _m.expectation(inputs) + d = serialize(_m, save_params) + qm_ser = deserialize(d, save_params) # type: 
ignore[assignment] + exp_ser = qm_ser.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_ser) # type: ignore[union-attr] + for FORMAT in SerializationFormat: + save(_m, tmp_path, "obj", FORMAT) + suffix, _, _, _ = FORMAT_DICT[FORMAT] + qm = load(tmp_path / Path("obj" + suffix)) + exp_l = qm.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_l) + + +def test_qnn_serialization(tmp_path: Path, BasicQNN: QNN) -> None: + _m = BasicQNN + inputs = rand_featureparameters(_m, 1) + for save_params in [True, False]: + exp = _m.expectation(inputs) + d = serialize(_m, save_params) + qm_ser = deserialize(d, save_params) # type: ignore[assignment] + exp_ser = qm_ser.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_ser) # type: ignore[union-attr] + for FORMAT in SerializationFormat: + save(_m, tmp_path, "obj", FORMAT) + suffix, _, _, _ = FORMAT_DICT[FORMAT] + qm = load(tmp_path / Path("obj" + suffix)) + exp_l = qm.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_l) + + +def test_tm_serialization(tmp_path: Path, BasicTransformedModule: TransformedModule) -> None: + _m = BasicTransformedModule + inputs = rand_featureparameters(_m, 1) + for save_params in [True, False]: + exp = _m.expectation(inputs) + d = serialize(_m, save_params) + qm_ser = deserialize(d, save_params) # type: ignore[assignment] + exp_ser = qm_ser.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_ser) # type: ignore[union-attr] + for FORMAT in SerializationFormat: + save(_m, tmp_path, "obj", FORMAT) + suffix, _, _, _ = FORMAT_DICT[FORMAT] + qm = load(tmp_path / Path("obj" + suffix)) + exp_l = qm.expectation(inputs) # type: ignore[union-attr] + assert torch.isclose(exp, exp_l)