From e9703d6170c2b99ad7ccb01fef0ceeed20128e9d Mon Sep 17 00:00:00 2001 From: QuantumJaeYoo Date: Tue, 20 Jun 2023 07:10:14 +0000 Subject: [PATCH] Add python layer for cuquantum grad + sim ops --- .../core/ops/circuit_execution_ops.py | 123 ++++- .../core/ops/circuit_execution_ops_test.py | 95 +++- .../python/differentiators/BUILD | 8 +- .../python/differentiators/adjoint.py | 81 +++- .../python/differentiators/adjoint_test.py | 51 +- .../python/differentiators/differentiator.py | 71 ++- .../differentiators/differentiator_test.py | 20 + .../python/differentiators/gradient_test.py | 208 ++++++-- .../layers/circuit_executors/expectation.py | 47 +- .../circuit_executors/expectation_test.py | 172 +++++-- .../python/layers/circuit_executors/sample.py | 34 +- .../layers/circuit_executors/sample_test.py | 68 ++- .../circuit_executors/sampled_expectation.py | 51 +- .../sampled_expectation_test.py | 445 ++++++++++++------ .../python/layers/circuit_executors/state.py | 30 +- .../layers/circuit_executors/state_test.py | 44 +- 16 files changed, 1182 insertions(+), 366 deletions(-) diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops.py b/tensorflow_quantum/core/ops/circuit_execution_ops.py index b2cc1ce70..e2dd49aa2 100644 --- a/tensorflow_quantum/core/ops/circuit_execution_ops.py +++ b/tensorflow_quantum/core/ops/circuit_execution_ops.py @@ -21,6 +21,30 @@ tfq_utility_ops) from tensorflow_quantum.python import quantum_context +try: + from tensorflow_quantum.core.ops import tfq_simulate_ops_cuquantum + _ENABLE_USE_CUQUANTUM = True +except: + # `_ENABLE_USE_CUQUANTUM = False` makes `use_cuquantum` silent. + _ENABLE_USE_CUQUANTUM = False + tfq_simulate_ops_cuquantum = tfq_simulate_ops + + +def is_gpu_configured() -> bool: + """Returns True if gpu ops are available or not.""" + return _ENABLE_USE_CUQUANTUM + + +def _preprocess_use_cuquantum(use_cuquantum: bool) -> bool: + if is_gpu_configured(): + return use_cuquantum + + # GPU is not set. `use_cuquantum` becomes silent. + if use_cuquantum: + print("WARNING: cuQuantum was not set, " + "`use_cuquantum=True` option becomes effectless. Using CPU.") + return False + class TFQStateVectorSimulator(enum.Enum): """Enum to make specifying TFQ simulators user-friendly.""" @@ -29,17 +53,32 @@ class TFQStateVectorSimulator(enum.Enum): state = tfq_simulate_ops.tfq_simulate_state sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation + expectation_cuquantum = tfq_simulate_ops_cuquantum.tfq_simulate_expectation + samples_cuquantum = tfq_simulate_ops_cuquantum.tfq_simulate_samples + state_cuquantum = tfq_simulate_ops_cuquantum.tfq_simulate_state + sampled_expectation_cuquantum = ( + tfq_simulate_ops_cuquantum.tfq_simulate_sampled_expectation) -def _check_quantum_concurrent(quantum_concurrent): + + +def _check_quantum_concurrent(quantum_concurrent, use_cuquantum): if not isinstance(quantum_concurrent, bool): raise TypeError("quantum_concurrent must be type bool." " Given: {}".format(str(type(quantum_concurrent)))) + if not isinstance(use_cuquantum, bool): + raise TypeError("use_cuquantum must be type bool." + " Given: {}".format(str(type(use_cuquantum)))) + if use_cuquantum is True and quantum_concurrent is True: + raise ValueError("use_cuquantum and quantum_concurrent should " + "not be True at the same time. 
Please set False to " + "quantum_concurrent.") def get_expectation_op( backend=None, *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode(), + use_cuquantum=False): """Get a TensorFlow op that will calculate batches of expectation values. This function produces a non-differentiable TF op that will calculate @@ -80,8 +119,8 @@ def get_expectation_op( backend: Optional Python `object` that specifies what backend this op should use when evaluating circuits. Can be `cirq.DensityMatrixSimulator` or any - `cirq.sim.simulator.SimulatesExpectationValues`. If not provided the - default C++ analytical expectation calculation op is returned. + `cirq.sim.simulator.SimulatesExpectationValues`. If not provided + the default C++ analytical expectation calculation op is returned. quantum_concurrent: Optional Python `bool`. True indicates that the returned op should not block graph level parallelism on itself when executing. False indicates that graph level parallelism on itself @@ -90,6 +129,8 @@ def get_expectation_op( (no blocking). This flag is only needed for advanced users when using TFQ for very large simulations, or when running on a real chip. + use_cuquantum: Set True to turn on TFQ cuQuantum version op, which + requires `quantum_concurrent` to be False. Returns: A `callable` with the following signature: @@ -115,21 +156,28 @@ def get_expectation_op( expectation value for each circuit with each op applied to it (after resolving the corresponding parameters in). """ - # TODO (mbbrough): investigate how the above docstring renders. - _check_quantum_concurrent(quantum_concurrent) + _check_quantum_concurrent(quantum_concurrent, use_cuquantum) + use_cuquantum = _preprocess_use_cuquantum(use_cuquantum) op = None if backend is None: - op = TFQStateVectorSimulator.expectation + if use_cuquantum: + op = TFQStateVectorSimulator.expectation_cuquantum + else: + op = TFQStateVectorSimulator.expectation # TODO(zaqqwerty): remove DM check after cirq #3964 if isinstance(backend, (cirq.sim.simulator.SimulatesExpectationValues, cirq.DensityMatrixSimulator)): + if use_cuquantum: + raise ValueError( + "use_cuquantum is not supported for cirq simulator. Please " + "set use_cuquantum to False.") op = cirq_ops._get_cirq_analytical_expectation(backend) if op is not None: - if quantum_concurrent is True: + if use_cuquantum is False and quantum_concurrent is True: # Return an op that does not block graph level parallelism. return lambda programs, symbol_names, symbol_values, pauli_sums: \ op(programs, symbol_names, symbol_values, pauli_sums) @@ -152,7 +200,8 @@ def get_expectation_op( def get_sampling_op( backend=None, *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode(), + use_cuquantum=False): """Get a Tensorflow op that produces samples from given quantum circuits. This function produces a non-differentiable op that will calculate @@ -190,6 +239,8 @@ def get_sampling_op( (no blocking). This flag is only needed for advanced users when using TFQ for very large simulations, or when running on a real chip. + use_cuquantum: Set True to turn on TFQ cuQuantum version op, which + requires `quantum_concurrent` to be False. Returns: A `callable` with the following signature: @@ -214,19 +265,26 @@ def get_sampling_op( [batch_size, num_samples, n_qubits] that holds samples (as boolean values) for each circuit. 
""" - # TODO (mbbrough): investigate how the above docstring renders. - _check_quantum_concurrent(quantum_concurrent) + _check_quantum_concurrent(quantum_concurrent, use_cuquantum) + use_cuquantum = _preprocess_use_cuquantum(use_cuquantum) op = None if backend is None: - op = TFQStateVectorSimulator.samples + if use_cuquantum: + op = TFQStateVectorSimulator.samples_cuquantum + else: + op = TFQStateVectorSimulator.samples if isinstance(backend, cirq.Sampler): + if use_cuquantum: + raise ValueError( + "use_cuquantum is not supported for cirq sampler. Please " + "set use_cuquantum to False.") op = cirq_ops._get_cirq_samples(backend) if op is not None: - if quantum_concurrent is True: + if use_cuquantum is False and quantum_concurrent is True: # Return an op that does not block graph level parallelism. return lambda programs, symbol_names, symbol_values, num_samples: \ tfq_utility_ops.padded_to_ragged( @@ -244,7 +302,8 @@ def get_sampling_op( def get_state_op( backend=None, *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode(), + use_cuquantum=False): """Get a TensorFlow op that produces states from given quantum circuits. This function produces a non-differentiable op that will calculate @@ -282,6 +341,8 @@ def get_state_op( (no blocking). This flag is only needed for advanced users when using TFQ for very large simulations, or when running on a real chip. + use_cuquantum: Set True to turn on TFQ cuQuantum version op, which + requires `quantum_concurrent` to be False. Returns: A `callable` with the following signature: @@ -303,19 +364,26 @@ def get_state_op( `tf.Tensor` with shape [batch_size, size of state] that contains the state information of the circuit. """ - # TODO (mbbrough): investigate how the above docstring renders. - _check_quantum_concurrent(quantum_concurrent) + _check_quantum_concurrent(quantum_concurrent, use_cuquantum) + use_cuquantum = _preprocess_use_cuquantum(use_cuquantum) op = None if backend is None: - op = TFQStateVectorSimulator.state + if use_cuquantum: + op = TFQStateVectorSimulator.state_cuquantum + else: + op = TFQStateVectorSimulator.state if isinstance(backend, (cirq.SimulatesFinalState)): + if use_cuquantum: + raise ValueError( + "use_cuquantum is not supported for cirq simulator. Please " + "set use_cuquantum to False.") op = cirq_ops._get_cirq_simulate_state(backend) if op is not None: - if quantum_concurrent is True: + if use_cuquantum is False and quantum_concurrent is True: # Return an op that does not block graph level parallelism. return lambda programs, symbol_names, symbol_values: \ tfq_utility_ops.padded_to_ragged( @@ -334,7 +402,8 @@ def get_state_op( def get_sampled_expectation_op( backend=None, *, - quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()): + quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode(), + use_cuquantum=False): """Get a TensorFlow op that will calculate sampled expectation values. This function produces a non-differentiable TF op that will calculate @@ -386,6 +455,8 @@ def get_sampled_expectation_op( (no blocking). This flag is only needed for advanced users when using TFQ for very large simulations, or when running on a real chip. + use_cuquantum: Set True to turn on TFQ cuQuantum version op, which + requires `quantum_concurrent` to be False. Returns: A `callable` with the following signature: @@ -416,17 +487,25 @@ def get_sampled_expectation_op( (after resolving the corresponding parameters in). 
""" # TODO (mbbrough): investigate how the above docstring renders. - _check_quantum_concurrent(quantum_concurrent) + _check_quantum_concurrent(quantum_concurrent, use_cuquantum) + use_cuquantum = _preprocess_use_cuquantum(use_cuquantum) op = None if backend is None: - op = TFQStateVectorSimulator.sampled_expectation + if use_cuquantum: + op = TFQStateVectorSimulator.sampled_expectation_cuquantum + else: + op = TFQStateVectorSimulator.sampled_expectation if isinstance(backend, cirq.Sampler): + if use_cuquantum: + raise ValueError( + "use_cuquantum is not supported for cirq sampler. Please " + "set use_cuquantum to False.") op = cirq_ops._get_cirq_sampled_expectation(backend) if op is not None: - if quantum_concurrent is True: + if use_cuquantum is False and quantum_concurrent is True: # Return an op that does not block graph level parallelism. return lambda programs, symbol_names, symbol_values, pauli_sums, \ num_samples: op(programs, diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py index 08e4f5b6f..b89c85aa5 100644 --- a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py +++ b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py @@ -28,6 +28,7 @@ from scipy import stats import cirq import cirq_google +from cirq_google.engine.abstract_processor import AbstractProcessor from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops from tensorflow_quantum.python import util @@ -48,7 +49,11 @@ quantum_concurrent=True), # For timing interests C++ backend is tested in quantum_concurrent mode. circuit_execution_ops.get_expectation_op(backend=None, - quantum_concurrent=False) + quantum_concurrent=False), + # For cuQuantum op. quantum_concurrent=True is not allowed. + circuit_execution_ops.get_expectation_op(backend=None, + quantum_concurrent=False, + use_cuquantum=True) ] SAMPLING_OPS = [ @@ -60,7 +65,11 @@ quantum_concurrent=True), # For timing interests C++ backend is tested in quantum_concurrent mode. circuit_execution_ops.get_sampling_op(backend=None, - quantum_concurrent=False) + quantum_concurrent=False), + # For cuQuantum op. quantum_concurrent=True is not allowed. + circuit_execution_ops.get_sampling_op(backend=None, + quantum_concurrent=False, + use_cuquantum=True) ] STATE_OPS = [ @@ -68,8 +77,13 @@ circuit_execution_ops.get_state_op(backend=WF_SIM, quantum_concurrent=True), circuit_execution_ops.get_state_op(backend=DM_SIM, quantum_concurrent=True), # For timing interests C++ backend is tested in quantum_concurrent mode. - circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=False) + circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=False), + # For cuQuantum op. quantum_concurrent=True is not allowed. + circuit_execution_ops.get_state_op(backend=None, + quantum_concurrent=False, + use_cuquantum=True) ] +NO_DM_STATE_OPS = STATE_OPS[:2] + STATE_OPS[2:] SAMPLED_EXPECTATION_OPS = [ circuit_execution_ops.get_sampled_expectation_op(backend=None, @@ -81,9 +95,14 @@ # For timing interests C++ backend is tested in quantum_concurrent mode. circuit_execution_ops.get_sampled_expectation_op(backend=None, quantum_concurrent=False), + # For cuQuantum op. quantum_concurrent=True is not allowed. 
+ circuit_execution_ops.get_sampled_expectation_op(backend=None, + quantum_concurrent=False, + use_cuquantum=True) ] -SIMS = [WF_SIM, WF_SIM, DM_SIM, WF_SIM] +SIMS = [WF_SIM, WF_SIM, DM_SIM, WF_SIM, WF_SIM] +NO_DM_SIMS = SIMS[:2] + SIMS[2:] class OpGetterInputChecks(tf.test.TestCase): @@ -98,11 +117,9 @@ def test_get_expectation_inputs(self): circuit_execution_ops.get_expectation_op() with self.assertRaisesRegex(NotImplementedError, expected_regex='Sample-based'): - mock_engine = mock.Mock() + mock_processor = mock.create_autospec(AbstractProcessor) circuit_execution_ops.get_expectation_op( - cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) + cirq_google.ProcessorSampler(processor=mock_processor)) with self.assertRaisesRegex( TypeError, expected_regex="cirq.sim.simulator.SimulatesExpectationValues"): @@ -112,6 +129,15 @@ def test_get_expectation_inputs(self): expected_regex="must be type bool."): circuit_execution_ops.get_expectation_op(quantum_concurrent='junk') + with self.assertRaisesRegex(TypeError, + expected_regex="must be type bool."): + circuit_execution_ops.get_expectation_op(use_cuquantum='junk') + + with self.assertRaisesRegex( + ValueError, expected_regex="not be True at the same time"): + circuit_execution_ops.get_expectation_op(quantum_concurrent=True, + use_cuquantum=True) + def test_get_sampled_expectation_inputs(self): """Test that get expectation only accepts inputs it should.""" circuit_execution_ops.get_sampled_expectation_op() @@ -119,11 +145,9 @@ def test_get_sampled_expectation_inputs(self): backend=cirq.Simulator()) circuit_execution_ops.get_sampled_expectation_op( backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() + mock_processor = mock.create_autospec(AbstractProcessor) circuit_execution_ops.get_sampled_expectation_op( - cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) + cirq_google.ProcessorSampler(processor=mock_processor)) with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"): circuit_execution_ops.get_sampled_expectation_op(backend="junk") @@ -132,17 +156,25 @@ def test_get_sampled_expectation_inputs(self): circuit_execution_ops.get_sampled_expectation_op( quantum_concurrent='junk') + with self.assertRaisesRegex(TypeError, + expected_regex="must be type bool."): + circuit_execution_ops.get_sampled_expectation_op( + use_cuquantum='junk') + + with self.assertRaisesRegex( + ValueError, expected_regex="not be True at the same time"): + circuit_execution_ops.get_sampled_expectation_op( + quantum_concurrent=True, use_cuquantum=True) + def test_get_samples_inputs(self): """Test that get_samples only accepts inputs it should.""" circuit_execution_ops.get_sampling_op() circuit_execution_ops.get_sampling_op(backend=cirq.Simulator()) circuit_execution_ops.get_sampling_op( backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() + mock_processor = mock.create_autospec(AbstractProcessor) circuit_execution_ops.get_sampling_op( - backend=cirq_google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) + backend=cirq_google.ProcessorSampler(processor=mock_processor)) with self.assertRaisesRegex(TypeError, expected_regex="Expected a Cirq.Sampler"): circuit_execution_ops.get_sampling_op(backend="junk") @@ -151,6 +183,15 @@ def test_get_samples_inputs(self): expected_regex="must be type bool."): circuit_execution_ops.get_sampling_op(quantum_concurrent='junk') + with 
self.assertRaisesRegex(TypeError, + expected_regex="must be type bool."): + circuit_execution_ops.get_sampling_op(use_cuquantum='junk') + + with self.assertRaisesRegex( + ValueError, expected_regex="not be True at the same time"): + circuit_execution_ops.get_sampling_op(quantum_concurrent=True, + use_cuquantum=True) + def test_get_state_inputs(self): """Test that get_states only accepts inputs it should.""" circuit_execution_ops.get_state_op() @@ -162,17 +203,23 @@ def test_get_state_inputs(self): circuit_execution_ops.get_state_op(backend="junk") with self.assertRaisesRegex(TypeError, expected_regex="Cirq.SimulatesFinalState"): - mock_engine = mock.Mock() + mock_processor = mock.create_autospec(AbstractProcessor) circuit_execution_ops.get_state_op( - backend=cirq_google.QuantumEngineSampler( - engine=mock_engine, - processor_id='test', - gate_set=cirq_google.XMON)) + backend=cirq_google.ProcessorSampler(processor=mock_processor)) with self.assertRaisesRegex(TypeError, expected_regex="must be type bool."): circuit_execution_ops.get_state_op(quantum_concurrent='junk') + with self.assertRaisesRegex(TypeError, + expected_regex="must be type bool."): + circuit_execution_ops.get_state_op(use_cuquantum='junk') + + with self.assertRaisesRegex( + ValueError, expected_regex="not be True at the same time"): + circuit_execution_ops.get_state_op(quantum_concurrent=True, + use_cuquantum=True) + class ExecutionOpsConsistentyTest(tf.test.TestCase, parameterized.TestCase): """Test all ops produce equivalent output to one another.""" @@ -277,9 +324,7 @@ def test_simulate_state_with_symbols(self, op_and_sim, n_qubits, util.kwargs_cartesian_product( **{ 'op_and_sim': [(op, sim) for ( - op, - sim) in zip(STATE_OPS[:-2] + - [STATE_OPS[-1]], SIMS[:-2] + [SIMS[-1]])], + op, sim) in zip(NO_DM_STATE_OPS, NO_DM_SIMS)], }))) def test_simulate_state_large(self, op_and_sim): """Test a reasonably large and complex circuit.""" @@ -287,7 +332,7 @@ def test_simulate_state_large(self, op_and_sim): symbol_names = [] circuit_batch, resolver_batch = \ util.random_circuit_resolver_batch( - cirq.GridQubit.rect(4, 4), 5) + cirq.GridQubit.rect(3, 3), 5) symbol_values_array = np.array( [[resolver[symbol] diff --git a/tensorflow_quantum/python/differentiators/BUILD b/tensorflow_quantum/python/differentiators/BUILD index 9e5f28aab..33103e4e7 100644 --- a/tensorflow_quantum/python/differentiators/BUILD +++ b/tensorflow_quantum/python/differentiators/BUILD @@ -1,3 +1,5 @@ +load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured") + package(default_visibility = ["//visibility:public"]) licenses(["notice"]) @@ -25,7 +27,9 @@ py_library( deps = [ ":differentiator", "//tensorflow_quantum/core/ops:tfq_adj_grad_op_py", - ], + ] + if_cuda_is_configured([ + "//tensorflow_quantum/core/ops:tfq_adj_grad_op_cuquantum_py", + ]), ) py_test( @@ -35,6 +39,7 @@ py_test( deps = [ ":adjoint", "//tensorflow_quantum/core/ops:circuit_execution_ops", + "//tensorflow_quantum/python:util", ], ) @@ -118,6 +123,7 @@ py_test( py_test( name = "gradient_test", timeout = "eternal", + shard_count = 5, srcs = ["gradient_test.py"], python_version = "PY3", deps = [ diff --git a/tensorflow_quantum/python/differentiators/adjoint.py b/tensorflow_quantum/python/differentiators/adjoint.py index 44b8e9da6..57ccd304b 100644 --- a/tensorflow_quantum/python/differentiators/adjoint.py +++ b/tensorflow_quantum/python/differentiators/adjoint.py @@ -16,6 +16,13 @@ import tensorflow as tf from tensorflow_quantum.core.ops import tfq_adj_grad_op +try: + from 
tensorflow_quantum.core.ops import tfq_adj_grad_op_cuquantum + _ENABLE_USE_CUQUANTUM = True +except: + _ENABLE_USE_CUQUANTUM = False + tfq_adj_grad_op_cuquantum = tfq_adj_grad_op + from tensorflow_quantum.python.differentiators import differentiator @@ -32,9 +39,10 @@ class Adjoint(differentiator.Differentiator): https://academic.oup.com/gji/article-pdf/167/2/495/1492368/167-2-495.pdf). The Adjoint method differentiates the input circuits in roughly one forward and backward pass over the circuits, to calculate the gradient of - a symbol only a constant number of gate operations need to be applied to the - circuits state. When the number of parameters in a circuit is very large, - this differentiator performs much better than all the others found in TFQ. + a symbol only a constant number of gate operations need to be applied to + the circuits state. When the number of parameters in a circuit is very + large, this differentiator performs much better than all the others found + in TFQ. >>> my_op = tfq.get_expectation_op() @@ -62,7 +70,11 @@ class Adjoint(differentiator.Differentiator): """ - def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): + def generate_differentiable_op(self, + *, + sampled_op=None, + analytic_op=None, + use_cuquantum=False): """Generate a differentiable op by attaching self to an op. See `tfq.differentiators.Differentiator`. This has been partially @@ -75,6 +87,8 @@ def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): using this differentiator's `differentiate_sampled` method. analytic_op: A `callable` op that you want to make differentiable using this differentiators `differentiate_analytic` method. + use_cuquantum: A `bool` indicating whether to use the cuQuantum + version of the adjoint gradient op. 
Returns: A `callable` op that who's gradients are now registered to be @@ -85,8 +99,10 @@ def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): raise ValueError("sample base backends are not supported by the " "Adjoint method, please use analytic expectation" " or choose another differentiator.") + use_cuquantum = _ENABLE_USE_CUQUANTUM and use_cuquantum - return super().generate_differentiable_op(analytic_op=analytic_op) + return super().generate_differentiable_op(analytic_op=analytic_op, + use_cuquantum=use_cuquantum) @tf.function def get_gradient_circuits(self, programs, symbol_names, symbol_values): @@ -97,13 +113,60 @@ def get_gradient_circuits(self, programs, symbol_names, symbol_values): @differentiator.catch_empty_inputs @tf.function - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): + def differentiate_analytic_cuquantum( + self, + programs, + symbol_names, + symbol_values, + pauli_sums, + forward_pass_vals, + grad, + ): + """Returns cuquantum adjoint gradient op result.""" + return tfq_adj_grad_op_cuquantum.tfq_adj_grad(programs, symbol_names, + symbol_values, pauli_sums, + grad) + + @differentiator.catch_empty_inputs + @tf.function + def differentiate_analytic( + self, + programs, + symbol_names, + symbol_values, + pauli_sums, + forward_pass_vals, + grad, + ): + """Returns cpu adjoint gradient op result.""" return tfq_adj_grad_op.tfq_adj_grad(programs, symbol_names, symbol_values, pauli_sums, grad) - def differentiate_sampled(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): + def differentiate_sampled_cuquantum( + self, + programs, + symbol_names, + symbol_values, + pauli_sums, + num_samples, + forward_pass_vals, + grad, + ): + raise NotImplementedError( + "Adjoint state methods are not supported in sample based settings." + " Please use analytic expectation calculation or a different " + "tfq.differentiator.") + + def differentiate_sampled( + self, + programs, + symbol_names, + symbol_values, + pauli_sums, + num_samples, + forward_pass_vals, + grad, + ): raise NotImplementedError( "Adjoint state methods are not supported in sample based settings." 
" Please use analytic expectation calculation or a different " diff --git a/tensorflow_quantum/python/differentiators/adjoint_test.py b/tensorflow_quantum/python/differentiators/adjoint_test.py index 640a87d30..9cb5c30a9 100644 --- a/tensorflow_quantum/python/differentiators/adjoint_test.py +++ b/tensorflow_quantum/python/differentiators/adjoint_test.py @@ -20,20 +20,65 @@ NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x] sys.path = NEW_PATH # pylint: enable=wrong-import-position +from unittest import mock +from absl.testing import parameterized +import cirq +import numpy as np +import sympy import tensorflow as tf -from tensorflow_quantum.python.differentiators import adjoint from tensorflow_quantum.core.ops import circuit_execution_ops +from tensorflow_quantum.python import util +from tensorflow_quantum.python.differentiators import adjoint -class AdjointTest(tf.test.TestCase): +class AdjointTest(tf.test.TestCase, parameterized.TestCase): """Test that we can properly subclass differentiator.""" def test_instantiation(self): """Test that adjoint can be created.""" adjoint.Adjoint() + @parameterized.parameters( + list(util.kwargs_cartesian_product(**{ + 'use_cuquantum': [False, True], + }))) + def test_use_cuquantum(self, use_cuquantum): + """Ensure that use_cuquantum switches to cuquantum ops well.""" + if not circuit_execution_ops.is_gpu_configured(): + # Ignores this test if gpu is not configured. + self.skipTest("GPU is not set. Ignoring gpu tests...") + # Prepares a simple circuit. + qubit = cirq.GridQubit(0, 0) + circuit = util.convert_to_tensor( + [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))]) + psums = util.convert_to_tensor([[cirq.Z(qubit)]]) + symbol_values_array = np.array([[0.123]], dtype=np.float32) + symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) + + # Mocks `Adjoint.differentiate_analytic*()` to check if + # it's called once correctly. + method_name = ("differentiate_analytic_cuquantum" + if use_cuquantum else "differentiate_analytic") + with mock.patch.object(adjoint.Adjoint, + method_name, + return_value=None, + autospec=True) as mock_adj: + dif = adjoint.Adjoint() + op = circuit_execution_ops.get_expectation_op( + use_cuquantum=use_cuquantum, quantum_concurrent=False) + diff_op = dif.generate_differentiable_op( + analytic_op=op, use_cuquantum=use_cuquantum) + + # Calculate tfq gradient. 
+ with tf.GradientTape() as g: + g.watch(symbol_values_tensor) + expectations = diff_op(circuit, tf.convert_to_tensor(['alpha']), + symbol_values_tensor, psums) + _ = g.gradient(expectations, symbol_values_tensor) + mock_adj.assert_called_once() + def test_sample_errors(self): """Ensure that the adjoint method won't attach to sample ops.""" @@ -41,6 +86,8 @@ def test_sample_errors(self): op = circuit_execution_ops.get_sampled_expectation_op() with self.assertRaisesRegex(ValueError, expected_regex='not supported'): dif.generate_differentiable_op(sampled_op=op) + with self.assertRaisesRegex(ValueError, expected_regex='not supported'): + dif.generate_differentiable_op(sampled_op=op, use_cuquantum=True) def test_no_gradient_circuits(self): """Confirm the adjoint differentiator has no gradient circuits.""" diff --git a/tensorflow_quantum/python/differentiators/differentiator.py b/tensorflow_quantum/python/differentiators/differentiator.py index bb3668a92..72ee25e28 100644 --- a/tensorflow_quantum/python/differentiators/differentiator.py +++ b/tensorflow_quantum/python/differentiators/differentiator.py @@ -55,12 +55,16 @@ class Differentiator(metaclass=abc.ABCMeta): to backpropagate through a quantum circuit. """ - def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): + def generate_differentiable_op(self, + *, + sampled_op=None, + analytic_op=None, + use_cuquantum=False): """Generate a differentiable op by attaching self to an op. This function returns a `tf.function` that passes values through to - `forward_op` during the forward pass and this differentiator (`self`) to - backpropagate through the op during the backward pass. If sampled_op + `forward_op` during the forward pass and this differentiator (`self`) + to backpropagate through the op during the backward pass. If sampled_op is provided the differentiators `differentiate_sampled` method will be invoked (which requires sampled_op to be a sample based expectation op with num_samples input tensor). If analytic_op is provided the @@ -80,6 +84,8 @@ def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): using this differentiator's `differentiate_sampled` method. analytic_op: A `callable` op that you want to make differentiable using this differentiators `differentiate_analytic` method. + use_cuquantum: A `bool` indicating whether to use cuQuantum version + op. Returns: A `callable` op that who's gradients are now registered to be @@ -112,6 +118,9 @@ def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): raise TypeError('Provided arguments must be callable tensorflow ' 'ops.') + if not isinstance(use_cuquantum, bool): + raise TypeError('use_cuquantum should be boolean.') + # TODO (mbbrough): find a better workaround than this to ensure # that the correct sample based expectation wasn't accidentally # put inside of the analytical_op argument or vice versa. 
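As a usage sketch of the `use_cuquantum` plumbing introduced above (assuming a cuQuantum-enabled TFQ build and a visible GPU; the one-qubit circuit and the `alpha` symbol are illustrative only), a differentiator is attached to a GPU forward op and told to dispatch its backward pass to the cuQuantum adjoint kernel:

    import cirq
    import sympy
    import tensorflow as tf
    import tensorflow_quantum as tfq

    qubit = cirq.GridQubit(0, 0)
    programs = tfq.convert_to_tensor(
        [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))])
    pauli_sums = tfq.convert_to_tensor([[cirq.Z(qubit)]])
    symbol_values = tf.constant([[0.123]])

    # use_cuquantum=True requires quantum_concurrent=False on the forward op,
    # and must also be passed to generate_differentiable_op so the backward
    # pass routes to differentiate_analytic_cuquantum instead of
    # differentiate_analytic.
    forward_op = tfq.get_expectation_op(use_cuquantum=True,
                                        quantum_concurrent=False)
    diff_op = tfq.differentiators.Adjoint().generate_differentiable_op(
        analytic_op=forward_op, use_cuquantum=True)

    with tf.GradientTape() as tape:
        tape.watch(symbol_values)
        expectations = diff_op(programs, tf.constant(['alpha']),
                               symbol_values, pauli_sums)
    gradients = tape.gradient(expectations, symbol_values)

This mirrors the `test_use_cuquantum` case added in adjoint_test.py below.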
@@ -149,6 +158,12 @@ def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): 'Given arg: {}.'.format(str(key)) + '' 'The signature should contain: {}.'.format( list(expected_signature))) + if use_cuquantum: + _differentiate_ana, _differentiate_sam = ( + self._differentiate_ana_cq, self._differentiate_sam_cq) + else: + _differentiate_ana, _differentiate_sam = (self._differentiate_ana, + self._differentiate_sam) @tf.custom_gradient def op_wrapper_analytic(programs, symbol_names, symbol_values, @@ -157,9 +172,8 @@ def op_wrapper_analytic(programs, symbol_names, symbol_values, symbol_values, pauli_sums) def gradient(grad): - return self._differentiate_ana(programs, symbol_names, - symbol_values, pauli_sums, - forward_pass_vals, grad) + return _differentiate_ana(programs, symbol_names, symbol_values, + pauli_sums, forward_pass_vals, grad) return forward_pass_vals, gradient @@ -171,10 +185,9 @@ def op_wrapper_sampled(programs, symbol_names, symbol_values, num_samples) def gradient(grad): - return self._differentiate_sam(programs, symbol_names, - symbol_values, pauli_sums, - num_samples, forward_pass_vals, - grad) + return _differentiate_sam(programs, symbol_names, symbol_values, + pauli_sums, num_samples, + forward_pass_vals, grad) return forward_pass_vals, gradient @@ -186,6 +199,13 @@ def gradient(grad): return return_func + def _differentiate_ana_cq(self, programs, symbol_names, symbol_values, + pauli_sums, forward_pass_vals, grad): + return None, None, self.differentiate_analytic_cuquantum( + programs, symbol_names, symbol_values, + pauli_sums, forward_pass_vals, grad), \ + None + def _differentiate_ana(self, programs, symbol_names, symbol_values, pauli_sums, forward_pass_vals, grad): return None, None, self.differentiate_analytic( @@ -193,6 +213,13 @@ def _differentiate_ana(self, programs, symbol_names, symbol_values, pauli_sums, forward_pass_vals, grad), \ None + def _differentiate_sam_cq(self, programs, symbol_names, symbol_values, + pauli_sums, num_samples, forward_pass_vals, grad): + return None, None, self.differentiate_sampled_cuquantum( + programs, symbol_names, symbol_values, + pauli_sums, num_samples, forward_pass_vals, grad), \ + None, None + def _differentiate_sam(self, programs, symbol_names, symbol_values, pauli_sums, num_samples, forward_pass_vals, grad): return None, None, self.differentiate_sampled( @@ -324,6 +351,30 @@ def get_gradient_circuits(self, programs, symbol_names, symbol_values): the output `batch_weights`. """ + @catch_empty_inputs + @tf.function + def differentiate_analytic_cuquantum(self, programs, symbol_names, + symbol_values, pauli_sums, + forward_pass_vals, grad): + """Differentiate a circuit with analytical expectation with GPU ops.""" + # `self.expectation_op` is already set to cuquantum op at + # generate_differentiable_op._differentiate_ana. + return self.differentiate_analytic(programs, symbol_names, + symbol_values, pauli_sums, + forward_pass_vals, grad) + + @catch_empty_inputs + @tf.function + def differentiate_sampled_cuquantum(self, programs, symbol_names, + symbol_values, pauli_sums, num_samples, + forward_pass_vals, grad): + """Differentiate a circuit with sampled expectation with GPU ops.""" + # `self.expectation_op` is already set to cuquantum op at + # generate_differentiable_op._differentiate_sam. 
+ return self.differentiate_sampled(programs, symbol_names, symbol_values, + pauli_sums, num_samples, + forward_pass_vals, grad) + @catch_empty_inputs @tf.function def differentiate_analytic(self, programs, symbol_names, symbol_values, diff --git a/tensorflow_quantum/python/differentiators/differentiator_test.py b/tensorflow_quantum/python/differentiators/differentiator_test.py index b61b2a323..f4d4544fc 100644 --- a/tensorflow_quantum/python/differentiators/differentiator_test.py +++ b/tensorflow_quantum/python/differentiators/differentiator_test.py @@ -73,6 +73,26 @@ def test_generate_differentiable_op(self): WorkingDifferentiator().generate_differentiable_op( sampled_op=lambda programs, symbol_names, pauli_sums: 1) + def test_generate_differentiable_op_cuquantum(self): + """test the type checking on this method with `use_cuquantum`.""" + WorkingDifferentiator().generate_differentiable_op( + analytic_op=lambda programs, symbol_names, symbol_values, + pauli_sums: 1, + use_cuquantum=True) + WorkingDifferentiator().generate_differentiable_op( + sampled_op=lambda programs, symbol_names, symbol_values, pauli_sums, + num_samples: 1, + use_cuquantum=True) + with self.assertRaisesRegex(TypeError, expected_regex='boolean'): + WorkingDifferentiator().generate_differentiable_op( + analytic_op=lambda programs, symbol_names, symbol_values, + pauli_sums: 1, + use_cuquantum='junk') + with self.assertRaisesRegex(TypeError, expected_regex='boolean'): + WorkingDifferentiator().generate_differentiable_op( + sampled_op=lambda programs, symbol_names, pauli_sums: 1, + use_cuquantum='junk') + def test_single_op_link(self): """Tests if the `one-differentiator-per-op` policy is working well.""" wd = WorkingDifferentiator() diff --git a/tensorflow_quantum/python/differentiators/gradient_test.py b/tensorflow_quantum/python/differentiators/gradient_test.py index b85506b0d..9c06f4035 100644 --- a/tensorflow_quantum/python/differentiators/gradient_test.py +++ b/tensorflow_quantum/python/differentiators/gradient_test.py @@ -37,6 +37,8 @@ from tensorflow_quantum.core.ops.noise import noisy_expectation_op from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op +RANDOM_SEED = 1234 + ANALYTIC_DIFFS = [ linear_combination.ForwardDifference(grid_spacing=0.0001), linear_combination.ForwardDifference(error_order=2, grid_spacing=0.0001), @@ -58,12 +60,22 @@ circuit_execution_ops.get_expectation_op() # C++ ] +ANALYTIC_GPU_OPS = [ + circuit_execution_ops.get_expectation_op(use_cuquantum=True, + quantum_concurrent=False) +] + SAMPLED_OPS = [ circuit_execution_ops.get_sampled_expectation_op( cirq.sim.Simulator()), # WF circuit_execution_ops.get_sampled_expectation_op() # C++ ] +SAMPLED_GPU_OPS = [ + circuit_execution_ops.get_sampled_expectation_op(use_cuquantum=True, + quantum_concurrent=False) +] + NOISY_OPS = [ noisy_sampled_expectation_op.sampled_expectation, noisy_expectation_op.expectation @@ -118,19 +130,35 @@ class AnalyticGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( list( - util.kwargs_cartesian_product(**{ - 'differentiator': ANALYTIC_DIFFS, - 'op': ANALYTIC_OPS - })) + [{ - 'differentiator': adjoint.Adjoint(), - 'op': circuit_execution_ops.get_expectation_op() - }]) - def test_backprop(self, differentiator, op): + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS, + 'op': ANALYTIC_OPS, + 'use_cuquantum': [False], + })) + [{ + 'differentiator': adjoint.Adjoint(), + 'op': circuit_execution_ops.get_expectation_op(), + 'use_cuquantum': 
False, + }] + + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS + [adjoint.Adjoint()], + 'op': ANALYTIC_GPU_OPS, + 'use_cuquantum': [True], + }))) + def test_backprop(self, differentiator, op, use_cuquantum): """Test that gradients are correctly backpropagated through a quantum circuit via comparison to analytical results. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) + op = differentiator.generate_differentiable_op( + analytic_op=op, + use_cuquantum=use_cuquantum, + ) def exact_grad(theta): new_theta = 2 * np.pi * theta @@ -165,23 +193,42 @@ def exact_grad(theta): 'n_qubits': [5], 'n_programs': [3], 'n_ops': [3], - 'symbol_names': [['a', 'b']] + 'symbol_names': [['a', 'b']], + 'use_cuquantum': [False], })) + [{ 'differentiator': adjoint.Adjoint(), 'op': circuit_execution_ops.get_expectation_op(), 'n_qubits': 10, 'n_programs': 5, 'n_ops': 3, - 'symbol_names': ['a', 'b'] - }]) + 'symbol_names': ['a', 'b'], + 'use_cuquantum': False, + }] + + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS + [adjoint.Adjoint()], + 'op': ANALYTIC_GPU_OPS, + 'n_qubits': [5], + 'n_programs': [3], + 'n_ops': [3], + 'symbol_names': [['a', 'b']], + 'use_cuquantum': [True], + }))) def test_gradients_vs_cirq_finite_difference(self, differentiator, op, n_qubits, n_programs, n_ops, - symbol_names): + symbol_names, use_cuquantum): """Compare TFQ differentiators to fine-grained noiseless cirq finite differencing. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) + op = differentiator.generate_differentiable_op( + analytic_op=op, + use_cuquantum=use_cuquantum, + ) qubits = cirq.GridQubit.rect(1, n_qubits) circuit_batch, resolver_batch = \ @@ -220,18 +267,39 @@ def test_gradients_vs_cirq_finite_difference(self, differentiator, op, @parameterized.parameters( list( - util.kwargs_cartesian_product(**{ - 'differentiator': ANALYTIC_DIFFS, - 'op': ANALYTIC_OPS, - })) + [{ - 'differentiator': adjoint.Adjoint(), - 'op': circuit_execution_ops.get_expectation_op(), - }]) - def test_analytic_value_with_simple_circuit(self, differentiator, op): + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS, + 'op': ANALYTIC_OPS, + 'use_cuquantum': [False], + })) + [{ + 'differentiator': adjoint.Adjoint(), + 'op': circuit_execution_ops.get_expectation_op(), + 'use_cuquantum': False, + }] + + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS + [adjoint.Adjoint()], + 'op': ANALYTIC_GPU_OPS, + 'use_cuquantum': [True], + }))) + def test_analytic_value_with_simple_circuit( + self, + differentiator, + op, + use_cuquantum, + ): """Test the value of differentiator with simple circuit.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") # Get an expectation op, with this differentiator attached. 
differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) + op = differentiator.generate_differentiable_op( + analytic_op=op, + use_cuquantum=use_cuquantum, + ) qubit = cirq.GridQubit(0, 0) circuit = util.convert_to_tensor( [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))]) @@ -249,15 +317,28 @@ def test_analytic_value_with_simple_circuit(self, differentiator, op): @parameterized.parameters( list( - util.kwargs_cartesian_product(**{ - 'differentiator': ANALYTIC_DIFFS, - 'op': ANALYTIC_OPS, - })) + [{ - 'differentiator': adjoint.Adjoint(), - 'op': circuit_execution_ops.get_expectation_op(), - }]) - def test_empty_circuit_grad(self, differentiator, op): + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS, + 'op': ANALYTIC_OPS, + 'use_cuquantum': [False], + })) + [{ + 'differentiator': adjoint.Adjoint(), + 'op': circuit_execution_ops.get_expectation_op(), + 'use_cuquantum': False, + }] + + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': ANALYTIC_DIFFS + [adjoint.Adjoint()], + 'op': ANALYTIC_GPU_OPS, + 'use_cuquantum': [True], + }))) + def test_empty_circuit_grad(self, differentiator, op, use_cuquantum): """Test that providing no circuits will fail gracefully.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") differentiator.refresh() op = differentiator.generate_differentiable_op(analytic_op=op) circuit = tf.convert_to_tensor([], dtype=tf.string) @@ -284,11 +365,23 @@ class SampledGradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase): **{ 'differentiator': SAMPLED_DIFFS, 'op': SAMPLED_OPS, - 'num_samples': [20000] - }))) + 'num_samples': [20000], + 'use_cuquantum': [False], + })) + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': SAMPLED_DIFFS, + 'op': SAMPLED_GPU_OPS, + 'num_samples': [20000], + 'use_cuquantum': [True], + }))) def test_sampled_value_with_simple_circuit(self, differentiator, op, - num_samples): + num_samples, use_cuquantum): """Test the value of sampled differentiator with simple circuit.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) # Get an expectation op, with this differentiator attached. differentiator.refresh() op = differentiator.generate_differentiable_op(sampled_op=op) @@ -318,15 +411,33 @@ def test_sampled_value_with_simple_circuit(self, differentiator, op, 'n_programs': [5], 'n_ops': [2], 'symbol_names': [['a', 'b']], - 'num_samples': [30000] + 'num_samples': [30000], + 'use_cuquantum': [False], + })) + + list( + util.kwargs_cartesian_product( + **{ + 'diff_and_tol': zip(SAMPLED_DIFFS, SAMPLED_DIFFS_TOLS), + 'op': SAMPLED_GPU_OPS, + 'n_qubits': [3], + 'n_programs': [5], + 'n_ops': [2], + 'symbol_names': [['a', 'b']], + 'num_samples': [30000], + 'use_cuquantum': [True], }))) def test_approx_equality_shallow(self, diff_and_tol, op, n_qubits, symbol_names, n_ops, n_programs, - num_samples): + num_samples, use_cuquantum): """Test small circuits with limited depth.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) differentiator, tol = diff_and_tol differentiator.refresh() - op = differentiator.generate_differentiable_op(sampled_op=op) + op = differentiator.generate_differentiable_op( + sampled_op=op, use_cuquantum=use_cuquantum) qubits = cirq.GridQubit.rect(1, n_qubits) circuit_batch, resolver_batch = \ @@ -369,12 +480,25 @@ def test_approx_equality_shallow(self, diff_and_tol, op, n_qubits, @parameterized.parameters( list( - util.kwargs_cartesian_product(**{ - 'differentiator': SAMPLED_DIFFS, - 'op': SAMPLED_OPS, - }))) - def test_empty_circuit_sampled_grad(self, differentiator, op): + util.kwargs_cartesian_product( + **{ + 'differentiator': SAMPLED_DIFFS, + 'op': SAMPLED_OPS, + 'use_cuquantum': [False], + })) + list( + util.kwargs_cartesian_product( + **{ + 'differentiator': SAMPLED_DIFFS, + 'op': SAMPLED_GPU_OPS, + 'use_cuquantum': [True], + }))) + def test_empty_circuit_sampled_grad(self, differentiator, op, + use_cuquantum): """Test that providing no circuits will fail gracefully.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) differentiator.refresh() op = differentiator.generate_differentiable_op(sampled_op=op) circuit = tf.convert_to_tensor([], dtype=tf.string) diff --git a/tensorflow_quantum/python/layers/circuit_executors/expectation.py b/tensorflow_quantum/python/layers/circuit_executors/expectation.py index 35d0c1b50..cba81e7b5 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/expectation.py +++ b/tensorflow_quantum/python/layers/circuit_executors/expectation.py @@ -21,6 +21,7 @@ import cirq from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.core.ops.noise import noisy_expectation_op +from tensorflow_quantum.python import quantum_context from tensorflow_quantum.python.differentiators import adjoint from tensorflow_quantum.python.differentiators import parameter_shift from tensorflow_quantum.python.differentiators import differentiator as diff @@ -205,7 +206,11 @@ class Expectation(tf.keras.layers.Layer): """ - def __init__(self, backend='noiseless', differentiator=None, **kwargs): + def __init__(self, + backend='noiseless', + differentiator=None, + use_cuquantum=False, + **kwargs): """Instantiate this Layer. Create a layer that will output expectation values gained from @@ -225,6 +230,7 @@ def __init__(self, backend='noiseless', differentiator=None, **kwargs): which uses `tfq.differentiators.ParameterShift()`. If `backend` is also 'noiseless' then default is `tfq.differentiators.Adjoint`. + use_cuquantum: Calls TFQ cuQuantum version op. """ super().__init__(**kwargs) @@ -238,20 +244,29 @@ def __init__(self, backend='noiseless', differentiator=None, **kwargs): "Please use SampledExpectation instead.") used_op = None self.noisy = False - if backend == 'noiseless': - backend = None # Ingest differentiator. 
if differentiator is None: differentiator = parameter_shift.ParameterShift() - if backend is None: + if backend == 'noiseless' or backend is None: differentiator = adjoint.Adjoint() if not isinstance(differentiator, diff.Differentiator): raise TypeError("Differentiator must inherit from " "tfq.differentiators.Differentiator") - if backend == 'noisy': + if backend == 'noiseless' or backend is None: + mode = quantum_context.get_quantum_concurrent_op_mode() + quantum_concurrent = False if use_cuquantum else mode + used_op = circuit_execution_ops.get_expectation_op( + backend=None, + use_cuquantum=use_cuquantum, + quantum_concurrent=quantum_concurrent) + self._expectation_op = differentiator.generate_differentiable_op( + analytic_op=used_op, use_cuquantum=use_cuquantum) + elif backend == 'noisy': + if use_cuquantum: + raise ValueError("noisy backend does not currently support GPU") used_op = noisy_expectation_op.expectation self._expectation_op = differentiator.generate_differentiable_op( sampled_op=used_op) @@ -270,15 +285,20 @@ def call(self, symbol_values=None, operators=None, repetitions=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)): + initializer=None): """Keras call function. - Input options: - `inputs`, `symbol_names`, `symbol_values`: - see `input_checks.expand_circuits` - `operators`: see `input_checks.expand_operators` - - Output shape: + Args: + inputs: See `input_checks.expand_circuits. + symbol_names: See `input_checks.expand_circuits. + symbol_values: See `input_checks.expand_circuits. + operators: See `input_checks.expand_operators` + repetitions: A Python `int` or a pre-converted `tf.Tensor` + containing a single `int` entry. + initializer: The keras initializer object for weights. + Defaults to uniform distribution [0..2*pi] + + Returns: `tf.Tensor` with shape [batch_size, n_ops] that holds the expectation value for each circuit with each op applied to it (after resolving the corresponding parameters in). 
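For the layer-level path, a minimal sketch (again assuming a cuQuantum-enabled build; the rotation circuit and the `theta` symbol are illustrative only) shows the noiseless `Expectation` layer constructed with the new flag and called like any other Keras layer:

    import cirq
    import numpy as np
    import sympy
    import tensorflow_quantum as tfq

    bit = cirq.GridQubit(0, 0)
    circuit = cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit))

    # With backend='noiseless' this builds the cuQuantum expectation op with
    # quantum_concurrent=False and generates the differentiable op with
    # use_cuquantum=True (Adjoint differentiator by default).
    layer = tfq.layers.Expectation(backend='noiseless', use_cuquantum=True)
    output = layer(circuit,
                   symbol_names=['theta'],
                   symbol_values=np.array([[0.5]], dtype=np.float32),
                   operators=cirq.Z(bit))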
@@ -287,6 +307,9 @@ def call(self, if symbol_values is None: values_empty = True + if initializer is None: + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) + inputs, symbol_names, symbol_values = input_checks.expand_circuits( inputs, symbol_names, symbol_values) diff --git a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py index 1ef7b99fc..f36396232 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py @@ -27,10 +27,13 @@ import tensorflow as tf import cirq +from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.python.layers.circuit_executors import expectation from tensorflow_quantum.python.differentiators import linear_combination from tensorflow_quantum.python import util +RANDOM_SEED = 1234 + def _gen_single_bit_rotation_problem(bit, symbols, noisy): """Generate a toy problem on 1 qubit.""" @@ -48,7 +51,7 @@ def _gen_single_bit_rotation_problem(bit, symbols, noisy): return circuit -class ExpectationTest(tf.test.TestCase): +class ExpectationTest(parameterized.TestCase, tf.test.TestCase): """Basic tests for the expectation layer.""" def test_expectation_instantiate(self): @@ -76,11 +79,15 @@ def run_sweep(self): expectation.Expectation(backend=MySampler()) with self.assertRaisesRegex( - TypeError, expected_regex="SimulatesExpectationValues or None"): + TypeError, + expected_regex="SimulatesExpectationValues or None", + ): expectation.Expectation(backend='junk') with self.assertRaisesRegex( - TypeError, expected_regex="tfq.differentiators.Differentiator"): + TypeError, + expected_regex="tfq.differentiators.Differentiator", + ): expectation.Expectation(differentiator='junk') def test_expectation_type_inputs_error(self): @@ -189,7 +196,10 @@ def test_static_cases(self): # Ensure tiling up of circuits works as expected. expectation.Expectation()(reg_circuit, operators=test_psum) - expectation.Expectation()(reg_circuit, operators=[test_psum, test_psum]) + expectation.Expectation()( + reg_circuit, + operators=[test_psum, test_psum], + ) # Ensure tiling up of symbol_values works as expected. expectation.Expectation()(symb_circuit, @@ -276,10 +286,17 @@ def test_static_cases_noisy(self): ], [cirq.Z(bit), cirq.Z(bit), cirq.Z(bit)]], repetitions=[[1, 2, 3], [4, 5, 6]]) - def test_expectation_simple_tf_train(self): + @parameterized.parameters([{ + 'use_cuquantum': False, + }, { + 'use_cuquantum': True, + }]) + def test_expectation_simple_tf_train(self, use_cuquantum): """Train a layer using standard tf (not keras). This is a subtle test that will work since we don't use keras compile. 
""" + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) circuit = \ cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit)) @@ -290,7 +307,8 @@ def test_expectation_simple_tf_train(self): with tf.GradientTape() as tape: circuit_out = layer(circuit, symbol_names=['theta'], - operators=op) + operators=op, + initializer=initializer) mse = tf.square(tf.reduce_sum(tf.subtract(circuit_out, -1))) grads = tape.gradient(mse, layer.trainable_weights) optimizer.apply_gradients(zip(grads, layer.trainable_weights)) @@ -302,19 +320,30 @@ class ExpectationFunctionalTests(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, + }, + { + 'backend': None, # old API usage + 'use_cuquantum': False, }, { - 'backend': None # old API usage + 'backend': None, + 'use_cuquantum': True, } ]) - def test_simple_param_value_input(self, backend): + def test_simple_param_value_input(self, backend, use_cuquantum): """Train a densely connected hybrid model. - This model will put a qubit in the zero or one state from a random state - given the input zero or one. This tests the input signature: + This model will put a qubit in the zero or one state from a random + state given the input zero or one. This tests the input signature: Expectation([input_value_batch]). """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) noisy = backend == 'noisy' bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x y z') @@ -325,12 +354,15 @@ def test_simple_param_value_input(self, backend): l1 = tf.keras.layers.Dense(10)(inputs) l2 = tf.keras.layers.Dense(3)(l1) reps = 1000 if noisy else None - outputs = expectation.Expectation(backend=backend)( - datum, - symbol_names=symbols, - operators=cirq.Z(bit), - symbol_values=l2, - repetitions=reps) + outputs = expectation.Expectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(datum, + symbol_names=symbols, + operators=cirq.Z(bit), + symbol_values=l2, + repetitions=reps, + initializer=initializer) model = tf.keras.Model(inputs=[datum, inputs], outputs=outputs) data_in = np.array([[1], [0]], dtype=np.float32) @@ -347,18 +379,29 @@ def test_simple_param_value_input(self, backend): @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, }, { - 'backend': None # old API usage + 'backend': None, # old API usage + 'use_cuquantum': False, + }, + { + 'backend': None, + 'use_cuquantum': True, } ]) - def test_simple_op_input(self, backend): + def test_simple_op_input(self, backend, use_cuquantum): """Test a simple operator input Learn qubit in the z+ state using two different measurement operators. This tests input signature Expectation([operator_batch]) """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + normal_initializer = tf.keras.initializers.RandomNormal() noisy = backend == 'noisy' bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x, y, z') @@ -373,14 +416,19 @@ def test_simple_op_input(self, backend): op_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string) reps = 1000 if noisy else None - output = expectation.Expectation(backend=backend)( - circuit_input, - symbol_names=symbols, - operators=op_input, - initializer=tf.keras.initializers.RandomNormal(), - repetitions=reps) - - model = tf.keras.Model(inputs=[circuit_input, op_input], outputs=output) + output = expectation.Expectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_input, + symbol_names=symbols, + operators=op_input, + initializer=normal_initializer, + repetitions=reps) + + model = tf.keras.Model( + inputs=[circuit_input, op_input], + outputs=output, + ) model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), @@ -395,22 +443,34 @@ def test_simple_op_input(self, backend): @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, + }, + { + 'backend': None, # old api usage. + 'use_cuquantum': False, }, { - 'backend': None # old api usage. + 'backend': None, + 'use_cuquantum': True, }, { - 'backend': cirq.Simulator() + 'backend': cirq.Simulator(), + 'use_cuquantum': False, } ]) - def test_simple_op_and_param_input(self, backend): + def test_simple_op_and_param_input(self, backend, use_cuquantum): """Test a simple operator and parameter input. Train a NN to put a qubit in the z+ or x+ states based on a classical binary input. This tests the input signature: Expectation([value_batch, operator_batch]). """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) noisy = backend == 'noisy' bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x, y, z') @@ -426,12 +486,15 @@ def test_simple_op_and_param_input(self, backend): dense_1 = tf.keras.layers.Dense(10)(data_inp) dense_2 = tf.keras.layers.Dense(3)(dense_1) reps = 1000 if noisy else None - circuit_output = expectation.Expectation(backend=backend)( - circuit_inp, - symbol_names=symbols, - symbol_values=dense_2, - operators=op_inp, - repetitions=reps) + circuit_output = expectation.Expectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_inp, + symbol_names=symbols, + symbol_values=dense_2, + operators=op_inp, + repetitions=reps, + initializer=initializer) functional_model = tf.keras.Model( inputs=[data_inp, op_inp, circuit_inp], outputs=[circuit_output]) @@ -448,18 +511,30 @@ def test_simple_op_and_param_input(self, backend): @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, }, { - 'backend': None # old api usage. + 'backend': None, # old API usage + 'use_cuquantum': False, + }, + { + 'backend': None, + 'use_cuquantum': True, } ]) - def test_dnn_qnn_dnn(self, backend): + def test_dnn_qnn_dnn(self, backend, use_cuquantum): """Train a fully hybrid network using an Expectation layer. Train the network to output +-5 given an input of 1 or 0. This tests that everything works when Expectation layer is a middle layers. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) + noisy = backend == 'noisy' bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x, y, z') @@ -473,12 +548,15 @@ def test_dnn_qnn_dnn(self, backend): d1 = tf.keras.layers.Dense(10)(classical_input) d2 = tf.keras.layers.Dense(3)(d1) reps = 1000 if noisy else None - quantum = expectation.Expectation(backend=backend)( - circuit_input, - symbol_names=symbols, - symbol_values=d2, - operators=cirq.Z(bit), - repetitions=reps) + quantum = expectation.Expectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_input, + symbol_names=symbols, + symbol_values=d2, + operators=cirq.Z(bit), + repetitions=reps, + initializer=initializer) d3 = tf.keras.layers.Dense(1)(quantum) model = tf.keras.Model(inputs=[circuit_input, classical_input], diff --git a/tensorflow_quantum/python/layers/circuit_executors/sample.py b/tensorflow_quantum/python/layers/circuit_executors/sample.py index 750885c20..3ba53c6bf 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sample.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sample.py @@ -19,6 +19,7 @@ from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.core.ops.noise import noisy_samples_op +from tensorflow_quantum.python import quantum_context from tensorflow_quantum.python.layers.circuit_executors import input_checks @@ -139,7 +140,7 @@ class Sample(tf.keras.layers.Layer): """ - def __init__(self, backend='noiseless', **kwargs): + def __init__(self, backend='noiseless', use_cuquantum=False, **kwargs): """Instantiate this Layer. Create a layer that will output bitstring samples taken from either a @@ -150,12 +151,20 @@ def __init__(self, backend='noiseless', **kwargs): to the noiseless simulator. Options are {'noisy', 'noiseless'}, however users may also specify a preconfigured cirq execution object to use instead, which must inherit `cirq.Sampler`. + use_cuquantum: Calls TFQ GPU version op. """ super().__init__(**kwargs) used_op = None - if backend == 'noiseless': - used_op = circuit_execution_ops.get_sampling_op(None) + if backend == 'noiseless' or backend is None: + mode = quantum_context.get_quantum_concurrent_op_mode() + quantum_concurrent = False if use_cuquantum else mode + used_op = circuit_execution_ops.get_sampling_op( + None, + use_cuquantum=use_cuquantum, + quantum_concurrent=quantum_concurrent) elif backend == 'noisy': + if use_cuquantum: + raise ValueError('noisy backend has no GPU support.') used_op = noisy_samples_op.samples else: used_op = circuit_execution_ops.get_sampling_op(backend) @@ -170,17 +179,18 @@ def call(self, repetitions=None): """Keras call function. - Input options: - `inputs`, `symbol_names`, `symbol_values`: - see `input_checks.expand_circuits` - `repetitions`: a Python `int` or a pre-converted - `tf.Tensor` containing a single `int` entry. + Args: + inputs: See `input_checks.expand_circuits`. + symbol_names: See `input_checks.expand_circuits`. + symbol_values: See `input_checks.expand_circuits`. + repetitions: A Python `int` or a pre-converted `tf.Tensor` + containing a single `int` entry. 
- Output shape: + Returns: `tf.RaggedTensor` with shape: - [batch size of symbol_values, repetitions, ] - or - [number of circuits, repetitions, ] + [batch size of symbol_values, repetitions, ] + or + [number of circuits, repetitions, ] """ if repetitions is None: raise ValueError("Number of repetitions not specified.") diff --git a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py b/tensorflow_quantum/python/layers/circuit_executors/sample_test.py index 7103759b4..379fbaee3 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sample_test.py @@ -27,9 +27,12 @@ import tensorflow as tf import cirq +from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.python.layers.circuit_executors import sample from tensorflow_quantum.python import util +RANDOM_SEED = 1234 + class SampleTest(tf.test.TestCase, parameterized.TestCase): """Tests for the Sample layer.""" @@ -86,21 +89,32 @@ def test_sample_invalid_shape_inputs(self): @parameterized.parameters([ { - 'backend': 'noiseless' + 'backend': 'noiseless', + 'use_cuquantum': False, + }, + { + 'backend': 'noisy', + 'use_cuquantum': False, }, { - 'backend': 'noisy' + 'backend': cirq.Simulator(), + 'use_cuquantum': False, }, { - 'backend': cirq.Simulator() + 'backend': None, # old API usage. + 'use_cuquantum': False, }, { - 'backend': None # old API usage. + 'backend': None, + 'use_cuquantum': True, } ]) - def test_sample_invalid_combinations(self, backend): + def test_sample_invalid_combinations(self, backend, use_cuquantum): """Test with valid type inputs and valid value, but incorrect combo.""" - sampler = sample.Sample(backend) + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + sampler = sample.Sample(backend, use_cuquantum=use_cuquantum) symbol = sympy.Symbol('alpha') circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))**symbol) with self.assertRaisesRegex(Exception, expected_regex=""): @@ -142,9 +156,17 @@ def test_sample_invalid_combinations(self, backend): symbol_values=np.zeros((3, 1)), repetitions=5) - def test_sample_basic_inputs(self): + @parameterized.parameters([{ + 'use_cuquantum': False, + }, { + 'use_cuquantum': True, + }]) + def test_sample_basic_inputs(self, use_cuquantum): """Test that sample ingests inputs correctly in simple settings.""" - sampler = sample.Sample() + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + sampler = sample.Sample(use_cuquantum=use_cuquantum) sampler(cirq.Circuit(), repetitions=10) sampler([cirq.Circuit()], repetitions=10) sampler(cirq.Circuit(), @@ -156,31 +178,49 @@ def test_sample_basic_inputs(self): symbol_values=[[0.5]], repetitions=10) - def test_sample_outputs_simple(self): + @parameterized.parameters([{ + 'use_cuquantum': False, + }, { + 'use_cuquantum': True, + }]) + def test_sample_outputs_simple(self, use_cuquantum): """Test the simplest call where nothing but circuits are provided.""" - sampler = sample.Sample() + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + sampler = sample.Sample(use_cuquantum=use_cuquantum) circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))) output = sampler([circuit, circuit], repetitions=5) self.assertShapeEqual(np.empty((2, 5, 1)), output.to_tensor()) - # TODO(trevormccrt): add QuantumEngineSampler to this once it is available + # TODO(trevormccrt): add ProcessorSampler to this once it is available @parameterized.parameters( list( util.kwargs_cartesian_product( backend=['noiseless', 'noisy', cirq.Simulator(), None], + use_cuquantum=[False, True], all_n_qubits=[[3, 4, 10]], n_samples=[1], symbol_names=[[], ['a', 'b']]))) - def test_sample_output(self, backend, all_n_qubits, n_samples, - symbol_names): + def test_sample_output(self, backend, use_cuquantum, all_n_qubits, + n_samples, symbol_names): """Test that expected output format is preserved. Check that any pre or post processing done inside the layers does not cause what is output from the layer to structurally deviate from what is expected. """ - sampler = sample.Sample(backend=backend) + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + if use_cuquantum: + # If use_cuquantum is True, + if backend is not None and backend != 'noiseless': + return + # Passes backend=None or backend == 'noiseless' only. + sampler = sample.Sample(backend=backend, use_cuquantum=use_cuquantum) bits = cirq.GridQubit.rect(1, max(all_n_qubits)) programs = [] expected_outputs = [] diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py index fa434e332..0fdcc421f 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py @@ -22,6 +22,7 @@ import cirq from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.core.ops.noise import noisy_sampled_expectation_op +from tensorflow_quantum.python import quantum_context from tensorflow_quantum.python.differentiators import differentiator as diff from tensorflow_quantum.python.differentiators import parameter_shift from tensorflow_quantum.python.layers.circuit_executors import input_checks @@ -213,7 +214,11 @@ class SampledExpectation(tf.keras.layers.Layer): """ - def __init__(self, backend='noiseless', differentiator=None, **kwargs): + def __init__(self, + backend='noiseless', + differentiator=None, + use_cuquantum=False, + **kwargs): """Instantiate this Layer. Create a layer that will output expectation values gained from @@ -227,6 +232,7 @@ def __init__(self, backend='noiseless', differentiator=None, **kwargs): derivative values of given operators_to_measure and circuit, which must inherit `tfq.differentiators.Differentiator`. Defaults to `parameter_shift.ParameterShift()` (None argument). + use_cuquantum: Calls TFQ GPU version op. """ super().__init__(**kwargs) @@ -246,10 +252,17 @@ def __init__(self, backend='noiseless', differentiator=None, **kwargs): "not cirq.Sampler. 
Please use Expectation instead.") used_op = None - if backend == 'noiseless': - backend = None - - if backend == 'noisy': + if backend == 'noiseless' or backend is None: + mode = quantum_context.get_quantum_concurrent_op_mode() + quantum_concurrent = False if use_cuquantum else mode + used_op = circuit_execution_ops.get_sampled_expectation_op( + backend=None, + use_cuquantum=use_cuquantum, + quantum_concurrent=quantum_concurrent, + ) + elif backend == 'noisy': + if use_cuquantum: + raise ValueError('noisy backend does not currently support GPU') used_op = noisy_sampled_expectation_op.sampled_expectation else: used_op = circuit_execution_ops.get_sampled_expectation_op( @@ -267,25 +280,31 @@ def call(self, symbol_values=None, operators=None, repetitions=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)): + initializer=None): """Keras call function. - Input options: - `inputs`, `symbol_names`, `symbol_values`: - see `input_checks.expand_circuits` - `operators`: see `input_checks.expand_operators` - `repetitions`: a Python `int` or a pre-converted - `tf.Tensor` containing a single `int` entry. - - Output shape: + Args: + inputs: See `input_checks.expand_circuits. + symbol_names: See `input_checks.expand_circuits. + symbol_values: See `input_checks.expand_circuits. + operators: See `input_checks.expand_operators` + repetitions: A Python `int` or a pre-converted `tf.Tensor` + containing a single `int` entry. + initializer: The keras initializer object for weights. + Defaults to uniform distribution [0..2*pi] + + Returns: `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). + expectation value for each circuit with each op applied to it + (after resolving the corresponding parameters in). """ values_empty = False if symbol_values is None: values_empty = True + if initializer is None: + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) + inputs, symbol_names, symbol_values = input_checks.expand_circuits( inputs, symbol_names, symbol_values) diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py index b027cfbe5..c13afbfd4 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py @@ -27,11 +27,14 @@ import tensorflow as tf import cirq +from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.python.layers.circuit_executors import \ sampled_expectation from tensorflow_quantum.python.differentiators import linear_combination from tensorflow_quantum.python import util +RANDOM_SEED = 1234 + class CustomSampler(cirq.Sampler): """Wrapper for cirq.Simulator to confirm that custom samplers work.""" @@ -98,23 +101,40 @@ def simulate_sweep(self): @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, + }, + { + 'backend': 'noiseless', + 'use_cuquantum': False, }, { - 'backend': 'noiseless' + 'backend': 'noiseless', + 'use_cuquantum': True, }, { - 'backend': cirq.Simulator() + 'backend': cirq.Simulator(), + 'use_cuquantum': False, }, { - 'backend': CustomSampler() + 'backend': CustomSampler(), + 'use_cuquantum': False, }, { - 'backend': None # older API usage. + 'backend': None, # older API usage. 
+ 'use_cuquantum': False, + }, + { + 'backend': None, + 'use_cuquantum': True, } ]) - def test_sampled_expectation_type_inputs_error(self, backend): + def test_sampled_expectation_type_inputs_error(self, backend, + use_cuquantum): """Test that SampledExpectation errors within Keras call.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") bit = cirq.GridQubit(0, 0) symbol = sympy.Symbol('alpha') @@ -125,44 +145,67 @@ def test_sampled_expectation_type_inputs_error(self, backend): with self.assertRaisesRegex(RuntimeError, expected_regex="repetitions not provided"): - sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + symbol_values=[[0.5]], + operators=test_psum) with self.assertRaisesRegex(Exception, expected_regex="Unknown initializer"): - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, - operators=test_psum, - initializer='junk', - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, + operators=test_psum, + initializer='junk', + repetitions=1) with self.assertRaisesRegex(Exception, expected_regex="cannot be parsed"): - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, operators=test_psum, repetitions='junk') + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=test_psum, repetitions='junk') @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, + }, + { + 'backend': 'noiseless', + 'use_cuquantum': False, + }, + { + 'backend': 'noiseless', + 'use_cuquantum': True, }, { - 'backend': 'noiseless' + 'backend': cirq.Simulator(), + 'use_cuquantum': False, }, { - 'backend': cirq.Simulator() + 'backend': CustomSampler(), + 'use_cuquantum': False, }, { - 'backend': CustomSampler() + 'backend': None, # older API usage. + 'use_cuquantum': False, }, { - 'backend': None # older API usage. + 'backend': None, + 'use_cuquantum': True, } ]) - def test_sampled_expectation_op_error(self, backend): + def test_sampled_expectation_op_error(self, backend, use_cuquantum): """Test that expectation errors within underlying ops correctly.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + # Note the expected_regex is left blank here since there is a # discrepancy between the error strings provided between backends. bit = cirq.GridQubit(0, 0) @@ -174,73 +217,101 @@ def test_sampled_expectation_op_error(self, backend): with self.assertRaisesRegex(Exception, expected_regex="pauli"): # Operators has wrong rank. Parse error. - sampled_expectation.SampledExpectation(backend=backend)( - [reg_circuit], - operators=util.convert_to_tensor([test_psum]), - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([reg_circuit], + operators=util.convert_to_tensor([test_psum]), + repetitions=1) with self.assertRaisesRegex(Exception, expected_regex="symbol_values"): # symbol_values has wrong rank. 
- sampled_expectation.SampledExpectation(backend=backend)( - [symb_circuit], - symbol_names=[symbol], - symbol_values=[0.5], - operators=test_psum, - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([symb_circuit], + symbol_names=[symbol], + symbol_values=[0.5], + operators=test_psum, + repetitions=1) with self.assertRaisesRegex(Exception, expected_regex="pauli"): # Wrong batch size for pauli operators. - sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - operators=[[test_psum], [test_psum]], - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + operators=[[test_psum], [test_psum]], + repetitions=1) with self.assertRaisesRegex(Exception, expected_regex="pauli"): # Wrong batch size for pauli operators. - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, - operators=[[test_psum], [test_psum]], - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=[[test_psum], [test_psum]], repetitions=1) with self.assertRaisesRegex(Exception, expected_regex="0"): # Wrong repetitions. - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, operators=test_psum, repetitions=-1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=test_psum, repetitions=-1) with self.assertRaisesRegex(Exception, expected_regex=""): # Wrong second dimension size for repetitions & pauli operators. - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, operators=test_psum, repetitions=[5, 4, 3]) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=test_psum, repetitions=[5, 4, 3]) with self.assertRaisesRegex(Exception, expected_regex=""): # Wrong batch_size for symbol values. - sampled_expectation.SampledExpectation(backend=backend)( - [reg_circuit], - symbol_names=[symbol], - symbol_values=np.zeros((3, 1)), - operators=test_psum, - repetitions=5) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([reg_circuit], + symbol_names=[symbol], + symbol_values=np.zeros((3, 1)), + operators=test_psum, + repetitions=5) @parameterized.parameters([ { - 'backend': 'noisy' + 'backend': 'noisy', + 'use_cuquantum': False, + }, + { + 'backend': 'noiseless', + 'use_cuquantum': False, }, { - 'backend': 'noiseless' + 'backend': 'noiseless', + 'use_cuquantum': True, }, { - 'backend': cirq.Simulator() + 'backend': cirq.Simulator(), + 'use_cuquantum': False, }, { - 'backend': CustomSampler() + 'backend': CustomSampler(), + 'use_cuquantum': False, }, { - 'backend': None # older API usage. + 'backend': None, # older API usage. + 'use_cuquantum': False, + }, + { + 'backend': None, + 'use_cuquantum': True, } ]) - def test_static_cases(self, backend): + def test_static_cases(self, backend, use_cuquantum): """Run inputs through in complex cases.""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") bit = cirq.GridQubit(0, 0) symbol = sympy.Symbol('alpha') @@ -250,72 +321,102 @@ def test_static_cases(self, backend): reg_circuit = cirq.Circuit(cirq.H(bit)) # Passing a 2d operators input requires a 1d circuit input. 
- sampled_expectation.SampledExpectation(backend=backend)( - [reg_circuit, reg_circuit], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([reg_circuit, reg_circuit], + operators=[[test_psum, test_psum], [test_psum, test_psum]], + repetitions=1) # Passing 2d operators along with other inputs. - sampled_expectation.SampledExpectation(backend=backend)( - [symb_circuit, symb_circuit], - symbol_names=[symbol], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) - sampled_expectation.SampledExpectation(backend=backend)( - [symb_circuit, symb_circuit], - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([symb_circuit, symb_circuit], + symbol_names=[symbol], + operators=[[test_psum, test_psum], [test_psum, test_psum]], + repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )([symb_circuit, symb_circuit], + symbol_names=[symbol], + symbol_values=[[0.5], [0.8]], + operators=[[test_psum, test_psum], [test_psum, test_psum]], + repetitions=1) # Ensure tiling up of circuits works as expected. - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, operators=test_psum, repetitions=1) - sampled_expectation.SampledExpectation(backend=backend)( - reg_circuit, operators=[test_psum, test_psum], repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=test_psum, repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(reg_circuit, operators=[test_psum, test_psum], repetitions=1) # Ensure tiling up of symbol_values works as expected. - sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=test_psum, - repetitions=1) - sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum, - repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + symbol_values=[[0.5], [0.8]], + operators=test_psum, + repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + symbol_values=[[0.5]], + operators=test_psum, + repetitions=1) # Test multiple operators with integer valued repetition. 
- sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=[-1.0 * cirq.Z(bit), - cirq.X(bit) + 2.0 * cirq.Z(bit)], - repetitions=1) - sampled_expectation.SampledExpectation(backend=backend)( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=[-1.0 * cirq.Z(bit), - cirq.X(bit) + 2.0 * cirq.Z(bit)], - repetitions=[5, 1]) - - def test_sampled_expectation_simple_tf_train(self): + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + symbol_values=[[0.5]], + operators=[-1.0 * cirq.Z(bit), + cirq.X(bit) + 2.0 * cirq.Z(bit)], + repetitions=1) + sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(symb_circuit, + symbol_names=[symbol], + symbol_values=[[0.5]], + operators=[-1.0 * cirq.Z(bit), + cirq.X(bit) + 2.0 * cirq.Z(bit)], + repetitions=[5, 1]) + + @parameterized.parameters([{ + 'use_cuquantum': False, + }, { + 'use_cuquantum': True, + }]) + def test_sampled_expectation_simple_tf_train(self, use_cuquantum): """Train a layer using standard tf (not keras).""" + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) circuit = cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit)) - layer = sampled_expectation.SampledExpectation() + layer = sampled_expectation.SampledExpectation( + use_cuquantum=use_cuquantum) optimizer = tf.optimizers.Adam(learning_rate=0.05) - for _ in range(10): + for _ in range(20): with tf.GradientTape() as tape: circuit_out = layer(circuit, symbol_names=['theta'], operators=cirq.Z(bit), - repetitions=100) + repetitions=1000, + initializer=initializer) mse = tf.square(tf.reduce_sum(tf.subtract(circuit_out, -1))) grads = tape.gradient(mse, layer.trainable_weights) optimizer.apply_gradients(zip(grads, layer.trainable_weights)) @@ -326,13 +427,27 @@ class SampledExpectationFunctionalTests(parameterized.TestCase, tf.test.TestCase): """Test hybrid/integrated models that include a SampledExpectation layer.""" - @parameterized.parameters([{'backend': 'noisy'}, {'backend': 'noiseless'}]) - def test_simple_param_value_input(self, backend): + @parameterized.parameters([{ + 'backend': 'noisy', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': True, + }]) + def test_simple_param_value_input(self, backend, use_cuquantum): """Train a densely connected hybrid model. This model will put a qubit in the zero or one state from a random state given the input zero or one. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x y z') circuit = _gen_single_bit_rotation_problem( @@ -342,12 +457,15 @@ def test_simple_param_value_input(self, backend): datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string) l1 = tf.keras.layers.Dense(10)(inputs) l2 = tf.keras.layers.Dense(3)(l1) - outputs = sampled_expectation.SampledExpectation(backend=backend)( - datum, - symbol_names=symbols, - operators=cirq.Z(bit), - symbol_values=l2, - repetitions=5000) + outputs = sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(datum, + symbol_names=symbols, + operators=cirq.Z(bit), + symbol_values=l2, + repetitions=5000, + initializer=initializer) model = tf.keras.Model(inputs=[datum, inputs], outputs=outputs) data_in = np.array([[1], [0]], dtype=np.float32) @@ -361,12 +479,26 @@ def test_simple_param_value_input(self, backend): history = model.fit(x=[circuits, data_in], y=data_out, epochs=30) self.assertAllClose(history.history['loss'][-1], 0, atol=0.3) - @parameterized.parameters([{'backend': 'noisy'}, {'backend': 'noiseless'}]) - def test_simple_op_input(self, backend): + @parameterized.parameters([{ + 'backend': 'noisy', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': True, + }]) + def test_simple_op_input(self, backend, use_cuquantum): """Test a simple operator input Learn qubit in the z+ state using two different measurement operators. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x y z') ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]]) @@ -382,10 +514,13 @@ def test_simple_op_input(self, backend): n_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.int32) circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string) circuit_output = sampled_expectation.SampledExpectation( - backend=backend)(circuit_inp, - symbol_names=symbols, - operators=op_inp, - repetitions=n_inp) + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_inp, + symbol_names=symbols, + operators=op_inp, + repetitions=n_inp, + initializer=initializer) model = tf.keras.Model(inputs=[circuit_inp, op_inp, n_inp], outputs=[circuit_output]) @@ -400,13 +535,27 @@ def test_simple_op_input(self, backend): self.assertAllClose(history.history['loss'][-1], 0, atol=1e-2) - @parameterized.parameters([{'backend': 'noisy'}, {'backend': 'noiseless'}]) - def test_simple_op_and_param_input(self, backend): + @parameterized.parameters([{ + 'backend': 'noisy', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': True, + }]) + def test_simple_op_and_param_input(self, backend, use_cuquantum): """Test a simple operator and parameter input. Train a NN to put a qubit in the z+ or x+ states based on a classical binary input. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x y z') ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]]) @@ -425,11 +574,14 @@ def test_simple_op_and_param_input(self, backend): dense_1 = tf.keras.layers.Dense(10)(data_inp) dense_2 = tf.keras.layers.Dense(3)(dense_1) circuit_output = sampled_expectation.SampledExpectation( - backend=backend)(circuit_inp, - symbol_names=symbols, - symbol_values=dense_2, - operators=op_inp, - repetitions=n_inp) + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_inp, + symbol_names=symbols, + symbol_values=dense_2, + operators=op_inp, + repetitions=n_inp, + initializer=initializer) functional_model = tf.keras.Model( inputs=[circuit_inp, data_inp, op_inp, n_inp], @@ -444,13 +596,27 @@ def test_simple_op_and_param_input(self, backend): epochs=20) self.assertAllClose(history.history['loss'][-1], 0, atol=3) - @parameterized.parameters([{'backend': 'noisy'}, {'backend': 'noiseless'}]) - def test_dnn_qnn_dnn(self, backend): + @parameterized.parameters([{ + 'backend': 'noisy', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': False, + }, { + 'backend': 'noiseless', + 'use_cuquantum': True, + }]) + def test_dnn_qnn_dnn(self, backend, use_cuquantum): """Train a fully hybrid network using an SampledExpectation layer. Train the network to output +-5 given an input of 1 or 0. This tests that everything works when SampledExpectation layer is a middle layers. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") + tf.random.set_seed(RANDOM_SEED) + initializer = tf.keras.initializers.RandomUniform(0, 2 * np.pi) bit = cirq.GridQubit(0, 0) symbols = sympy.symbols('x, y, z') circuits = util.convert_to_tensor([ @@ -464,12 +630,15 @@ def test_dnn_qnn_dnn(self, backend): circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string) d1 = tf.keras.layers.Dense(10)(classical_input) d2 = tf.keras.layers.Dense(3)(d1) - quantum = sampled_expectation.SampledExpectation(backend=backend)( - circuit_input, - symbol_names=symbols, - symbol_values=d2, - operators=cirq.Z(bit), - repetitions=5000) + quantum = sampled_expectation.SampledExpectation( + backend=backend, + use_cuquantum=use_cuquantum, + )(circuit_input, + symbol_names=symbols, + symbol_values=d2, + operators=cirq.Z(bit), + repetitions=5000, + initializer=initializer) d3 = tf.keras.layers.Dense(1)(quantum) model = tf.keras.Model(inputs=[circuit_input, classical_input], diff --git a/tensorflow_quantum/python/layers/circuit_executors/state.py b/tensorflow_quantum/python/layers/circuit_executors/state.py index f2b213ee1..456a83463 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/state.py +++ b/tensorflow_quantum/python/layers/circuit_executors/state.py @@ -16,6 +16,7 @@ import tensorflow as tf from tensorflow_quantum.core.ops import circuit_execution_ops +from tensorflow_quantum.python import quantum_context from tensorflow_quantum.python.layers.circuit_executors import input_checks @@ -112,7 +113,7 @@ class State(tf.keras.layers.Layer): """ - def __init__(self, backend=None, **kwargs): + def __init__(self, backend=None, use_cuquantum=False, **kwargs): """Instantiate a State Layer. 
Create a layer that will simulate a quantum state and output it into @@ -126,18 +127,35 @@ def __init__(self, backend=None, **kwargs): `cirq.SimulatesFinalState`. Note that C++ Density Matrix simulation is not yet supported so to do Density Matrix simulation please use `cirq.DensityMatrixSimulator`. + use_cuquantum: Calls TFQ GPU version op. """ super().__init__(**kwargs) - self.state_op = circuit_execution_ops.get_state_op(backend) + + used_op = None + if backend == 'noiseless' or backend is None: + mode = quantum_context.get_quantum_concurrent_op_mode() + quantum_concurrent = False if use_cuquantum else mode + used_op = circuit_execution_ops.get_state_op( + backend=None, + use_cuquantum=use_cuquantum, + quantum_concurrent=quantum_concurrent, + ) + elif backend == 'noisy': + raise ValueError('noisy backend is not supported in State layer.') + else: + used_op = circuit_execution_ops.get_state_op(backend=backend) + + self.state_op = used_op def call(self, inputs, *, symbol_names=None, symbol_values=None): """Keras call function. - Input options: - `inputs`, `symbol_names`, `symbol_values`: - see `input_checks.expand_circuits` + Args: + inputs: See `input_checks.expand_circuits. + symbol_names: See `input_checks.expand_circuits. + symbol_values: See `input_checks.expand_circuits. - Output shape: + Returns: `tf.RaggedTensor` with shape: [batch size of symbol_values, ] or diff --git a/tensorflow_quantum/python/layers/circuit_executors/state_test.py b/tensorflow_quantum/python/layers/circuit_executors/state_test.py index 8904014b7..21286cfb6 100644 --- a/tensorflow_quantum/python/layers/circuit_executors/state_test.py +++ b/tensorflow_quantum/python/layers/circuit_executors/state_test.py @@ -27,6 +27,7 @@ import tensorflow as tf import cirq +from tensorflow_quantum.core.ops import circuit_execution_ops from tensorflow_quantum.python.layers.circuit_executors import state from tensorflow_quantum.python import util @@ -46,15 +47,24 @@ def test_state_create(self): state.State('junk') @parameterized.parameters([{ - 'backend': None + 'backend': None, + 'use_cuquantum': False, }, { - 'backend': cirq.Simulator() + 'backend': None, + 'use_cuquantum': True, }, { - 'backend': cirq.DensityMatrixSimulator() + 'backend': cirq.Simulator(), + 'use_cuquantum': False, + }, { + 'backend': cirq.DensityMatrixSimulator(), + 'use_cuquantum': False, }]) - def test_state_invalid_combinations(self, backend): + def test_state_invalid_combinations(self, backend, use_cuquantum): """Test with valid type inputs and valid value, but incorrect combo.""" - state_calc = state.State(backend) + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. 
Ignoring gpu tests...") + state_calc = state.State(backend, use_cuquantum=use_cuquantum) symbol = sympy.Symbol('alpha') circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))**symbol) with self.assertRaisesRegex(Exception, expected_regex=""): @@ -110,18 +120,26 @@ def test_sample_outputs_simple(self): @parameterized.parameters([ { - 'backend_output': (None, WF_OUTPUT) + 'backend_output': (None, WF_OUTPUT), + 'use_cuquantum': False, + }, + { + 'backend_output': (None, WF_OUTPUT), + 'use_cuquantum': True, }, { - 'backend_output': (cirq.sim.sparse_simulator.Simulator(), WF_OUTPUT) + 'backend_output': + (cirq.sim.sparse_simulator.Simulator(), WF_OUTPUT), + 'use_cuquantum': False, }, { 'backend_output': (cirq.sim.density_matrix_simulator.DensityMatrixSimulator(), - DM_OUTPUT) + DM_OUTPUT), + 'use_cuquantum': False, }, ]) - def test_state_output(self, backend_output): + def test_state_output(self, backend_output, use_cuquantum): """Check that any output type is as expected. This layer only allows for 2 different outputs, depending on whether a @@ -129,9 +147,15 @@ def test_state_output(self, backend_output): post processing done inside the layers should not cause output from the layer to structurally deviate from what is expected. """ + if use_cuquantum and not circuit_execution_ops.is_gpu_configured(): + # GPU is not set. Ignores this sub-test. + self.skipTest("GPU is not set. Ignoring gpu tests...") backend = backend_output[0] output = backend_output[1] - state_executor = state.State(backend=backend) + state_executor = state.State( + backend=backend, + use_cuquantum=use_cuquantum, + ) bits = cirq.GridQubit.rect(1, 2) circuit = cirq.Circuit() circuit.append(cirq.H.on(bits[0]))