Skip to content

Commit

Permalink
refactor
Browse files Browse the repository at this point in the history
  • Loading branch information
natestemen committed Aug 9, 2024
1 parent fb2ce62 commit df71a39
Show file tree
Hide file tree
Showing 2 changed files with 28 additions and 30 deletions.
35 changes: 14 additions & 21 deletions mitiq/rem/inverse_confusion_matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,38 +14,31 @@


def sample_probability_vector(
    probability_vector: Sequence[float], samples: int
) -> list[str]:
    """Generate a number of samples from a probability distribution as
    bitstrings.

    Args:
        probability_vector: A probability vector of length ``2**n`` for an
            integer number of bits ``n``.
        samples: The number of samples to generate.

    Returns:
        A list of sampled bitstrings, each zero-padded to width
        ``log2(len(probability_vector))``.

    Raises:
        ValueError: If the length of ``probability_vector`` is not a power
            of 2, since each outcome index must map to a fixed-width
            bitstring.
    """
    num_values = len(probability_vector)
    if not np.log2(num_values).is_integer():
        raise ValueError(
            "The length of the probability vector must be a power of 2."
        )

    # Sample outcome indices according to the given distribution.
    sampled_indices = np.random.choice(
        num_values, size=samples, p=probability_vector
    )

    # Convert each sampled index to a zero-padded binary string.
    bit_width = int(np.log2(num_values))
    bitstrings = [format(index, f"0{bit_width}b") for index in sampled_indices]

    return bitstrings


Expand Down Expand Up @@ -132,7 +125,7 @@ def generate_tensored_inverse_confusion_matrix(

def closest_positive_distribution(
quasi_probabilities: npt.NDArray[np.float64],
) -> npt.NDArray[np.float64]:
) -> List[float]:
"""Given the input quasi-probability distribution returns the closest
positive probability distribution (with respect to the total variation
distance).
Expand Down Expand Up @@ -163,7 +156,7 @@ def distance(probabilities: npt.NDArray[np.float64]) -> np.float64:
raise ValueError(
"REM failed to determine the closest positive distribution."
)
return result.x
return result.x.tolist()


def mitigate_measurements(
Expand Down
23 changes: 14 additions & 9 deletions mitiq/rem/tests/test_inverse_confusion_matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,30 +22,35 @@
)


def test_sample_probability_vector_invalid_size():
    # A three-outcome distribution has no whole number of qubits, so
    # sampling must be rejected.
    invalid_distribution = [1 / 3, 1 / 3, 1 / 3]
    with pytest.raises(ValueError, match="power of 2"):
        sample_probability_vector(invalid_distribution, 3)


def test_sample_probability_vector_single_qubit():
    """Single-qubit distributions sample to the expected one-bit strings."""
    bitstrings = sample_probability_vector(np.array([1, 0]), 10)
    assert all(b == "0" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 1]), 10)
    assert all(b == "1" for b in bitstrings)

    # With a fixed seed, sampling the 50/50 distribution is deterministic.
    np.random.seed(0)
    bitstrings = sample_probability_vector(np.array([0.5, 0.5]), 1000)
    assert sum(int(b) for b in bitstrings) == 483


def test_sample_probability_vector_two_qubits():
    """Deterministic two-qubit distributions map to the correct two-bit
    strings."""
    bitstrings = sample_probability_vector(np.array([1, 0, 0, 0]), 10)
    assert all(b == "00" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 1, 0, 0]), 10)
    assert all(b == "01" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 0, 1, 0]), 10)
    assert all(b == "10" for b in bitstrings)

    bitstrings = sample_probability_vector(np.array([0, 0, 0, 1]), 10)
    assert all(b == "11" for b in bitstrings)


def test_bitstrings_to_probability_vector():
Expand Down Expand Up @@ -138,12 +143,12 @@ def test_generate_tensored_inverse_confusion_matrix(
num_qubits, confusion_matrices
)
else:
assert np.isclose(
assert np.allclose(
generate_tensored_inverse_confusion_matrix(
num_qubits, confusion_matrices
),
expected,
).all()
)


def test_mitigate_measurements():
Expand Down

0 comments on commit df71a39

Please sign in to comment.