Skip to content

Commit

Permalink
Revert "added new cost functions"
Browse files Browse the repository at this point in the history
This reverts commit 08f8977.
  • Loading branch information
wrightjandrew committed Mar 16, 2024
1 parent bb121d2 commit 3be5e43
Show file tree
Hide file tree
Showing 3 changed files with 5 additions and 78 deletions.
30 changes: 2 additions & 28 deletions src/qibo/models/dbi/double_bracket.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,15 +35,6 @@ class DoubleBracketScheduling(Enum):
polynomial_approximation = polynomial_step
"""Use polynomial expansion (analytical) of the loss function."""

class DoubleBracketCostFunction(Enum):
    """Available cost functions for the double-bracket iteration (DBI)."""

    off_diagonal_norm = auto()
    """Cost based on the off-diagonal norm."""
    least_squares = auto()
    """Cost based on a least-squares distance."""
    energy_fluctuation = auto()
    """Cost based on the energy fluctuation."""

class DoubleBracketIteration:
"""
Expand Down Expand Up @@ -74,15 +65,11 @@ def __init__(
hamiltonian: Hamiltonian,
mode: DoubleBracketGeneratorType = DoubleBracketGeneratorType.canonical,
scheduling: DoubleBracketScheduling = DoubleBracketScheduling.grid_search,
cost: DoubleBracketCostFunction = DoubleBracketCostFunction.off_diagonal_norm,
state: int = 0,
):
self.h = hamiltonian
self.h0 = deepcopy(self.h)
self.mode = mode
self.scheduling = scheduling
self.cost = cost
self.state = state

def __call__(
self, step: float, mode: DoubleBracketGeneratorType = None, d: np.array = None
Expand Down Expand Up @@ -139,14 +126,6 @@ def off_diagonal_norm(self):
return np.sqrt(
np.real(np.trace(self.backend.to_numpy(off_diag_h_dag @ self.off_diag_h)))
)
def least_squares(self, d: np.array):
    """Least-squares cost function.

    Computes ``-(Tr(H^dagger D) - 1/2 (||H||^2 + ||D||^2))`` where ``H`` is the
    current Hamiltonian and ``D`` the candidate diagonal operator.

    NOTE(review): the original version was decorated with ``@property`` while
    also taking an argument ``d`` — property getters receive only ``self``, so
    the call site ``self.least_squares(d=d)`` could never work; the decorator
    is removed here to match the caller. The original also wrote ``0.5(...)``
    (calling a float → TypeError) and ``np.linalg.trace`` (not available in
    older numpy); both are fixed below.

    Args:
        d (np.array): diagonal operator to compare the Hamiltonian against.

    Returns:
        float: the (negated) least-squares distance; lower is better.
    """
    h_np = self.backend.to_numpy(self.h)
    d_np = self.backend.to_numpy(d)
    # Hermitian conjugate via .conj().T (np.matrix(...).getH() is deprecated).
    h_dag = h_np.conj().T
    # Take the real part: the trace of H^dagger D can carry a numerically tiny
    # imaginary component; the cost must be a real scalar for minimization.
    return -(
        np.real(np.trace(h_dag @ d_np))
        - 0.5 * (np.linalg.norm(h_np) ** 2 + np.linalg.norm(d_np) ** 2)
    )

@property
def backend(self):
Expand Down Expand Up @@ -187,13 +166,8 @@ def loss(self, step: float, d: np.array = None, look_ahead: int = 1):
for _ in range(look_ahead):
self.__call__(mode=self.mode, step=step, d=d)

# loss values depending on the cost function
if self.cost == DoubleBracketCostFunction.off_diagonal_norm:
loss = self.off_diagonal_norm
elif self.cost == DoubleBracketCostFunction.least_squares:
loss = self.least_squares(d=d)
else:
loss = self.energy_fluctuation(self.state)
# off_diagonal_norm's value after the steps
loss = self.off_diagonal_norm

# set back the initial configuration
self.h = h_copy
Expand Down
1 change: 0 additions & 1 deletion src/qibo/models/dbi/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,4 +174,3 @@ def off_diagonal_norm_polynomial_expansion_coef(dbi_object, d, n):
# coefficients from high to low (n:0)
coef = list(reversed(trace_coefficients[: n + 1]))
return coef

52 changes: 3 additions & 49 deletions src/qibo/models/dbi/utils_scheduling.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,6 @@

error = 1e-3

def variance(A, state):
    r"""Variance of the operator ``A`` in the basis state ``state``:
    Var($A$) = $\langle\mu|A^2|\mu\rangle-\langle\mu|A|\mu\rangle^2$.
    """
    second_moment = (A @ A)[state, state]
    first_moment = A[state, state]
    return second_moment - first_moment**2

def covariance(A, B, state):
    r"""Covariance of the operators ``A`` and ``B`` in the basis state ``state``:
    Cov($A,B$) = $\langle\mu|AB|\mu\rangle-\langle\mu|A|\mu\rangle\langle\mu|B|\mu\rangle$.
    """
    joint_moment = (A @ B)[state, state]
    return joint_moment - A[state, state] * B[state, state]

def grid_search_step(
dbi_object,
Expand Down Expand Up @@ -73,7 +64,7 @@ def hyperopt_step(
d: diagonal operator for generating double-bracket iterations.
Returns:
(float): optimized best iteration step (minimizing loss function).
(float): optimized best iteration step (minimizing off-diagonal norm).
"""
if space is None:
space = hyperopt.hp.uniform
Expand All @@ -99,7 +90,6 @@ def polynomial_step(
n_max: int = 5,
d: np.array = None,
coef: Optional[list] = None,
cost: str = None,
):
r"""
Optimizes iteration step by solving the n_th order polynomial expansion of the loss function.
Expand All @@ -110,8 +100,7 @@ def polynomial_step(
d (np.array, optional): diagonal operator, default as $\delta(H)$.
backup_scheduling (`DoubleBracketScheduling`): the scheduling method to use in case no real positive roots are found.
"""
if cost is None:
cost = dbi_object.cost

if d is None:
d = dbi_object.diagonal_h_matrix

Expand All @@ -120,15 +109,7 @@ def polynomial_step(
"No solution can be found with polynomial approximation. Increase `n_max` or use other scheduling methods."
)
if coef is None:
if cost == "off_diagonal_norm":
coef = off_diagonal_norm_polynomial_expansion_coef(dbi_object, d, n)
elif cost == "least_squares":
coef = least_squares_polynomial_expansion_coef(dbi_object, d, n)
elif cost == "energy_fluctuation":
coef = energy_fluctuation_polynomial_expansion_coef(dbi_object, d, n, dbi_object.state)
else:
raise ValueError(f"Cost function {cost} not recognized.")

coef = off_diagonal_norm_polynomial_expansion_coef(dbi_object, d, n)
roots = np.roots(coef)
real_positive_roots = [
np.real(root) for root in roots if np.imag(root) < error and np.real(root) > 0
Expand Down Expand Up @@ -162,30 +143,3 @@ def off_diagonal_norm_polynomial_expansion_coef(dbi_object, d, n):
# coefficients from high to low (n:0)
coef = list(reversed(trace_coefficients[: n + 1]))
return coef

def least_squares_polynomial_expansion_coef(dbi_object, d, n):
    """Polynomial-expansion coefficients of the least-squares loss w.r.t. the DBI step.

    NOTE(review): the original body also computed
    ``W = dbi_object.commutator(d, dbi_object.sigma(dbi_object.h.matrix))``
    but never used it; that dead (and potentially expensive) computation is
    removed here.

    Args:
        dbi_object: object exposing ``diagonal_h_matrix`` and ``generate_Gamma_list``.
        d (np.array): diagonal operator; defaults to ``dbi_object.diagonal_h_matrix``.
        n (int): expansion order.

    Returns:
        np.ndarray: ``n`` coefficients, ``coef[i] = Tr(d @ Gamma_{i+1}) / i!``.
    """
    if d is None:
        d = dbi_object.diagonal_h_matrix
    # generate Gamma's where $\Gamma_{k+1}=[W, \Gamma_{k}]$, $\Gamma_0=H$
    Gamma_list = dbi_object.generate_Gamma_list(n, d)
    exp_list = np.array([1 / math.factorial(k) for k in range(n + 1)])
    coef = np.empty(n)
    for i in range(n):
        # k-th Taylor factor paired with the (k+1)-th nested commutator.
        coef[i] = exp_list[i] * np.trace(d @ Gamma_list[i + 1])
    return coef

# TODO: add a general expansion formula not stopping at 3rd order
def energy_fluctuation_polynomial_expansion_coef(dbi_object, d, n, state):
    """Third-order polynomial-expansion coefficients of the energy fluctuation
    with respect to the DBI step, evaluated in the basis state ``state``.
    """
    if d is None:
        d = dbi_object.diagonal_h_matrix
    # Nested commutators: $\Gamma_{k+1}=[W, \Gamma_{k}]$, $\Gamma_0=H$.
    gammas = dbi_object.generate_Gamma_list(n, d)
    coef = np.empty(3)
    coef[0] = 2 * covariance(gammas[0], gammas[1], state)
    coef[1] = 2 * variance(gammas[1], state)
    coef[2] = covariance(gammas[0], gammas[3], state) + 3 * covariance(
        gammas[1], gammas[2], state
    )
    return coef

0 comments on commit 3be5e43

Please sign in to comment.