Skip to content

Commit

Permalink
Merge branch 'develop' into fix-default-imports
Browse files Browse the repository at this point in the history
  • Loading branch information
agriyakhetarpal authored Nov 14, 2023
2 parents 2213407 + bfddc83 commit d685c38
Show file tree
Hide file tree
Showing 14 changed files with 110 additions and 113 deletions.
4 changes: 3 additions & 1 deletion .github/release_workflow.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Release workflow

This file contains the workflow required to make a `PyBaMM` release on GitHub and PyPI by the maintainers.
This file contains the workflow required to make a `PyBaMM` release on GitHub, PyPI, and conda-forge by the maintainers.

## rc0 releases (automated)

Expand Down Expand Up @@ -77,3 +77,5 @@ Some other essential things to check throughout the release process -
git tag -f <tag_name> <commit_hash>
git push -f <pybamm-team/PyBaMM_remote_name> <tag_name> # can only be carried out by the maintainers
```
- If changes are made to the API, console scripts, entry points, new optional dependencies are added, support for major Python versions is dropped or added, or core project information and metadata are modified at the time of the release, make sure to update the `meta.yaml` file in the `recipe/` folder of the [conda-forge/pybamm-feedstock](https://github.com/conda-forge/pybamm-feedstock) repository accordingly by following the instructions in the [conda-forge documentation](https://conda-forge.org/docs/maintainer/updating_pkgs.html#updating-the-feedstock-repository) and re-rendering the recipe
- The conda-forge release workflow will automatically be triggered following a stable PyPI release, and the aforementioned updates should be carried out directly in the main repository by pushing changes to the automated PR created by the conda-forge-bot. A manual PR can also be created if the updates are not included in the automated PR for some reason. This manual PR **must** bump the build number in `meta.yaml` and **must** be from a personal fork of the repository.
12 changes: 6 additions & 6 deletions .github/workflows/test_on_push.yml
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
if: matrix.os == 'ubuntu-latest'
with:
packages: gfortran gcc graphviz pandoc
Expand Down Expand Up @@ -130,7 +130,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
with:
packages: gfortran gcc graphviz pandoc
execute_install_scripts: true
Expand Down Expand Up @@ -193,7 +193,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
if: matrix.os == 'ubuntu-latest'
with:
packages: gfortran gcc graphviz pandoc
Expand Down Expand Up @@ -274,7 +274,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
with:
packages: gfortran gcc graphviz pandoc
execute_install_scripts: true
Expand Down Expand Up @@ -319,7 +319,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
with:
packages: gfortran gcc graphviz pandoc
execute_install_scripts: true
Expand Down Expand Up @@ -377,7 +377,7 @@ jobs:

# Install and cache apt packages
- name: Install Linux system dependencies
uses: awalsh128/[email protected].0
uses: awalsh128/[email protected].1
with:
packages: gfortran gcc graphviz
execute_install_scripts: true
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ ci:

repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: "v0.1.3"
rev: "v0.1.4"
hooks:
- id: ruff
args: [--fix, --show-fixes]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@
"%pip install \"pybamm[plot,cite]\" -q # install PyBaMM if it is not installed\n",
"import pybamm\n",
"import os\n",
"import matplotlib.pyplot as plt\n",
"os.chdir(pybamm.__path__[0]+'/..')"
]
},
Expand Down
12 changes: 8 additions & 4 deletions docs/source/examples/notebooks/models/pouch-cell-model.ipynb

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions pybamm/expression_tree/binary_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,12 @@
def _preprocess_binary(left, right):
if isinstance(left, numbers.Number):
left = pybamm.Scalar(left)
if isinstance(right, numbers.Number):
right = pybamm.Scalar(right)
elif isinstance(left, np.ndarray):
if left.ndim > 1:
raise ValueError("left must be a 1D array")
left = pybamm.Vector(left)
if isinstance(right, numbers.Number):
right = pybamm.Scalar(right)
elif isinstance(right, np.ndarray):
if right.ndim > 1:
raise ValueError("right must be a 1D array")
Expand Down
6 changes: 4 additions & 2 deletions pybamm/expression_tree/broadcasts.py
Original file line number Diff line number Diff line change
Expand Up @@ -546,8 +546,10 @@ def full_like(symbols, fill_value):
return array_type(entries, domains=sum_symbol.domains)

except NotImplementedError:
if sum_symbol.shape_for_testing == (1, 1) or sum_symbol.shape_for_testing == (
1,
if (
sum_symbol.shape_for_testing == (1, 1)
or sum_symbol.shape_for_testing == (1,)
or sum_symbol.domain == []
):
return pybamm.Scalar(fill_value)
if sum_symbol.evaluates_on_edges("primary"):
Expand Down
55 changes: 26 additions & 29 deletions pybamm/models/full_battery_models/lithium_ion/electrode_soh.py
Original file line number Diff line number Diff line change
Expand Up @@ -410,10 +410,7 @@ def solve(self, inputs):
# Calculate theoretical energy
# TODO: energy calc for MSMR
if self.options["open-circuit potential"] != "MSMR":
energy = pybamm.lithium_ion.electrode_soh.theoretical_energy_integral(
self.parameter_values,
sol_dict,
)
energy = self.theoretical_energy_integral(sol_dict)
sol_dict.update({"Maximum theoretical energy [W.h]": energy})
return sol_dict

Expand Down Expand Up @@ -829,6 +826,27 @@ def get_min_max_ocps(self):
sol = self.solve(inputs)
return [sol["Un(x_0)"], sol["Un(x_100)"], sol["Up(y_100)"], sol["Up(y_0)"]]

def theoretical_energy_integral(self, inputs, points=1000):
    """Compute the maximum theoretical energy [W.h] of the cell.

    Integrates the open-circuit voltage over the charge passed as the
    electrodes sweep from their fully-charged (x_100, y_100) to their
    fully-discharged (x_0, y_0) stoichiometries.

    Parameters
    ----------
    inputs : dict
        Must contain the stoichiometric limits "x_0", "x_100", "y_0",
        "y_100" and the positive-electrode capacity "Q_p".
    points : int, optional
        Number of quadrature points along the sweep (default 1000).

    Returns
    -------
    float
        Total theoretical energy of the cell in W.h.
    """
    # Stoichiometry grids: charged -> discharged for each electrode
    x_sto = np.linspace(inputs["x_100"], inputs["x_0"], num=points)
    y_sto = np.linspace(inputs["y_100"], inputs["y_0"], num=points)
    param = self.param
    T = param.T_amb_av(0)
    # Cell OCV at each point: positive minus negative open-circuit potential
    ocv = self.parameter_values.evaluate(
        param.p.prim.U(y_sto, T) - param.n.prim.U(x_sto, T)
    ).flatten()
    # Total charge passed over the sweep, split into equal increments
    Q_total = inputs["Q_p"] * (inputs["y_0"] - inputs["y_100"])
    dQ = Q_total / (points - 1)
    # Trapezoidal integration of V dQ yields the energy in W.h
    return np.trapz(ocv, dx=dQ)


def get_initial_stoichiometries(
initial_value,
Expand Down Expand Up @@ -972,7 +990,7 @@ def get_min_max_ocps(
return esoh_solver.get_min_max_ocps()


def theoretical_energy_integral(parameter_values, inputs, points=100):
def theoretical_energy_integral(parameter_values, param, inputs, points=100):
"""
Calculate maximum energy possible from a cell given OCV, initial soc, and final soc
given voltage limits, open-circuit potentials, etc defined by parameter_values
Expand All @@ -991,30 +1009,8 @@ def theoretical_energy_integral(parameter_values, inputs, points=100):
E
The total energy of the cell in Wh
"""
x_0 = inputs["x_0"]
y_0 = inputs["y_0"]
x_100 = inputs["x_100"]
y_100 = inputs["y_100"]
Q_p = inputs["Q_p"]
x_vals = np.linspace(x_100, x_0, num=points)
y_vals = np.linspace(y_100, y_0, num=points)
# Calculate OCV at each stoichiometry
param = pybamm.LithiumIonParameters()
y = pybamm.standard_spatial_vars.y
z = pybamm.standard_spatial_vars.z
T = pybamm.yz_average(param.T_amb(y, z, 0))
Vs = np.empty(x_vals.shape)
for i in range(x_vals.size):
Vs[i] = (
parameter_values.evaluate(param.p.prim.U(y_vals[i], T)).item()
- parameter_values.evaluate(param.n.prim.U(x_vals[i], T)).item()
)
# Calculate dQ
Q = Q_p * (y_0 - y_100)
dQ = Q / (points - 1)
# Integrate and convert to W-h
E = np.trapz(Vs, dx=dQ)
return E
esoh_solver = ElectrodeSOHSolver(parameter_values, param)
return esoh_solver.theoretical_energy_integral(inputs, points=points)


def calculate_theoretical_energy(
Expand Down Expand Up @@ -1045,6 +1041,7 @@ def calculate_theoretical_energy(
Q_p = parameter_values.evaluate(pybamm.LithiumIonParameters().p.prim.Q_init)
E = theoretical_energy_integral(
parameter_values,
pybamm.LithiumIonParameters(),
{"x_100": x_100, "x_0": x_0, "y_100": y_100, "y_0": y_0, "Q_p": Q_p},
points=points,
)
Expand Down
5 changes: 2 additions & 3 deletions pybamm/models/submodels/thermal/lumped.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,10 +56,9 @@ def set_rhs(self, variables):
# Newton cooling, accounting for surface area to volume ratio
cell_surface_area = self.param.A_cooling
cell_volume = self.param.V_cell
total_cooling_coefficient = (
-self.param.h_total * cell_surface_area / cell_volume
Q_cool_vol_av = (
-self.param.h_total * (T_vol_av - T_amb) * cell_surface_area / cell_volume
)
Q_cool_vol_av = total_cooling_coefficient * (T_vol_av - T_amb)

self.rhs = {
T_vol_av: (Q_vol_av + Q_cool_vol_av) / self.param.rho_c_p_eff(T_vol_av)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,33 +58,29 @@ def set_rhs(self, variables):
y = pybamm.standard_spatial_vars.y
z = pybamm.standard_spatial_vars.z

# Account for surface area to volume ratio of pouch cell in surface and side
# cooling terms
cell_volume = self.param.L * self.param.L_y * self.param.L_z

# Calculate cooling, accounting for surface area to volume ratio of pouch cell
edge_area = self.param.L_z * self.param.L
yz_surface_area = self.param.L_y * self.param.L_z
yz_surface_cooling_coefficient = (
cell_volume = self.param.L * self.param.L_y * self.param.L_z
Q_yz_surface = (
-(self.param.n.h_cc(y, z) + self.param.p.h_cc(y, z))
* (T_av - T_amb)
* yz_surface_area
/ cell_volume
)

side_edge_area = self.param.L_z * self.param.L
side_edge_cooling_coefficient = (
Q_edge = (
-(self.param.h_edge(0, z) + self.param.h_edge(self.param.L_y, z))
* side_edge_area
* (T_av - T_amb)
* edge_area
/ cell_volume
)

total_cooling_coefficient = (
yz_surface_cooling_coefficient + side_edge_cooling_coefficient
)
Q_cool_total = Q_yz_surface + Q_edge

self.rhs = {
T_av: (
pybamm.div(self.param.lambda_eff(T_av) * pybamm.grad(T_av))
+ Q_av
+ total_cooling_coefficient * (T_av - T_amb)
+ Q_cool_total
)
/ self.param.rho_c_p_eff(T_av)
}
Expand All @@ -94,7 +90,7 @@ def set_boundary_conditions(self, variables):
T_amb = variables["Ambient temperature [K]"]
T_av = variables["X-averaged cell temperature [K]"]

# find tab locations (top vs bottom)
# Find tab locations (top vs bottom)
L_y = param.L_y
L_z = param.L_z
neg_tab_z = param.n.centre_z_tab
Expand All @@ -104,11 +100,10 @@ def set_boundary_conditions(self, variables):
pos_tab_top_bool = pybamm.Equality(pos_tab_z, L_z)
pos_tab_bottom_bool = pybamm.Equality(pos_tab_z, 0)

# calculate tab vs non-tab area on top and bottom
# Calculate tab vs non-tab area on top and bottom
neg_tab_area = param.n.L_tab * param.n.L_cc
pos_tab_area = param.p.L_tab * param.p.L_cc
total_area = param.L * param.L_y

non_tab_top_area = (
total_area
- neg_tab_area * neg_tab_top_bool
Expand All @@ -120,18 +115,22 @@ def set_boundary_conditions(self, variables):
- pos_tab_area * pos_tab_bottom_bool
)

# calculate effective cooling coefficients
# Calculate heat fluxes weighted by area
# Note: can't do y-average of h_edge here since y isn't meshed. Evaluate at
# midpoint.
top_cooling_coefficient = (
param.n.h_tab * neg_tab_area * neg_tab_top_bool
+ param.p.h_tab * pos_tab_area * pos_tab_top_bool
+ param.h_edge(L_y / 2, L_z) * non_tab_top_area
q_tab_n = -param.n.h_tab * (T_av - T_amb)
q_tab_p = -param.p.h_tab * (T_av - T_amb)
q_edge_top = -param.h_edge(L_y / 2, L_z) * (T_av - T_amb)
q_edge_bottom = -param.h_edge(L_y / 2, 0) * (T_av - T_amb)
q_top = (
q_tab_n * neg_tab_area * neg_tab_top_bool
+ q_tab_p * pos_tab_area * pos_tab_top_bool
+ q_edge_top * non_tab_top_area
) / total_area
bottom_cooling_coefficient = (
param.n.h_tab * neg_tab_area * neg_tab_bottom_bool
+ param.p.h_tab * pos_tab_area * pos_tab_bottom_bool
+ param.h_edge(L_y / 2, 0) * non_tab_bottom_area
q_bottom = (
q_tab_n * neg_tab_area * neg_tab_bottom_bool
+ q_tab_p * pos_tab_area * pos_tab_bottom_bool
+ q_edge_bottom * non_tab_bottom_area
) / total_area

# just use left and right for clarity
Expand All @@ -141,21 +140,14 @@ def set_boundary_conditions(self, variables):
self.boundary_conditions = {
T_av: {
"left": (
pybamm.boundary_value(
bottom_cooling_coefficient * (T_av - T_amb),
"left",
)
/ pybamm.boundary_value(lambda_eff, "left"),
pybamm.boundary_value(-q_bottom / lambda_eff, "left"),
"Neumann",
),
"right": (
pybamm.boundary_value(
-top_cooling_coefficient * (T_av - T_amb), "right"
)
/ pybamm.boundary_value(lambda_eff, "right"),
pybamm.boundary_value(q_top / lambda_eff, "right"),
"Neumann",
),
}
},
}

def set_initial_conditions(self, variables):
Expand Down
Loading

0 comments on commit d685c38

Please sign in to comment.