Merge branch 'release/3.3.0' into main-3.X
mpu-creare committed Dec 27, 2023
2 parents 9553f44 + fcbf85f commit 538f65c
Showing 47 changed files with 64 additions and 5,740 deletions.
10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,15 @@
# Changelog

## 3.3.0

### Features
* Custom coordinate dimensions are now supported for downstream applications: register a name with `podpac.utils.add_valid_dimension("my_dimension_name")` and then use it like any built-in dimension.
* `np.timedelta64` is now accepted as a valid type for coordinates.

### Maintenance
* Removed the `datalib` module and split it into its own package, `podpacdatalib`.

## 3.2.1
### Bugfixes
* Fixed documentation build
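A minimal usage sketch of the custom-dimension feature noted in the changelog above. The dimension name `"pressure"` and the coordinate values are illustrative only; the registration call is the one documented in the changelog, while the `podpac.Coordinates` construction assumes a registered name behaves like the built-in dimensions.

```python
import podpac

# Register a custom dimension name (3.3.0 feature); "pressure" is just an example.
podpac.utils.add_valid_dimension("pressure")

# After registration the name can be used alongside the standard dims
# (lat, lon, time, alt) when building coordinates.
coords = podpac.Coordinates(
    [[1000.0, 850.0, 500.0], podpac.crange("2023-01-01", "2023-01-03", "1,D")],
    dims=["pressure", "time"],
)
print(coords.shape)  # (3, 3) -- one axis per dimension
```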
2 changes: 1 addition & 1 deletion doc/source/conf.py
@@ -85,7 +85,7 @@
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
24 changes: 12 additions & 12 deletions podpac/core/algorithm/stats.py
@@ -80,7 +80,7 @@ def chunk_size(self):

chunk_size = podpac.settings["CHUNK_SIZE"]
if chunk_size == "auto":
return 1024**2 # TODO
return 1024 ** 2 # TODO
else:
return chunk_size

@@ -611,7 +611,7 @@ def reduce_chunked(self, xs, output):
Nx = np.isfinite(x).sum(dim=self._dims)
M1x = x.mean(dim=self._dims)
Ex = x - M1x
Ex2 = Ex**2
Ex2 = Ex ** 2
Ex3 = Ex2 * Ex
M2x = (Ex2).sum(dim=self._dims)
M3x = (Ex3).sum(dim=self._dims)
@@ -632,13 +632,13 @@ def reduce_chunked(self, xs, output):
n = Nb + Nx
NNx = Nb * Nx

M3.data[b] += M3x + d**3 * NNx * (Nb - Nx) / n**2 + 3 * d * (Nb * M2x - Nx * M2b) / n
M2.data[b] += M2x + d**2 * NNx / n
M3.data[b] += M3x + d ** 3 * NNx * (Nb - Nx) / n ** 2 + 3 * d * (Nb * M2x - Nx * M2b) / n
M2.data[b] += M2x + d ** 2 * NNx / n
M1.data[b] += d * Nx / n
N.data[b] = n

# calculate skew
skew = np.sqrt(N) * M3 / np.sqrt(M2**3)
skew = np.sqrt(N) * M3 / np.sqrt(M2 ** 3)
return skew
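
The hunks above (and the kurtosis hunks below) only change exponent spacing, but the underlying logic is a streaming merge of per-chunk central-moment sums. A self-contained sketch of that merge, using plain NumPy arrays rather than podpac nodes (the helper names are illustrative), shows how the `M2`/`M3` update lines combine chunks and reproduce the final `sqrt(N) * M3 / sqrt(M2**3)` skewness:

```python
import numpy as np
from scipy import stats  # only used to cross-check the chunked result

def chunk_moments(x):
    """Count, mean, and 2nd/3rd central-moment sums for one chunk."""
    n = x.size
    m1 = x.mean()
    e = x - m1
    return n, m1, (e**2).sum(), (e**3).sum()

def merge(a, b):
    """Combine two (n, mean, M2, M3) summaries, mirroring the update lines above."""
    na, m1a, M2a, M3a = a
    nb, m1b, M2b, M3b = b
    n = na + nb
    d = m1b - m1a
    M3 = M3a + M3b + d**3 * na * nb * (na - nb) / n**2 + 3 * d * (na * M2b - nb * M2a) / n
    M2 = M2a + M2b + d**2 * na * nb / n
    return n, m1a + d * nb / n, M2, M3

rng = np.random.default_rng(0)
data = rng.lognormal(size=10_000)        # skewed data so the statistic is non-trivial
chunks = np.array_split(data, 7)

summary = chunk_moments(chunks[0])
for c in chunks[1:]:
    summary = merge(summary, chunk_moments(c))

n, _, M2, M3 = summary
skew_chunked = np.sqrt(n) * M3 / np.sqrt(M2**3)
print(np.isclose(skew_chunked, stats.skew(data)))  # expected: True
```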


@@ -697,9 +697,9 @@ def reduce_chunked(self, xs, output):
Nx = np.isfinite(x).sum(dim=self._dims)
M1x = x.mean(dim=self._dims)
Ex = x - M1x
Ex2 = Ex**2
Ex2 = Ex ** 2
Ex3 = Ex2 * Ex
Ex4 = Ex2**2
Ex4 = Ex2 ** 2
M2x = (Ex2).sum(dim=self._dims)
M3x = (Ex3).sum(dim=self._dims)
M4x = (Ex4).sum(dim=self._dims)
@@ -724,18 +724,18 @@

M4.data[b] += (
M4x
+ d**4 * NNx * (Nb**2 - NNx + Nx**2) / n**3
+ 6 * d**2 * (Nb**2 * M2x + Nx**2 * M2b) / n**2
+ d ** 4 * NNx * (Nb ** 2 - NNx + Nx ** 2) / n ** 3
+ 6 * d ** 2 * (Nb ** 2 * M2x + Nx ** 2 * M2b) / n ** 2
+ 4 * d * (Nb * M3x - Nx * M3b) / n
)

M3.data[b] += M3x + d**3 * NNx * (Nb - Nx) / n**2 + 3 * d * (Nb * M2x - Nx * M2b) / n
M2.data[b] += M2x + d**2 * NNx / n
M3.data[b] += M3x + d ** 3 * NNx * (Nb - Nx) / n ** 2 + 3 * d * (Nb * M2x - Nx * M2b) / n
M2.data[b] += M2x + d ** 2 * NNx / n
M1.data[b] += d * Nx / n
N.data[b] = n

# calculate kurtosis
kurtosis = N * M4 / M2**2 - 3
kurtosis = N * M4 / M2 ** 2 - 3
return kurtosis
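
The kurtosis path is the same chunk merge extended with a fourth-moment sum. A sketch of just that extra term, using the same illustrative summary-tuple convention as the skew sketch above:

```python
def merge4(a, b):
    """Merge (n, mean, M2, M3, M4) summaries; the M4 line mirrors the update above."""
    na, m1a, M2a, M3a, M4a = a
    nb, m1b, M2b, M3b, M4b = b
    n = na + nb
    d = m1b - m1a
    M4 = (M4a + M4b
          + d**4 * na * nb * (na**2 - na * nb + nb**2) / n**3
          + 6 * d**2 * (na**2 * M2b + nb**2 * M2a) / n**2
          + 4 * d * (na * M3b - nb * M3a) / n)
    M3 = M3a + M3b + d**3 * na * nb * (na - nb) / n**2 + 3 * d * (na * M2b - nb * M2a) / n
    M2 = M2a + M2b + d**2 * na * nb / n
    return n, m1a + d * nb / n, M2, M3, M4

# excess kurtosis from the merged sums, matching `N * M4 / M2**2 - 3` above:
# kurtosis = n * M4 / M2**2 - 3
```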


8 changes: 3 additions & 5 deletions podpac/core/coordinates/array_coordinates1d.py
@@ -306,8 +306,6 @@ def dtype(self):
return float
elif np.issubdtype(self.coordinates.dtype, np.datetime64):
return np.datetime64
elif np.issubdtype(self.coordinates.dtype, np.timedelta64):
return np.timedelta64

@property
def is_monotonic(self):
@@ -341,7 +339,7 @@ def bounds(self):
lo, hi = np.nan, np.nan
elif self.is_monotonic:
lo, hi = sorted([self.coordinates[0], self.coordinates[-1]])
elif (self.dtype is np.datetime64) or (self.dtype == np.timedelta64):
elif self.dtype is np.datetime64:
lo, hi = np.min(self.coordinates), np.max(self.coordinates)
else:
lo, hi = np.nanmin(self.coordinates), np.nanmax(self.coordinates)
@@ -438,14 +436,14 @@ def _select(self, bounds, return_index, outer):
try:
gt = self.coordinates >= max(self.coordinates[self.coordinates <= bounds[0]])
except ValueError as e:
if (self.dtype == np.datetime64) or (self.dtype == np.timedelta64):
if self.dtype == np.datetime64:
gt = ~np.isnat(self.coordinates)
else:
gt = self.coordinates >= -np.inf
try:
lt = self.coordinates <= min(self.coordinates[self.coordinates >= bounds[1]])
except ValueError as e:
if self.dtype == np.datetime64 or (self.dtype == np.timedelta64):
if self.dtype == np.datetime64:
lt = ~np.isnat(self.coordinates)
else:
lt = self.coordinates <= np.inf
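For context on the `dtype`/`bounds` branches touched above, a short sketch of datetime64 coordinates. Constructing from date strings follows podpac's usual coordinate parsing; the exact printed reprs may differ:

```python
import numpy as np
from podpac.core.coordinates.array_coordinates1d import ArrayCoordinates1d

# Unsorted datetime64 values: not monotonic, so `bounds` falls through to the
# datetime64 branch and uses np.min/np.max rather than np.nanmin/np.nanmax.
c = ArrayCoordinates1d(["2020-01-03", "2020-01-01", "2020-01-02"], name="time")
print(c.dtype)          # np.datetime64
print(c.is_monotonic)   # False
print(c.bounds)         # [2020-01-01, 2020-01-03] as datetime64
```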
2 changes: 1 addition & 1 deletion podpac/core/coordinates/polar_coordinates.py
@@ -58,7 +58,7 @@ def __init__(self, center, radius, theta=None, theta_size=None, dims=None):
def _validate_dims(self, d):
val = d["value"]
for dim in val:
if dim not in ["lat", "lon"]: # Hardcoding example. What is actually trying to be accomplished?
if dim not in ["lat", "lon"]: # Hardcoding example. What is actually trying to be accomplished?
raise ValueError("PolarCoordinates dims must be 'lat' or 'lon', not '%s'" % dim)
if val[0] == val[1]:
raise ValueError("Duplicate dimension '%s'" % val[0])
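A sketch of the dims check above. The constructor signature comes from the hunk header; the import path and argument values are assumptions:

```python
from podpac.core.coordinates.polar_coordinates import PolarCoordinates

# dims must be exactly 'lat' and 'lon' (in either order)
pc = PolarCoordinates(center=[40.0, -105.0], radius=[1.0, 2.0], theta_size=8, dims=["lat", "lon"])

# anything else raises ValueError, per the validator shown above
try:
    PolarCoordinates(center=[40.0, -105.0], radius=[1.0], theta_size=4, dims=["alt", "lon"])
except ValueError as e:
    print(e)  # PolarCoordinates dims must be 'lat' or 'lon', not 'alt'
```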
102 changes: 0 additions & 102 deletions podpac/core/coordinates/test/test_array_coordinates1d.py
@@ -275,108 +275,6 @@ def test_datetime_array(self):
assert c.step == np.timedelta64(-365, "D")
repr(c)

def test_timedelta_array(self):
# unsorted
values = [np.timedelta64(0, "h"), np.timedelta64(3, "h"), np.timedelta64(1, "h"), np.timedelta64(2, "h")]
a = np.array(values).astype(np.timedelta64)
c = ArrayCoordinates1d(values)
assert_equal(c.coordinates, a)
assert_equal(c.bounds, np.array([np.timedelta64(0, "h"), np.timedelta64(3, "h")]))
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 4
assert c.shape == (4,)
assert c.dtype == np.timedelta64
assert c.deltatype == np.timedelta64
assert c.is_monotonic == False
assert c.is_descending == False
assert c.is_uniform == False
assert c.start is None
assert c.stop is None
assert c.step is None
repr(c)

# sorted ascending
values = [np.timedelta64(-1, "h"), np.timedelta64(1, "h"), np.timedelta64(2, "h"), np.timedelta64(3, "h")]
a = np.array(values)
c = ArrayCoordinates1d(values)
assert_equal(c.coordinates, a)
assert_equal(c.bounds, np.array([np.timedelta64(-1, "h"), np.timedelta64(3, "h")]))
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 4
assert c.shape == (4,)
assert c.dtype == np.timedelta64
assert c.deltatype == np.timedelta64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == False
assert c.start is None
assert c.stop is None
assert c.step is None
repr(c)

# sorted descending
values = [np.timedelta64(-1, "h"), np.timedelta64(1, "h"), np.timedelta64(2, "h"), np.timedelta64(3, "h")]
values = values[::-1]
a = np.array(values)
c = ArrayCoordinates1d(values)
assert_equal(c.coordinates, a)
assert_equal(c.bounds, np.array([np.timedelta64(-1, "h"), np.timedelta64(3, "h")]))
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 4
assert c.shape == (4,)
assert c.dtype == np.timedelta64
assert c.deltatype == np.timedelta64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == False
assert c.start is None
assert c.stop is None
assert c.step is None
repr(c)

# uniform ascending
values = [np.timedelta64(0, "h"), np.timedelta64(1, "h"), np.timedelta64(2, "h"), np.timedelta64(3, "h")]
a = np.array(values)
c = ArrayCoordinates1d(values)
assert_equal(c.coordinates, a)
assert_equal(c.bounds, np.array([np.timedelta64(0, "h"), np.timedelta64(3, "h")]))
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 4
assert c.shape == (4,)
assert c.dtype == np.timedelta64
assert c.deltatype == np.timedelta64
assert c.is_monotonic == True
assert c.is_descending == False
assert c.is_uniform == True
assert c.start == np.timedelta64(0, "h")
assert c.stop == np.timedelta64(3, "h")
assert c.step == np.timedelta64(1, "h")
repr(c)

# uniform descending
values = [np.timedelta64(0, "h"), np.timedelta64(1, "h"), np.timedelta64(2, "h"), np.timedelta64(3, "h")][::-1]
a = np.array(values)
c = ArrayCoordinates1d(values)
assert_equal(c.coordinates, a)
assert_equal(c.bounds, np.array([np.timedelta64(0, "h"), np.timedelta64(3, "h")]))
assert c.coordinates[c.argbounds[0]] == c.bounds[0]
assert c.coordinates[c.argbounds[1]] == c.bounds[1]
assert c.size == 4
assert c.shape == (4,)
assert c.dtype == np.timedelta64
assert c.deltatype == np.timedelta64
assert c.is_monotonic == True
assert c.is_descending == True
assert c.is_uniform == True
assert c.start == np.timedelta64(3, "h")
assert c.stop == np.timedelta64(0, "h")
assert c.step == np.timedelta64(-1, "h")
repr(c)

def test_numerical_shaped(self):
values = [[1.0, 2.0, 3.0], [11.0, 12.0, 13.0]]
c = ArrayCoordinates1d(values)
10 changes: 0 additions & 10 deletions podpac/core/coordinates/test/test_uniform_coordinates1d.py
@@ -114,16 +114,6 @@ def test_datetime(self):
assert c.is_descending == True
assert c.is_uniform == True

def test_timedelta(self):
c = UniformCoordinates1d(np.timedelta64(0, "h"), np.timedelta64(5, "h"), np.timedelta64(1, "h"))
assert c.start == np.timedelta64(0, "h")
assert c.stop == np.timedelta64(5, "h")
assert c.step == np.timedelta64(1, "h")
c = UniformCoordinates1d(np.timedelta64(5, "h"), np.timedelta64(0, "h"), np.timedelta64(-1, "h"))
assert c.start == np.timedelta64(5, "h")
assert c.stop == np.timedelta64(0, "h")
assert c.step == np.timedelta64(-1, "h")

def test_datetime_inexact(self):
# ascending
c = UniformCoordinates1d("2018-01-01", "2018-01-06", "2,D")
12 changes: 4 additions & 8 deletions podpac/core/coordinates/uniform_coordinates1d.py
@@ -52,10 +52,10 @@ class UniformCoordinates1d(Coordinates1d):
:class:`Coordinates1d`, :class:`ArrayCoordinates1d`, :class:`crange`, :class:`clinspace`
"""

start = tl.Union([tl.Float(), tl.Instance(np.datetime64), tl.Instance(np.timedelta64)], read_only=True)
start = tl.Union([tl.Float(), tl.Instance(np.datetime64)], read_only=True)
start.__doc__ = ":float, datetime64: Start coordinate."

stop = tl.Union([tl.Float(), tl.Instance(np.datetime64), tl.Instance(np.timedelta64)], read_only=True)
stop = tl.Union([tl.Float(), tl.Instance(np.datetime64)], read_only=True)
stop.__doc__ = ":float, datetime64: Stop coordinate."

step = tl.Union([tl.Float(), tl.Instance(np.timedelta64)], read_only=True)
@@ -110,10 +110,6 @@ def __init__(self, start, stop, step=None, size=None, name=None, fix_stop_val=Fa
fstep = step
elif isinstance(start, np.datetime64) and isinstance(stop, np.datetime64) and isinstance(step, np.timedelta64):
fstep = step.astype(float)
elif (
isinstance(start, np.timedelta64) and isinstance(stop, np.timedelta64) and isinstance(step, np.timedelta64)
):
fstep = step.astype(float)
else:
raise TypeError(
"UniformCoordinates1d mismatching types (start '%s', stop '%s', step '%s')."
@@ -290,7 +286,7 @@ def __contains__(self, item):
if item < self.bounds[0] or item > self.bounds[1]:
return False

if (self.dtype == np.datetime64) or (self.dtype == np.timedelta64):
if self.dtype == np.datetime64:
return timedelta_divisible(item - self.start, self.step)
else:
return (item - self.start) % self.step == 0
@@ -503,7 +499,7 @@ def issubset(self, other):
if self.size == 1:
return True

if (self.dtype == np.datetime64) or (self.dtype == np.timedelta64):
if self.dtype == np.datetime64:
return timedelta_divisible(self.step, other.step)
else:
return self.step % other.step == 0
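The `__contains__` and `issubset` hunks above both route datetime64 coordinates through `timedelta_divisible` instead of the numeric `%` check. A small sketch; the string start/stop and `"N,D"` step format follow the usage in the test file shown earlier, and the printed results are the expected ones rather than verified output:

```python
import numpy as np
from podpac.core.coordinates.uniform_coordinates1d import UniformCoordinates1d

daily = UniformCoordinates1d("2020-01-01", "2020-01-31", "1,D", name="time")

# membership: the offset from `start` must be divisible by the step
print(np.datetime64("2020-01-15") in daily)   # True
print(np.datetime64("2020-02-15") in daily)   # False (outside the bounds)

# issubset: a weekly grid over the same span divides evenly into the daily one
weekly = UniformCoordinates1d("2020-01-01", "2020-01-29", "7,D", name="time")
print(weekly.issubset(daily))                 # True
```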
5 changes: 1 addition & 4 deletions podpac/core/coordinates/utils.py
@@ -264,7 +264,7 @@ def make_coord_array(values):

a = np.atleast_1d(values)

if a.dtype == float or np.issubdtype(a.dtype, np.datetime64) or np.issubdtype(a.dtype, np.timedelta64):
if a.dtype == float or np.issubdtype(a.dtype, np.datetime64):
pass

elif np.issubdtype(a.dtype, np.number):
@@ -666,7 +666,4 @@ def add_valid_dimension(dimension_name):
if dimension_name in VALID_DIMENSION_NAMES:
raise ValueError(f"Dim `{dimension_name}` already a valid dimension.")

if "-" in dimension_name or "_" in dimension_name:
raise ValueError(f"Dim `{dimension_name}` may note contain `-` or `_`.")

VALID_DIMENSION_NAMES.append(dimension_name)
1 change: 1 addition & 0 deletions podpac/core/data/csv_source.py
@@ -44,6 +44,7 @@ class CSVRaw(FileKeysMixin, LoadFileMixin, BaseFileSource):
--------
CSV : Interpolated CSV file datasource for general use.
"""

# No support here for custom Dimension names? selection in dataset_source.py
header = tl.Any(default_value=0).tag(attr=True)
lat_key = tl.Union([tl.Unicode(), tl.Int()], default_value="lat").tag(attr=True)
1 change: 1 addition & 0 deletions podpac/core/data/dataset_source.py
@@ -45,6 +45,7 @@ class DatasetRaw(FileKeysMixin, LoadFileMixin, BaseFileSource):
--------
Dataset : Interpolated xarray dataset source for general use.
"""

# selection lets you use other dims
# dataset = tl.Instance(xr.Dataset).tag(readonly=True)
selection = tl.Dict(allow_none=True, default_value=None).tag(attr=True)
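The `selection` attribute shown in the context above is what "lets you use other dims". A hypothetical sketch: the file name, variable name, and the `{dim: index}` form of the selection dict are assumptions, not taken from the commit:

```python
import podpac

# Hypothetical: a NetCDF file with an extra "ensemble" dimension. `selection`
# picks one slice of that dimension so the remaining dims (lat/lon/time)
# can be mapped to podpac coordinates as usual.
node = podpac.data.Dataset(
    source="forecast.nc",          # hypothetical file
    data_key="temperature",        # hypothetical variable
    selection={"ensemble": 0},     # assumed {dimension: index} form
)
output = node.eval(node.coordinates)
```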
1 change: 1 addition & 0 deletions podpac/core/data/file_source.py
@@ -163,6 +163,7 @@ class FileKeysMixin(tl.HasTraits):
cf_calendar : str
calendar, when decoding CF datetimes
"""

# Other dims?
data_key = tl.Union([tl.Unicode(), tl.List(trait=tl.Unicode())]).tag(attr=True)
lat_key = tl.Unicode(default_value="lat").tag(attr=True)
1 change: 1 addition & 0 deletions podpac/core/data/zarr_source.py
@@ -52,6 +52,7 @@ class ZarrRaw(S3Mixin, FileKeysMixin, BaseFileSource):
--------
Zarr : Interpolated Zarr Datasource for general use.
"""

# Doesnt support other dims
file_mode = tl.Unicode(default_value="r").tag(readonly=True)
coordinate_index_type = "slice"
2 changes: 1 addition & 1 deletion podpac/core/interpolation/nearest_neighbor_interpolator.py
@@ -310,7 +310,7 @@ def _get_uniform_index(self, dim, source, request, bounds=None):
# Find all the 0.5 and 1.5's that were rounded to even numbers, and make sure they all round down
I = (index % 0.5) == 0
rindex[I] = np.ceil(index[I])
else: # "unbiased", that's the default np.around behavior, so do nothing
else: # "unbiased", that's the default np.around behavior, so do nothing
pass

stop_ind = int(source.size)
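The comment and override in the hunk above deal with NumPy's round-half-to-even behavior. A standalone illustration of what `np.around` does to exact halves and how the `index % 0.5 == 0` mask overrides it (the values here are arbitrary):

```python
import numpy as np

index = np.array([0.5, 1.5, 2.5, 3.5])

# np.around rounds exact halves to the nearest even value ("banker's rounding")
print(np.around(index))          # [0. 2. 2. 4.]

# the hunk above finds those halves and forces them all the same direction
rindex = np.around(index)
half = (index % 0.5) == 0        # also True for whole numbers, where ceil is a no-op
rindex[half] = np.ceil(index[half])
print(rindex)                    # [1. 2. 3. 4.]
```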
1 change: 1 addition & 0 deletions podpac/core/interpolation/none_interpolator.py
@@ -21,6 +21,7 @@
from podpac.core.coordinates.utils import get_timedelta
from podpac.core.interpolation.selector import Selector, _higher_precision_time_coords1d, _higher_precision_time_stack


@common_doc(COMMON_INTERPOLATOR_DOCS)
class NoneInterpolator(Interpolator):
"""None Interpolation"""
1 change: 1 addition & 0 deletions podpac/core/interpolation/test/test_interpolation.py
@@ -44,6 +44,7 @@ class InterpArray(InterpolationMixin, ArrayRaw):
np.testing.assert_array_equal(iaso.data, aso.data)
np.testing.assert_array_equal(abso.data, data)


from podpac.core.coordinates.utils import VALID_DIMENSION_NAMES

