diff --git a/quarto/notes.qmd b/quarto/notes.qmd
index bc1c4ac..507ee5e 100644
--- a/quarto/notes.qmd
+++ b/quarto/notes.qmd
@@ -126,5 +126,11 @@ Still, the memory usage spikes to about 10Gb RAM for saving the model.
 Looks like the dataset might work any moment now. Project left at running pre-commit. I've already made an initial commit. Might need to relax some pre-commit checks.
 
 :::{.callout-note}
-What are library stubs, and how do I install them? Why do I need them?
+What are library stubs, and how do I install them? Why do I need them? For now I just tell mypy to ignore missing imports.
 :::
+
+Ok, the dataset can now be served, it seems. There are still some loose ends to tie up in that code, but let's leave it for now; it looks like it works correctly.
+
+#### Model and training loop
+
+Now let's build the Graph WaveNet model. I will first adapt my existing code, which implements the forward and backward diffusions but not the adaptive adjacency matrix.
diff --git a/src/gwnet/datasets/metrla.py b/src/gwnet/datasets/metrla.py
index 897a692..1eee929 100644
--- a/src/gwnet/datasets/metrla.py
+++ b/src/gwnet/datasets/metrla.py
@@ -123,7 +123,6 @@ def _get_targets_and_features(
 
     def process(self) -> None:
         # Read data into huge `Data` list.
-        # TODO
         node_ids, id_to_idx_map, adj = pd.read_pickle(self.raw_paths[1])
         edges = torch.from_numpy(self._get_edges(adj))
         edge_weights = torch.from_numpy(self._get_edge_weights(adj))
diff --git a/tests/metrla/test_creation.py b/tests/metrla/test_creation.py
index 44f9ce5..72baa71 100644
--- a/tests/metrla/test_creation.py
+++ b/tests/metrla/test_creation.py
@@ -4,3 +4,7 @@ class TestMETRLA:
 
     def test_metrla_instantiate(self):
         METRLA("./data")
+
+    def test_metrla_read(self):
+        dataset = METRLA("./data")
+        dataset[0]
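For context on the last note added to `quarto/notes.qmd`: the layer being adapted is the diffusion convolution used by Graph WaveNet, which propagates node features along row-normalised forward and backward random-walk transition matrices. Below is a minimal PyTorch sketch of that operation, without the adaptive adjacency matrix. It is not code from this repository; the class name `DiffusionConv`, the `k_hops` argument, and the dense-matrix formulation are assumptions made for illustration.

```python
import torch
import torch.nn as nn


class DiffusionConv(nn.Module):
    """Sketch of a diffusion convolution over forward/backward random-walk matrices."""

    def __init__(self, in_channels: int, out_channels: int, k_hops: int = 2) -> None:
        super().__init__()
        self.k_hops = k_hops
        # One weight matrix for the k = 0 (identity) term, plus one per hop and direction.
        num_matrices = 2 * k_hops + 1
        self.weight = nn.Parameter(torch.empty(num_matrices, in_channels, out_channels))
        self.bias = nn.Parameter(torch.zeros(out_channels))
        nn.init.xavier_uniform_(self.weight)

    @staticmethod
    def _transition_matrix(adj: torch.Tensor) -> torch.Tensor:
        # Row-normalise the weighted adjacency into a random-walk transition matrix.
        deg = adj.sum(dim=1, keepdim=True).clamp(min=1e-10)
        return adj / deg

    def forward(self, x: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # x: (num_nodes, in_channels); adj: (num_nodes, num_nodes) weighted adjacency.
        p_fwd = self._transition_matrix(adj)      # forward diffusion
        p_bwd = self._transition_matrix(adj.t())  # backward diffusion

        out = x @ self.weight[0]  # k = 0 term
        idx = 1
        for p in (p_fwd, p_bwd):
            h = x
            for _ in range(self.k_hops):
                h = p @ h  # take one more diffusion step
                out = out + h @ self.weight[idx]
                idx += 1
        return out + self.bias
```

As a usage example, `DiffusionConv(2, 32, k_hops=2)` applied to a `(num_nodes, 2)` feature matrix and the METR-LA adjacency yields `(num_nodes, 32)` outputs. Adding the adaptive adjacency matrix later amounts to contributing one more learned transition matrix (and its weights) to the same sum.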