fix up tests for EKF and PF; add hosted runner
misko committed Oct 30, 2024
1 parent f452c2c commit 4dbf3cb
Showing 14 changed files with 701 additions and 674 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/docker-build-and-test.yml
@@ -8,7 +8,6 @@ on:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Login to Docker Hub
        uses: docker/login-action@v3
@@ -20,9 +19,10 @@ jobs:
        with:
          push: true
          tags: csmisko/ardupilotspf:latest
    runs-on: self-hosted
  pytest:
    needs: build
    runs-on: ubuntu-latest
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.10
35 changes: 33 additions & 2 deletions spf/dataset/fake_dataset.py
@@ -1,15 +1,16 @@
import argparse
import os
import random
import shutil

import matplotlib.pyplot as plt
import numpy as np
import torch
import yaml
import random
import zarr

from spf.data_collector import rx_config_from_receiver_yaml
from spf.dataset.spf_dataset import pi_norm
import zarr

# V5 data format
from spf.dataset.v5_data import v5rx_2xf64_keys, v5rx_f64_keys, v5rx_new_dataset
@@ -23,6 +24,10 @@
    torch_get_avg_phase_notrim,
    torch_pi_norm_pi,
)
from spf.scripts.create_empirical_p_dist import (
    create_empirical_p_dist,
    get_empirical_p_dist_parser,
)
from spf.sdrpluto.sdr_controller import rx_config_from_receiver_yaml
from spf.utils import (
    random_signal_matrix,
@@ -117,6 +122,32 @@ def phi_to_signal_matrix(
    """


def create_empirical_dist_for_datasets(datasets, precompute_cache, nthetas):
    parser = get_empirical_p_dist_parser()

    empirical_pkl_fn = precompute_cache + "/full.pkl"

    args = parser.parse_args(
        [
            "--out",
            empirical_pkl_fn,
            "--nbins",
            f"{nthetas}",
            "--nthetas",
            f"{nthetas}",
            "--precompute-cache",
            precompute_cache,
            "--device",
            "cpu",
            "-d",
        ]
        + datasets
    )
    create_empirical_p_dist(args)
    return empirical_pkl_fn


def create_fake_dataset(
    yaml_config_str,
    filename,
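
The new create_empirical_dist_for_datasets helper drives the create_empirical_p_dist script through its own argument parser instead of shelling out. A minimal usage sketch follows; the dataset paths, cache directory, and nthetas value are placeholders, not taken from this commit:

from spf.dataset.fake_dataset import create_empirical_dist_for_datasets

# placeholder inputs: precomputed zarr datasets and a cache directory
datasets = ["/tmp/fake_d0.zarr", "/tmp/fake_d1.zarr"]
precompute_cache = "/tmp/precompute_cache"

# writes <precompute_cache>/full.pkl and returns its path
empirical_pkl_fn = create_empirical_dist_for_datasets(
    datasets, precompute_cache, nthetas=65
)
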
33 changes: 26 additions & 7 deletions spf/dataset/spf_dataset.py
@@ -5,6 +5,7 @@
import bisect
import os
import pickle
from functools import cache
from multiprocessing import Pool, cpu_count
from typing import Dict, List

@@ -485,6 +486,7 @@ def __init__(
            self.zarr_fn, readahead=self.readahead, map_size=2**32
        )
        self.yaml_config = yaml.safe_load(open(self.yaml_fn, "r"))

        self.paired = paired
        self.n_receivers = len(self.yaml_config["receivers"])
        self.gpu = gpu
@@ -508,6 +510,16 @@ def __init__(
            for receiver in self.yaml_config["receivers"]
        ]

        for rx_idx in range(1, self.n_receivers):
            assert (
                self.yaml_config["receivers"][0]["antenna-spacing-m"]
                == self.yaml_config["receivers"][rx_idx]["antenna-spacing-m"]
            )

            assert self.wavelengths[0] == self.wavelengths[rx_idx]

        self.rx_spacing = self.yaml_config["receivers"][0]["antenna-spacing-m"]

        self.rx_configs = [
            rx_config_from_receiver_yaml(receiver)
            for receiver in self.yaml_config["receivers"]
@@ -841,6 +853,19 @@ def get_session_idxs(self, session_idx):
            snapshot_start_idx, snapshot_end_idx, self.snapshots_adjacent_stride
        )

    @cache
    def get_empirical_dist(
        self,
        receiver_idx,
    ):
        rx_spacing_str = rx_spacing_to_str(self.rx_spacing)
        empirical_radio_key = (
            f"r{receiver_idx}" if self.empirical_individual_radio else "r"
        )
        return self.empirical_data[rx_spacing_str][empirical_radio_key][
            "sym" if self.empirical_symmetry else "nosym"
        ]

    def render_session(self, receiver_idx, session_idx, double_flip=False):
        self.reinit()

@@ -929,13 +954,7 @@ def render_session(self, receiver_idx, session_idx, double_flip=False):
        # gc.collect()
        # self.close()
        if self.empirical_data is not None:
            rx_spacing_str = rx_spacing_to_str(data["rx_spacing"][0].item())
            empirical_radio_key = (
                f"r{receiver_idx}" if self.empirical_individual_radio else "r"
            )
            empirical_dist = self.empirical_data[rx_spacing_str][empirical_radio_key][
                "sym" if self.empirical_symmetry else "nosym"
            ]
            empirical_dist = self.get_empirical_dist(receiver_idx)
            data["empirical"] = empirical_dist[
                to_bin(data["mean_phase_segmentation"][0], empirical_dist.shape[0])
            ].unsqueeze(0)
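
render_session previously rebuilt the empirical-distribution lookup on every call; it now goes through get_empirical_dist, which functools.cache memoizes per receiver_idx (safe because __init__ now asserts one antenna spacing and wavelength across all receivers). A toy sketch of the same pattern, with made-up names and values:

from functools import cache

class Example:
    def __init__(self):
        self.lookups = 0

    @cache  # memoizes per (self, receiver_idx); note it keeps self alive
    def get_empirical_dist(self, receiver_idx):
        self.lookups += 1  # stands in for the expensive nested-dict traversal
        return f"dist-for-r{receiver_idx}"

ex = Example()
ex.get_empirical_dist(0)
ex.get_empirical_dist(0)  # second call is served from the cache
assert ex.lookups == 1
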
File renamed without changes.
File renamed without changes.
File renamed without changes.
125 changes: 125 additions & 0 deletions spf/filters/filters.py
@@ -3,6 +3,7 @@
import numpy as np
import torch
from filterpy.common import Q_discrete_white_noise
from filterpy.monte_carlo import systematic_resample

from spf.rf import pi_norm

@@ -252,3 +253,127 @@ def trajectory(self, initial_conditions={}, dt=1.0, noise_std=0.01):
            trajectory.append(self.posterior_to_mu_var(posterior))

        return {"trajectory": trajectory}


class ParticleFilter(SPFFilter):

    def fix_particles(self):
        return self.particles

    def trajectory(
        self,
        mean,
        std,
        N=128,
        noise_std=None,
        return_particles=False,
        debug=False,
        seed=0,
    ):
        self.generator = torch.Generator()
        self.generator.manual_seed(seed)
        self.particles = create_gaussian_particles_xy(
            mean, std, N, generator=self.generator
        )
        self.weights = torch.ones((N,), dtype=torch.float64) / N
        trajectory = []
        for idx in range(len(self.ds)):
            self.predict(
                dt=1.0,
                noise_std=noise_std,
                our_state=self.our_state(idx),
            )
            self.fix_particles()

            self.update(z=self.observation(idx))

            # resample if too few effective particles
            if neff(self.weights) < N / 2:
                indexes = torch.as_tensor(systematic_resample(self.weights.numpy()))
                resample_from_index(self.particles, self.weights, indexes)

            mu, var = estimate(self.particles, self.weights)

            trajectory.append({"var": var, "mu": mu})
            if return_particles:
                # clone(): torch tensors have no numpy-style .copy() method
                trajectory[-1]["particles"] = self.particles.clone()
            if debug:
                trajectory[-1]["observation"] = self.observation(idx)
        return trajectory


# @torch.jit.script
def create_gaussian_particles_xy(
    mean: torch.Tensor, std: torch.Tensor, N: int, generator
):
    assert mean.ndim == 2 and mean.shape[0] == 1
    assert std.ndim == 2 and std.shape[0] == 1
    return mean + (torch.randn(N, mean.shape[1], generator=generator) * std)


@torch.jit.script
def theta_phi_to_bins(theta_phi: torch.Tensor, nbins: int):
    if isinstance(theta_phi, float):
        return int(nbins * (theta_phi + torch.pi) / (2 * torch.pi)) % nbins
    return (nbins * (theta_phi + torch.pi) / (2 * torch.pi)).to(torch.long) % nbins
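
# Worked example (illustrative, not part of the commit): an angle theta in
# [-pi, pi) lands in bin floor(nbins * (theta + pi) / (2 * pi)) % nbins, so
# the four quadrant midpoints with nbins=4 map to bins 0..3:
_mid_quadrants = torch.tensor(
    [-3 * torch.pi / 4, -torch.pi / 4, torch.pi / 4, 3 * torch.pi / 4]
)
assert theta_phi_to_bins(_mid_quadrants, 4).tolist() == [0, 1, 2, 3]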


@torch.jit.script
def theta_phi_to_p_vec(thetas, phis, full_p):
    theta_bin = theta_phi_to_bins(thetas, nbins=full_p.shape[0])
    phi_bin = theta_phi_to_bins(phis, nbins=full_p.shape[1])
    return torch.take(full_p[:, phi_bin], theta_bin)


# @torch.jit.script
def add_noise(particles: torch.Tensor, noise_std: torch.Tensor, generator):
    particles[:] += (
        torch.randn(particles.shape, generator=generator) * noise_std
    )  # theta_noise=0.1, theta_dot_noise=0.001


@torch.jit.script
def resample_from_index(particles, weights, indexes):
    particles[:] = particles[indexes]
    weights.fill_(1.0 / len(weights))
    weights /= torch.sum(weights)  # normalize


@torch.jit.script
def neff(weights: torch.Tensor):
    return 1.0 / torch.sum(torch.square(weights))


@torch.jit.script
def weighted_mean(inputs: torch.Tensor, weights: torch.Tensor):
    return torch.sum(weights * inputs, dim=0) / weights.sum()


@torch.jit.script
def estimate(particles: torch.Tensor, weights: torch.Tensor):
    # mean = torch.mean(self.particles, weights=self.weights, axis=0)
    # var = torch.mean((self.particles - mean) ** 2, weights=self.weights, axis=0)
    mean = weighted_mean(particles, weights.reshape(-1, 1))
    var = weighted_mean((particles - mean) ** 2, weights.reshape(-1, 1))
    return mean, var


@torch.jit.script
def theta_phi_to_p(theta, phi, full_p):
    theta_bins = full_p.shape[0]
    phi_bins = full_p.shape[1]
    theta_bin = int(theta_bins * (theta + torch.pi) / (2 * torch.pi)) % theta_bins
    phi_bin = int(phi_bins * (phi + torch.pi) / (2 * torch.pi)) % phi_bins
    return full_p[theta_bin, phi_bin]


@torch.jit.script
def fix_particles_single(particles):
    # reflecting theta back into [-pi/2, pi/2] must also flip the sign of
    # theta_dot, otherwise the particle keeps drifting past the boundary
    while torch.abs(particles[:, 0]).max() > torch.pi / 2:
        mask = torch.abs(particles[:, 0]) > torch.pi / 2
        particles[mask, 0] = (
            torch.sign(particles[mask, 0]) * torch.pi - particles[mask, 0]
        )
        particles[mask, 1] *= -1
    return particles
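
Taken together, the helpers above implement a standard sequential-importance-resampling loop. Below is a self-contained sketch of the resampling branch from ParticleFilter.trajectory, reusing the neff, resample_from_index, and estimate functions defined above; the particle count and the deliberately degenerate weights are illustrative only:

import torch
from filterpy.monte_carlo import systematic_resample

N = 128
generator = torch.Generator()
generator.manual_seed(0)
# columns are (theta, theta_dot), matching fix_particles_single above
particles = torch.randn((N, 2), generator=generator, dtype=torch.float64)

# degenerate weights: one particle carries almost all of the mass
weights = torch.full((N,), 1e-6, dtype=torch.float64)
weights[0] = 1.0
weights /= weights.sum()

# N_eff = 1 / sum(w_i**2) is ~1 here, far below the N / 2 threshold
if neff(weights) < N / 2:
    indexes = torch.as_tensor(systematic_resample(weights.numpy())).to(torch.long)
    resample_from_index(particles, weights, indexes)

mu, var = estimate(particles, weights)  # weighted mean and variance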