From 46b3432dd56c3b38fa8a6306da7f2722c45bcd87 Mon Sep 17 00:00:00 2001
From: Nathaniel Hudson
Date: Fri, 15 Nov 2024 13:07:49 -0600
Subject: [PATCH] minor changes

---
 demo.py                                       |  23 ++--
 .../engine/{control => controllers}/globus.py |   4 +-
 flight/federation/jobs/types.py               |   2 +-
 flight/learning/__init__.py                   |   2 +-
 flight/learning/base.py                       |   9 +-
 flight/learning/params.py                     | 100 +++++++++++++-----
 flight/learning/scikit/module.py              |  35 ++++--
 flight/learning/torch/module.py               |  89 +++++-----------
 flight/learning/torch/trainer.py              |  16 ++-
 flight/learning/types.py                      |  21 +---
 flight/strategies/base.py                     |   6 +-
 flight/strategies/commons/averaging.py        |  34 ++----
 flight/strategies/impl/fedavg.py              |   9 +-
 tests/learning/test_params.py                 |  22 ++++
 14 files changed, 202 insertions(+), 170 deletions(-)
 rename flight/engine/{control => controllers}/globus.py (95%)

diff --git a/demo.py b/demo.py
index 5a3f4d3..a77eda4 100644
--- a/demo.py
+++ b/demo.py
@@ -15,7 +15,7 @@
 NUM_LABELS = 10
 
 
-class TrainingModule(TorchModule):
+class MyModule(TorchModule):
     def __init__(self):
         super().__init__()
         self.model = nn.Sequential(
@@ -35,7 +35,7 @@ def forward(self, x):
     def training_step(self, batch, batch_idx) -> TensorLoss:
         x, y = batch
         y_hat = self(x)
-        return nn.functional.nll_loss(y_hat, y)
+        return nn.functional.cross_entropy(y_hat, y)
 
     def configure_optimizers(self) -> torch.optim.Optimizer:
         return torch.optim.Adam(self.parameters(), lr=0.02)
@@ -48,24 +48,27 @@ def main():
         train=False,
         transform=ToTensor(),
     )
-    data = Subset(data, indices=list(range(2000)))
+    data = Subset(data, indices=list(range(200)))
     topo = fl.flat_topology(10)
-    # exit(0)
-    module = TrainingModule()
+    module = MyModule()
     fed_data = federated_split(
         topo=topo,
-        # data=TensorDataset(
-        #     torch.randn(100, 1), torch.randint(low=0, high=NUM_LABELS, size=(100, 1))
-        # ),
         data=data,
         num_labels=NUM_LABELS,
         label_alpha=100.0,
         sample_alpha=100.0,
     )
-    trained_module, records = fl.federated_fit(topo, module, fed_data, rounds=2)
+    trained_module, records = fl.federated_fit(topo, module, fed_data, rounds=10)
 
     df = pd.DataFrame.from_records(records)
-    sns.lineplot(df, x="round", y="train/loss")
+    print(df.head())
+    sns.lineplot(
+        df,
+        x="train/time",
+        y="train/loss",
+        hue="node/idx",
+        # errorbar=None,
+    ).set(yscale="linear")
     plt.show()

diff --git a/flight/engine/control/globus.py b/flight/engine/controllers/globus.py
similarity index 95%
rename from flight/engine/control/globus.py
rename to flight/engine/controllers/globus.py
index d37ebf4..c8f901e 100644
--- a/flight/engine/control/globus.py
+++ b/flight/engine/controllers/globus.py
@@ -5,8 +5,8 @@
 
 import globus_compute_sdk
 
-from ...federation.topologies import Node
-from .base import AbstractController
+from flight.engine.controllers.base import AbstractController
+from flight.federation.topologies import Node
 
 if t.TYPE_CHECKING:
     from flight.types import P, T

diff --git a/flight/federation/jobs/types.py b/flight/federation/jobs/types.py
index acebf4a..afb57d5 100644
--- a/flight/federation/jobs/types.py
+++ b/flight/federation/jobs/types.py
@@ -7,11 +7,11 @@
 from flight.federation.topologies.node import Node, NodeState, WorkerState
 from flight.learning.base import AbstractDataModule, AbstractModule
-from flight.learning.types import Params
 
 if t.TYPE_CHECKING:
     from flight.types import Record
     from flight.engine.transporters import AbstractTransporter
+    from flight.learning.params import Params
     from flight.strategies import AggrStrategy, TrainerStrategy, WorkerStrategy

diff --git a/flight/learning/__init__.py b/flight/learning/__init__.py
index 9dc4b75..1241756 100644
--- a/flight/learning/__init__.py
+++ b/flight/learning/__init__.py
@@ -40,8 +40,8 @@
 """
 
 from .base import AbstractDataModule, AbstractModule, AbstractTrainer
+from .params import NpParams, TorchParams
 from .torch.utils import federated_split
-from .types import NpParams, Params, TorchParams
 
 __all__ = [
     "AbstractModule",

diff --git a/flight/learning/base.py b/flight/learning/base.py
index 9116bed..1d7063b 100644
--- a/flight/learning/base.py
+++ b/flight/learning/base.py
@@ -6,7 +6,8 @@
 if t.TYPE_CHECKING:
     from ..federation.topologies import Node
     from ..types import Record
-    from .types import Data, DataIterable, DataKinds, FrameworkKind, Params
+    from .params import Params
+    from .types import Data, DataIterable, DataKinds, FrameworkKind
 
 
 # DataType = t.TypeVar("DataType", bound="AbstractDataModule")
@@ -83,14 +84,10 @@ def size(self, node: Node | None = None, kind: DataKinds = "train") -> int | Non
 
 class AbstractModule(abc.ABC):
     @abc.abstractmethod
-    def get_params(self, to_numpy: bool = True) -> Params:
+    def get_params(self) -> Params:
         """
         Getter method for the parameters of a trainable module (i.e., neural network).
 
-        Args:
-            to_numpy (bool): Flag to convert the parameters to numpy `ndarray`s.
-                Defaults to `True`.
-
         Returns:
             The parameters of the module.
         """

diff --git a/flight/learning/params.py b/flight/learning/params.py
index 54864b6..63f67e9 100644
--- a/flight/learning/params.py
+++ b/flight/learning/params.py
@@ -1,14 +1,30 @@
+from __future__ import annotations
+
+import functools
 import typing as t
+from collections import OrderedDict
 from enum import Enum, auto
 
 import numpy as np
 import torch
 
-from flight.learning import NpParams, TorchParams
+NpParams: t.TypeAlias = dict[str, np.ndarray]
+"""
+Type alias for model parameters as a mapping where the keys are strings and
+the values are Numpy `ndarray`s.
+"""
+
+TorchParams: t.TypeAlias = dict[str, torch.Tensor]
+"""
+Type alias for model parameters as a mapping where the keys are strings and
+the values are parameters as PyTorch `Tensor`s.
+"""
 
 
 class UnsupportedParameterKindError(ValueError):
-    """An Exception raised when an unsupported parameter kind is detected."""
+    """
+    An Exception raised when an unsupported parameter kind is detected.
+    """
 
     def __init__(self, message: str | None = None, *args):
         if message is None:
@@ -20,7 +36,9 @@ def __init__(self, message: str | None = None, *args):
 
 
 class InconsistentParamValuesError(ValueError):
-    """An Exception raised when the parameter value kinds are inconsistent."""
+    """
+    An Exception raised when the parameter value kinds are inconsistent.
+    """
 
     def __init__(self, message: str | None = None, *args):
         if message is None:
@@ -29,8 +47,19 @@ def __init__(self, message: str | None = None, *args):
 
 
 class ParamKinds(Enum):
+    """
+    An enumeration of the kinds of parameters supported by Flight.
+    """
+
     NUMPY = auto()
+    """
+    Parameters implemented as NumPy `ndarray`s.
+    """
+
     TORCH = auto()
+    """
+    Parameters implemented as PyTorch `Tensor`s.
+    """
 
 
 def infer_param_kind(param: t.Any) -> ParamKinds:
@@ -44,7 +73,7 @@ def infer_param_kind(param: t.Any) -> ParamKinds:
         The kind of parameter.
 
     Throws:
-        - `ValueError`: If the parameter kind is unknown or unsupported.
+        - `UnsupportedParameterKindError`: If the parameter kind is unknown/unsupported.
     """
     if isinstance(param, np.ndarray):
         return ParamKinds.NUMPY
@@ -70,6 +99,8 @@ def validate_param_kind(params: dict[str, t.Any]) -> ParamKinds:
 
     Throws:
         - `InconsistentParamValuesError`: If the parameter values are inconsistent.
+        - `UnsupportedParameterKindError`: If the parameter kind is unknown/unsupported.
+          This will be thrown by the `infer_param_kind` function.
     """
     param_kinds = set(map(infer_param_kind, params.values()))
     if len(param_kinds) != 1:
@@ -77,33 +108,50 @@ def validate_param_kind(params: dict[str, t.Any]) -> ParamKinds:
     return param_kinds.pop()
 
 
-class Params:
-    def __init__(self, raw_params: dict[str, t.Any]):
-        self._raw_params = raw_params
-        self._inferred_kind = validate_param_kind(raw_params)
+class Params(OrderedDict):
+    """
+    A wrapper class for model parameters, implemented as an `OrderedDict`.
+
+    Throws:
+        - `InconsistentParamValuesError`: If the parameter values are inconsistent.
+        - `UnsupportedParameterKindError`: If the parameter kind is unknown/unsupported.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
 
     def numpy(self) -> NpParams:
-        match self._inferred_kind:
+        """
+        Convert the parameters to NumPy `ndarray`s.
+
+        Returns:
+            The parameters as NumPy `ndarray`s.
+        """
+        match self.inferred_kind:
             case ParamKinds.NUMPY:
-                return self._raw_params
+                return self
             case ParamKinds.TORCH:
-                return {k: v.numpy() for k, v in self._raw_params.items()}
+                return OrderedDict((k, v.numpy()) for k, v in self.items())
 
     def torch(self) -> TorchParams:
-        match self._inferred_kind:
+        """
+        Convert the parameters to PyTorch `Tensor`s.
+
+        Returns:
+            The parameters as PyTorch `Tensor`s.
+        """
+        match self.inferred_kind:
             case ParamKinds.TORCH:
-                return self._raw_params
+                return self
             case ParamKinds.NUMPY:
-                return {k: torch.from_numpy(v) for k, v in self._raw_params.items()}
-
-
-# class NpParams(Params):
-#     @abc.abstractmethod
-#     def numpy(self) -> dict[str, npt.NDArray]:
-#         pass
-#
-#
-# class TorchParams(Params):
-#     @abc.abstractmethod
-#     def numpy(self) -> dict[str, npt.NDArray]:
-#         pass
+                return OrderedDict((k, torch.from_numpy(v)) for k, v in self.items())
+
+    @functools.cached_property
+    def inferred_kind(self) -> ParamKinds:
+        """
+        The inferred kind of the parameters.
+
+        Returns:
+            The kind of parameters.
+        """
+        return validate_param_kind(self)

diff --git a/flight/learning/scikit/module.py b/flight/learning/scikit/module.py
index 0aaccae..8b6bb04 100644
--- a/flight/learning/scikit/module.py
+++ b/flight/learning/scikit/module.py
@@ -1,17 +1,18 @@
 from __future__ import annotations
 
 import typing as t
-from collections import OrderedDict
-
-from sklearn.neural_network import MLPClassifier, MLPRegressor
 
 from flight.learning import AbstractModule
-from flight.learning.types import FrameworkKind, Params
+from flight.learning.params import Params
+from flight.learning.types import FrameworkKind
+
+if t.TYPE_CHECKING:
+    from sklearn.neural_network import MLPClassifier, MLPRegressor
 
 
 class ScikitModule(AbstractModule):
-    WEIGHT_KEY_PREFIX = "weight"
-    BIAS_KEY_PREFIX = "bias"
+    WEIGHT_KEY_PREFIX: t.Final[str] = "weight"
+    BIAS_KEY_PREFIX: t.Final[str] = "bias"
 
     def __init__(self, module: MLPClassifier | MLPRegressor):
         self.module = module
@@ -24,20 +25,33 @@ def __init__(self, module: MLPClassifier | MLPRegressor):
     def kind(self) -> FrameworkKind:
         return "scikit"
 
-    def get_params(self, _: bool = True) -> Params:
+    def get_params(self) -> Params:
+        """
+        Getter method for the parameters of a trainable module (i.e., neural network).
+
+        Returns:
+            The parameters of the module.
+        """
         params = []
         for i in range(self._n_layers):
             params.append((f"{self.WEIGHT_KEY_PREFIX}_{i}", self.module.coefs_[i]))
             params.append((f"{self.BIAS_KEY_PREFIX}_{i}", self.module.intercepts_[i]))
-        return OrderedDict(params)
+        return Params(params)
 
     def set_params(self, params: Params):
+        """
+        Setter method for the parameters of a trainable module (i.e., neural network)
+        implemented in Scikit-Learn.
+
+        Args:
+            params (Params): The parameters to set.
+        """
+        params = params.numpy()
         param_keys = list(params.keys())
         layer_nums = set(map(lambda txt: int(txt.split("_")[-1]), param_keys))
         num_layers = max(layer_nums) + 1
-        weights = []
-        biases = []
+        weights, biases = [], []
         for i in range(num_layers):
             w_i = params[f"{self.WEIGHT_KEY_PREFIX}_{i}"]
             b_i = params[f"{self.BIAS_KEY_PREFIX}_{i}"]
@@ -57,5 +71,4 @@ def _n_layers(self) -> int:
                 "ScikitModule :: Inconsistent number of layers between "
                 "coefficients/weights and intercepts/biases."
             )
-
         return n

diff --git a/flight/learning/torch/module.py b/flight/learning/torch/module.py
index 61f33b5..8fb53d1 100644
--- a/flight/learning/torch/module.py
+++ b/flight/learning/torch/module.py
@@ -8,13 +8,15 @@
 from torch import nn
 
 from flight.learning import AbstractModule
-from flight.learning.types import FrameworkKind, NpParams, Params, TorchParams
+from flight.learning.params import Params
+from flight.learning.types import FrameworkKind
 
 from .types import TensorLoss, TensorStepOutput
 
-_DEFAULT_INCLUDE_STATE = False
+_DEFAULT_INCLUDE_STATE: t.Final[bool] = False
 """
-...
+Default constant for whether to include the state in the parameters of a
+[`TorchModule`][flight.learning.torch.module.TorchModule].
 """
 
 
@@ -22,8 +24,7 @@ class TorchModule(AbstractModule, nn.Module):
     """
     Wrapper class for a PyTorch model (i.e., `torch.nn.Module`).
 
-    Based on PyTorch Lightning's
-    [LightningModule](
+    Based on PyTorch Lightning's [LightningModule](
     https://lightning.ai/docs/pytorch/stable/_modules/lightning/
     pytorch/core/module.html#LightningModule
     ).
@@ -40,23 +41,11 @@ def __init__(self, *args, **kwargs):
     def kind(self) -> FrameworkKind:
         return "torch"
 
-    @t.overload
-    def get_params(self, to_numpy: t.Literal[True]) -> NpParams:
-        ...
-
-    @t.overload
-    def get_params(self, to_numpy: t.Literal[False]) -> TorchParams:
-        ...
-
-    def get_params(self, to_numpy: bool = True) -> Params:
+    def get_params(self) -> Params:
         """
         Getter method for the parameters of a trainable module (i.e., neural network)
         implemented in PyTorch.
 
-        Args:
-            to_numpy (bool): Flag to convert the parameters to numpy arrays. Defaults
-                to `True`.
-
         Returns:
             The parameters of the module. If the `to_numpy` flag is set to `True`,
             then `NpParams` are returned (i.e., values are NumPy `ndarray`s);
             otherwise `TorchParams` are returned (i.e., values are PyTorch `Tensor`s).
 
         Note:
             We recommend not changing the `to_numpy` flag unless you know what
             you are doing. The default value is set to `True` to allow for standard
             mathematical operations in aggregation functions across different
             frameworks.
-        """
-
-        def _parse_params(pair: tuple[str, torch.Tensor]):
-            """
-            Helper hidden function that converts parameters to NumPy `ndarray`s if
-            specified by the `get_params` arg.
-            """
-            if to_numpy:
-                return pair[0], pair[1].data.numpy()
-            else:
-                return pair[0], pair[1].data
-
-        state_dict = self.state_dict()
+        Throws:
+            - `UnsupportedParameterKindError`: If the parameter kind is
+              unknown/unsupported.
+        """
         if self.include_state:
-            return OrderedDict(_parse_params(items) for items in state_dict.items())
+            params = OrderedDict(
+                (name, param) for name, param in self.state_dict().items()
+            )
         else:
-            param_names = dict(self.named_parameters())
-            return OrderedDict(
-                _parse_params((name, value))
-                for (name, value) in state_dict.items()
-                if name in param_names
+            params = OrderedDict(
+                (name, param.data) for (name, param) in self.named_parameters()
             )
 
+        return Params(params)
+
     def set_params(self, params: Params) -> None:
         """
         Setter method for the parameters of a trainable module (i.e., neural network)
@@ -101,29 +83,10 @@ def set_params(self, params: Params) -> None:
         Throws:
             - `ValueError`: if the parameter pair from (`next(iter(params.items())`)
               is not of length 2.
-            - `Exception`: can be thrown. if the parameter cannot be converted to a
-              PyTorch `Tensor`.
+            - `Exception`: can be thrown if the parameters cannot be converted to
+              PyTorch `Tensor`s.
         """
-
-        def _parse_params(pair: tuple[str, torch.Tensor]):
-            """
-            Helper hidden function that converts parameters to PyTorch `Tensor`s if
-            specified by the `get_params` arg.
-            """
-            if len(pair) != 2:
-                raise ValueError("Invalid parameter pair; must be of length 2.")
-
-            if isinstance(pair[1], torch.Tensor):
-                return pair[0], pair[1]
-            try:
-                return pair[0], torch.tensor(pair[1])
-            except Exception as err:
-                err.add_note("Failed to convert parameter to PyTorch `Tensor`.")
-                raise err
-
-        strict = self.include_state
-        new_params = OrderedDict(_parse_params(items) for items in params.items())
-        return self.load_state_dict(new_params, strict=strict, assign=False)
+        self.load_state_dict(params.torch(), strict=self.include_state, assign=False)
 
     ####################################################################################
 
@@ -159,8 +122,8 @@ def configure_optimizers(self) -> torch.optim.Optimizer:
         Returns:
             A configured optimizer or a list of optimizers for training the model.
 
-        Raises:
-            NotImplementedError: If the method is not overridden in a subclass.
+        Throws:
+            - `NotImplementedError`: If the method is not overridden in a subclass.
         """
 
     ####################################################################################
 
@@ -182,7 +145,7 @@ def predict_step(self, *args: t.Any, **kwargs) -> TensorStepOutput:
         Returns:
             The output of the prediction step, encapsulating the model's predictions.
 
-        Raises:
+        Throws:
             - `NotImplementedError`: If the method is not implemented.
         """
         raise NotImplementedError(
@@ -202,7 +165,7 @@ def test_step(self, *args: t.Any, **kwargs) -> TensorStepOutput:
         Returns:
             The output of the prediction step, encapsulating the model's predictions.
 
-        Raises:
+        Throws:
            - `NotImplementedError`: If the method is not implemented.
         """
         raise NotImplementedError(
@@ -223,7 +186,7 @@ def validation_step(self, *args: t.Any, **kwargs) -> TensorStepOutput:
         Returns:
             The output of the prediction step, encapsulating the model's predictions.
 
-        Raises:
+        Throws:
             - `NotImplementedError`: If the method is not implemented.
         """
         raise NotImplementedError(

diff --git a/flight/learning/torch/trainer.py b/flight/learning/torch/trainer.py
index cf8a568..7efdc06 100644
--- a/flight/learning/torch/trainer.py
+++ b/flight/learning/torch/trainer.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import datetime
 import pathlib
 import typing as t
 
@@ -180,7 +181,15 @@ def fit(
                 pbar,
             )
             for loss in train_losses:
-                self._results.append({"epoch": epoch, "train/loss": loss.item()})
+                self._results.append(
+                    {
+                        "train/time": datetime.datetime.now(),
+                        "node/idx": self.node.idx,
+                        "epoch": epoch,
+                        "train/loss": loss.item(),
+                        "step": self._curr_step,
+                    }
+                )
 
             # Validate the model during training.
             validate_now = epoch % validate_every_n_epochs == 0
@@ -189,6 +198,8 @@ def fit(
                 for loss in val_losses:
                     self._results.append(
                         {
+                            "valid/time": datetime.datetime.now(),
+                            "node/idx": self.node.idx,
                             "epoch": epoch,
                             "val/loss": loss.item(),
                             "step": self._curr_step,
@@ -224,6 +235,8 @@ def _epoch(
 
             self._results.append(
                 {
+                    "train/time": datetime.datetime.now(),
+                    "node/idx": self.node.idx,
                     "epoch": epoch,
                     "train/loss": loss.item(),
                     "train/batch_idx": batch_idx,
@@ -255,6 +268,7 @@ def validate(
             if loss is not None:
                 self._results.append(
                     {
+                        "train/time": datetime.datetime.now(),
                         "epoch": epoch,
                         "valid/loss": loss.item(),
                         "valid/batch_idx": batch_idx,

diff --git a/flight/learning/types.py b/flight/learning/types.py
index e961a8f..b79b301 100644
--- a/flight/learning/types.py
+++ b/flight/learning/types.py
@@ -16,23 +16,12 @@
 The output of a local training step, which can be a loss or a dictionary of...
 """
 
-NpParams: t.TypeAlias = t.Dict[str, npt.NDArray]
-"""
-Type alias for model parameters as a mapping where the keys are strings and
-the values are Numpy `ndarray`s.
-"""
-
-TorchParams: t.TypeAlias = t.Dict[str, Tensor]
-"""
-Type alias for model parameters as a mapping where the keys are strings and
-the values are parameters as PyTorch `Tensor`s.
-"""
-
-Params: t.TypeAlias = NpParams | TorchParams
-"""
-Type alias for model parameters; a mapping where the keys are strings and the
-values are parameters (as either Numpy `ndarray`s or PyTorch `Tensor`s).
-"""
+# Params: t.TypeAlias = NpParams | TorchParams
+# """
+# Type alias for model parameters; a mapping where the keys are strings and the
+# values are parameters (as either Numpy `ndarray`s or PyTorch `Tensor`s).
+# """
 
 Loss: t.TypeAlias = Tensor
 """

diff --git a/flight/strategies/base.py b/flight/strategies/base.py
index 03cf839..c9ffb31 100644
--- a/flight/strategies/base.py
+++ b/flight/strategies/base.py
@@ -17,7 +17,7 @@
 
 if t.TYPE_CHECKING:
     from flight.federation.topologies.node import AggrState, NodeID, NodeState
-    from flight.learning import NpParams, Params
+    from flight.learning import Params
 
 
 class DefaultCoordStrategy(CoordStrategy):
@@ -69,9 +69,9 @@ def aggregate_params(
         Returns:
             Aggregated parameters.
         """
-        children_params: dict[NodeID, NpParams] = {}
+        children_params: dict[NodeID, Params] = {}
         for idx in children_states:
-            children_params[idx] = children_modules[idx].get_params(to_numpy=True)
+            children_params[idx] = children_modules[idx].get_params()
 
         return average_state_dicts(children_params, weights=None)

diff --git a/flight/strategies/commons/averaging.py b/flight/strategies/commons/averaging.py
index cf0a067..01aa11e 100644
--- a/flight/strategies/commons/averaging.py
+++ b/flight/strategies/commons/averaging.py
@@ -4,15 +4,16 @@
 
 import numpy as np
 
+from flight.learning.params import Params
+
 if t.TYPE_CHECKING:
     from flight.federation.topologies.node import NodeID
-    from flight.learning.types import NpParams
 
 
 def average_state_dicts(
-    state_dicts: t.Mapping[NodeID, NpParams],
+    state_dicts: t.Mapping[NodeID, Params],
     weights: t.Mapping[NodeID, float] | None = None,
-) -> NpParams:
+) -> Params:
     """
     Common implementation for averaging model parameters.
 
@@ -37,32 +38,13 @@ def average_state_dicts(
         node_weights = {node: weights[node] / weight_sum for node in weights}
 
     avg_weights = {}
-    for node, state_dict in state_dicts.items():
+    for node, node_params in state_dicts.items():
+        node_params = node_params.numpy()
         w = node_weights[node]
-        for name, value in state_dict.items():
+        for name, value in node_params.items():
             if name not in avg_weights:
                 avg_weights[name] = w * np.copy(value)
             else:
                 avg_weights[name] += w * np.copy(value)
 
-    # with torch.no_grad():
-    #     avg_weights = {}
-    #     for node, state_dict in state_dicts.items():
-    #         w = node_weights[node]
-    #         for name, value in state_dict.items():
-    #             match value:
-    #                 # TODO: We need some abstraction for math operations across numpy
-    #                 #       and tensors.
-    #                 case torch.Tensor():
-    #                     value = w * torch.clone(value)
-    #                 case np.ndarray():
-    #                     value = w * value
-    #                 case _:
-    #                     raise ValueError("Unsupported data type for parameter value.")
-    #
-    #             if name not in avg_weights:
-    #                 avg_weights[name] = value
-    #             else:
-    #                 avg_weights[name] += value
-
-    return avg_weights
+    return Params(avg_weights)

diff --git a/flight/strategies/impl/fedavg.py b/flight/strategies/impl/fedavg.py
index aab515f..7e7c7a9 100644
--- a/flight/strategies/impl/fedavg.py
+++ b/flight/strategies/impl/fedavg.py
@@ -2,6 +2,8 @@
 
 import typing as t
 
+from flight.learning.params import Params
+
 from ..base import (
     DefaultAggrStrategy,
     DefaultTrainerStrategy,
@@ -19,7 +21,6 @@
         WorkerState,
     )
     from flight.learning.base import AbstractDataModule, AbstractModule
-    from flight.learning.types import NpParams, Params
 
 
 class _FedAvgConstMixins:
@@ -66,15 +67,15 @@ def aggregate_params(
         Returns:
             Params: The aggregated values.
         """
-        children_params: dict[NodeID, NpParams] = {}
+        children_params: dict[NodeID, Params] = {}
         for idx in children_states:
-            children_params[idx] = children_modules[idx].get_params(to_numpy=True)
+            children_params[idx] = children_modules[idx].get_params()
 
         weights = {}
         for node, child_state in children_states.items():
             weights[node] = child_state[FedAvgAggr.NUM_SAMPLES]
-
         state[FedAvgAggr.NUM_SAMPLES] = sum(weights.values())
+
         return average_state_dicts(children_params, weights=weights)

diff --git a/tests/learning/test_params.py b/tests/learning/test_params.py
index ba19408..b8552f2 100644
--- a/tests/learning/test_params.py
+++ b/tests/learning/test_params.py
@@ -1,9 +1,12 @@
+from collections import OrderedDict
+
 import numpy as np
 import pytest
 import torch
 
 from flight.learning.params import (
     validate_param_kind,
+    Params,
     ParamKinds,
     infer_param_kind,
     InconsistentParamValuesError,
@@ -15,6 +18,25 @@ def param_data() -> list[float]:
     return [0.0, 1.0, 2.0]
 
 
+def test_params_cls(param_data):
+    p = np.array(param_data)
+    params = Params({f"p{i}": p for i in range(10)})
+
+    params_torch = params.torch()
+    params_np = params.numpy()
+
+    assert isinstance(params_np, dict)
+    assert isinstance(params_torch, dict)
+
+    assert isinstance(params_np, OrderedDict)
+    assert isinstance(params_torch, OrderedDict)
+
+    key = next(iter(params))
+    assert isinstance(params_np[key], np.ndarray)
+    assert isinstance(params_torch[key], torch.Tensor)
+    assert float(params_np[key][0]) == float(params_torch[key][0])
+
+
 def test_validate_numpy_params(param_data):
     p = np.array(param_data)
     params = {f"p{i}": p for i in range(10)}