Commit
Merge pull request #41 from ispras/frontend
Frontend
LukyanovKirillML authored Dec 6, 2024
2 parents 1bf8166 + 380a752 commit 33a185a
Showing 38 changed files with 1,853 additions and 985 deletions.
2 changes: 1 addition & 1 deletion data/multiple-graphs/custom/small/raw/.info
@@ -1,7 +1,7 @@
{
"count": 8,
"nodes": [5, 4, 4, 8, 6, 7, 7, 9],
"directed": false,
"directed": true,
"node_attributes": {
"names": [
"a", "b"
24 changes: 23 additions & 1 deletion experiments/user_dataset.py
@@ -122,9 +122,31 @@ def simgnn():
print("len =", len(gen_dataset))


def nx_to_ptg_converter():
from aux.utils import GRAPHS_DIR
from base.dataset_converter import networkx_to_ptg
from base.datasets_processing import DatasetManager
import networkx as nx

nx_path = GRAPHS_DIR / 'networkx-graphs' / 'input' / 'reply_graph.edgelist'
nx_graph = nx.read_edgelist(nx_path)
nx_graph = nx.to_undirected(nx_graph)
ptg_graph = networkx_to_ptg(nx_graph)
if ptg_graph.x is None:
ptg_graph.x = torch.zeros((ptg_graph.num_nodes, 1))
if ptg_graph.y is None:
ptg_graph.y = torch.zeros(ptg_graph.num_nodes)
ptg_graph.y[0] = 1
ptg_dataset = UserLocalDataset('test_dataset_single', [ptg_graph])
gen_dataset = DatasetManager.register_torch_geometric_local(ptg_dataset)
print(len(gen_dataset))


if __name__ == '__main__':

# local()
converted_local()
# converted_local()
# api()
# simgnn()

nx_to_ptg_converter()
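
For context: the new nx_to_ptg_converter() above relies on a module-level torch import and on the project helpers networkx_to_ptg, UserLocalDataset and DatasetManager.register_torch_geometric_local, none of which appear in this diff. A minimal self-contained sketch of the same conversion idea, using torch_geometric's built-in from_networkx and a toy graph in place of reply_graph.edgelist:

# Minimal sketch (not the project's networkx_to_ptg): turn a NetworkX graph into
# a PyG Data object and add placeholder features/labels when the graph has none.
import networkx as nx
import torch
from torch_geometric.utils import from_networkx

# Toy undirected graph standing in for the edge list read from reply_graph.edgelist.
nx_graph = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])

ptg_graph = from_networkx(nx_graph)              # builds edge_index from the edges
if ptg_graph.x is None:                          # no node features -> constant feature
    ptg_graph.x = torch.zeros((ptg_graph.num_nodes, 1))
if ptg_graph.y is None:                          # no labels -> all zeros, mark node 0
    ptg_graph.y = torch.zeros(ptg_graph.num_nodes)
    ptg_graph.y[0] = 1
print(ptg_graph)                                 # e.g. Data(edge_index=[2, 8], x=[4, 1], y=[4], ...)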
68 changes: 34 additions & 34 deletions metainfo/optimizers_parameters.json
@@ -1,10 +1,10 @@
{"Adam":
{
"lr": ["learn rate", "float", 0.001, {"min": 0.0001, "step": 0.001}, "learning rate"],
"beta1": ["beta1", "float", 0.9, {}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 0.00000001, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 5e-4, {}, "weight decay (L2 penalty)"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 0.00000001, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 5e-4, {"min": 0}, "weight decay (L2 penalty)"],
"amsgrad": ["AMSGrad", "bool", false, {}, "whether to use the AMSGrad"],
"_technical_parameter":
{
@@ -17,22 +17,22 @@
"lr": ["learn rate", "float", 1.0, {"min": 0.0001, "step": 1}, "coefficient that scale delta before it is applied to the parameters"],
"rho": ["rho", "float", 0.9, {}, "coefficient used for computing a running average of squared gradients"],
"eps": ["Epsilon", "float", 1e-6, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"]
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"]
},
"Adagrad":
{
"lr": ["learn rate", "float", 0.01, {"min": 0.0001, "step": 0.01}, "learning rate"],
"lr_decay": ["lr decay", "float", 0, {}, "learning rate decay"],
"eps": ["Epsilon", "float", 1e-10, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"]
"lr_decay": ["lr decay", "float", 0, {"min": 0}, "learning rate decay"],
"eps": ["Epsilon", "float", 1e-10, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"]
},
"AdamW":
{
"lr": ["learn rate", "float", 0.001, {"min": 0.0001, "step": 0.001}, "learning rate"],
"beta1": ["beta1", "float", 0.9, {}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0.01, {}, "weight decay coefficient"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0.01, {"min": 0}, "weight decay coefficient"],
"amsgrad": ["AMSGrad", "bool", false, {}, "whether to use the AMSGrad"],
"maximize": ["maximize", "bool", false, {}, "maximize the params based on the objective, instead of minimizing"],
"_technical_parameter":
@@ -44,9 +44,9 @@
"SparseAdam":
{
"lr": ["learn rate", "float", 0.001, {"min": 0.0001, "step": 0.001}, "learning rate "],
"beta1": ["beta1", "float", 0.9, {}, "coefficients used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficients used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficients used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficients used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"_technical_parameter":
{
"parameters_grouping": [[
@@ -56,10 +56,10 @@
"Adamax":
{
"lr": ["learn rate", "float", 0.002, {"min": 0.0001, "step": 0.001}, "learning rate"],
"beta1": ["beta1", "float", 0.9, {}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0.01, {}, "weight decay (L2 penalty)"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0.01, {"min": 0}, "weight decay (L2 penalty)"],
"maximize": ["maximize", "bool", false, {}, "maximize the params based on the objective, instead of minimizing"],
"_technical_parameter":
{
@@ -73,7 +73,7 @@
"lambd": ["lambd", "float", 0.0001, {}, "decay term"],
"alpha": ["alpha", "float", 0.75, {}, "power for eta update"],
"t0": ["t0", "float", 1000000.0, {}, "point at which to start averaging"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"]
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"]
},
"LBFGS":
{
@@ -85,11 +85,11 @@
"NAdam":
{
"lr": ["learn rate", "float", 0.002, {"min": 0.0001, "step": 0.001}, "learning rate"],
"beta1": ["beta1", "float", 0.9, {}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"],
"momentum_decay": ["Momentum decay", "float", 0.004, {}, "momentum momentum_decay"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"],
"momentum_decay": ["Momentum decay", "float", 0.004, {"min": 0}, "momentum momentum_decay"],
"_technical_parameter":
{
"parameters_grouping": [[
@@ -99,10 +99,10 @@
"RAdam":
{
"lr": ["learn rate", "float", 0.001, {"min": 0.0001, "step": 0.001}, "learning rate"],
"beta1": ["beta1", "float", 0.9, {}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"],
"beta1": ["beta1", "float", 0.9, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"beta2": ["beta2", "float", 0.999, {"min": 0}, "coefficient used for computing running averages of gradient and its square"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"],
"_technical_parameter":
{
"parameters_grouping": [[
@@ -112,9 +112,9 @@
"RMSprop":
{
"lr": ["learn rate", "float", 0.01, {"min": 0.0001, "step": 0.01}, "learning rate"],
"alpha": ["alpha", "float", 0.99, {}, "smoothing constant"],
"eps": ["Epsilon", "float", 1e-8, {}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"],
"alpha": ["alpha", "float", 0.99, {"min": 0}, "smoothing constant"],
"eps": ["Epsilon", "float", 1e-8, {"min": 0}, "term added to the denominator to improve numerical stability"],
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"],
"momentum": ["momentum", "float", 0, {}, "momentum factor"],
"centered": ["centered", "bool", false, {}, "if True, compute the centered RMSProp, the gradient is normalized by an estimation of its variance"]
},
@@ -136,8 +136,8 @@
"SGD":
{
"lr": ["learn rate", "float", 0.001, {"min": 0.0001, "step": 0.001}, "learning rate"],
"weight_decay": ["Weight decay (L2)", "float", 0, {}, "weight decay (L2 penalty)"],
"momentum": ["momentum", "float", 0, {}, "momentum factor"],
"weight_decay": ["Weight decay (L2)", "float", 0, {"min": 0}, "weight decay (L2 penalty)"],
"momentum": ["momentum", "float", 0, {"min": 0}, "momentum factor"],
"dampening": ["dampening", "float", 0, {}, "dampening for momentum"],
"nesterov": ["nesterov", "bool", false, {}, "enables Nesterov momentum"],
"maximize": ["maximize", "bool", false, {}, "maximize the params based on the objective, instead of minimizing"]
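
Each parameter entry in this file has the form [display name, type, default, constraints, description], and this change adds a "min": 0 constraint to the non-negative numeric fields (betas, eps, weight decay, momentum and similar). A hypothetical sketch of how such a constraints dict could be enforced on user-supplied optimizer parameters; the project's actual consumer of this JSON is not part of this diff:

# Hypothetical validator for the [label, type, default, constraints, description]
# entries of metainfo/optimizers_parameters.json; illustrative only.
import json

def check_param(value, spec):
    label, ptype, _default, constraints, _description = spec
    if ptype == "float":
        value = float(value)
        if "min" in constraints and value < constraints["min"]:
            raise ValueError(f"{label} must be >= {constraints['min']}, got {value}")
        # "step" is presumably a UI increment hint and is not validated here
    elif ptype == "bool" and not isinstance(value, bool):
        raise ValueError(f"{label} must be a bool")
    return value

with open("metainfo/optimizers_parameters.json") as f:
    adam = json.load(f)["Adam"]

check_param(0.0005, adam["weight_decay"])        # accepted: 0.0005 >= 0
try:
    check_param(-1.0, adam["weight_decay"])
except ValueError as err:
    print(err)                                   # rejected now that {"min": 0} is set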
4 changes: 2 additions & 2 deletions requirements2.txt
@@ -1,7 +1,7 @@
## These are reqs needed for documentation

numpy==1.26.3
multiprocess
multiprocess==0.70.16
pydantic
tqdm
pyparsing
@@ -23,4 +23,4 @@ opt-einsum
pandas==2.2.0
pylint

torch_geometric==2.3.1
torch_geometric==2.3.1
2 changes: 1 addition & 1 deletion src/aux/configs.py
@@ -199,7 +199,7 @@ def set_defaults_config_pattern_info(

def to_json(
self
):
) -> dict:
""" Special method which allows to use json.dumps() on Config object """
return self.to_dict()

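
The added -> dict annotation on to_json() makes explicit that Config serializes to a plain dictionary. A hedged illustration of the pattern the docstring describes, using a simplified stand-in class rather than the project's real Config:

# Simplified stand-in for the Config in src/aux/configs.py; shows why returning
# a dict from to_json() lets json.dumps() handle Config objects via a default hook.
import json

class Config:
    def __init__(self, **kwargs):
        self._dict = dict(kwargs)

    def to_dict(self) -> dict:
        return dict(self._dict)

    def to_json(self) -> dict:
        """ Special method which allows to use json.dumps() on Config object """
        return self.to_dict()

cfg = Config(lr=0.001, weight_decay=5e-4)
print(json.dumps(cfg, default=lambda o: o.to_json()))   # {"lr": 0.001, "weight_decay": 0.0005}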
22 changes: 12 additions & 10 deletions src/base/custom_datasets.py
@@ -36,41 +36,41 @@ def __init__(
@property
def node_attributes_dir(
self
):
) -> Path:
""" Path to dir with node attributes. """
return self.root_dir / 'raw' / (self.name + '.node_attributes')

@property
def edge_attributes_dir(
self
):
) -> Path:
""" Path to dir with edge attributes. """
return self.root_dir / 'raw' / (self.name + '.edge_attributes')

@property
def labels_dir(
self
):
) -> Path:
""" Path to dir with labels. """
return self.root_dir / 'raw' / (self.name + '.labels')

@property
def edges_path(
self
):
) -> Path:
""" Path to file with edge list. """
return self.root_dir / 'raw' / (self.name + '.ij')

@property
def edge_index_path(
self
):
) -> Path:
""" Path to dir with labels. """
return self.root_dir / 'raw' / (self.name + '.edge_index')

def check_validity(
self
):
) -> None:
""" Check that dataset files (graph and attributes) are valid and consistent with .info.
"""
# Assuming info is OK
@@ -159,6 +159,7 @@ def build(
return

self.dataset_var_data = None
self.stats.update_var_config()
self.dataset_var_config = dataset_var_config
self.dataset = LocalDataset(self.results_dir, process_func=self._create_ptg)

@@ -178,6 +179,7 @@ def _compute_stat(
with open(self.node_attributes_dir / a, 'r') as f:
attr_node_attrs[a] = json.load(f)

# FIXME misha - for single graph [0]
edges = self.edge_index
node_map = (lambda i: str(self.node_map[i])) if self.node_map else lambda i: str(i)

@@ -223,8 +225,8 @@ def _compute_stat(
pearson_corr[i][j] = min(1, max(-1, pc))

return {'attributes': attrs, 'correlations': pearson_corr.tolist()}
else:
return super()._compute_stat(stat)

raise NotImplementedError()

def _compute_dataset_data(
self
@@ -420,7 +422,7 @@ def _iter_nodes(

def _labeling_tensor(
self,
g_ix=None
g_ix: int = None
) -> list:
""" Returns list of labels (not tensors) """
y = []
@@ -445,7 +447,7 @@ def _labeling_tensor(

def _feature_tensor(
self,
g_ix=None
g_ix: int = None
) -> list:
""" Returns list of features (not tensors) for graph g_ix.
"""
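
The annotations added in this file (properties returning Path, g_ix: int = None) only document existing behaviour. A small illustrative example of the pathlib-based property pattern used throughout custom_datasets.py, with a hypothetical class name and directory layout:

# Hypothetical example of the Path-returning property pattern annotated above;
# names and paths are illustrative, not the project's actual class.
from pathlib import Path

class RawFiles:
    def __init__(self, root_dir: Path, name: str):
        self.root_dir = root_dir
        self.name = name

    @property
    def edges_path(self) -> Path:
        """ Path to file with edge list. """
        return self.root_dir / 'raw' / (self.name + '.ij')

files = RawFiles(Path('/tmp/datasets'), 'small')
print(files.edges_path)                          # /tmp/datasets/raw/small.ij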