Commit
refactored codebase (backwards compatible)
maniospas committed Jun 5, 2024
1 parent df14897 commit 5d36776
Showing 147 changed files with 6,207 additions and 3,298 deletions.
23 changes: 17 additions & 6 deletions .github/workflows/tests.yml
@@ -6,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.9]
+        python-version: [3.11]
 
     steps:
       - uses: actions/checkout@v2
@@ -39,8 +39,19 @@ jobs:
       - name: Run tests
         run: |
           pytest -x --cov=pygrank --cov-report=xml tests/test_autorefs.py tests/test_core.py tests/test_measures.py tests/test_filters.py tests/test_autotune.py tests/test_filter_optimization.py tests/test_gnn.py tests/test_postprocessing.py tests/test_benchmarks.py tests/test_fairness.py tests/test_preprocessor.py
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v2
-        with:
-          fail_ci_if_error: true
-          verbose: true
+      - name: Generate coverage badge
+        run: coverage-badge -o coverage.svg -f
+      - name: Commit coverage badge
+        if: ${{ matrix.python-version == '3.11' }}
+        run: |
+          git config --global user.name 'github-actions'
+          git config --global user.email '[email protected]'
+          git add coverage.svg
+          if [ -n "$(git status --porcelain)" ]; then
+            git commit -m 'Updated coverage badge'
+            git push
+          else
+            echo "No changes in coverage to commit"
+          fi
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
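
The conditional-commit step above avoids a failed workflow run when the badge is unchanged: `git status --porcelain` prints nothing when the working tree is clean, so the commit and push only happen when coverage.svg actually differs. A minimal Python sketch of the same check (a hypothetical helper, not part of this commit):

import subprocess

def commit_badge_if_changed(path="coverage.svg"):
    # Stage the badge; --porcelain output is empty when nothing is modified.
    subprocess.run(["git", "add", path], check=True)
    status = subprocess.run(
        ["git", "status", "--porcelain"],
        capture_output=True, text=True, check=True,
    ).stdout
    if status.strip():
        subprocess.run(["git", "commit", "-m", "Updated coverage badge"], check=True)
        subprocess.run(["git", "push"], check=True)
    else:
        print("No changes in coverage to commit")
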
1 change: 1 addition & 0 deletions .gitignore
@@ -45,6 +45,7 @@ __pycache__/
 .Python
 env/
 build/
+.pytest_cache/
 develop-eggs/
 dist/
 downloads/
2 changes: 1 addition & 1 deletion README.md
@@ -12,7 +12,7 @@ Fast node ranking algorithms on large graphs.
 <br><sup><sub>*Externally install non-numpy backends before using them.*</sub></sup>
 
 ![build](https://github.com/MKLab-ITI/pygrank/actions/workflows/tests.yml/badge.svg)
-[![codecov](https://codecov.io/gh/MKLab-ITI/pygrank/branch/master/graph/badge.svg?token=RYZOT4UY8Q)](https://codecov.io/gh/MKLab-ITI/pygrank)
+![coverage](https://github.com/MKLab-ITI/pygrank/actions/workflows/tests.yml/coverage.svg)
 [![Downloads](https://static.pepy.tech/personalized-badge/pygrank?period=total&units=international_system&left_color=black&right_color=orange&left_text=Downloads)](https://pepy.tech/project/pygrank)

# :rocket: New features (after 0.2.10)
252 changes: 185 additions & 67 deletions docgenerator.py

Large diffs are not rendered by default.

31 changes: 22 additions & 9 deletions examples/fairness/bounded_adam.py
@@ -3,7 +3,8 @@
 from tensorflow.python.ops import state_ops
 from tensorflow.python.framework import ops
 from tensorflow.python.training import optimizer
-#from tensorflow.python.eager import context
+
+# from tensorflow.python.eager import context
 from tensorflow.python.ops import resource_variable_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.training import training_ops
@@ -17,7 +18,15 @@ class AdamBounded(optimizer.Optimizer):
     @@__init__
     """
 
-    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, use_locking=False, epsilon=1e-8, name="Adam"):
+    def __init__(
+        self,
+        learning_rate=0.001,
+        beta1=0.9,
+        beta2=0.999,
+        use_locking=False,
+        epsilon=1e-8,
+        name="Adam",
+    ):
         super(AdamBounded, self).__init__(use_locking, name)
         self._lr = learning_rate
         self._beta1 = beta1
@@ -40,8 +49,12 @@ def _create_slots(self, var_list):
         # Create slots for the first and second moments.
         first_var = min(var_list, key=lambda x: x.name)
         with ops.colocate_with(first_var):
-            self._beta1_power = variable_scope.variable(self._beta1, name="beta1_power", trainable=False)
-            self._beta2_power = variable_scope.variable(self._beta2, name="beta2_power", trainable=False)
+            self._beta1_power = variable_scope.variable(
+                self._beta1, name="beta1_power", trainable=False
+            )
+            self._beta2_power = variable_scope.variable(
+                self._beta2, name="beta2_power", trainable=False
+            )
         # Create slots for the first and second moments.
         for v in var_list:
             self._zeros_slot(v, "m1", self._name)
@@ -54,13 +67,13 @@ def _resource_apply_dense(self, grad, var):
         beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
         beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
         v = self.get_slot(var, "v1")
-        v_t = v.assign(beta2_t * v + (1. - beta2_t) * grad**2)
+        v_t = v.assign(beta2_t * v + (1.0 - beta2_t) * grad**2)
         m = self.get_slot(var, "m1")
-        m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
+        m_t = m.assign(beta1_t * m + (1.0 - beta1_t) * grad)
         alpha_t = tf.sqrt(1 - beta2_power) / (1 - beta1_power)
 
-        g_t = (m_t*alpha_t) / (tf.sqrt(v_t) + self.epsilon)
-        #g_t = tf.clip_by_value(g_t, -1, 1)
-        g_t = tf.where(tf.abs(g_t) < tf.abs(grad)*0.01, grad*0.01, g_t)
+        g_t = (m_t * alpha_t) / (tf.sqrt(v_t) + self.epsilon)
+        # g_t = tf.clip_by_value(g_t, -1, 1)
+        g_t = tf.where(tf.abs(g_t) < tf.abs(grad) * 0.01, grad * 0.01, g_t)
         var_update = state_ops.assign_sub(var, lr_t * g_t)
         return control_flow_ops.group(*[var_update, v_t, m_t])
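
Beyond reformatting, the key behavior in this file is the lower bound on the update: where Adam's step would shrink below 1% of the raw gradient, the raw gradient scaled by 0.01 is used instead, so updates never vanish entirely. A minimal NumPy sketch of that bounding rule (illustrative only; the class above operates on TensorFlow tensors):

import numpy as np

def bounded_adam_step(m_t, v_t, grad, alpha_t, epsilon=1e-8):
    # Standard Adam step direction from the bias-corrected moment estimates.
    g_t = (m_t * alpha_t) / (np.sqrt(v_t) + epsilon)
    # Lower-bound the step: wherever it falls under 1% of the raw gradient,
    # substitute that 1% fraction so progress never stalls.
    return np.where(np.abs(g_t) < np.abs(grad) * 0.01, grad * 0.01, g_t)
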
20 changes: 16 additions & 4 deletions examples/fairness/main.py
@@ -1,8 +1,20 @@
 import pygrank as pg
 import setting
 
 pg.split()
 
 print("----------- PPR.85 -----------")
-filter = pg.PageRank(0.85, max_iters=10000, tol=1.E-9, assume_immutability=True, normalization="symmetric")
-setting.experiment(filter, ["citeseer"], pg.AUC, [.1, .2, .3, .4, .5], fix_personalization=True, repeats=2, sensitive_group=100)
+filter = pg.PageRank(
+    0.85,
+    max_iters=10000,
+    tol=1.0e-9,
+    assume_immutability=True,
+    normalization="symmetric",
+)
+setting.experiment(
+    filter,
+    ["citeseer"],
+    pg.AUC,
+    [0.1, 0.2, 0.3, 0.4, 0.5],
+    fix_personalization=True,
+    repeats=2,
+    sensitive_group=100,
+)
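
For reference, a filter configured like the one above can also be applied directly; a minimal sketch assuming pygrank's usual call convention, where the algorithm is invoked with a graph and a personalization dict of seed nodes (the toy graph below is hypothetical):

import networkx as nx
import pygrank as pg

graph = nx.Graph([("A", "B"), ("B", "C"), ("C", "A"), ("C", "D")])
algorithm = pg.PageRank(
    0.85, max_iters=10000, tol=1.0e-9,
    assume_immutability=True, normalization="symmetric",
)
scores = algorithm(graph, {"A": 1})  # seed node "A" gets personalization 1
print({node: scores[node] for node in graph})
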
94 changes: 66 additions & 28 deletions examples/fairness/setting.py
@@ -2,34 +2,72 @@
 from tensortune import Tensortune
 
 
-def experiment(filter, datasets, task_metric, fraction_of_training, fix_personalization=True, repeats=1, sensitive_group=100):
+def experiment(
+    filter,
+    datasets,
+    task_metric,
+    fraction_of_training,
+    fix_personalization=True,
+    repeats=1,
+    sensitive_group=100,
+):
     if hasattr(filter, "alpha"):
-        algorithms = {"None": filter,
-                      "LFPRU": pg.LFPR(alpha=filter.alpha, max_iters=10000, tol=filter.convergence.tol, redistributor="uniform", fix_personalization=fix_personalization),
-                      "LFPRP": pg.LFPR(alpha=filter.alpha, max_iters=10000, tol=filter.convergence.tol, redistributor="original", fix_personalization=fix_personalization),
-                      "LFPRO": pg.AdHocFairness(filter, "O"),
-                      "Mult": pg.AdHocFairness(filter, "B"),
-                      #"FPers": pg.FairPersonalizer(filter, fix_personalization=fix_personalization),
-                      "Tensortune": Tensortune(filter, fix_personalization=fix_personalization)
-                      }
+        algorithms = {
+            "None": filter,
+            "LFPRU": pg.LFPR(
+                alpha=filter.alpha,
+                max_iters=10000,
+                tol=filter.convergence.tol,
+                redistributor="uniform",
+                fix_personalization=fix_personalization,
+            ),
+            "LFPRP": pg.LFPR(
+                alpha=filter.alpha,
+                max_iters=10000,
+                tol=filter.convergence.tol,
+                redistributor="original",
+                fix_personalization=fix_personalization,
+            ),
+            "LFPRO": pg.AdHocFairness(filter, "O"),
+            "Mult": pg.AdHocFairness(filter, "B"),
+            # "FPers": pg.FairPersonalizer(filter, fix_personalization=fix_personalization),
+            "Tensortune": Tensortune(filter, fix_personalization=fix_personalization),
+        }
     else:
-        algorithms = {"None": filter,
-                      "LFPRO": pg.AdHocFairness(filter, "O"),
-                      "Mult": pg.AdHocFairness(filter, "B"),
-                      "FPers": pg.FairPersonalizer(filter, fix_personalization=fix_personalization),
-                      "Tensortune": Tensortune(filter, fix_personalization=fix_personalization, fairness_weight=1)
-                      }
+        algorithms = {
+            "None": filter,
+            "LFPRO": pg.AdHocFairness(filter, "O"),
+            "Mult": pg.AdHocFairness(filter, "B"),
+            "FPers": pg.FairPersonalizer(
+                filter, fix_personalization=fix_personalization
+            ),
+            "Tensortune": Tensortune(
+                filter, fix_personalization=fix_personalization, fairness_weight=1
+            ),
+        }
 
-    pg.benchmark_print(pg.benchmark_average(pg.benchmark(algorithms,
-                                                         pg.load_datasets_multiple_communities(datasets,
-                                                                                               max_group_number=sensitive_group,
-                                                                                               directed=False),
-                                                         metrics=task_metric
-                                                         if not fix_personalization
-                                                         else lambda personalization, exclude: task_metric(personalization),  # [pg.Utility(pg.Mabs, filter)],
-                                                         sensitive=pg.pRule if not fix_personalization
-                                                         else lambda personalization, exclude: pg.pRule(personalization),
-                                                         fraction_of_training=fraction_of_training,
-                                                         seed=list(range(repeats))
-                                                         )),
-                       delimiter=" & ", end_line="\\\\", decimals=3)
+    pg.benchmark_print(
+        pg.benchmark_average(
+            pg.benchmark(
+                algorithms,
+                pg.load_datasets_multiple_communities(
+                    datasets, max_group_number=sensitive_group, directed=False
+                ),
+                metrics=(
+                    task_metric
+                    if not fix_personalization
+                    else lambda personalization, exclude: task_metric(personalization)
+                ),  # [pg.Utility(pg.Mabs, filter)],
+                sensitive=(
+                    pg.pRule
+                    if not fix_personalization
+                    else lambda personalization, exclude: pg.pRule(personalization)
+                ),
+                fraction_of_training=fraction_of_training,
+                seed=list(range(repeats)),
+            )
+        ),
+        delimiter=" & ",
+        end_line="\\\\",
+        decimals=3,
+    )
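
Note that `experiment` branches on `hasattr(filter, "alpha")`: filters exposing `alpha` are additionally compared against the LFPR variants, while all others are compared only against the ad hoc fairness and personalization-based baselines. A hedged sketch of invoking it with a filter that lacks `alpha` (assuming pygrank's `pg.HeatKernel` and the setting module above):

import pygrank as pg
import setting

# HeatKernel has no alpha attribute, so the else-branch of experiment()
# is taken and the LFPRU/LFPRP baselines are skipped.
filter = pg.HeatKernel(t=3, max_iters=10000, tol=1.0e-9)
setting.experiment(
    filter, ["citeseer"], pg.AUC, [0.1, 0.3, 0.5],
    fix_personalization=True, repeats=2, sensitive_group=100,
)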
