Adding intersectional bias mitigation to AIF360 #538

Open
wants to merge 3 commits into base: main
1 change: 1 addition & 0 deletions aif360/algorithms/__init__.py
@@ -1 +1,2 @@
from aif360.algorithms.transformer import Transformer, addmetadata
from aif360.algorithms.intersectional_fairness import IntersectionalFairness
1,003 changes: 1,003 additions & 0 deletions aif360/algorithms/intersectional_fairness.py

Large diffs are not rendered by default.

121 changes: 121 additions & 0 deletions aif360/algorithms/isf_helpers/inprocessing/adversarial_debiasing.py
Collaborator: is there any way we can just use the AdversarialDebiasing class directly instead of this wrapper? this doesn't seem to be doing much.
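For comparison, a minimal sketch of calling AIF360's AdversarialDebiasing directly, assuming ds_train and ds_test are BinaryLabelDatasets with one protected attribute (the group encodings here are illustrative):

import tensorflow as tf
from aif360.algorithms.inprocessing import AdversarialDebiasing

tf.compat.v1.disable_eager_execution()
sess = tf.compat.v1.Session()
model = AdversarialDebiasing(
    privileged_groups=[{'sex': 1.0}],    # assumed group encoding
    unprivileged_groups=[{'sex': 0.0}],  # assumed group encoding
    scope_name='debiased_classifier',
    debias=True,
    sess=sess)
model.fit(ds_train)
ds_pred = model.predict(ds_test)  # returns a dataset carrying predicted labels
sess.close()
tf.compat.v1.reset_default_graph()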

@@ -0,0 +1,121 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2023 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing as AD
import tensorflow as tf

from aif360.algorithms.isf_helpers.inprocessing.inprocessing import InProcessing


tf.compat.v1.disable_eager_execution()


class AdversarialDebiasing(InProcessing):

"""
Debiasing intersectional bias with adversarial learning(AD) called by ISF.

Parameters
----------
options : dictionary
parameter of AdversarialDebiasing
num_epochs: trials of model training
batch_size:Batch size for model training

Notes
-----
https://aif360.readthedocs.io/en/v0.2.3/_modules/aif360/algorithms/inprocessing/adversarial_debiasing.html

"""

def __init__(self, options):
super().__init__()
self.ds_train = None
self.options = options

def fit(self, ds_train):
"""
Save training dataset

Attributes
----------
ds_train : Dataset
Dataset for training
"""
self.ds_train = ds_train.copy(deepcopy=True)

def predict(self, ds_test):
"""
Model learning with debias using the training dataset imported by fit(), and predict using that model

Parameters
----------
ds_test : Dataset
Dataset for prediction

Returns
-------
ds_predict : numpy.ndarray
Predicted label
"""
ikey = ds_test.protected_attribute_names[0]
priv_g = [{ikey: ds_test.privileged_protected_attributes[0]}]
upriv_g = [{ikey: ds_test.unprivileged_protected_attributes[0]}]
sess = tf.compat.v1.Session()
model = AD(
privileged_groups=priv_g,
unprivileged_groups=upriv_g,
scope_name='debiased_classifier',
debias=True,
sess=sess)
model.fit(self.ds_train)
ds_predict = model.predict(ds_test)
sess.close()
tf.compat.v1.reset_default_graph()
return ds_predict

def bias_predict(self, ds_train):
"""
Model learning and prediction using AdversarialDebiasing of AIF360 without debias.

Parameters
----------
ds_train : Dataset
Dataset for training and prediction

Returns
-------
ds_predict : numpy.ndarray
Predicted label
"""
ikey = ds_train.protected_attribute_names[0]
priv_g = [{ikey: ds_train.privileged_protected_attributes[0]}]
upriv_g = [{ikey: ds_train.unprivileged_protected_attributes[0]}]
sess = tf.compat.v1.Session()
model = AD(
privileged_groups=priv_g,
unprivileged_groups=upriv_g,
scope_name='plain_classifier',
debias=False,
sess=sess,
num_epochs=self.options['num_epochs'],
batch_size=self.options['batch_size'])
model.fit(ds_train)
ds_predict = model.predict(ds_train)
sess.close()
tf.compat.v1.reset_default_graph()
return ds_predict
Comment on lines +91 to +121
Collaborator: unused
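For context on this thread, a sketch of the wrapper's call pattern as written, assuming ds_train and ds_test are single-attribute BinaryLabelDatasets; note that in this revision the options dict is consumed only by bias_predict(), the method flagged as unused:

from aif360.algorithms.isf_helpers.inprocessing.adversarial_debiasing import AdversarialDebiasing

ad = AdversarialDebiasing(options={'num_epochs': 50, 'batch_size': 128})  # option values assumed
ad.fit(ds_train)               # stores a deep copy of the training data only
ds_pred = ad.predict(ds_test)  # trains the debiased model, then predicts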

52 changes: 52 additions & 0 deletions aif360/algorithms/isf_helpers/inprocessing/inprocessing.py
@@ -0,0 +1,52 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2023 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABCMeta
from abc import abstractmethod


class InProcessing(metaclass=ABCMeta):
"""
Abstract Base Class for all inprocessing techniques.
"""
def __init__(self):
super().__init__()
# The following line is needed if we decide to expand support for more inprocessing algorithms besides adversarial debiasing:
#self.model = None

@abstractmethod
def fit(self, ds_train):
"""
Train a model on the input.

Parameters
----------
ds_train : Dataset
Training Dataset.
"""
pass

@abstractmethod
def predict(self, ds):
"""
Predict on the input.

Parameters
----------
ds : Dataset
Dataset to predict.
"""
pass
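To make the contract concrete, a hypothetical minimal subclass (illustrative only, not part of this PR):

import numpy as np

from aif360.algorithms.isf_helpers.inprocessing.inprocessing import InProcessing


class MajorityLabel(InProcessing):
    """Toy in-processing stub: memorizes the majority training label and predicts it everywhere."""

    def __init__(self):
        super().__init__()
        self.label = None

    def fit(self, ds_train):
        # Remember the most frequent label in the training data.
        labels, counts = np.unique(ds_train.labels, return_counts=True)
        self.label = labels[counts.argmax()]

    def predict(self, ds):
        # Return a copy of the dataset with every label set to the stored one.
        ds_pred = ds.copy(deepcopy=True)
        ds_pred.labels[:] = self.label
        return ds_pred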
161 changes: 161 additions & 0 deletions aif360/algorithms/isf_helpers/isf_analysis/intersectional_bias.py
@@ -0,0 +1,161 @@
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2023 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns

from aif360.algorithms.isf_helpers.isf_metrics.disparate_impact import DisparateImpact
from aif360.algorithms.isf_helpers.isf_utils.common import create_multi_group_label


def calc_intersectionalbias(dataset, metric="DisparateImpact"):
"""
Calculate intersectional bias(DisparateImpact) by more than one sensitive attributes

Parameters
----------
dataset : StructuredDataset
A dataset containing more than one sensitive attributes

metric : str
Fairness metric name
["DisparateImpact"]

Returns
-------
df_result : DataFrame
Intersectional bias(DisparateImpact)
"""

df = dataset.convert_to_dataframe()[0]
label_info = {dataset.label_names[0]: dataset.favorable_label}

if metric == "DisparateImpact":
fs = DisparateImpact()
else:
raise ValueError("metric name not in the list of allowed metrics")

df_result = pd.DataFrame(columns=[metric])
for multi_group_label in create_multi_group_label(dataset)[0]:
protected_attr_info = multi_group_label[0]
di = fs.bias_predict(df,
protected_attr_info=protected_attr_info,
label_info=label_info)
name = ''
for k, v in protected_attr_info.items():
name += k + " = " + str(v) + ","
df_result.loc[name[:-1]] = di

return df_result
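A short usage sketch, assuming the standard AdultDataset, whose protected attributes default to 'race' and 'sex':

from aif360.datasets import AdultDataset
from aif360.algorithms.isf_helpers.isf_analysis.intersectional_bias import calc_intersectionalbias

ds = AdultDataset()
df_di = calc_intersectionalbias(ds, metric="DisparateImpact")
print(df_di)  # one disparate-impact row per subgroup, e.g. "race = 1.0,sex = 1.0"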
Comment on lines +27 to +65
Collaborator: is it possible to use the built-in one_vs_rest() here?

y = df.set_index(dataset.protected_attribute_names)[dataset.label_names]
one_vs_rest(disparate_impact_ratio, y)



def plot_intersectionalbias_compare(ds_bef, ds_aft, vmax=1, vmin=0, center=0,
metric="DisparateImpact",
title={"left": "before", "right": "after"},
filename=None):
"""
Draw intersectional bias before and after mitigation as side-by-side heat maps.

Parameters
----------
ds_bef : StructuredDataset
Dataset containing two sensitive attributes (left figure)
ds_aft : StructuredDataset
Dataset containing two sensitive attributes (right figure)
metric : str
Fairness metric name
["DisparateImpact"]
title : dictionary, optional
Graph titles for the left and right figures
filename : str, optional
Output file name (PNG)
e.g. "./result/pict.png"
"""

df_bef = calc_intersectionalbias_matrix(ds_bef, metric)
df_aft = calc_intersectionalbias_matrix(ds_aft, metric)

gs = GridSpec(1, 2)
ss1 = gs.new_subplotspec((0, 0))
ss2 = gs.new_subplotspec((0, 1))

ax1 = plt.subplot(ss1)
ax2 = plt.subplot(ss2)

ax1.set_title(title['left'])
sns.heatmap(df_bef, ax=ax1, vmax=vmax, vmin=vmin, center=center, annot=True, cmap='hot')

ax2.set_title(title['right'])
sns.heatmap(df_aft, ax=ax2, vmax=vmax, vmin=vmin, center=center, annot=True, cmap='hot')

if filename is not None:
plt.savefig(filename, format="png", dpi=300)
plt.show()
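A usage sketch, assuming ds_orig and ds_mitigated each carry exactly two sensitive attributes (the file path is illustrative):

plot_intersectionalbias_compare(
    ds_orig, ds_mitigated,
    title={"left": "before", "right": "after"},
    filename="./result/di_compare.png")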


def calc_intersectionalbias_matrix(dataset, metric="DisparateImpact"):
"""
Comparison drawing of intersectional bias in heat map

Parameters
----------
dataset : StructuredDataset
Dataset containing two sensitive attributes
metric : str
Fairness metric name
["DisparateImpact"]

Returns
-------
df_result : DataFrame
Intersectional bias(DisparateImpact)
"""

protect_attr = dataset.protected_attribute_names

if len(protect_attr) != 2:
raise ValueError("specify 2 sensitive attributes.")

if metric == "DisparateImpact":
fs = DisparateImpact()
else:
raise ValueError("metric name not in the list of allowed metrics")

df = dataset.convert_to_dataframe()[0]
label_info = {dataset.label_names[0]: dataset.favorable_label}

protect_attr0_values = list(set(df[protect_attr[0]]))
protect_attr1_values = list(set(df[protect_attr[1]]))

df_result = pd.DataFrame(columns=protect_attr1_values)

for val0 in protect_attr0_values:
tmp_li = []
col_list = []
for val1 in protect_attr1_values:
di = fs.bias_predict(df,
protected_attr_info={protect_attr[0]: val0, protect_attr[1]: val1},
label_info=label_info)
tmp_li += [di]
col_list += [protect_attr[1]+"="+str(val1)]

df_result.loc[protect_attr[0]+"="+str(val0)] = tmp_li
df_result = df_result.set_axis(col_list, axis=1)

return df_result
Comment on lines +112 to +161
Collaborator: this seems largely redundant with calc_intersectionalbias() above but pivoted. can't we just accomplish this in pandas?
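As a rough illustration of the pandas route, a sketch that assumes DI(subgroup) = P(favorable | subgroup) / P(favorable | rest), mirroring what DisparateImpact.bias_predict appears to compute (that module is not shown in this diff):

import pandas as pd

df, _ = dataset.convert_to_dataframe()
a0, a1 = dataset.protected_attribute_names
fav = df[dataset.label_names[0]] == dataset.favorable_label

def di(mask):
    # Favorable-outcome rate inside the subgroup over the rate outside it.
    return fav[mask].mean() / fav[~mask].mean()

mat = pd.Series({(v0, v1): di((df[a0] == v0) & (df[a1] == v1))
                 for v0 in df[a0].unique()
                 for v1 in df[a1].unique()}).unstack()
# mat now has a0 values as rows and a1 values as columns, like calc_intersectionalbias_matrix()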
