diff --git a/docs/build/html/_modules/domainlab/algos/trainers/mmd_base.html b/docs/build/html/_modules/domainlab/algos/trainers/mmd_base.html
new file mode 100644
index 000000000..2156f28ef
--- /dev/null
+++ b/docs/build/html/_modules/domainlab/algos/trainers/mmd_base.html
@@ -0,0 +1,426 @@
Source code for domainlab.algos.trainers.mmd_base

+"""
+Alexej, Xudong
+"""
+import torch
+from domainlab.algos.trainers.train_basic import TrainerBasic
+
+
+
+class TrainerMMDBase(TrainerBasic):
+    """
+    base class for trainers built on maximum mean discrepancy (MMD)
+    """
+    def my_cdist(self, x1, x2):
+        """
+        pairwise squared euclidean distance, used by the Gaussian kernel
+        """
+        # sum of squares along the last (feature) dimension
+        x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
+        x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
+        # x1_norm and x2_norm are [batchsize, 1]
+        # torch.addmm: matrix product of the 2nd and 3rd arguments
+        # (x1[batchsize, dimfeat] @ x2.T[dimfeat, batchsize]), scaled by
+        # alpha, added to the first argument;
+        # x2_norm.transpose(-2, -1) is a row vector, x1_norm a column vector,
+        # so res = ||x1||^2 - 2 * x1 @ x2.T + ||x2||^2
+        res = torch.addmm(x2_norm.transpose(-2, -1),
+                          x1,
+                          x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
+        return res.clamp_min_(1e-30)
+
+    def gaussian_kernel(self, x, y):
+        """
+        multi-bandwidth Gaussian kernel for MMD
+        """
+        gamma = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
+        dist = self.my_cdist(x, y)
+        tensor = torch.zeros_like(dist)
+        for g in gamma:
+            tensor.add_(torch.exp(dist.mul(-g)))
+        return tensor
+
+    def mmd(self, x, y):
+        """
+        maximum mean discrepancy
+        """
+        kxx = self.gaussian_kernel(x, x).mean()
+        kyy = self.gaussian_kernel(y, y).mean()
+        kxy = self.gaussian_kernel(x, y).mean()
+        return kxx + kyy - 2 * kxy
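For intuition, here is a minimal standalone sketch of the same multi-bandwidth MMD computation on toy tensors; it mirrors the methods above but is not part of the domainlab source (shapes and values are illustrative):

import torch

def pairwise_sq_dists(x1, x2):
    # ||x1||^2 - 2 * x1 @ x2.T + ||x2||^2, as in my_cdist above
    x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
    x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
    res = torch.addmm(x2_norm.transpose(-2, -1), x1,
                      x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
    return res.clamp_min_(1e-30)

def multi_gaussian_kernel(x, y, gammas=(0.001, 0.01, 0.1, 1, 10, 100, 1000)):
    # sum of Gaussian kernels over several bandwidths, as in gaussian_kernel above
    dist = pairwise_sq_dists(x, y)
    return sum(torch.exp(dist * (-g)) for g in gammas)

def mmd(x, y):
    # empirical (biased) estimate of the squared MMD
    return (multi_gaussian_kernel(x, x).mean()
            + multi_gaussian_kernel(y, y).mean()
            - 2 * multi_gaussian_kernel(x, y).mean())

feat_a = torch.randn(32, 64)        # toy features from split A
feat_b = torch.randn(32, 64) + 0.5  # mean-shifted toy features from split B
print(mmd(feat_a, feat_b))          # larger shift -> larger discrepancy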
\ No newline at end of file
diff --git a/docs/build/html/_modules/domainlab/algos/trainers/train_causIRL.html b/docs/build/html/_modules/domainlab/algos/trainers/train_causIRL.html
new file mode 100644
index 000000000..1f484ff7f
--- /dev/null
+++ b/docs/build/html/_modules/domainlab/algos/trainers/train_causIRL.html
@@ -0,0 +1,455 @@

Source code for domainlab.algos.trainers.train_causIRL

+"""
+Alex, Xudong
+"""
+import numpy as np
+import torch
+from domainlab.algos.trainers.train_basic import TrainerBasic
+
+
+
+class TrainerCausalIRL(TrainerBasic):
+    """
+    causal matching
+    """
+    def my_cdist(self, x1, x2):
+        """
+        pairwise squared euclidean distance, used by the Gaussian kernel
+        """
+        # sum of squares along the last (feature) dimension
+        x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
+        x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
+        # x1_norm and x2_norm are [batchsize, 1]
+        # torch.addmm: matrix product of the 2nd and 3rd arguments
+        # (x1[batchsize, dimfeat] @ x2.T[dimfeat, batchsize]), scaled by
+        # alpha, added to the first argument;
+        # so res = ||x1||^2 - 2 * x1 @ x2.T + ||x2||^2
+        res = torch.addmm(x2_norm.transpose(-2, -1),
+                          x1,
+                          x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
+        return res.clamp_min_(1e-30)
+
+    def gaussian_kernel(self, x, y):
+        """
+        multi-bandwidth Gaussian kernel for MMD
+        """
+        gamma = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
+        dist = self.my_cdist(x, y)
+        tensor = torch.zeros_like(dist)
+        for g in gamma:
+            tensor.add_(torch.exp(dist.mul(-g)))
+        return tensor
+
+    def mmd(self, x, y):
+        """
+        maximum mean discrepancy
+        """
+        kxx = self.gaussian_kernel(x, x).mean()
+        kyy = self.gaussian_kernel(y, y).mean()
+        kxy = self.gaussian_kernel(x, y).mean()
+        return kxx + kyy - 2 * kxy
+
+    def tr_batch(self, tensor_x, tensor_y, tensor_d, others, ind_batch, epoch):
+        """
+        optimize the neural network one step upon a mini-batch of data
+        """
+        self.before_batch(epoch, ind_batch)
+        tensor_x, tensor_y, tensor_d = (
+            tensor_x.to(self.device),
+            tensor_y.to(self.device),
+            tensor_d.to(self.device),
+        )
+        self.optimizer.zero_grad()
+
+        features = self.get_model().extract_semantic_feat(tensor_x)
+
+        # split the batch at a random position and penalize the MMD
+        # between the two halves
+        pos_batch_break = np.random.randint(0, tensor_x.shape[0])
+        first = features[:pos_batch_break]
+        second = features[pos_batch_break:]
+        if len(first) > 1 and len(second) > 1:
+            penalty = torch.nan_to_num(self.mmd(first, second))
+        else:
+            penalty = torch.tensor(0)
+        loss = self.cal_loss(tensor_x, tensor_y, tensor_d, others)
+        loss = loss + penalty
+        loss.backward()
+        self.optimizer.step()
+        self.after_batch(epoch, ind_batch)
+        self.counter_batch += 1
\ No newline at end of file
diff --git a/docs/build/html/_modules/domainlab/algos/trainers/train_coral.html b/docs/build/html/_modules/domainlab/algos/trainers/train_coral.html
new file mode 100644
index 000000000..773bba964
--- /dev/null
+++ b/docs/build/html/_modules/domainlab/algos/trainers/train_coral.html
@@ -0,0 +1,427 @@

Source code for domainlab.algos.trainers.train_coral

+"""
+Deep CORAL: Correlation Alignment for Deep
+Domain Adaptation
+[au] Alexej, Xudong
+"""
+from domainlab.algos.trainers.mmd_base import TrainerMMDBase
+from domainlab.utils.hyperparameter_retrieval import get_gamma_reg
+
+
+
+class TrainerCoral(TrainerMMDBase):
+    """
+    cross-domain MMD
+    """
+    def cross_domain_mmd(self, tuple_data_domains_batch):
+        """
+        domain-pairwise MMD
+        """
+        list_cross_domain_mmd = []
+        list_domain_erm_loss = []
+        num_domains = len(tuple_data_domains_batch)
+        for ind_domain_a in range(num_domains):
+            data_a, y_a, *_ = tuple_data_domains_batch[ind_domain_a]
+            feat_a = self.get_model().extract_semantic_feat(data_a)
+            list_domain_erm_loss.append(
+                sum(self.get_model().cal_task_loss(data_a, y_a)))
+            # iterate over domain pairs; the a == b pair contributes zero MMD
+            for ind_domain_b in range(ind_domain_a, num_domains):
+                data_b, *_ = tuple_data_domains_batch[ind_domain_b]
+                feat_b = self.get_model().extract_semantic_feat(data_b)
+                mmd = self.mmd(feat_a, feat_b)
+                list_cross_domain_mmd.append(mmd)
+        return list_domain_erm_loss, list_cross_domain_mmd
+
+    def tr_epoch(self, epoch):
+        list_loaders = list(self.dict_loader_tr.values())
+        loaders_zip = zip(*list_loaders)
+        self.model.train()
+        self.model.convert4backpack()
+        self.epo_loss_tr = 0
+
+        for ind_batch, tuple_data_domains_batch in enumerate(loaders_zip):
+            self.optimizer.zero_grad()
+            list_domain_erm_loss, list_cross_domain_mmd = \
+                self.cross_domain_mmd(tuple_data_domains_batch)
+            # total loss: summed per-domain ERM loss plus the weighted sum
+            # of all pairwise MMD terms
+            loss = sum(list_domain_erm_loss) + \
+                get_gamma_reg(self.aconf, self.name) * sum(list_cross_domain_mmd)
+            loss.backward()
+            self.optimizer.step()
+            self.epo_loss_tr += loss.detach().item()
+            self.after_batch(epoch, ind_batch)
+
+        flag_stop = self.observer.update(epoch)  # notify observer
+        return flag_stop
\ No newline at end of file
diff --git a/docs/build/html/doc_coral.html b/docs/build/html/doc_coral.html
new file mode 100644
index 000000000..19f3e33ab
--- /dev/null
+++ b/docs/build/html/doc_coral.html
@@ -0,0 +1,458 @@

Deep CORAL


Deep CORAL: Correlation Alignment for Deep Domain Adaptation


A nonlinear transformation that aligns the correlations of layer activations in deep neural networks. Reference: https://arxiv.org/pdf/1607.01719
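For reference, a minimal sketch of the covariance-alignment loss from the paper; note that the TrainerCoral source above realizes the alignment with a multi-bandwidth MMD instead, so this snippet is illustrative rather than part of the domainlab source:

import torch

def coral_loss(feat_src, feat_tgt):
    # squared Frobenius distance between the feature covariance matrices
    # of two domains, normalized as in the paper (Eq. 1)
    dim = feat_src.size(1)
    cov_src = torch.cov(feat_src.T)  # [dim, dim]
    cov_tgt = torch.cov(feat_tgt.T)
    return ((cov_src - cov_tgt) ** 2).sum() / (4 * dim * dim)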


Examples

python main_out.py --te_d 0 --tr_d 3 7 --bs=32 --epos=1 --task=mnistcolor10 --model=erm --nname=conv_bn_pool_2 --trainer=coral
\ No newline at end of file
diff --git a/docs/build/html/doc_irl.html b/docs/build/html/doc_irl.html
new file mode 100644
index 000000000..b3351991c
--- /dev/null
+++ b/docs/build/html/doc_irl.html
@@ -0,0 +1,437 @@

Causal IRL


Invariant Causal Mechanisms through Distribution Matching


Source: https://arxiv.org/pdf/2206.11646
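The core idea, as implemented in TrainerCausalIRL.tr_batch above, is to split each mini-batch's features at a random position and penalize the MMD between the two halves. A standalone sketch on toy tensors (mmd as in the MMD sketch earlier; names and shapes are illustrative):

import numpy as np
import torch

features = torch.randn(32, 64)  # semantic features of one mini-batch
pos = np.random.randint(0, features.shape[0])  # random split point
first, second = features[:pos], features[pos:]
if len(first) > 1 and len(second) > 1:
    penalty = torch.nan_to_num(mmd(first, second))  # mmd from the earlier sketch
else:
    penalty = torch.tensor(0.0)  # degenerate split: no penalty
# the trainer then adds this penalty to the task loss before backpropagation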


Example

python main_out.py --te_d 0 --tr_d 3 7 --bs=32 --debug --task=mnistcolor10 --model=erm --nname=conv_bn_pool_2 --trainer=causalirl
\ No newline at end of file
diff --git a/docs/build/html/doc_model.html b/docs/build/html/doc_model.html
new file mode 100644
index 000000000..c79dc234e
--- /dev/null
+++ b/docs/build/html/doc_model.html
@@ -0,0 +1,422 @@

Model Specification


DomainLab is built to be easily extended with new models. There are two options: implement a new model from scratch and add it alongside the existing models, or extend the custom-model abstract class. This guide outlines the necessary steps for both approaches.


Option 1: Extend the Custom Model Class


Create a new model by extending AModelCustom.


Because AModelCustom extends AModelClassif, the only method a custom model needs to implement is dict_net_module_na2arg_na, which returns a dictionary mapping each PyTorch module name to its command-line argument name. In addition, it is necessary to specify a function called get_node_na in the same Python file, which returns the custom algorithm builder, as shown here.
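A hedged sketch of what this can look like; the import path, module name, and argument name are assumptions for illustration:

from domainlab.models.a_model_custom import AModelCustom  # assumed path

class ModelMyCustom(AModelCustom):
    @property
    def dict_net_module_na2arg_na(self):
        # map the PyTorch module name to its command-line argument name
        return {"net_invar_feat": "nname_my_net"}

def get_node_na():
    # return the custom algorithm builder (see "Create a Builder" below)
    return NodeAlgoBuilderMyCustom  # hypothetical builder class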


To run the custom model, follow the examples here under ‘Custom algorithm defined in external python file’. It also shows an example of using additional command line arguments.


Option 2: Add alongside existing models


If the repository is cloned and it is possible to add files to the source code, one can extend one of the other base classes: AModel or AModelClassif.


Step 1: Implement Required Abstract Methods


Implement all abstract methods from the base class. For AModel, it is required to implement the following methods:


- cal_task_loss(self, tensor_x, tensor_y): Computes the loss for the primary task, which for classification could be cross-entropy.
- _cal_reg_loss(self, tensor_x, tensor_y, tensor_d, others=None): Calculates the task-independent regularization loss.
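A hedged sketch of an AModelClassif subclass implementing both methods; the import path, the cal_logit_y helper, and the per-sample-loss convention are assumptions (the trainers above aggregate cal_task_loss via sum()):

import torch.nn.functional as F
from domainlab.models.a_model_classif import AModelClassif  # assumed path

class ModelMyMethod(AModelClassif):
    def cal_task_loss(self, tensor_x, tensor_y):
        # per-sample cross-entropy for classification
        logits = self.cal_logit_y(tensor_x)  # assumed base-class helper
        return F.cross_entropy(logits, tensor_y, reduction="none")

    def _cal_reg_loss(self, tensor_x, tensor_y, tensor_d, others=None):
        # placeholder task-independent regularizer: L2 norm of the features
        feat = self.extract_semantic_feat(tensor_x)
        return feat.pow(2).mean()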


Step 2: Add Additional Arguments


If additional arguments for the model are needed, it is possible to specify a Python file with the argument-parsing functionality, as done here. The specified function then needs to be called from arg_parser.py in the DomainLab root directory so that the parameters are added to the argument dictionary.
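A hypothetical sketch of such a parsing function; the function and flag names are placeholders:

def add_args2parser_mymethod(parser):
    # extend the main argument parser with model-specific flags
    parser.add_argument("--gamma_my_reg", type=float, default=1.0,
                        help="weight of the custom regularization loss")
    return parser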


Step 3: Create a Builder


After specifying the model and retrieving the correct parameters, we can add the functionality that builds the model. To do so, we create a class that inherits from NodeAlgoBuilder and overrides the init_business(exp) method; the trainer, model, observer, and device must all be created in this method. See here for an example.
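A hedged sketch of such a builder; the base-class import path, helper names, and return convention are assumptions based on the description above:

from domainlab.algos.a_algo_builder import NodeAlgoBuilder  # assumed path

class NodeAlgoBuilderMyMethod(NodeAlgoBuilder):
    def init_business(self, exp):
        # build device, model, observer, and trainer for this experiment
        task, args = exp.task, exp.args
        device = get_device(args)          # assumed helper
        model = ModelMyMethod(...)         # model from Step 1 (placeholder args)
        observer = ObVisitor(exp, device)  # assumed observer signature
        trainer = TrainerBasic()
        trainer.init_business(model, task, observer, device, args)
        return trainer, model, observer, device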

After that, we can add the builder into the chain that creates all specified models here.

\ No newline at end of file
diff --git a/docs/build/html/doc_trainer.html b/docs/build/html/doc_trainer.html
new file mode 100644
index 000000000..e30b08f60
--- /dev/null
+++ b/docs/build/html/doc_trainer.html
@@ -0,0 +1,427 @@

Trainer Specification


When developing new trainers, you can extend the TrainerBasic class. This allows you to build upon established training routines while introducing specialized behaviors tailored to your model.


Steps to Extend TrainerBasic

1. Extend the class: Begin by inheriting from TrainerBasic.
2. Customize Key Methods: You can override several methods to customize the trainer's behavior. Here's a brief overview of what they do:
   - before_tr(self): Set up necessary configurations or states before training begins. Useful for initial logging or setting the model to train mode.
   - tr_epoch(self, epoch): Define the training logic for each epoch. This is where the bulk of your model's training will be implemented.
   - before_epoch(self): Prepare anything specific at the start of each epoch, like resetting counters or updating learning-rate schedules.
   - after_epoch(self, epoch): Typically used for logging and validation checks after each epoch.
   - tr_batch(self, tensor_x, tensor_y, tensor_d, others, ind_batch, epoch): Handle the processing of each batch. This includes forward and backward propagation.
   - before_batch(self, epoch, ind_batch) and after_batch(self, epoch, ind_batch): Perform actions right before and after processing a batch, respectively. Useful for implementing behaviors like batch-wise logging or applying gradients.
3. Register Your Trainer: Make sure the framework can utilize the new trainer; for that, it is necessary to register it in zoo_trainers.py, as sketched below.
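A hedged sketch of the registration step, assuming zoo_trainers.py links trainer nodes into a chain of responsibility as the existing entries do (the import is hypothetical):

# in domainlab/algos/trainers/zoo_trainers.py (assumed layout):
from my_package.my_trainer import MyCustomTrainer  # hypothetical module

# ... inside the function that builds the trainer chain:
chain = TrainerBasic(chain)       # existing node, shown for context
chain = MyCustomTrainer(chain)    # append the custom trainer as one more node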

Example Implementation


Here is a simple example of a custom trainer that logs additional details at the start of training, after each epoch, and after each batch:

+class MyCustomTrainer(TrainerBasic):
+    def before_tr(self):
+        super().before_tr()  # ensure the base setup still runs
+        print("Starting training session.")
+
+    def tr_epoch(self, epoch):
+        # custom training logic for each epoch
+        for ind_batch, (tensor_x, tensor_y, tensor_d, *others) in \
+                enumerate(self.loader_tr):
+            self.tr_batch(tensor_x, tensor_y, tensor_d, others,
+                          ind_batch, epoch)
+        print(f"Completed epoch {epoch}")
+
+    def tr_batch(self, tensor_x, tensor_y, tensor_d, others, ind_batch, epoch):
+        # delegate the optimization step to the base implementation,
+        # then add batch-wise logging
+        super().tr_batch(tensor_x, tensor_y, tensor_d, others,
+                         ind_batch, epoch)
+        print(f"Batch {ind_batch} of epoch {epoch} processed.")
\ No newline at end of file