From c46205b08c1226fe7a1291801b14525e9b397a58 Mon Sep 17 00:00:00 2001
From: smilesun
Date: Tue, 17 Sep 2024 16:01:29 +0200
Subject: [PATCH] .

---
 docs/docDIAL.md  | 3 +--
 docs/docFishr.md | 2 +-
 docs/docIRM.md   | 2 +-
 docs/doc_mldg.md | 3 +--
 4 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/docs/docDIAL.md b/docs/docDIAL.md
index 9f0a38e47..3aa28a49f 100644
--- a/docs/docDIAL.md
+++ b/docs/docDIAL.md
@@ -73,7 +73,7 @@ This procedure yields the following hyperparameters:
 - `--gamma_reg`: ? ($\epsilon$ in the paper)
 - `--lr`: learning rate ($\alpha$ in the paper)
 
-# Examples
+## Examples
 ```
 python main_out.py --te_d=0 --task=mnistcolor10 --model=erm --trainer=dial --nname=conv_bn_pool_2
 ```
@@ -81,7 +81,6 @@ python main_out.py --te_d=0 --task=mnistcolor10 --model=erm --trainer=dial --nna
 
 
 
-## Adversarial images training
 ```shell
 python main_out.py --te_d=0 --task=mnistcolor10 --keep_model --model=erm --trainer=dial --nname=conv_bn_pool_2
 ```
diff --git a/docs/docFishr.md b/docs/docFishr.md
index e2ba4c1b9..90c2da6b4 100644
--- a/docs/docFishr.md
+++ b/docs/docFishr.md
@@ -72,7 +72,7 @@ For more details, see the reference below or the domainlab code.
 
 
 
-# Examples
+## Examples
 ```
 python main_out.py --te_d=0 --task=mini_vlcs --model=erm --trainer=fishr --nname=alexnet --bs=2 --nocu
 ```
diff --git a/docs/docIRM.md b/docs/docIRM.md
index 0b8114de8..57d033db7 100644
--- a/docs/docIRM.md
+++ b/docs/docIRM.md
@@ -26,7 +26,7 @@ where $\lambda$ is a hyperparameter that controls the trade-off between the empi
 In practice, one could simply divide one mini-batch into two subsets, indexed by $i$ and $j$; the product of the per-subset gradients forms an unbiased estimate of the squared L2 norm of the gradient.
 In detail: the squared gradient norm is estimated via the inner product between $\nabla_{w|w=1} \ell(w \circ \Phi(X^{(d, i)}), Y^{(d, i)})$ of dimension dim(Grad) and $\nabla_{w|w=1} \ell(w \circ \Phi(X^{(d, j)}), Y^{(d, j)})$ of the same dimension.
 For more details, see Section 3.2 and Appendix D of Arjovsky et al., "Invariant Risk Minimization."
-# Examples
+## Examples
 
 ```shell
 python main_out.py --te_d=0 --task=mnistcolor10 --model=erm --trainer=irm --nname=conv_bn_pool_2
diff --git a/docs/doc_mldg.md b/docs/doc_mldg.md
index 47b061620..92e412180 100644
--- a/docs/doc_mldg.md
+++ b/docs/doc_mldg.md
@@ -4,8 +4,7 @@
 Li, Da, et al. "Learning to generalize: Meta-learning for domain generalization."
 Proceedings of the AAAI conference on artificial intelligence. Vol. 32. No. 1. 2018.
 
-# Examples
-## Meta Learning Domain Generalization
+## Examples
 ```shell
 python main_out.py --te_d=caltech --task=mini_vlcs --debug --bs=8 --model=erm --trainer=mldg --nname=alexnet
 ```
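The docIRM.md hunk above describes the two-subset trick for estimating the squared gradient norm in the IRM penalty. The sketch below illustrates that estimator in PyTorch. It is a minimal sketch under stated assumptions, not the DomainLab code this patch touches: the function name `irm_penalty`, the even/odd split of the mini-batch, and the use of cross-entropy as $\ell$ are choices made here for exposition.

```python
# Minimal sketch (not part of this patch) of the two-subset estimator
# described in docs/docIRM.md. The dummy classifier w is a scalar fixed
# at 1.0; the IRM penalty is the squared norm of the gradient of the
# loss with respect to w, estimated without bias by the inner product
# of the gradients computed on two disjoint subsets i and j.
import torch
import torch.nn.functional as F


def irm_penalty(logits: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Unbiased estimate of ||grad_{w|w=1} loss(w * Phi(X), Y)||^2."""
    w = torch.ones(1, requires_grad=True, device=logits.device)
    # Split the mini-batch into subset i (even rows) and subset j (odd rows).
    loss_i = F.cross_entropy(logits[0::2] * w, y[0::2])
    loss_j = F.cross_entropy(logits[1::2] * w, y[1::2])
    grad_i = torch.autograd.grad(loss_i, [w], create_graph=True)[0]
    grad_j = torch.autograd.grad(loss_j, [w], create_graph=True)[0]
    # Inner product of the two per-subset gradients: since the subsets are
    # independent, its expectation equals the squared gradient norm.
    return (grad_i * grad_j).sum()


# Hypothetical usage: total loss = ERM loss + lambda * penalty, summed over
# training domains d, with logits = Phi(X^{(d)}) from the feature extractor.
if __name__ == "__main__":
    logits = torch.randn(8, 3, requires_grad=True)  # stand-in for Phi(X)
    y = torch.randint(0, 3, (8,))
    print(irm_penalty(logits, y).item())
```

Computing the penalty from two halves of the batch, rather than squaring one gradient, is what removes the bias: the expectation of the product of two independent gradient estimates factorizes, whereas the expected square of a single noisy estimate overshoots the true squared norm.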