<!DOCTYPE html>
<html lang="" xml:lang="">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<title>Chapter 5 Penalized regressions and sparse hedging for minimum variance portfolios | Machine Learning for Factor Investing</title>
<meta name="description" content="Chapter 5 Penalized regressions and sparse hedging for minimum variance portfolios | Machine Learning for Factor Investing" />
<meta name="generator" content="bookdown 0.21 and GitBook 2.6.7" />
<meta property="og:title" content="Chapter 5 Penalized regressions and sparse hedging for minimum variance portfolios | Machine Learning for Factor Investing" />
<meta property="og:type" content="book" />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="Chapter 5 Penalized regressions and sparse hedging for minimum variance portfolios | Machine Learning for Factor Investing" />
<meta name="author" content="Guillaume Coqueret and Tony Guida" />
<meta name="date" content="2021-04-11" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="apple-mobile-web-app-capable" content="yes" />
<meta name="apple-mobile-web-app-status-bar-style" content="black" />
<link rel="prev" href="Data.html"/>
<link rel="next" href="trees.html"/>
<script src="libs/header-attrs-2.5/header-attrs.js"></script>
<script src="libs/jquery-2.2.3/jquery.min.js"></script>
<link href="libs/gitbook-2.6.7/css/style.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-table.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-bookdown.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-highlight.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-search.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-fontsettings.css" rel="stylesheet" />
<link href="libs/gitbook-2.6.7/css/plugin-clipboard.css" rel="stylesheet" />
<link href="libs/anchor-sections-1.0/anchor-sections.css" rel="stylesheet" />
<script src="libs/anchor-sections-1.0/anchor-sections.js"></script>
<script src="libs/kePrint-0.0.1/kePrint.js"></script>
<link href="libs/lightable-0.0.1/lightable.css" rel="stylesheet" />
<style type="text/css">
pre > code.sourceCode { white-space: pre; position: relative; }
pre > code.sourceCode > span { display: inline-block; line-height: 1.25; }
pre > code.sourceCode > span:empty { height: 1.2em; }
.sourceCode { overflow: visible; }
code.sourceCode > span { color: inherit; text-decoration: inherit; }
pre.sourceCode { margin: 0; }
@media screen {
div.sourceCode { overflow: auto; }
}
@media print {
pre > code.sourceCode { white-space: pre-wrap; }
pre > code.sourceCode > span { text-indent: -5em; padding-left: 5em; }
}
pre.numberSource code
{ counter-reset: source-line 0; }
pre.numberSource code > span
{ position: relative; left: -4em; counter-increment: source-line; }
pre.numberSource code > span > a:first-child::before
{ content: counter(source-line);
position: relative; left: -1em; text-align: right; vertical-align: baseline;
border: none; display: inline-block;
-webkit-touch-callout: none; -webkit-user-select: none;
-khtml-user-select: none; -moz-user-select: none;
-ms-user-select: none; user-select: none;
padding: 0 4px; width: 4em;
color: #aaaaaa;
}
pre.numberSource { margin-left: 3em; border-left: 1px solid #aaaaaa; padding-left: 4px; }
div.sourceCode
{ }
@media screen {
pre > code.sourceCode > span > a:first-child::before { text-decoration: underline; }
}
code span.al { color: #ff0000; font-weight: bold; } /* Alert */
code span.an { color: #60a0b0; font-weight: bold; font-style: italic; } /* Annotation */
code span.at { color: #7d9029; } /* Attribute */
code span.bn { color: #40a070; } /* BaseN */
code span.bu { } /* BuiltIn */
code span.cf { color: #007020; font-weight: bold; } /* ControlFlow */
code span.ch { color: #4070a0; } /* Char */
code span.cn { color: #880000; } /* Constant */
code span.co { color: #60a0b0; font-style: italic; } /* Comment */
code span.cv { color: #60a0b0; font-weight: bold; font-style: italic; } /* CommentVar */
code span.do { color: #ba2121; font-style: italic; } /* Documentation */
code span.dt { color: #902000; } /* DataType */
code span.dv { color: #40a070; } /* DecVal */
code span.er { color: #ff0000; font-weight: bold; } /* Error */
code span.ex { } /* Extension */
code span.fl { color: #40a070; } /* Float */
code span.fu { color: #06287e; } /* Function */
code span.im { } /* Import */
code span.in { color: #60a0b0; font-weight: bold; font-style: italic; } /* Information */
code span.kw { color: #007020; font-weight: bold; } /* Keyword */
code span.op { color: #666666; } /* Operator */
code span.ot { color: #007020; } /* Other */
code span.pp { color: #bc7a00; } /* Preprocessor */
code span.sc { color: #4070a0; } /* SpecialChar */
code span.ss { color: #bb6688; } /* SpecialString */
code span.st { color: #4070a0; } /* String */
code span.va { color: #19177c; } /* Variable */
code span.vs { color: #4070a0; } /* VerbatimString */
code span.wa { color: #60a0b0; font-weight: bold; font-style: italic; } /* Warning */
</style>
</head>
<body>
<div class="book without-animation with-summary font-size-2 font-family-1" data-basepath=".">
<div class="book-summary">
<nav role="navigation">
<ul class="summary">
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html"><i class="fa fa-check"></i>Preface</a>
<ul>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#what-this-book-is-not-about"><i class="fa fa-check"></i>What this book is not about</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#the-targeted-audience"><i class="fa fa-check"></i>The targeted audience</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#how-this-book-is-structured"><i class="fa fa-check"></i>How this book is structured</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#companion-website"><i class="fa fa-check"></i>Companion website</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#why-r"><i class="fa fa-check"></i>Why R?</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#coding-instructions"><i class="fa fa-check"></i>Coding instructions</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#acknowledgments"><i class="fa fa-check"></i>Acknowledgments</a></li>
<li class="chapter" data-level="" data-path="preface.html"><a href="preface.html#future-developments"><i class="fa fa-check"></i>Future developments</a></li>
</ul></li>
<li class="part"><span><b>I Introduction</b></span></li>
<li class="chapter" data-level="1" data-path="notdata.html"><a href="notdata.html"><i class="fa fa-check"></i><b>1</b> Notations and data</a>
<ul>
<li class="chapter" data-level="1.1" data-path="notdata.html"><a href="notdata.html#notations"><i class="fa fa-check"></i><b>1.1</b> Notations</a></li>
<li class="chapter" data-level="1.2" data-path="notdata.html"><a href="notdata.html#dataset"><i class="fa fa-check"></i><b>1.2</b> Dataset</a></li>
</ul></li>
<li class="chapter" data-level="2" data-path="intro.html"><a href="intro.html"><i class="fa fa-check"></i><b>2</b> Introduction</a>
<ul>
<li class="chapter" data-level="2.1" data-path="intro.html"><a href="intro.html#context"><i class="fa fa-check"></i><b>2.1</b> Context</a></li>
<li class="chapter" data-level="2.2" data-path="intro.html"><a href="intro.html#portfolio-construction-the-workflow"><i class="fa fa-check"></i><b>2.2</b> Portfolio construction: the workflow</a></li>
<li class="chapter" data-level="2.3" data-path="intro.html"><a href="intro.html#machine-learning-is-no-magic-wand"><i class="fa fa-check"></i><b>2.3</b> Machine learning is no magic wand</a></li>
</ul></li>
<li class="chapter" data-level="3" data-path="factor.html"><a href="factor.html"><i class="fa fa-check"></i><b>3</b> Factor investing and asset pricing anomalies</a>
<ul>
<li class="chapter" data-level="3.1" data-path="factor.html"><a href="factor.html#introduction"><i class="fa fa-check"></i><b>3.1</b> Introduction</a></li>
<li class="chapter" data-level="3.2" data-path="factor.html"><a href="factor.html#detecting-anomalies"><i class="fa fa-check"></i><b>3.2</b> Detecting anomalies</a>
<ul>
<li class="chapter" data-level="3.2.1" data-path="factor.html"><a href="factor.html#challenges"><i class="fa fa-check"></i><b>3.2.1</b> Challenges</a></li>
<li class="chapter" data-level="3.2.2" data-path="factor.html"><a href="factor.html#simple-portfolio-sorts"><i class="fa fa-check"></i><b>3.2.2</b> Simple portfolio sorts </a></li>
<li class="chapter" data-level="3.2.3" data-path="factor.html"><a href="factor.html#factors"><i class="fa fa-check"></i><b>3.2.3</b> Factors</a></li>
<li class="chapter" data-level="3.2.4" data-path="factor.html"><a href="factor.html#predictive-regressions-sorts-and-p-value-issues"><i class="fa fa-check"></i><b>3.2.4</b> Predictive regressions, sorts, and p-value issues</a></li>
<li class="chapter" data-level="3.2.5" data-path="factor.html"><a href="factor.html#fama-macbeth-regressions"><i class="fa fa-check"></i><b>3.2.5</b> Fama-Macbeth regressions</a></li>
<li class="chapter" data-level="3.2.6" data-path="factor.html"><a href="factor.html#factor-competition"><i class="fa fa-check"></i><b>3.2.6</b> Factor competition</a></li>
<li class="chapter" data-level="3.2.7" data-path="factor.html"><a href="factor.html#advanced-techniques"><i class="fa fa-check"></i><b>3.2.7</b> Advanced techniques</a></li>
</ul></li>
<li class="chapter" data-level="3.3" data-path="factor.html"><a href="factor.html#factors-or-characteristics"><i class="fa fa-check"></i><b>3.3</b> Factors or characteristics?</a></li>
<li class="chapter" data-level="3.4" data-path="factor.html"><a href="factor.html#hot-topics-momentum-timing-and-esg"><i class="fa fa-check"></i><b>3.4</b> Hot topics: momentum, timing and ESG</a>
<ul>
<li class="chapter" data-level="3.4.1" data-path="factor.html"><a href="factor.html#factor-momentum"><i class="fa fa-check"></i><b>3.4.1</b> Factor momentum</a></li>
<li class="chapter" data-level="3.4.2" data-path="factor.html"><a href="factor.html#factor-timing"><i class="fa fa-check"></i><b>3.4.2</b> Factor timing</a></li>
<li class="chapter" data-level="3.4.3" data-path="factor.html"><a href="factor.html#the-green-factors"><i class="fa fa-check"></i><b>3.4.3</b> The green factors</a></li>
</ul></li>
<li class="chapter" data-level="3.5" data-path="factor.html"><a href="factor.html#the-links-with-machine-learning"><i class="fa fa-check"></i><b>3.5</b> The links with machine learning</a>
<ul>
<li class="chapter" data-level="3.5.1" data-path="factor.html"><a href="factor.html#a-short-list-of-recent-references"><i class="fa fa-check"></i><b>3.5.1</b> A short list of recent references</a></li>
<li class="chapter" data-level="3.5.2" data-path="factor.html"><a href="factor.html#explicit-connections-with-asset-pricing-models"><i class="fa fa-check"></i><b>3.5.2</b> Explicit connections with asset pricing models</a></li>
</ul></li>
<li class="chapter" data-level="3.6" data-path="factor.html"><a href="factor.html#coding-exercises"><i class="fa fa-check"></i><b>3.6</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="4" data-path="Data.html"><a href="Data.html"><i class="fa fa-check"></i><b>4</b> Data preprocessing</a>
<ul>
<li class="chapter" data-level="4.1" data-path="Data.html"><a href="Data.html#know-your-data"><i class="fa fa-check"></i><b>4.1</b> Know your data</a></li>
<li class="chapter" data-level="4.2" data-path="Data.html"><a href="Data.html#missing-data"><i class="fa fa-check"></i><b>4.2</b> Missing data</a></li>
<li class="chapter" data-level="4.3" data-path="Data.html"><a href="Data.html#outlier-detection"><i class="fa fa-check"></i><b>4.3</b> Outlier detection</a></li>
<li class="chapter" data-level="4.4" data-path="Data.html"><a href="Data.html#feateng"><i class="fa fa-check"></i><b>4.4</b> Feature engineering</a>
<ul>
<li class="chapter" data-level="4.4.1" data-path="Data.html"><a href="Data.html#feature-selection"><i class="fa fa-check"></i><b>4.4.1</b> Feature selection</a></li>
<li class="chapter" data-level="4.4.2" data-path="Data.html"><a href="Data.html#scaling"><i class="fa fa-check"></i><b>4.4.2</b> Scaling the predictors</a></li>
</ul></li>
<li class="chapter" data-level="4.5" data-path="Data.html"><a href="Data.html#labelling"><i class="fa fa-check"></i><b>4.5</b> Labelling</a>
<ul>
<li class="chapter" data-level="4.5.1" data-path="Data.html"><a href="Data.html#simple-labels"><i class="fa fa-check"></i><b>4.5.1</b> Simple labels</a></li>
<li class="chapter" data-level="4.5.2" data-path="Data.html"><a href="Data.html#categorical-labels"><i class="fa fa-check"></i><b>4.5.2</b> Categorical labels</a></li>
<li class="chapter" data-level="4.5.3" data-path="Data.html"><a href="Data.html#the-triple-barrier-method"><i class="fa fa-check"></i><b>4.5.3</b> The triple barrier method</a></li>
<li class="chapter" data-level="4.5.4" data-path="Data.html"><a href="Data.html#filtering-the-sample"><i class="fa fa-check"></i><b>4.5.4</b> Filtering the sample</a></li>
<li class="chapter" data-level="4.5.5" data-path="Data.html"><a href="Data.html#horizons"><i class="fa fa-check"></i><b>4.5.5</b> Return horizons</a></li>
</ul></li>
<li class="chapter" data-level="4.6" data-path="Data.html"><a href="Data.html#pers"><i class="fa fa-check"></i><b>4.6</b> Handling persistence</a></li>
<li class="chapter" data-level="4.7" data-path="Data.html"><a href="Data.html#extensions"><i class="fa fa-check"></i><b>4.7</b> Extensions</a>
<ul>
<li class="chapter" data-level="4.7.1" data-path="Data.html"><a href="Data.html#transforming-features"><i class="fa fa-check"></i><b>4.7.1</b> Transforming features</a></li>
<li class="chapter" data-level="4.7.2" data-path="Data.html"><a href="Data.html#macrovar"><i class="fa fa-check"></i><b>4.7.2</b> Macro-economic variables</a></li>
<li class="chapter" data-level="4.7.3" data-path="Data.html"><a href="Data.html#active-learning"><i class="fa fa-check"></i><b>4.7.3</b> Active learning</a></li>
</ul></li>
<li class="chapter" data-level="4.8" data-path="Data.html"><a href="Data.html#additional-code-and-results"><i class="fa fa-check"></i><b>4.8</b> Additional code and results</a>
<ul>
<li class="chapter" data-level="4.8.1" data-path="Data.html"><a href="Data.html#impact-of-rescaling-graphical-representation"><i class="fa fa-check"></i><b>4.8.1</b> Impact of rescaling: graphical representation</a></li>
<li class="chapter" data-level="4.8.2" data-path="Data.html"><a href="Data.html#impact-of-rescaling-toy-example"><i class="fa fa-check"></i><b>4.8.2</b> Impact of rescaling: toy example</a></li>
</ul></li>
<li class="chapter" data-level="4.9" data-path="Data.html"><a href="Data.html#coding-exercises-1"><i class="fa fa-check"></i><b>4.9</b> Coding exercises</a></li>
</ul></li>
<li class="part"><span><b>II Common supervised algorithms</b></span></li>
<li class="chapter" data-level="5" data-path="lasso.html"><a href="lasso.html"><i class="fa fa-check"></i><b>5</b> Penalized regressions and sparse hedging for minimum variance portfolios</a>
<ul>
<li class="chapter" data-level="5.1" data-path="lasso.html"><a href="lasso.html#penalized-regressions"><i class="fa fa-check"></i><b>5.1</b> Penalized regressions</a>
<ul>
<li class="chapter" data-level="5.1.1" data-path="lasso.html"><a href="lasso.html#penreg"><i class="fa fa-check"></i><b>5.1.1</b> Simple regressions</a></li>
<li class="chapter" data-level="5.1.2" data-path="lasso.html"><a href="lasso.html#forms-of-penalizations"><i class="fa fa-check"></i><b>5.1.2</b> Forms of penalizations</a></li>
<li class="chapter" data-level="5.1.3" data-path="lasso.html"><a href="lasso.html#illustrations"><i class="fa fa-check"></i><b>5.1.3</b> Illustrations</a></li>
</ul></li>
<li class="chapter" data-level="5.2" data-path="lasso.html"><a href="lasso.html#sparse-hedging-for-minimum-variance-portfolios"><i class="fa fa-check"></i><b>5.2</b> Sparse hedging for minimum variance portfolios</a>
<ul>
<li class="chapter" data-level="5.2.1" data-path="lasso.html"><a href="lasso.html#presentation-and-derivations"><i class="fa fa-check"></i><b>5.2.1</b> Presentation and derivations</a></li>
<li class="chapter" data-level="5.2.2" data-path="lasso.html"><a href="lasso.html#sparseex"><i class="fa fa-check"></i><b>5.2.2</b> Example</a></li>
</ul></li>
<li class="chapter" data-level="5.3" data-path="lasso.html"><a href="lasso.html#predictive-regressions"><i class="fa fa-check"></i><b>5.3</b> Predictive regressions</a>
<ul>
<li class="chapter" data-level="5.3.1" data-path="lasso.html"><a href="lasso.html#literature-review-and-principle"><i class="fa fa-check"></i><b>5.3.1</b> Literature review and principle</a></li>
<li class="chapter" data-level="5.3.2" data-path="lasso.html"><a href="lasso.html#code-and-results"><i class="fa fa-check"></i><b>5.3.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="5.4" data-path="lasso.html"><a href="lasso.html#coding-exercise"><i class="fa fa-check"></i><b>5.4</b> Coding exercise</a></li>
</ul></li>
<li class="chapter" data-level="6" data-path="trees.html"><a href="trees.html"><i class="fa fa-check"></i><b>6</b> Tree-based methods</a>
<ul>
<li class="chapter" data-level="6.1" data-path="trees.html"><a href="trees.html#simple-trees"><i class="fa fa-check"></i><b>6.1</b> Simple trees</a>
<ul>
<li class="chapter" data-level="6.1.1" data-path="trees.html"><a href="trees.html#principle"><i class="fa fa-check"></i><b>6.1.1</b> Principle</a></li>
<li class="chapter" data-level="6.1.2" data-path="trees.html"><a href="trees.html#treeclass"><i class="fa fa-check"></i><b>6.1.2</b> Further details on classification</a></li>
<li class="chapter" data-level="6.1.3" data-path="trees.html"><a href="trees.html#pruning-criteria"><i class="fa fa-check"></i><b>6.1.3</b> Pruning criteria</a></li>
<li class="chapter" data-level="6.1.4" data-path="trees.html"><a href="trees.html#code-and-interpretation"><i class="fa fa-check"></i><b>6.1.4</b> Code and interpretation</a></li>
</ul></li>
<li class="chapter" data-level="6.2" data-path="trees.html"><a href="trees.html#random-forests"><i class="fa fa-check"></i><b>6.2</b> Random forests</a>
<ul>
<li class="chapter" data-level="6.2.1" data-path="trees.html"><a href="trees.html#principle-1"><i class="fa fa-check"></i><b>6.2.1</b> Principle</a></li>
<li class="chapter" data-level="6.2.2" data-path="trees.html"><a href="trees.html#code-and-results-1"><i class="fa fa-check"></i><b>6.2.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="6.3" data-path="trees.html"><a href="trees.html#adaboost"><i class="fa fa-check"></i><b>6.3</b> Boosted trees: Adaboost</a>
<ul>
<li class="chapter" data-level="6.3.1" data-path="trees.html"><a href="trees.html#methodology"><i class="fa fa-check"></i><b>6.3.1</b> Methodology</a></li>
<li class="chapter" data-level="6.3.2" data-path="trees.html"><a href="trees.html#illustration"><i class="fa fa-check"></i><b>6.3.2</b> Illustration</a></li>
</ul></li>
<li class="chapter" data-level="6.4" data-path="trees.html"><a href="trees.html#boosted-trees-extreme-gradient-boosting"><i class="fa fa-check"></i><b>6.4</b> Boosted trees: extreme gradient boosting</a>
<ul>
<li class="chapter" data-level="6.4.1" data-path="trees.html"><a href="trees.html#managing-loss"><i class="fa fa-check"></i><b>6.4.1</b> Managing loss</a></li>
<li class="chapter" data-level="6.4.2" data-path="trees.html"><a href="trees.html#penalization"><i class="fa fa-check"></i><b>6.4.2</b> Penalization</a></li>
<li class="chapter" data-level="6.4.3" data-path="trees.html"><a href="trees.html#aggregation"><i class="fa fa-check"></i><b>6.4.3</b> Aggregation</a></li>
<li class="chapter" data-level="6.4.4" data-path="trees.html"><a href="trees.html#tree-structure"><i class="fa fa-check"></i><b>6.4.4</b> Tree structure</a></li>
<li class="chapter" data-level="6.4.5" data-path="trees.html"><a href="trees.html#boostext"><i class="fa fa-check"></i><b>6.4.5</b> Extensions</a></li>
<li class="chapter" data-level="6.4.6" data-path="trees.html"><a href="trees.html#boostcode"><i class="fa fa-check"></i><b>6.4.6</b> Code and results</a></li>
<li class="chapter" data-level="6.4.7" data-path="trees.html"><a href="trees.html#instweight"><i class="fa fa-check"></i><b>6.4.7</b> Instance weighting</a></li>
</ul></li>
<li class="chapter" data-level="6.5" data-path="trees.html"><a href="trees.html#discussion"><i class="fa fa-check"></i><b>6.5</b> Discussion</a></li>
<li class="chapter" data-level="6.6" data-path="trees.html"><a href="trees.html#coding-exercises-2"><i class="fa fa-check"></i><b>6.6</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="7" data-path="NN.html"><a href="NN.html"><i class="fa fa-check"></i><b>7</b> Neural networks</a>
<ul>
<li class="chapter" data-level="7.1" data-path="NN.html"><a href="NN.html#the-original-perceptron"><i class="fa fa-check"></i><b>7.1</b> The original perceptron</a></li>
<li class="chapter" data-level="7.2" data-path="NN.html"><a href="NN.html#multilayer-perceptron"><i class="fa fa-check"></i><b>7.2</b> Multilayer perceptron</a>
<ul>
<li class="chapter" data-level="7.2.1" data-path="NN.html"><a href="NN.html#introduction-and-notations"><i class="fa fa-check"></i><b>7.2.1</b> Introduction and notations</a></li>
<li class="chapter" data-level="7.2.2" data-path="NN.html"><a href="NN.html#universal-approximation"><i class="fa fa-check"></i><b>7.2.2</b> Universal approximation</a></li>
<li class="chapter" data-level="7.2.3" data-path="NN.html"><a href="NN.html#backprop"><i class="fa fa-check"></i><b>7.2.3</b> Learning via back-propagation</a></li>
<li class="chapter" data-level="7.2.4" data-path="NN.html"><a href="NN.html#NNclass"><i class="fa fa-check"></i><b>7.2.4</b> Further details on classification</a></li>
</ul></li>
<li class="chapter" data-level="7.3" data-path="NN.html"><a href="NN.html#howdeep"><i class="fa fa-check"></i><b>7.3</b> How deep we should go and other practical issues</a>
<ul>
<li class="chapter" data-level="7.3.1" data-path="NN.html"><a href="NN.html#architectural-choices"><i class="fa fa-check"></i><b>7.3.1</b> Architectural choices</a></li>
<li class="chapter" data-level="7.3.2" data-path="NN.html"><a href="NN.html#frequency-of-weight-updates-and-learning-duration"><i class="fa fa-check"></i><b>7.3.2</b> Frequency of weight updates and learning duration</a></li>
<li class="chapter" data-level="7.3.3" data-path="NN.html"><a href="NN.html#penalizations-and-dropout"><i class="fa fa-check"></i><b>7.3.3</b> Penalizations and dropout</a></li>
</ul></li>
<li class="chapter" data-level="7.4" data-path="NN.html"><a href="NN.html#code-samples-and-comments-for-vanilla-mlp"><i class="fa fa-check"></i><b>7.4</b> Code samples and comments for vanilla MLP</a>
<ul>
<li class="chapter" data-level="7.4.1" data-path="NN.html"><a href="NN.html#regression-example"><i class="fa fa-check"></i><b>7.4.1</b> Regression example</a></li>
<li class="chapter" data-level="7.4.2" data-path="NN.html"><a href="NN.html#classification-example"><i class="fa fa-check"></i><b>7.4.2</b> Classification example</a></li>
<li class="chapter" data-level="7.4.3" data-path="NN.html"><a href="NN.html#custloss"><i class="fa fa-check"></i><b>7.4.3</b> Custom losses</a></li>
</ul></li>
<li class="chapter" data-level="7.5" data-path="NN.html"><a href="NN.html#RNN"><i class="fa fa-check"></i><b>7.5</b> Recurrent networks</a>
<ul>
<li class="chapter" data-level="7.5.1" data-path="NN.html"><a href="NN.html#presentation"><i class="fa fa-check"></i><b>7.5.1</b> Presentation</a></li>
<li class="chapter" data-level="7.5.2" data-path="NN.html"><a href="NN.html#code-and-results-2"><i class="fa fa-check"></i><b>7.5.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="7.6" data-path="NN.html"><a href="NN.html#tabular-networks-tabnets"><i class="fa fa-check"></i><b>7.6</b> Tabular networks (TabNets)</a>
<ul>
<li class="chapter" data-level="7.6.1" data-path="NN.html"><a href="NN.html#the-zoo-of-layers"><i class="fa fa-check"></i><b>7.6.1</b> The zoo of layers</a></li>
<li class="chapter" data-level="7.6.2" data-path="NN.html"><a href="NN.html#sparsemax-activation"><i class="fa fa-check"></i><b>7.6.2</b> Sparsemax activation</a></li>
<li class="chapter" data-level="7.6.3" data-path="NN.html"><a href="NN.html#feature-selection-1"><i class="fa fa-check"></i><b>7.6.3</b> Feature selection</a></li>
<li class="chapter" data-level="7.6.4" data-path="NN.html"><a href="NN.html#the-full-architecture"><i class="fa fa-check"></i><b>7.6.4</b> The full architecture</a></li>
<li class="chapter" data-level="7.6.5" data-path="NN.html"><a href="NN.html#code-and-results-3"><i class="fa fa-check"></i><b>7.6.5</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="7.7" data-path="NN.html"><a href="NN.html#other-common-architectures"><i class="fa fa-check"></i><b>7.7</b> Other common architectures</a>
<ul>
<li class="chapter" data-level="7.7.1" data-path="NN.html"><a href="NN.html#generative-aversarial-networks"><i class="fa fa-check"></i><b>7.7.1</b> Generative adversarial networks</a></li>
<li class="chapter" data-level="7.7.2" data-path="NN.html"><a href="NN.html#autoencoders"><i class="fa fa-check"></i><b>7.7.2</b> Autoencoders</a></li>
<li class="chapter" data-level="7.7.3" data-path="NN.html"><a href="NN.html#CNN"><i class="fa fa-check"></i><b>7.7.3</b> A word on convolutional networks</a></li>
</ul></li>
<li class="chapter" data-level="7.8" data-path="NN.html"><a href="NN.html#coding-exercises-3"><i class="fa fa-check"></i><b>7.8</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="8" data-path="svm.html"><a href="svm.html"><i class="fa fa-check"></i><b>8</b> Support vector machines</a>
<ul>
<li class="chapter" data-level="8.1" data-path="svm.html"><a href="svm.html#svm-for-classification"><i class="fa fa-check"></i><b>8.1</b> SVM for classification</a></li>
<li class="chapter" data-level="8.2" data-path="svm.html"><a href="svm.html#svm-for-regression"><i class="fa fa-check"></i><b>8.2</b> SVM for regression</a></li>
<li class="chapter" data-level="8.3" data-path="svm.html"><a href="svm.html#practice"><i class="fa fa-check"></i><b>8.3</b> Practice</a></li>
<li class="chapter" data-level="8.4" data-path="svm.html"><a href="svm.html#coding-exercises-4"><i class="fa fa-check"></i><b>8.4</b> Coding exercises</a></li>
</ul></li>
<li class="chapter" data-level="9" data-path="bayes.html"><a href="bayes.html"><i class="fa fa-check"></i><b>9</b> Bayesian methods</a>
<ul>
<li class="chapter" data-level="9.1" data-path="bayes.html"><a href="bayes.html#the-bayesian-framework"><i class="fa fa-check"></i><b>9.1</b> The Bayesian framework</a></li>
<li class="chapter" data-level="9.2" data-path="bayes.html"><a href="bayes.html#bayesian-sampling"><i class="fa fa-check"></i><b>9.2</b> Bayesian sampling</a>
<ul>
<li class="chapter" data-level="9.2.1" data-path="bayes.html"><a href="bayes.html#gibbs-sampling"><i class="fa fa-check"></i><b>9.2.1</b> Gibbs sampling</a></li>
<li class="chapter" data-level="9.2.2" data-path="bayes.html"><a href="bayes.html#metropolis-hastings-sampling"><i class="fa fa-check"></i><b>9.2.2</b> Metropolis-Hastings sampling</a></li>
</ul></li>
<li class="chapter" data-level="9.3" data-path="bayes.html"><a href="bayes.html#bayesian-linear-regression"><i class="fa fa-check"></i><b>9.3</b> Bayesian linear regression</a></li>
<li class="chapter" data-level="9.4" data-path="bayes.html"><a href="bayes.html#naive-bayes-classifier"><i class="fa fa-check"></i><b>9.4</b> Naive Bayes classifier</a></li>
<li class="chapter" data-level="9.5" data-path="bayes.html"><a href="bayes.html#BART"><i class="fa fa-check"></i><b>9.5</b> Bayesian additive trees</a>
<ul>
<li class="chapter" data-level="9.5.1" data-path="bayes.html"><a href="bayes.html#general-formulation"><i class="fa fa-check"></i><b>9.5.1</b> General formulation</a></li>
<li class="chapter" data-level="9.5.2" data-path="bayes.html"><a href="bayes.html#priors"><i class="fa fa-check"></i><b>9.5.2</b> Priors</a></li>
<li class="chapter" data-level="9.5.3" data-path="bayes.html"><a href="bayes.html#sampling-and-predictions"><i class="fa fa-check"></i><b>9.5.3</b> Sampling and predictions</a></li>
<li class="chapter" data-level="9.5.4" data-path="bayes.html"><a href="bayes.html#code"><i class="fa fa-check"></i><b>9.5.4</b> Code</a></li>
</ul></li>
</ul></li>
<li class="part"><span><b>III From predictions to portfolios</b></span></li>
<li class="chapter" data-level="10" data-path="valtune.html"><a href="valtune.html"><i class="fa fa-check"></i><b>10</b> Validating and tuning</a>
<ul>
<li class="chapter" data-level="10.1" data-path="valtune.html"><a href="valtune.html#mlmetrics"><i class="fa fa-check"></i><b>10.1</b> Learning metrics</a>
<ul>
<li class="chapter" data-level="10.1.1" data-path="valtune.html"><a href="valtune.html#regression-analysis"><i class="fa fa-check"></i><b>10.1.1</b> Regression analysis</a></li>
<li class="chapter" data-level="10.1.2" data-path="valtune.html"><a href="valtune.html#classification-analysis"><i class="fa fa-check"></i><b>10.1.2</b> Classification analysis</a></li>
</ul></li>
<li class="chapter" data-level="10.2" data-path="valtune.html"><a href="valtune.html#validation"><i class="fa fa-check"></i><b>10.2</b> Validation</a>
<ul>
<li class="chapter" data-level="10.2.1" data-path="valtune.html"><a href="valtune.html#the-variance-bias-tradeoff-theory"><i class="fa fa-check"></i><b>10.2.1</b> The variance-bias tradeoff: theory</a></li>
<li class="chapter" data-level="10.2.2" data-path="valtune.html"><a href="valtune.html#the-variance-bias-tradeoff-illustration"><i class="fa fa-check"></i><b>10.2.2</b> The variance-bias tradeoff: illustration</a></li>
<li class="chapter" data-level="10.2.3" data-path="valtune.html"><a href="valtune.html#the-risk-of-overfitting-principle"><i class="fa fa-check"></i><b>10.2.3</b> The risk of overfitting: principle</a></li>
<li class="chapter" data-level="10.2.4" data-path="valtune.html"><a href="valtune.html#the-risk-of-overfitting-some-solutions"><i class="fa fa-check"></i><b>10.2.4</b> The risk of overfitting: some solutions</a></li>
</ul></li>
<li class="chapter" data-level="10.3" data-path="valtune.html"><a href="valtune.html#the-search-for-good-hyperparameters"><i class="fa fa-check"></i><b>10.3</b> The search for good hyperparameters</a>
<ul>
<li class="chapter" data-level="10.3.1" data-path="valtune.html"><a href="valtune.html#methods"><i class="fa fa-check"></i><b>10.3.1</b> Methods</a></li>
<li class="chapter" data-level="10.3.2" data-path="valtune.html"><a href="valtune.html#example-grid-search"><i class="fa fa-check"></i><b>10.3.2</b> Example: grid search</a></li>
<li class="chapter" data-level="10.3.3" data-path="valtune.html"><a href="valtune.html#example-bayesian-optimization"><i class="fa fa-check"></i><b>10.3.3</b> Example: Bayesian optimization</a></li>
</ul></li>
<li class="chapter" data-level="10.4" data-path="valtune.html"><a href="valtune.html#short-discussion-on-validation-in-backtests"><i class="fa fa-check"></i><b>10.4</b> Short discussion on validation in backtests</a></li>
</ul></li>
<li class="chapter" data-level="11" data-path="ensemble.html"><a href="ensemble.html"><i class="fa fa-check"></i><b>11</b> Ensemble models</a>
<ul>
<li class="chapter" data-level="11.1" data-path="ensemble.html"><a href="ensemble.html#linear-ensembles"><i class="fa fa-check"></i><b>11.1</b> Linear ensembles</a>
<ul>
<li class="chapter" data-level="11.1.1" data-path="ensemble.html"><a href="ensemble.html#principles"><i class="fa fa-check"></i><b>11.1.1</b> Principles</a></li>
<li class="chapter" data-level="11.1.2" data-path="ensemble.html"><a href="ensemble.html#example"><i class="fa fa-check"></i><b>11.1.2</b> Example</a></li>
</ul></li>
<li class="chapter" data-level="11.2" data-path="ensemble.html"><a href="ensemble.html#stacked-ensembles"><i class="fa fa-check"></i><b>11.2</b> Stacked ensembles</a>
<ul>
<li class="chapter" data-level="11.2.1" data-path="ensemble.html"><a href="ensemble.html#two-stage-training"><i class="fa fa-check"></i><b>11.2.1</b> Two-stage training</a></li>
<li class="chapter" data-level="11.2.2" data-path="ensemble.html"><a href="ensemble.html#code-and-results-4"><i class="fa fa-check"></i><b>11.2.2</b> Code and results</a></li>
</ul></li>
<li class="chapter" data-level="11.3" data-path="ensemble.html"><a href="ensemble.html#extensions-1"><i class="fa fa-check"></i><b>11.3</b> Extensions</a>
<ul>
<li class="chapter" data-level="11.3.1" data-path="ensemble.html"><a href="ensemble.html#exogenous-variables"><i class="fa fa-check"></i><b>11.3.1</b> Exogenous variables</a></li>
<li class="chapter" data-level="11.3.2" data-path="ensemble.html"><a href="ensemble.html#shrinking-inter-model-correlations"><i class="fa fa-check"></i><b>11.3.2</b> Shrinking inter-model correlations</a></li>
</ul></li>
<li class="chapter" data-level="11.4" data-path="ensemble.html"><a href="ensemble.html#exercise"><i class="fa fa-check"></i><b>11.4</b> Exercise</a></li>
</ul></li>
<li class="chapter" data-level="12" data-path="backtest.html"><a href="backtest.html"><i class="fa fa-check"></i><b>12</b> Portfolio backtesting</a>
<ul>
<li class="chapter" data-level="12.1" data-path="backtest.html"><a href="backtest.html#protocol"><i class="fa fa-check"></i><b>12.1</b> Setting the protocol</a></li>
<li class="chapter" data-level="12.2" data-path="backtest.html"><a href="backtest.html#turning-signals-into-portfolio-weights"><i class="fa fa-check"></i><b>12.2</b> Turning signals into portfolio weights</a></li>
<li class="chapter" data-level="12.3" data-path="backtest.html"><a href="backtest.html#perfmet"><i class="fa fa-check"></i><b>12.3</b> Performance metrics</a>
<ul>
<li class="chapter" data-level="12.3.1" data-path="backtest.html"><a href="backtest.html#discussion-1"><i class="fa fa-check"></i><b>12.3.1</b> Discussion</a></li>
<li class="chapter" data-level="12.3.2" data-path="backtest.html"><a href="backtest.html#pure-performance-and-risk-indicators"><i class="fa fa-check"></i><b>12.3.2</b> Pure performance and risk indicators</a></li>
<li class="chapter" data-level="12.3.3" data-path="backtest.html"><a href="backtest.html#factor-based-evaluation"><i class="fa fa-check"></i><b>12.3.3</b> Factor-based evaluation</a></li>
<li class="chapter" data-level="12.3.4" data-path="backtest.html"><a href="backtest.html#risk-adjusted-measures"><i class="fa fa-check"></i><b>12.3.4</b> Risk-adjusted measures</a></li>
<li class="chapter" data-level="12.3.5" data-path="backtest.html"><a href="backtest.html#transaction-costs-and-turnover"><i class="fa fa-check"></i><b>12.3.5</b> Transaction costs and turnover</a></li>
</ul></li>
<li class="chapter" data-level="12.4" data-path="backtest.html"><a href="backtest.html#common-errors-and-issues"><i class="fa fa-check"></i><b>12.4</b> Common errors and issues</a>
<ul>
<li class="chapter" data-level="12.4.1" data-path="backtest.html"><a href="backtest.html#forward-looking-data"><i class="fa fa-check"></i><b>12.4.1</b> Forward looking data</a></li>
<li class="chapter" data-level="12.4.2" data-path="backtest.html"><a href="backtest.html#backov"><i class="fa fa-check"></i><b>12.4.2</b> Backtest overfitting</a></li>
<li class="chapter" data-level="12.4.3" data-path="backtest.html"><a href="backtest.html#simple-safeguards"><i class="fa fa-check"></i><b>12.4.3</b> Simple safeguards</a></li>
</ul></li>
<li class="chapter" data-level="12.5" data-path="backtest.html"><a href="backtest.html#implication-of-non-stationarity-forecasting-is-hard"><i class="fa fa-check"></i><b>12.5</b> Implication of non-stationarity: forecasting is hard</a>
<ul>
<li class="chapter" data-level="12.5.1" data-path="backtest.html"><a href="backtest.html#general-comments"><i class="fa fa-check"></i><b>12.5.1</b> General comments</a></li>
<li class="chapter" data-level="12.5.2" data-path="backtest.html"><a href="backtest.html#the-no-free-lunch-theorem"><i class="fa fa-check"></i><b>12.5.2</b> The no free lunch theorem</a></li>
</ul></li>
<li class="chapter" data-level="12.6" data-path="backtest.html"><a href="backtest.html#first-example-a-complete-backtest"><i class="fa fa-check"></i><b>12.6</b> First example: a complete backtest</a></li>
<li class="chapter" data-level="12.7" data-path="backtest.html"><a href="backtest.html#second-example-backtest-overfitting"><i class="fa fa-check"></i><b>12.7</b> Second example: backtest overfitting</a></li>
<li class="chapter" data-level="12.8" data-path="backtest.html"><a href="backtest.html#coding-exercises-5"><i class="fa fa-check"></i><b>12.8</b> Coding exercises</a></li>
</ul></li>
<li class="part"><span><b>IV Further important topics</b></span></li>
<li class="chapter" data-level="13" data-path="interp.html"><a href="interp.html"><i class="fa fa-check"></i><b>13</b> Interpretability</a>
<ul>
<li class="chapter" data-level="13.1" data-path="interp.html"><a href="interp.html#global-interpretations"><i class="fa fa-check"></i><b>13.1</b> Global interpretations</a>
<ul>
<li class="chapter" data-level="13.1.1" data-path="interp.html"><a href="interp.html#surr"><i class="fa fa-check"></i><b>13.1.1</b> Simple models as surrogates</a></li>
<li class="chapter" data-level="13.1.2" data-path="interp.html"><a href="interp.html#variable-importance"><i class="fa fa-check"></i><b>13.1.2</b> Variable importance (tree-based)</a></li>
<li class="chapter" data-level="13.1.3" data-path="interp.html"><a href="interp.html#variable-importance-agnostic"><i class="fa fa-check"></i><b>13.1.3</b> Variable importance (agnostic)</a></li>
<li class="chapter" data-level="13.1.4" data-path="interp.html"><a href="interp.html#partial-dependence-plot"><i class="fa fa-check"></i><b>13.1.4</b> Partial dependence plot</a></li>
</ul></li>
<li class="chapter" data-level="13.2" data-path="interp.html"><a href="interp.html#local-interpretations"><i class="fa fa-check"></i><b>13.2</b> Local interpretations</a>
<ul>
<li class="chapter" data-level="13.2.1" data-path="interp.html"><a href="interp.html#lime"><i class="fa fa-check"></i><b>13.2.1</b> LIME</a></li>
<li class="chapter" data-level="13.2.2" data-path="interp.html"><a href="interp.html#shapley-values"><i class="fa fa-check"></i><b>13.2.2</b> Shapley values</a></li>
<li class="chapter" data-level="13.2.3" data-path="interp.html"><a href="interp.html#breakdown"><i class="fa fa-check"></i><b>13.2.3</b> Breakdown</a></li>
</ul></li>
</ul></li>
<li class="chapter" data-level="14" data-path="causality.html"><a href="causality.html"><i class="fa fa-check"></i><b>14</b> Two key concepts: causality and non-stationarity</a>
<ul>
<li class="chapter" data-level="14.1" data-path="causality.html"><a href="causality.html#causality-1"><i class="fa fa-check"></i><b>14.1</b> Causality</a>
<ul>
<li class="chapter" data-level="14.1.1" data-path="causality.html"><a href="causality.html#granger"><i class="fa fa-check"></i><b>14.1.1</b> Granger causality</a></li>
<li class="chapter" data-level="14.1.2" data-path="causality.html"><a href="causality.html#causal-additive-models"><i class="fa fa-check"></i><b>14.1.2</b> Causal additive models</a></li>
<li class="chapter" data-level="14.1.3" data-path="causality.html"><a href="causality.html#structural-time-series-models"><i class="fa fa-check"></i><b>14.1.3</b> Structural time series models</a></li>
</ul></li>
<li class="chapter" data-level="14.2" data-path="causality.html"><a href="causality.html#nonstat"><i class="fa fa-check"></i><b>14.2</b> Dealing with changing environments</a>
<ul>
<li class="chapter" data-level="14.2.1" data-path="causality.html"><a href="causality.html#non-stationarity-yet-another-illustration"><i class="fa fa-check"></i><b>14.2.1</b> Non-stationarity: yet another illustration</a></li>
<li class="chapter" data-level="14.2.2" data-path="causality.html"><a href="causality.html#online-learning"><i class="fa fa-check"></i><b>14.2.2</b> Online learning</a></li>
<li class="chapter" data-level="14.2.3" data-path="causality.html"><a href="causality.html#homogeneous-transfer-learning"><i class="fa fa-check"></i><b>14.2.3</b> Homogeneous transfer learning</a></li>
</ul></li>
</ul></li>
<li class="chapter" data-level="15" data-path="unsup.html"><a href="unsup.html"><i class="fa fa-check"></i><b>15</b> Unsupervised learning</a>
<ul>
<li class="chapter" data-level="15.1" data-path="unsup.html"><a href="unsup.html#corpred"><i class="fa fa-check"></i><b>15.1</b> The problem with correlated predictors</a></li>
<li class="chapter" data-level="15.2" data-path="unsup.html"><a href="unsup.html#principal-component-analysis-and-autoencoders"><i class="fa fa-check"></i><b>15.2</b> Principal component analysis and autoencoders</a>
<ul>
<li class="chapter" data-level="15.2.1" data-path="unsup.html"><a href="unsup.html#a-bit-of-algebra"><i class="fa fa-check"></i><b>15.2.1</b> A bit of algebra</a></li>
<li class="chapter" data-level="15.2.2" data-path="unsup.html"><a href="unsup.html#pca"><i class="fa fa-check"></i><b>15.2.2</b> PCA</a></li>
<li class="chapter" data-level="15.2.3" data-path="unsup.html"><a href="unsup.html#ae"><i class="fa fa-check"></i><b>15.2.3</b> Autoencoders</a></li>
<li class="chapter" data-level="15.2.4" data-path="unsup.html"><a href="unsup.html#application"><i class="fa fa-check"></i><b>15.2.4</b> Application</a></li>
</ul></li>
<li class="chapter" data-level="15.3" data-path="unsup.html"><a href="unsup.html#clustering-via-k-means"><i class="fa fa-check"></i><b>15.3</b> Clustering via k-means</a></li>
<li class="chapter" data-level="15.4" data-path="unsup.html"><a href="unsup.html#nearest-neighbors"><i class="fa fa-check"></i><b>15.4</b> Nearest neighbors</a></li>
<li class="chapter" data-level="15.5" data-path="unsup.html"><a href="unsup.html#coding-exercise-1"><i class="fa fa-check"></i><b>15.5</b> Coding exercise</a></li>
</ul></li>
<li class="chapter" data-level="16" data-path="RL.html"><a href="RL.html"><i class="fa fa-check"></i><b>16</b> Reinforcement learning</a>
<ul>
<li class="chapter" data-level="16.1" data-path="RL.html"><a href="RL.html#theoretical-layout"><i class="fa fa-check"></i><b>16.1</b> Theoretical layout</a>
<ul>
<li class="chapter" data-level="16.1.1" data-path="RL.html"><a href="RL.html#general-framework"><i class="fa fa-check"></i><b>16.1.1</b> General framework</a></li>
<li class="chapter" data-level="16.1.2" data-path="RL.html"><a href="RL.html#q-learning"><i class="fa fa-check"></i><b>16.1.2</b> Q-learning</a></li>
<li class="chapter" data-level="16.1.3" data-path="RL.html"><a href="RL.html#sarsa"><i class="fa fa-check"></i><b>16.1.3</b> SARSA</a></li>
</ul></li>
<li class="chapter" data-level="16.2" data-path="RL.html"><a href="RL.html#the-curse-of-dimensionality"><i class="fa fa-check"></i><b>16.2</b> The curse of dimensionality</a></li>
<li class="chapter" data-level="16.3" data-path="RL.html"><a href="RL.html#policy-gradient"><i class="fa fa-check"></i><b>16.3</b> Policy gradient</a>
<ul>
<li class="chapter" data-level="16.3.1" data-path="RL.html"><a href="RL.html#principle-2"><i class="fa fa-check"></i><b>16.3.1</b> Principle</a></li>
<li class="chapter" data-level="16.3.2" data-path="RL.html"><a href="RL.html#extensions-2"><i class="fa fa-check"></i><b>16.3.2</b> Extensions</a></li>
</ul></li>
<li class="chapter" data-level="16.4" data-path="RL.html"><a href="RL.html#simple-examples"><i class="fa fa-check"></i><b>16.4</b> Simple examples</a>
<ul>
<li class="chapter" data-level="16.4.1" data-path="RL.html"><a href="RL.html#q-learning-with-simulations"><i class="fa fa-check"></i><b>16.4.1</b> Q-learning with simulations</a></li>
<li class="chapter" data-level="16.4.2" data-path="RL.html"><a href="RL.html#RLemp2"><i class="fa fa-check"></i><b>16.4.2</b> Q-learning with market data</a></li>
</ul></li>
<li class="chapter" data-level="16.5" data-path="RL.html"><a href="RL.html#concluding-remarks"><i class="fa fa-check"></i><b>16.5</b> Concluding remarks</a></li>
<li class="chapter" data-level="16.6" data-path="RL.html"><a href="RL.html#exercises"><i class="fa fa-check"></i><b>16.6</b> Exercises</a></li>
</ul></li>
<li class="part"><span><b>V Appendix</b></span></li>
<li class="chapter" data-level="17" data-path="data-description.html"><a href="data-description.html"><i class="fa fa-check"></i><b>17</b> Data description</a></li>
<li class="chapter" data-level="18" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html"><i class="fa fa-check"></i><b>18</b> Solutions to exercises</a>
<ul>
<li class="chapter" data-level="18.1" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-3"><i class="fa fa-check"></i><b>18.1</b> Chapter 3</a></li>
<li class="chapter" data-level="18.2" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-4"><i class="fa fa-check"></i><b>18.2</b> Chapter 4</a></li>
<li class="chapter" data-level="18.3" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-5"><i class="fa fa-check"></i><b>18.3</b> Chapter 5</a></li>
<li class="chapter" data-level="18.4" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-6"><i class="fa fa-check"></i><b>18.4</b> Chapter 6</a></li>
<li class="chapter" data-level="18.5" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-7-the-autoencoder-model-universal-approximation"><i class="fa fa-check"></i><b>18.5</b> Chapter 7: the autoencoder model & universal approximation</a></li>
<li class="chapter" data-level="18.6" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-8"><i class="fa fa-check"></i><b>18.6</b> Chapter 8</a></li>
<li class="chapter" data-level="18.7" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-11-ensemble-neural-network"><i class="fa fa-check"></i><b>18.7</b> Chapter 11: ensemble neural network</a></li>
<li class="chapter" data-level="18.8" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-12"><i class="fa fa-check"></i><b>18.8</b> Chapter 12</a>
<ul>
<li class="chapter" data-level="18.8.1" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#ew-portfolios-with-the-tidyverse"><i class="fa fa-check"></i><b>18.8.1</b> EW portfolios with the tidyverse</a></li>
<li class="chapter" data-level="18.8.2" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#advanced-weighting-function"><i class="fa fa-check"></i><b>18.8.2</b> Advanced weighting function</a></li>
<li class="chapter" data-level="18.8.3" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#functional-programming-in-the-backtest"><i class="fa fa-check"></i><b>18.8.3</b> Functional programming in the backtest</a></li>
</ul></li>
<li class="chapter" data-level="18.9" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-15"><i class="fa fa-check"></i><b>18.9</b> Chapter 15</a></li>
<li class="chapter" data-level="18.10" data-path="solutions-to-exercises.html"><a href="solutions-to-exercises.html#chapter-16"><i class="fa fa-check"></i><b>18.10</b> Chapter 16</a></li>
</ul></li>
</ul>
</nav>
</div>
<div class="book-body">
<div class="body-inner">
<div class="book-header" role="navigation">
<h1>
<i class="fa fa-circle-o-notch fa-spin"></i><a href="./">Machine Learning for Factor Investing</a>
</h1>
</div>
<div class="page-wrapper" tabindex="-1" role="main">
<div class="page-inner">
<section class="normal" id="section-">
<div id="lasso" class="section level1" number="5">
<h1><span class="header-section-number">Chapter 5</span> Penalized regressions and sparse hedging for minimum variance portfolios</h1>
<p></p>
<p>In this chapter, we introduce the widespread concept of regularization for linear models. There are in fact several possible applications for these models. The first one is straightforward: resort to penalizations to improve the robustness of factor-based predictive regressions. The outcome can then be used to fuel an allocation scheme. For instance, <span class="citation"><a href="solutions-to-exercises.html#ref-han2018firm" role="doc-biblioref">Han et al.</a> (<a href="solutions-to-exercises.html#ref-han2018firm" role="doc-biblioref">2019</a>)</span> and <span class="citation"><a href="solutions-to-exercises.html#ref-rapach2019time" role="doc-biblioref">D. Rapach and Zhou</a> (<a href="solutions-to-exercises.html#ref-rapach2019time" role="doc-biblioref">2019</a>)</span> use penalized regressions to improve stock return prediction when combining forecasts that emanate from individual characteristics.</p>
<p>Similar ideas can be developed for macroeconomic predictions, as in <span class="citation"><a href="solutions-to-exercises.html#ref-uematsu2019high" role="doc-biblioref">Uematsu and Tanaka</a> (<a href="solutions-to-exercises.html#ref-uematsu2019high" role="doc-biblioref">2019</a>)</span>.
The second application stems from a lesser-known result which originates in <span class="citation"><a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">Stevens</a> (<a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">1998</a>)</span>. It links the weights of optimal mean-variance portfolios to particular cross-sectional regressions. The idea is then different: the purpose is to improve the quality of mean-variance-driven portfolio weights. We present the two approaches below, after an introduction to regularization techniques for linear models.</p>
<p>Other examples of financial applications of penalization can be found in <span class="citation"><a href="solutions-to-exercises.html#ref-d2011identifying" role="doc-biblioref">d’Aspremont</a> (<a href="solutions-to-exercises.html#ref-d2011identifying" role="doc-biblioref">2011</a>)</span>, <span class="citation"><a href="solutions-to-exercises.html#ref-ban2016machine" role="doc-biblioref">Ban, El Karoui, and Lim</a> (<a href="solutions-to-exercises.html#ref-ban2016machine" role="doc-biblioref">2016</a>)</span> and <span class="citation"><a href="solutions-to-exercises.html#ref-kremer2019sparse" role="doc-biblioref">Kremer et al.</a> (<a href="solutions-to-exercises.html#ref-kremer2019sparse" role="doc-biblioref">2019</a>)</span>. In any case, the idea is the same as in the seminal paper <span class="citation"><a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">Tibshirani</a> (<a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">1996</a>)</span>: standard (unconstrained) optimization programs may lead to noisy estimates, thus adding a structuring constraint helps remove some noise (at the cost of a possible bias). For instance, <span class="citation"><a href="solutions-to-exercises.html#ref-kremer2019sparse" role="doc-biblioref">Kremer et al.</a> (<a href="solutions-to-exercises.html#ref-kremer2019sparse" role="doc-biblioref">2019</a>)</span> use this concept to build more robust mean-variance (<span class="citation"><a href="solutions-to-exercises.html#ref-markowitz1952portfolio" role="doc-biblioref">Markowitz</a> (<a href="solutions-to-exercises.html#ref-markowitz1952portfolio" role="doc-biblioref">1952</a>)</span>) portfolios and <span class="citation"><a href="solutions-to-exercises.html#ref-freyberger2020dissecting" role="doc-biblioref">Freyberger, Neuhierl, and Weber</a> (<a href="solutions-to-exercises.html#ref-freyberger2020dissecting" role="doc-biblioref">2020</a>)</span> use it to single out the characteristics that <em>really</em> help explain the cross-section of equity returns.</p>
<div id="penalized-regressions" class="section level2" number="5.1">
<h2><span class="header-section-number">5.1</span> Penalized regressions</h2>
<p></p>
<div id="penreg" class="section level3" number="5.1.1">
<h3><span class="header-section-number">5.1.1</span> Simple regressions</h3>
<p>
The ideas behind linear models are at least two centuries old (<span class="citation"><a href="solutions-to-exercises.html#ref-legendre1805nouvelles" role="doc-biblioref">Legendre</a> (<a href="solutions-to-exercises.html#ref-legendre1805nouvelles" role="doc-biblioref">1805</a>)</span> is an early reference on least squares optimization). Given a matrix of predictors <span class="math inline">\(\textbf{X}\)</span>, we seek to decompose the output vector <span class="math inline">\(\textbf{y}\)</span> as a linear function of the columns of <span class="math inline">\(\textbf{X}\)</span> (written <span class="math inline">\(\textbf{X}\boldsymbol{\beta}\)</span>) plus an error term <span class="math inline">\(\boldsymbol{\epsilon}\)</span>: <span class="math inline">\(\textbf{y}=\textbf{X}\boldsymbol{\beta}+\boldsymbol{\epsilon}\)</span>.</p>
<p>The best choice of <span class="math inline">\(\boldsymbol{\beta}\)</span> is naturally the one that minimizes the error. For analytical tractability, it is the sum of squared errors that is minimized: <span class="math inline">\(L=\boldsymbol{\epsilon}'\boldsymbol{\epsilon}=\sum_{i=1}^I\epsilon_i^2\)</span>. The loss <span class="math inline">\(L\)</span> is called the sum of squared residuals (SSR). To find the optimal <span class="math inline">\(\boldsymbol{\beta}\)</span>, we differentiate the loss <span class="math inline">\(L\)</span> with respect to <span class="math inline">\(\boldsymbol{\beta}\)</span>; the first order condition requires that the gradient be equal to zero:
<span class="math display">\[\begin{align*}
\nabla_{\boldsymbol{\beta}} L&=\frac{\partial}{\partial \boldsymbol{\beta}}(\textbf{y}-\textbf{X}\boldsymbol{\beta})'(\textbf{y}-\textbf{X}\boldsymbol{\beta})=\frac{\partial}{\partial \boldsymbol{\beta}}\boldsymbol{\beta}'\textbf{X}'\textbf{X}\boldsymbol{\beta}-2\textbf{y}'\textbf{X}\boldsymbol{\beta} \\
&=2\textbf{X}'\textbf{X}\boldsymbol{\beta} -2\textbf{X}'\textbf{y}
\end{align*}\]</span>
so that the first order condition <span class="math inline">\(\nabla_{\boldsymbol{\beta}}=\textbf{0}\)</span> is satisfied if
<span class="math display" id="eq:regbeta">\[\begin{equation}
\tag{5.1}
\boldsymbol{\beta}^*=(\textbf{X}'\textbf{X})^{-1}\textbf{X}'\textbf{y},
\end{equation}\]</span>
which is known as the standard <strong>ordinary least squares</strong> (OLS) solution of the linear model. If the matrix <span class="math inline">\(\textbf{X}\)</span> has dimensions <span class="math inline">\(I \times K\)</span>, then <span class="math inline">\(\textbf{X}'\textbf{X}\)</span> can only be inverted if <span class="math inline">\(\textbf{X}\)</span> has full column rank, which requires the number of rows <span class="math inline">\(I\)</span> to be at least equal to the number of columns <span class="math inline">\(K\)</span>. When there are more predictors than instances, this condition fails and there is no unique value of <span class="math inline">\(\boldsymbol{\beta}\)</span> that minimizes the loss. If <span class="math inline">\(\textbf{X}'\textbf{X}\)</span> is nonsingular (hence positive definite), then the second order condition ensures that <span class="math inline">\(\boldsymbol{\beta}^*\)</span> yields a global minimum for the loss <span class="math inline">\(L\)</span> (the second order derivative of <span class="math inline">\(L\)</span> with respect to <span class="math inline">\(\boldsymbol{\beta}\)</span>, the Hessian matrix, is <span class="math inline">\(2\textbf{X}'\textbf{X}\)</span>, which is positive definite in that case).</p>
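<p>As a quick numerical sanity check of Equation <a href="lasso.html#eq:regbeta">(5.1)</a>, the closed-form estimate can be compared to the output of <code>lm()</code>. The snippet below is a minimal sketch on simulated data (the dimensions, seed and coefficient values are arbitrary and only serve the illustration).</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">set.seed(42)                                      # Reproducibility
X &lt;- matrix(rnorm(200 * 5), nrow = 200)           # I = 200 observations, K = 5 predictors
beta_true &lt;- c(1, -2, 0.5, 0, 3)                  # Arbitrary "true" coefficients
y &lt;- as.numeric(X %*% beta_true + rnorm(200))     # Simulated output
beta_ols &lt;- solve(t(X) %*% X) %*% t(X) %*% y      # Equation (5.1)
cbind(beta_ols, coef(lm(y ~ X - 1)))              # Both columns should coincide</code></pre></div>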
<p>Up to now, we have made no distributional assumption on any of the above quantities. Standard assumptions are the following:<br />
- <span class="math inline">\(\mathbb{E}[\textbf{y}|\textbf{X}]=\textbf{X}\boldsymbol{\beta}\)</span>: <strong>linear shape for the regression function</strong>;<br />
- <span class="math inline">\(\mathbb{E}[\boldsymbol{\epsilon}|\textbf{X}]=\textbf{0}\)</span>: errors have <strong>zero mean conditional on the predictors</strong> (exogeneity);<br />
- <span class="math inline">\(\mathbb{E}[\boldsymbol{\epsilon}\boldsymbol{\epsilon}'| \textbf{X}]=\sigma^2\textbf{I}\)</span>: <strong>homoscedasticity</strong> - errors are uncorrelated and have identical variance;<br />
- the <span class="math inline">\(\epsilon_i\)</span> are normally distributed.</p>
<p>Under these hypotheses, it is possible to perform statistical tests related to the <span class="math inline">\(\hat{\boldsymbol{\beta}}\)</span> coefficients. We refer to chapters 2 to 4 in <span class="citation"><a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">Greene</a> (<a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">2018</a>)</span> for a thorough treatment on linear models as well as to chapter 5 of the same book for details on the corresponding tests.</p>
</div>
<div id="forms-of-penalizations" class="section level3" number="5.1.2">
<h3><span class="header-section-number">5.1.2</span> Forms of penalizations</h3>
<p>
Penalized regressions have been popularized since the seminal work of <span class="citation"><a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">Tibshirani</a> (<a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">1996</a>)</span>. The idea is to impose a constraint on the coefficients of the regression, namely that their total magnitude be restrained. In his original paper, <span class="citation"><a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">Tibshirani</a> (<a href="solutions-to-exercises.html#ref-tibshirani1996regression" role="doc-biblioref">1996</a>)</span> proposes to estimate the following model (LASSO):
<span class="math display" id="eq:lasso1">\[\begin{equation}
\tag{5.2}
y_i = \sum_{j=1}^J \beta_jx_{i,j} + \epsilon_i, \quad i =1,\dots,I, \quad \text{s.t.} \quad \sum_{j=1}^J |\beta_j| < \delta,
\end{equation}\]</span>
for some strictly positive constant <span class="math inline">\(\delta\)</span>. Under least squares minimization, this amounts to solving the Lagrangian formulation:
<span class="math display" id="eq:lasso2">\[\begin{equation}
\tag{5.3}
\underset{\mathbf{\beta}}{\min} \, \left\{ \sum_{i=1}^I\left(y_i - \sum_{j=1}^J \beta_jx_{i,j} \right)^2+\lambda \sum_{j=1}^J |\beta_j| \right\},
\end{equation}\]</span>
for some value <span class="math inline">\(\lambda>0\)</span> which naturally depends on <span class="math inline">\(\delta\)</span> (the lower the <span class="math inline">\(\delta\)</span>, the higher the <span class="math inline">\(\lambda\)</span>: the constraint is more binding). This specification seems close to the ridge regression (<span class="math inline">\(L^2\)</span> regularization), which in fact predates the Lasso:
<span class="math display" id="eq:ridge">\[\begin{equation}
\tag{5.4}
\underset{\mathbf{\beta}}{\min} \, \left\{ \sum_{i=1}^I\left(y_i - \sum_{j=1}^J\beta_jx_{i,j} \right)^2+\lambda \sum_{j=1}^J \beta_j^2 \right\},
\end{equation}\]</span>
and which is equivalent to estimating the following model
<span class="math display" id="eq:ridge6">\[\begin{equation}
\tag{5.5}
y_i = \sum_{j=1}^J \beta_jx_{i,j} + \epsilon_i, \quad i =1,\dots,I, \quad \text{s.t.} \quad \sum_{j=1}^J \beta_j^2 < \delta,
\end{equation}\]</span>
but the outcome is in fact quite different, which justifies a separate treatment. Mechanically, as <span class="math inline">\(\lambda\)</span>, the penalization intensity, increases (or as <span class="math inline">\(\delta\)</span> in <a href="lasso.html#eq:ridge6">(5.5)</a> <em>decreases</em>), the coefficients of the ridge regression all slowly decrease in magnitude towards zero. In the case of the LASSO, the convergence is more abrupt: some coefficients shrink to zero very quickly and, as <span class="math inline">\(\lambda\)</span> grows large enough, they are successively set exactly to zero, while in the ridge regression, the zero value is only reached asymptotically for all coefficients. We invite the interested reader to have a look at the survey in <span class="citation"><a href="solutions-to-exercises.html#ref-hastie2020ridge" role="doc-biblioref">Hastie</a> (<a href="solutions-to-exercises.html#ref-hastie2020ridge" role="doc-biblioref">2020</a>)</span> on the many applications of ridge regressions in data science, with links to other topics such as cross-validation and dropout regularization.</p>
<p>To depict the difference between the Lasso and the ridge regression, let us consider the case of <span class="math inline">\(K=2\)</span> predictors, which is shown in Figure <a href="lasso.html#fig:lassoridge">5.1</a>. The optimal unconstrained solution <span class="math inline">\(\boldsymbol{\beta}^*\)</span> is pictured in red in the middle of the space. The problem is naturally that it does not satisfy the imposed conditions. These constraints are shown in light grey: they take the shape of a (rotated) square <span class="math inline">\(|\beta_1|+|\beta_2| \le \delta\)</span> in the case of the Lasso and a circle <span class="math inline">\(\beta_1^2+\beta_2^2 \le \delta\)</span> for the ridge regression. In order to satisfy these constraints, the optimization needs to look in the vicinity of <span class="math inline">\(\boldsymbol{\beta}^*\)</span> by allowing for larger error levels. These error levels are shown as orange ellipses in the figure. When the requirement on the error is loose enough, one ellipse touches the acceptable boundary (in grey) and this is where the constrained solution is located.</p>
<div class="figure" style="text-align: center"><span id="fig:lassoridge"></span>
<img src="images/lassoridge.png" alt="Schematic view of Lasso (left) versus ridge (right) regressions." width="800px" />
<p class="caption">
FIGURE 5.1: Schematic view of Lasso (left) versus ridge (right) regressions.
</p>
</div>
<p>Both methods work when the number of exogenous variables surpasses that of observations, i.e., in the case where classical regressions are ill-defined. This is easy to see in the case of the ridge regression, for which the closed-form solution is simply
<span class="math display">\[\hat{\boldsymbol{\beta}}=(\mathbf{X}'\mathbf{X}+\lambda \mathbf{I}_K)^{-1}\mathbf{X}'\mathbf{y}.\]</span>
The additional term <span class="math inline">\(\lambda \mathbf{I}_K\)</span> compared to Equation <a href="lasso.html#eq:regbeta">(5.1)</a> ensures that the inverse matrix is well-defined whenever <span class="math inline">\(\lambda>0\)</span>. As <span class="math inline">\(\lambda\)</span> increases, the magnitudes of the <span class="math inline">\(\hat{\beta}_j\)</span> decrease, which explains why penalizations are sometimes referred to as <strong>shrinkage</strong> methods (the estimated coefficients see their values shrink). </p>
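<p>Similarly, the ridge solution above can be computed in one line (a minimal sketch reusing the simulated <code>X</code> and <code>y</code> from the previous snippet; the value of <code>lambda</code> is arbitrary).</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">lambda &lt;- 0.1                                                            # Arbitrary penalization intensity
beta_ridge &lt;- solve(t(X) %*% X + lambda * diag(ncol(X))) %*% t(X) %*% y  # Ridge closed form
cbind(beta_ridge, beta_ols)                                              # Ridge coefficients are (slightly) shrunk</code></pre></div>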
<p><span class="citation"><a href="solutions-to-exercises.html#ref-zou2005regularization" role="doc-biblioref">Zou and Hastie</a> (<a href="solutions-to-exercises.html#ref-zou2005regularization" role="doc-biblioref">2005</a>)</span> propose to benefit from the best of both worlds when combining both penalizations in a convex manner (which they call the <strong>elasticnet</strong>):
<span class="math display" id="eq:elasticnet">\[\begin{equation}
\tag{5.6}
y_i = \sum_{j=1}^J \beta_jx_{i,j} + \epsilon_i, \quad \text{s.t.} \quad \alpha \sum_{j=1}^J |\beta_j| +(1-\alpha)\sum_{j=1}^J \beta_j^2< \delta, \quad i =1,\dots,I,
\end{equation}\]</span>
which is associated to the optimization program
<span class="math display" id="eq:elastic">\[\begin{equation}
\tag{5.7}
\underset{\mathbf{\beta}}{\min} \, \left\{ \sum_{i=1}^I\left(y_i - \sum_{j=1}^J\beta_jx_{i,j} \right)^2+\lambda \left(\alpha\sum_{j=1}^J |\beta_j|+ (1-\alpha)\sum_{j=1}^J \beta_j^2\right) \right\}.
\end{equation}\]</span></p>
<p>The main advantage of the LASSO compared to the ridge regression is its selection capability. Indeed, given a very large number of variables (or predictors), the LASSO will progressively rule out those that are the least relevant. The elasticnet preserves this selection ability and <span class="citation"><a href="solutions-to-exercises.html#ref-zou2005regularization" role="doc-biblioref">Zou and Hastie</a> (<a href="solutions-to-exercises.html#ref-zou2005regularization" role="doc-biblioref">2005</a>)</span> argue that in some cases, it is even more effective than the LASSO. The parameter <span class="math inline">\(\alpha \in [0,1]\)</span> tunes the smoothness of convergence (of the coefficients) towards zero. The closer <span class="math inline">\(\alpha\)</span> is to zero, the smoother the convergence.</p>
</div>
<div id="illustrations" class="section level3" number="5.1.3">
<h3><span class="header-section-number">5.1.3</span> Illustrations</h3>
<p>We begin with simple illustrations of penalized regressions, starting with the LASSO. The original implementation by the authors (the <strong>glmnet</strong> package) is in R, which is practical. Its syntax differs slightly from that of usual linear models. The illustrations are run on the whole dataset. First, we estimate the coefficients. By default, the function chooses a large array of penalization values so that the results for different penalization intensities (<span class="math inline">\(\lambda\)</span>) can be shown immediately.</p>
<div class="sourceCode" id="cb31"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb31-1"><a href="lasso.html#cb31-1" aria-hidden="true" tabindex="-1"></a><span class="fu">library</span>(glmnet)</span>
<span id="cb31-2"><a href="lasso.html#cb31-2" aria-hidden="true" tabindex="-1"></a>y_penalized <span class="ot"><-</span> data_ml<span class="sc">$</span>R1M_Usd <span class="co"># Dependent variable</span></span>
<span id="cb31-3"><a href="lasso.html#cb31-3" aria-hidden="true" tabindex="-1"></a>x_penalized <span class="ot"><-</span> data_ml <span class="sc">%>%</span> <span class="co"># Predictors</span></span>
<span id="cb31-4"><a href="lasso.html#cb31-4" aria-hidden="true" tabindex="-1"></a> dplyr<span class="sc">::</span><span class="fu">select</span>(<span class="fu">all_of</span>(features)) <span class="sc">%>%</span> <span class="fu">as.matrix</span>() </span>
<span id="cb31-5"><a href="lasso.html#cb31-5" aria-hidden="true" tabindex="-1"></a>fit_lasso <span class="ot"><-</span> <span class="fu">glmnet</span>(x_penalized, y_penalized, <span class="at">alpha =</span> <span class="dv">1</span>) <span class="co"># Model alpha = 1: LASSO</span></span></code></pre></div>
<p></p>
<p>Once the coefficients are computed, they require some wrangling before plotting. Also, there are too many of them, so we only plot a subset of them.</p>
<div class="sourceCode" id="cb32"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb32-1"><a href="lasso.html#cb32-1" aria-hidden="true" tabindex="-1"></a>lasso_res <span class="ot"><-</span> <span class="fu">summary</span>(fit_lasso<span class="sc">$</span>beta) <span class="co"># Extract LASSO coefs</span></span>
<span id="cb32-2"><a href="lasso.html#cb32-2" aria-hidden="true" tabindex="-1"></a>lambda <span class="ot"><-</span> fit_lasso<span class="sc">$</span>lambda <span class="co"># Values of the penalisation const</span></span>
<span id="cb32-3"><a href="lasso.html#cb32-3" aria-hidden="true" tabindex="-1"></a>lasso_res<span class="sc">$</span>Lambda <span class="ot"><-</span> lambda[lasso_res<span class="sc">$</span>j] <span class="co"># Put the labels where they belong</span></span>
<span id="cb32-4"><a href="lasso.html#cb32-4" aria-hidden="true" tabindex="-1"></a>lasso_res<span class="sc">$</span>Feature <span class="ot"><-</span> features[lasso_res<span class="sc">$</span>i] <span class="sc">%>%</span> <span class="fu">as.factor</span>() <span class="co"># Add names of variables to output</span></span>
<span id="cb32-5"><a href="lasso.html#cb32-5" aria-hidden="true" tabindex="-1"></a>lasso_res[<span class="dv">1</span><span class="sc">:</span><span class="dv">120</span>,] <span class="sc">%>%</span> <span class="co"># Take the first 120 estimates</span></span>
<span id="cb32-6"><a href="lasso.html#cb32-6" aria-hidden="true" tabindex="-1"></a> <span class="fu">ggplot</span>(<span class="fu">aes</span>(<span class="at">x =</span> Lambda, <span class="at">y =</span> x, <span class="at">color =</span> Feature)) <span class="sc">+</span> <span class="co"># Plot!</span></span>
<span id="cb32-7"><a href="lasso.html#cb32-7" aria-hidden="true" tabindex="-1"></a> <span class="fu">geom_line</span>() <span class="sc">+</span> <span class="fu">coord_fixed</span>(<span class="fl">0.25</span>) <span class="sc">+</span> <span class="fu">ylab</span>(<span class="st">"beta"</span>) <span class="sc">+</span> <span class="co"># Change aspect ratio of graph</span></span>
<span id="cb32-8"><a href="lasso.html#cb32-8" aria-hidden="true" tabindex="-1"></a> <span class="fu">theme</span>(<span class="at">legend.text =</span> <span class="fu">element_text</span>(<span class="at">size =</span> <span class="dv">7</span>)) <span class="co"># Reduce legend font size</span></span></code></pre></div>
<div class="figure"><span id="fig:lassoresults"></span>
<img src="ML_factor_files/figure-html/lassoresults-1.png" alt="LASSO model. The dependent variable is the 1 month ahead return." width="400px" />
<p class="caption">
FIGURE 5.2: LASSO model. The dependent variable is the 1 month ahead return.
</p>
</div>
<p></p>
<p>The graph plots the evolution of coefficients as the penalization intensity, <span class="math inline">\(\lambda\)</span>, increases. For some characteristics, like Ebit_Ta (in orange), the convergence to zero is rapid. Other variables resist the penalization longer, like Mkt_Cap_3M_Usd, which is the last one to vanish. Essentially, this means that at the first order, this variable is an important driver of future 1-month returns in our sample. Moreover, the negative sign of its coefficient is a confirmation (again, in this sample) of the size anomaly, according to which small firms experience higher future returns compared to their larger counterparts.</p>
<p>Next, we turn to ridge regressions.</p>
<div class="sourceCode" id="cb33"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb33-1"><a href="lasso.html#cb33-1" aria-hidden="true" tabindex="-1"></a>fit_ridge <span class="ot"><-</span> <span class="fu">glmnet</span>(x_penalized, y_penalized, <span class="at">alpha =</span> <span class="dv">0</span>) <span class="co"># alpha = 0: ridge</span></span>
<span id="cb33-2"><a href="lasso.html#cb33-2" aria-hidden="true" tabindex="-1"></a>ridge_res <span class="ot"><-</span> <span class="fu">summary</span>(fit_ridge<span class="sc">$</span>beta) <span class="co"># Extract ridge coefs</span></span>
<span id="cb33-3"><a href="lasso.html#cb33-3" aria-hidden="true" tabindex="-1"></a>lambda <span class="ot"><-</span> fit_ridge<span class="sc">$</span>lambda <span class="co"># Penalisation const</span></span>
<span id="cb33-4"><a href="lasso.html#cb33-4" aria-hidden="true" tabindex="-1"></a>ridge_res<span class="sc">$</span>Feature <span class="ot"><-</span> features[ridge_res<span class="sc">$</span>i] <span class="sc">%>%</span> <span class="fu">as.factor</span>()</span>
<span id="cb33-5"><a href="lasso.html#cb33-5" aria-hidden="true" tabindex="-1"></a>ridge_res<span class="sc">$</span>Lambda <span class="ot"><-</span> lambda[ridge_res<span class="sc">$</span>j] <span class="co"># Set labels right</span></span>
<span id="cb33-6"><a href="lasso.html#cb33-6" aria-hidden="true" tabindex="-1"></a>ridge_res <span class="sc">%>%</span> </span>
<span id="cb33-7"><a href="lasso.html#cb33-7" aria-hidden="true" tabindex="-1"></a> <span class="fu">filter</span>(Feature <span class="sc">%in%</span> <span class="fu">levels</span>(<span class="fu">droplevels</span>(lasso_res<span class="sc">$</span>Feature[<span class="dv">1</span><span class="sc">:</span><span class="dv">120</span>]))) <span class="sc">%>%</span> <span class="co"># Keep same features </span></span>
<span id="cb33-8"><a href="lasso.html#cb33-8" aria-hidden="true" tabindex="-1"></a> <span class="fu">ggplot</span>(<span class="fu">aes</span>(<span class="at">x =</span> Lambda, <span class="at">y =</span> x, <span class="at">color =</span> Feature)) <span class="sc">+</span> <span class="fu">ylab</span>(<span class="st">"beta"</span>) <span class="sc">+</span> <span class="co"># Plot!</span></span>
<span id="cb33-9"><a href="lasso.html#cb33-9" aria-hidden="true" tabindex="-1"></a> <span class="fu">geom_line</span>() <span class="sc">+</span> <span class="fu">scale_x_log10</span>() <span class="sc">+</span> <span class="fu">coord_fixed</span>(<span class="dv">45</span>) <span class="sc">+</span> <span class="co"># Aspect ratio </span></span>
<span id="cb33-10"><a href="lasso.html#cb33-10" aria-hidden="true" tabindex="-1"></a> <span class="fu">theme</span>(<span class="at">legend.text =</span> <span class="fu">element_text</span>(<span class="at">size =</span> <span class="dv">7</span>))</span></code></pre></div>
<div class="figure"><span id="fig:sparseridge"></span>
<img src="ML_factor_files/figure-html/sparseridge-1.png" alt="Ridge regression. The dependent variable is the 1 month ahead return." width="576" />
<p class="caption">
FIGURE 5.3: Ridge regression. The dependent variable is the 1 month ahead return.
</p>
</div>
<p></p>
<p>In Figure <a href="lasso.html#fig:sparseridge">5.3</a>, the convergence to zero is much smoother. We underline that the x-axis (penalization intensities) has a log scale, which allows us to see the early patterns (close to zero, to the left) more clearly. As in the previous figure, the Mkt_Cap_3M_Usd predictor clearly dominates, again with large negative coefficients. Nonetheless, as <span class="math inline">\(\lambda\)</span> increases, its domination over the other predictors fades.</p>
<p>By definition, the elasticnet will produce curves that behave like a blend of the two above approaches. Nonetheless, as long as <span class="math inline">\(\alpha >0\)</span>, the selective property of the LASSO will be preserved: some features will see their coefficients shrink rapidly to zero. In fact, the strength of the LASSO is such that a balanced mix of the two penalizations is not reached at <span class="math inline">\(\alpha = 1/2\)</span>, but rather at a much smaller value (possibly below 0.1).</p>
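<p>For completeness, an elasticnet model can be obtained with the same function by picking an intermediate value for <code>alpha</code>. Below, a minimal sketch reusing the <code>x_penalized</code> and <code>y_penalized</code> objects defined above; the value 0.01 is an arbitrary choice for the sake of the example.</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">fit_elnet &lt;- glmnet(x_penalized, y_penalized, alpha = 0.01)  # 0 &lt; alpha &lt; 1: elasticnet
elnet_res &lt;- summary(fit_elnet$beta)                         # Coefficient paths, as for the LASSO above</code></pre></div>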
</div>
</div>
<div id="sparse-hedging-for-minimum-variance-portfolios" class="section level2" number="5.2">
<h2><span class="header-section-number">5.2</span> Sparse hedging for minimum variance portfolios</h2>
<p></p>
<div id="presentation-and-derivations" class="section level3" number="5.2.1">
<h3><span class="header-section-number">5.2.1</span> Presentation and derivations</h3>
<p>The idea of constructing sparse portfolios is not new per se (see, e.g., <span class="citation"><a href="solutions-to-exercises.html#ref-brodie2009sparse" role="doc-biblioref">Brodie et al.</a> (<a href="solutions-to-exercises.html#ref-brodie2009sparse" role="doc-biblioref">2009</a>)</span>, <span class="citation"><a href="solutions-to-exercises.html#ref-fastrich2015constructing" role="doc-biblioref">Fastrich, Paterlini, and Winker</a> (<a href="solutions-to-exercises.html#ref-fastrich2015constructing" role="doc-biblioref">2015</a>)</span>) and the link with the selective property of the LASSO is rather straightforward in classical quadratic programs. Note that the choice of the <span class="math inline">\(L^1\)</span> norm is imperative because when enforcing a simple <span class="math inline">\(L^2\)</span> norm, the diversification of the portfolio increases (see <span class="citation"><a href="solutions-to-exercises.html#ref-coqueret2015diversified" role="doc-biblioref">Coqueret</a> (<a href="solutions-to-exercises.html#ref-coqueret2015diversified" role="doc-biblioref">2015</a>)</span>).</p>
<p>The idea behind this section stems from <span class="citation"><a href="solutions-to-exercises.html#ref-goto2015improving" role="doc-biblioref">Goto and Xu</a> (<a href="solutions-to-exercises.html#ref-goto2015improving" role="doc-biblioref">2015</a>)</span> but the cornerstone result was first published by <span class="citation"><a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">Stevens</a> (<a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">1998</a>)</span> and we present it below. We provide details because the derivations are not commonplace in the literature.</p>
<p>In usual mean-variance allocations, one core ingredient is the inverse covariance matrix of assets <span class="math inline">\(\mathbf{\Sigma}^{-1}\)</span>. For instance, the maximum Sharpe ratio (MSR) portfolio is given by</p>
<p><span class="math display" id="eq:MSR">\[\begin{equation}
\tag{5.8}
\mathbf{w}^{\text{MSR}}=\frac{\mathbf{\Sigma}^{-1}\boldsymbol{\mu}}{\mathbf{1}'\mathbf{\Sigma}^{-1}\boldsymbol{\mu}},
\end{equation}\]</span>
where <span class="math inline">\(\mathbf{\mu}\)</span> is the vector of expected (excess) returns. Taking <span class="math inline">\(\mathbf{\mu}=\mathbf{1}\)</span> yields the minimum variance portfolio, which is agnostic in terms of the first moment of expected returns (and, as such, usually more robust than most alternatives which try to estimate <span class="math inline">\(\boldsymbol{\mu}\)</span> and often fail).</p>
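<p>Equation <a href="lasso.html#eq:MSR">(5.8)</a> translates into a couple of lines of code. The function below is a minimal sketch (its name is ours); it takes a covariance matrix and a vector of expected returns as inputs.</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">weights_msr &lt;- function(Sigma, mu){      # Sigma: covariance matrix, mu: expected returns
  w &lt;- solve(Sigma) %*% mu               # Sigma^(-1) mu
  return(w / sum(w))                     # Normalise so that the weights sum to one
}
# weights_msr(Sigma, rep(1, ncol(Sigma))) yields the minimum variance portfolio</code></pre></div>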
<p>The traditional way is to estimate <span class="math inline">\(\boldsymbol{\Sigma}\)</span> and to invert it to get the MSR weights. However, several approaches aim at estimating <span class="math inline">\(\boldsymbol{\Sigma}^{-1}\)</span> directly, and we present one of them below. We proceed one asset at a time, that is, one line of <span class="math inline">\(\boldsymbol{\Sigma}^{-1}\)</span> at a time.<br />
If we decompose the matrix <span class="math inline">\(\mathbf{\Sigma}\)</span> into:
<span class="math display">\[\mathbf{\Sigma}= \left[\begin{array}{cc} \sigma^2 & \mathbf{c}' \\
\mathbf{c}& \mathbf{C}\end{array} \right],\]</span>
classical partitioning results (e.g., Schur complements) imply
<span class="math display">\[\small \mathbf{\Sigma}^{-1}= \left[\begin{array}{cc} (\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1} & - (\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1}\mathbf{c}'\mathbf{C}^{-1} \\
- (\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1}\mathbf{C}^{-1}\mathbf{c}& \mathbf{C}^{-1}+ (\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1}\mathbf{C}^{-1}\mathbf{cc}'\mathbf{C}^{-1}\end{array} \right].\]</span>
We are interested in the first line, which has 2 components: the factor <span class="math inline">\((\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1}\)</span> and the line vector <span class="math inline">\(\mathbf{c}'\mathbf{C}^{-1}\)</span>. <span class="math inline">\(\mathbf{C}\)</span> is the covariance matrix of assets <span class="math inline">\(2\)</span> to <span class="math inline">\(N\)</span> and <span class="math inline">\(\mathbf{c}\)</span> is the covariance between the first asset and all other assets. The first line of <span class="math inline">\(\mathbf{\Sigma}^{-1}\)</span> is
<span class="math display" id="eq:sparse1">\[\begin{equation}
\tag{5.9}
(\sigma^2 -\mathbf{c}'\mathbf{C}^{-1}\mathbf{c})^{-1} \left[1 \quad \underbrace{-\mathbf{c}'\mathbf{C}^{-1}}_{N-1 \text{ terms}} \right].
\end{equation}\]</span></p>
<p>We now consider an alternative setting. We regress the returns of the first asset on those of all other assets:
<span class="math display" id="eq:sparseeq">\[\begin{equation}
\tag{5.10}
r_{1,t}=a_1+\sum_{n=2}^N\beta_{1|n}r_{n,t}+\epsilon_t, \quad \text{ i.e., } \quad \mathbf{r}_1=a_1\mathbf{1}_T+\mathbf{R}_{-1}\mathbf{\beta}_1+\epsilon_1,
\end{equation}\]</span>
where <span class="math inline">\(\mathbf{R}_{-1}\)</span> gathers the returns of all assets except the first one. The OLS estimator for <span class="math inline">\(\mathbf{\beta}_1\)</span> is
<span class="math display" id="eq:sparse2">\[\begin{equation}
\tag{5.11}
\hat{\mathbf{\beta}}_{1}=\mathbf{C}^{-1}\mathbf{c},
\end{equation}\]</span></p>
<p>and this is the partitioned form (when a constant is included in the regression) stemming from the Frisch-Waugh-Lovell theorem (see chapter 3 in <span class="citation"><a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">Greene</a> (<a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">2018</a>)</span>).</p>
<p>In addition,
<span class="math display" id="eq:sparse3">\[\begin{equation}
\tag{5.12}
(1-R^2)\sigma_{\mathbf{r}_1}^2=\sigma_{\mathbf{r}_1}^2- \mathbf{c}'\mathbf{C}^{-1}\mathbf{c} =\sigma^2_{\epsilon_1}.
\end{equation}\]</span>
The proof of this last fact is given below.</p>
<p>With <span class="math inline">\(\mathbf{X}\)</span> being the concatenation of <span class="math inline">\(\mathbf{1}_T\)</span> with returns <span class="math inline">\(\mathbf{R}_{-1}\)</span> and with <span class="math inline">\(\mathbf{y}=\mathbf{r}_1\)</span>, the classical expression of the <span class="math inline">\(R^2\)</span> is <span class="math display">\[R^2=1-\frac{\mathbf{\epsilon}'\mathbf{\epsilon}}{T\sigma_Y^2}=1-\frac{\mathbf{y}'\mathbf{y}-\hat{\mathbf{\beta}'}\mathbf{X}'\mathbf{X}\hat{\mathbf{\beta}}}{T\sigma_Y^2}=1-\frac{\mathbf{y}'\mathbf{y}-\mathbf{y}'\mathbf{X}\hat{\mathbf{\beta}}}{T\sigma_Y^2},\]</span>
with fitted values <span class="math inline">\(\mathbf{X}\hat{\mathbf{\beta}}= \hat{a_1}\mathbf{1}_T+\mathbf{R}_{-1}\mathbf{C}^{-1}\mathbf{c}\)</span>. Hence,
<span class="math display">\[\begin{align*}
T\sigma_{\mathbf{r}_1}^2R^2&=T\sigma_{\mathbf{r}_1}^2-\mathbf{r}'_1\mathbf{r}_1+\hat{a_1}\mathbf{1}'_T\mathbf{r}_1+\mathbf{r}'_1\mathbf{R}_{-1}\mathbf{C}^{-1}\mathbf{c} \\
T(1-R^2)\sigma_{\mathbf{r}_1}^2&=\mathbf{r}'_1\mathbf{r}_1-\hat{a_1}\mathbf{1}'_T\mathbf{r}_1-\left(\mathbf{\tilde{r}}_1+\frac{\mathbf{1}_T\mathbf{1}'_T}{T}\mathbf{r}_1\right)'\left(\tilde{\mathbf{R}}_{-1}+\frac{\mathbf{1}_T\mathbf{1}'_T}{T}\mathbf{R}_{-1}\right)\mathbf{C}^{-1}\mathbf{c} \\
T(1-R^2)\sigma_{\mathbf{r}_1}^2&=\mathbf{r}'_1\mathbf{r}_1-\hat{a_1}\mathbf{1}'_T\mathbf{r}_1-T\mathbf{c}'\mathbf{C}^{-1}\mathbf{c} -\mathbf{r}'_1\frac{\mathbf{1}_T\mathbf{1}'_T}{T}\mathbf{R}_{-1} \mathbf{C}^{-1}\mathbf{c} \\
T(1-R^2)\sigma_{\mathbf{r}_1}^2&=\mathbf{r}'_1\mathbf{r}_1-\frac{(\mathbf{1}'_T\mathbf{r}_1)^2}{T}- T\mathbf{c}'\mathbf{C}^{-1}\mathbf{c} \\
(1-R^2)\sigma_{\mathbf{r}_1}^2&=\sigma_{\mathbf{r}_1}^2- \mathbf{c}'\mathbf{C}^{-1}\mathbf{c}
\end{align*}\]</span>
where in the fourth equality we have plugged <span class="math inline">\(\hat{a}_1=\frac{\mathbf{1'}_T}{T}(\mathbf{r}_1-\mathbf{R}_{-1}\mathbf{C}^{-1}\mathbf{c})\)</span>. Note that there is probably a simpler proof, see, e.g., section 3.5 in <span class="citation"><a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">Greene</a> (<a href="solutions-to-exercises.html#ref-greene2018econometric" role="doc-biblioref">2018</a>)</span>.</p>
<p>Combining <a href="lasso.html#eq:sparse1">(5.9)</a>, <a href="lasso.html#eq:sparse2">(5.11)</a> and <a href="lasso.html#eq:sparse3">(5.12)</a>, we get that the first line of <span class="math inline">\(\mathbf{\Sigma}^{-1}\)</span> is equal to
<span class="math display" id="eq:sparsehedgeeq2">\[\begin{equation}
\tag{5.13}
\frac{1}{\sigma^2_{\epsilon_1}}\times \left[ 1 \quad -\hat{\boldsymbol{\beta}}_1'\right].
\end{equation}\]</span></p>
<p>Given the first line of <span class="math inline">\(\mathbf{\Sigma}^{-1}\)</span>, it suffices to multiply by <span class="math inline">\(\boldsymbol{\mu}\)</span> to get the portfolio weight in the first asset (up to a scaling constant).</p>
<p>There is a nice economic intuition behind the above results which justifies the term “sparse hedging.” We take the case of the minimum variance portfolio, for which <span class="math inline">\(\boldsymbol{\mu}=\boldsymbol{1}\)</span>. In Equation <a href="lasso.html#eq:sparseeq">(5.10)</a>, we try to explain the return of asset 1 with that of all other assets. In the above equation, up to a scaling constant, the portfolio has a unit position in the first asset and <span class="math inline">\(-\hat{\boldsymbol{\beta}}_1\)</span> positions in all other assets. Hence, the purpose of all other assets is clearly to hedge the return of the first one. In fact, these positions are aimed at minimizing the squared errors of the aggregate portfolio for the first asset (these errors are exactly <span class="math inline">\(\mathbf{\epsilon}_1\)</span>). Moreover, the scaling factor <span class="math inline">\(\sigma^{-2}_{\epsilon_1}\)</span> is also simple to interpret: the more we trust the regression output (because of a small <span class="math inline">\(\sigma^{2}_{\epsilon_1}\)</span>), the more we invest in the hedging portfolio of the asset.</p>
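<p>Equation <a href="lasso.html#eq:sparsehedgeeq2">(5.13)</a> can be checked numerically on simulated data (a minimal sketch; the dimensions are arbitrary and the comparison is made with the sample covariance matrix).</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">set.seed(0)                                                 # Reproducibility
nb_dates &lt;- 500; nb_assets &lt;- 5                             # Arbitrary dimensions
R &lt;- matrix(rnorm(nb_dates * nb_assets), nrow = nb_dates)   # Simulated returns
fit &lt;- lm(R[, 1] ~ R[, -1])                                 # Regress asset 1 on all other assets
line_1 &lt;- c(1, -coef(fit)[-1]) / var(residuals(fit))        # Equation (5.13)
rbind(line_1, solve(cov(R))[1, ])                           # The two lines coincide (up to rounding)</code></pre></div>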
<p>This reasoning is easily generalized for any line of <span class="math inline">\(\mathbf{\Sigma}^{-1}\)</span>, which can be obtained by regressing the returns of asset <span class="math inline">\(i\)</span> on the returns of all other assets. If the allocation scheme has the form <a href="lasso.html#eq:MSR">(5.8)</a> for given values of <span class="math inline">\(\boldsymbol{\mu}\)</span>, then the pseudo-code for the sparse portfolio strategy is the following.</p>
<p>At each date (which we omit for notational convenience),</p>
<ul>
<li>For all stocks <span class="math inline">\(i\)</span>,<br />
</li>
</ul>
<ol style="list-style-type: decimal">
<li>estimate the elasticnet regression over the <span class="math inline">\(t=1,\dots,T\)</span> samples to get the <span class="math inline">\(i^{th}\)</span> line of <span class="math inline">\(\hat{\mathbf{\Sigma}}^{-1}\)</span>:
<span class="math display">\[ \small \left[\hat{\mathbf{\Sigma}}^{-1}\right]_{i,\cdot}= \underset{\mathbf{\beta}_{i|}}{\text{argmin}}\, \left\{\sum_{t=1}^T\left( r_{i,t}-a_i+\sum_{n\neq i}^N\beta_{i|n}r_{n,t}\right)^2+\lambda \alpha || \mathbf{\beta}_{i|}||_1+\lambda (1-\alpha)||\mathbf{\beta}_{i|}||_2^2\right\}
\]</span><br />
</li>
<li>to get the weights of asset <span class="math inline">\(i\)</span>, we compute the <span class="math inline">\(\mathbf{\mu}\)</span>-weighted sum:
<span class="math inline">\(w_i= \sigma_{\epsilon_i}^{-2}\left(\mu_i- \sum_{j\neq i}\mathbf{\beta}_{i|j}\mu_j\right)\)</span>,</li>
</ol>
<p>where we recall that the vectors <span class="math inline">\(\mathbf{\beta}_{i|}=[\mathbf{\beta}_{i|1},\dots,\mathbf{\beta}_{i|i-1},\mathbf{\beta}_{i|i+1},\dots,\mathbf{\beta}_{i|N}]\)</span> are the coefficients from regressing the returns of asset <span class="math inline">\(i\)</span> against the returns of all other assets.<br />
The introduction of the <strong>penalization norms</strong> is the new ingredient, compared to the original approach of <span class="citation"><a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">Stevens</a> (<a href="solutions-to-exercises.html#ref-stevens1998inverse" role="doc-biblioref">1998</a>)</span>. The benefits are twofold: first, introducing constraints yields weights that are more robust and less subject to errors in the estimates of <span class="math inline">\(\mathbf{\mu}\)</span>; second, because of sparsity, weights are more stable, less leveraged and thus the strategy is less impacted by transaction costs. Before we turn to numerical applications, we mention a more direct route to the estimation of a <strong>robust inverse covariance matrix</strong>: the Graphical LASSO. The GLASSO estimates the precision matrix (inverse covariance matrix) via maximum likelihood while imposing constraints/penalizations on the weights of the matrix. When the penalization is strong enough, this yields a sparse matrix, i.e., a matrix in which some and possibly many coefficients are zero. We refer to the original article <span class="citation"><a href="solutions-to-exercises.html#ref-friedman2008sparse" role="doc-biblioref">J. Friedman, Hastie, and Tibshirani</a> (<a href="solutions-to-exercises.html#ref-friedman2008sparse" role="doc-biblioref">2008</a>)</span> for more details on this subject.</p>
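<p>As a short illustration of the latter, a sparse precision matrix can be estimated with the dedicated <strong>glasso</strong> package (a minimal sketch, assuming the package is installed and reusing the simulated returns <code>R</code> from the snippet above; the penalization value <code>rho</code> is arbitrary and, as we understand the package’s interface, the field <code>wi</code> stores the estimated inverse covariance matrix).</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">library(glasso)                              # Package implementing the Graphical LASSO
glasso_fit &lt;- glasso(s = cov(R), rho = 0.1)  # s: sample covariance, rho: penalization intensity
glasso_fit$wi                                # (Sparse) estimate of the inverse covariance matrix</code></pre></div>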
</div>
<div id="sparseex" class="section level3" number="5.2.2">
<h3><span class="header-section-number">5.2.2</span> Example</h3>
<p>The interest of sparse hedging portfolios is to propose a robust approach to the estimation of minimum variance policies. Indeed, since the vector of expected returns <span class="math inline">\(\boldsymbol{\mu}\)</span> is usually very noisy, a simple solution is to adopt an agnostic view by setting <span class="math inline">\(\boldsymbol{\mu}=\boldsymbol{1}\)</span>. In order to test the added value of the sparsity constraint, we must resort to a full backtest. In doing so, we anticipate the content of Chapter <a href="backtest.html#backtest">12</a>.</p>
<p>We first prepare the variables. Sparse portfolios are based on returns only; we thus base our analysis on the dedicated variable in matrix/rectangular format (<em>returns</em>), which was created at the end of Chapter <a href="notdata.html#notdata">1</a>.</p>
<p>Then, we initialize the output variables: portfolio weights and portfolio returns. We want to compare three strategies: an equally weighted (EW) benchmark of all stocks, the classical global minimum variance portfolio (GMV) and the sparse-hedging approach to minimum variance.</p>
<div class="sourceCode" id="cb34"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb34-1"><a href="lasso.html#cb34-1" aria-hidden="true" tabindex="-1"></a>t_oos <span class="ot"><-</span> returns<span class="sc">$</span>date[returns<span class="sc">$</span>date <span class="sc">></span> separation_date] <span class="sc">%>%</span> <span class="co"># Out-of-sample dates </span></span>
<span id="cb34-2"><a href="lasso.html#cb34-2" aria-hidden="true" tabindex="-1"></a> <span class="fu">unique</span>() <span class="sc">%>%</span> <span class="co"># Remove duplicates</span></span>
<span id="cb34-3"><a href="lasso.html#cb34-3" aria-hidden="true" tabindex="-1"></a> <span class="fu">as.Date</span>(<span class="at">origin =</span> <span class="st">"1970-01-01"</span>) <span class="co"># Transform in date format</span></span>
<span id="cb34-4"><a href="lasso.html#cb34-4" aria-hidden="true" tabindex="-1"></a>Tt <span class="ot"><-</span> <span class="fu">length</span>(t_oos) <span class="co"># Nb of dates, avoid T </span></span>
<span id="cb34-5"><a href="lasso.html#cb34-5" aria-hidden="true" tabindex="-1"></a>nb_port <span class="ot"><-</span> <span class="dv">3</span> <span class="co"># Nb of portfolios/strats.</span></span>
<span id="cb34-6"><a href="lasso.html#cb34-6" aria-hidden="true" tabindex="-1"></a>portf_weights <span class="ot"><-</span> <span class="fu">array</span>(<span class="dv">0</span>, <span class="at">dim =</span> <span class="fu">c</span>(Tt, nb_port, <span class="fu">ncol</span>(returns) <span class="sc">-</span> <span class="dv">1</span>)) <span class="co"># Initial portf. weights</span></span>
<span id="cb34-7"><a href="lasso.html#cb34-7" aria-hidden="true" tabindex="-1"></a>portf_returns <span class="ot"><-</span> <span class="fu">matrix</span>(<span class="dv">0</span>, <span class="at">nrow =</span> Tt, <span class="at">ncol =</span> nb_port) <span class="co"># Initial portf. returns </span></span></code></pre></div>
<p></p>
<p>Next, because it is the purpose of this section, we isolate the computation of the weights of sparse-hedging portfolios. In the case of minimum variance portfolios, when <span class="math inline">\(\boldsymbol{\mu}=\boldsymbol{1}\)</span>, the weight in asset 1 will simply be the sum of all terms in Equation <a href="lasso.html#eq:sparsehedgeeq2">(5.13)</a> and the other weights have similar forms.</p>
<div class="sourceCode" id="cb35"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb35-1"><a href="lasso.html#cb35-1" aria-hidden="true" tabindex="-1"></a>weights_sparsehedge <span class="ot"><-</span> <span class="cf">function</span>(returns, alpha, lambda){ <span class="co"># The parameters are defined here</span></span>
<span id="cb35-2"><a href="lasso.html#cb35-2" aria-hidden="true" tabindex="-1"></a> w <span class="ot"><-</span> <span class="dv">0</span> <span class="co"># Initiate weights</span></span>
<span id="cb35-3"><a href="lasso.html#cb35-3" aria-hidden="true" tabindex="-1"></a> <span class="cf">for</span>(i <span class="cf">in</span> <span class="dv">1</span><span class="sc">:</span><span class="fu">ncol</span>(returns)){ <span class="co"># Loop on the assets</span></span>
<span id="cb35-4"><a href="lasso.html#cb35-4" aria-hidden="true" tabindex="-1"></a> y <span class="ot"><-</span> returns[,i] <span class="co"># Dependent variable</span></span>
<span id="cb35-5"><a href="lasso.html#cb35-5" aria-hidden="true" tabindex="-1"></a> x <span class="ot"><-</span> returns[,<span class="sc">-</span>i] <span class="co"># Independent variable</span></span>
<span id="cb35-6"><a href="lasso.html#cb35-6" aria-hidden="true" tabindex="-1"></a> fit <span class="ot"><-</span> <span class="fu">glmnet</span>(x,y, <span class="at">family =</span> <span class="st">"gaussian"</span>, <span class="at">alpha =</span> alpha, <span class="at">lambda =</span> lambda)</span>
<span id="cb35-7"><a href="lasso.html#cb35-7" aria-hidden="true" tabindex="-1"></a> err <span class="ot"><-</span> y<span class="sc">-</span><span class="fu">predict</span>(fit, x) <span class="co"># Prediction errors</span></span>
<span id="cb35-8"><a href="lasso.html#cb35-8" aria-hidden="true" tabindex="-1"></a> w[i] <span class="ot"><-</span> (<span class="dv">1</span><span class="sc">-</span><span class="fu">sum</span>(fit<span class="sc">$</span>beta))<span class="sc">/</span><span class="fu">var</span>(err) <span class="co"># Output: weight of asset i</span></span>
<span id="cb35-9"><a href="lasso.html#cb35-9" aria-hidden="true" tabindex="-1"></a> }</span>
<span id="cb35-10"><a href="lasso.html#cb35-10" aria-hidden="true" tabindex="-1"></a> <span class="fu">return</span>(w <span class="sc">/</span> <span class="fu">sum</span>(w)) <span class="co"># Normalisation of weights</span></span>
<span id="cb35-11"><a href="lasso.html#cb35-11" aria-hidden="true" tabindex="-1"></a>}</span></code></pre></div>
<p></p>
<p>In order to benchmark our strategy, we define a meta-weighting function that embeds three strategies: (1) the EW benchmark, (2) the classical GMV and (3) the sparse-hedging minimum variance. For the GMV, since there are many more assets than dates, the covariance matrix is singular. Thus, we add a small heuristic shrinkage term. For a more rigorous treatment of this technique, we refer to the original article <span class="citation"><a href="solutions-to-exercises.html#ref-ledoit2004well" role="doc-biblioref">Olivier Ledoit and Wolf</a> (<a href="solutions-to-exercises.html#ref-ledoit2004well" role="doc-biblioref">2004</a>)</span> and to the recent improvements mentioned in <span class="citation"><a href="solutions-to-exercises.html#ref-ledoit2017nonlinear" role="doc-biblioref">Olivier Ledoit and Wolf</a> (<a href="solutions-to-exercises.html#ref-ledoit2017nonlinear" role="doc-biblioref">2017</a>)</span>. In short, we use <span class="math inline">\(\hat{\boldsymbol{\Sigma}}=\boldsymbol{\Sigma}_S+\delta \boldsymbol{I}\)</span> for some small constant <span class="math inline">\(\delta\)</span> (equal to 0.01 in the code below).</p>
<div class="sourceCode" id="cb36"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb36-1"><a href="lasso.html#cb36-1" aria-hidden="true" tabindex="-1"></a>weights_multi <span class="ot"><-</span> <span class="cf">function</span>(returns,j, alpha, lambda){</span>
<span id="cb36-2"><a href="lasso.html#cb36-2" aria-hidden="true" tabindex="-1"></a> N <span class="ot"><-</span> <span class="fu">ncol</span>(returns)</span>
<span id="cb36-3"><a href="lasso.html#cb36-3" aria-hidden="true" tabindex="-1"></a> <span class="cf">if</span>(j <span class="sc">==</span> <span class="dv">1</span>){ <span class="co"># j = 1 => EW</span></span>
<span id="cb36-4"><a href="lasso.html#cb36-4" aria-hidden="true" tabindex="-1"></a> <span class="fu">return</span>(<span class="fu">rep</span>(<span class="dv">1</span><span class="sc">/</span>N,N))</span>
<span id="cb36-5"><a href="lasso.html#cb36-5" aria-hidden="true" tabindex="-1"></a> }</span>
<span id="cb36-6"><a href="lasso.html#cb36-6" aria-hidden="true" tabindex="-1"></a> <span class="cf">if</span>(j <span class="sc">==</span> <span class="dv">2</span>){ <span class="co"># j = 2 => Minimum Variance</span></span>
<span id="cb36-7"><a href="lasso.html#cb36-7" aria-hidden="true" tabindex="-1"></a> sigma <span class="ot"><-</span> <span class="fu">cov</span>(returns) <span class="sc">+</span> <span class="fl">0.01</span> <span class="sc">*</span> <span class="fu">diag</span>(N) <span class="co"># Covariance matrix + regularizing term</span></span>
<span id="cb36-8"><a href="lasso.html#cb36-8" aria-hidden="true" tabindex="-1"></a> w <span class="ot"><-</span> <span class="fu">solve</span>(sigma) <span class="sc">%*%</span> <span class="fu">rep</span>(<span class="dv">1</span>,N) <span class="co"># Inverse & multiply</span></span>
<span id="cb36-9"><a href="lasso.html#cb36-9" aria-hidden="true" tabindex="-1"></a> <span class="fu">return</span>(w <span class="sc">/</span> <span class="fu">sum</span>(w)) <span class="co"># Normalize</span></span>
<span id="cb36-10"><a href="lasso.html#cb36-10" aria-hidden="true" tabindex="-1"></a> }</span>
<span id="cb36-11"><a href="lasso.html#cb36-11" aria-hidden="true" tabindex="-1"></a> <span class="cf">if</span>(j <span class="sc">==</span> <span class="dv">3</span>){ <span class="co"># j = 3 => Penalised / elasticnet</span></span>
<span id="cb36-12"><a href="lasso.html#cb36-12" aria-hidden="true" tabindex="-1"></a> w <span class="ot"><-</span> <span class="fu">weights_sparsehedge</span>(returns, alpha, lambda)</span>
<span id="cb36-13"><a href="lasso.html#cb36-13" aria-hidden="true" tabindex="-1"></a> }</span>
<span id="cb36-14"><a href="lasso.html#cb36-14" aria-hidden="true" tabindex="-1"></a>}</span></code></pre></div>
<p></p>
<p>Finally, we proceed to the backtesting loop. Given the number of assets, the execution of the loop takes a few minutes. At the end of the loop, we compute the standard deviation of portfolio returns (monthly volatility). This is the key indicator as minimum variance seeks to minimize this particular metric.</p>
<div class="sourceCode" id="cb37"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb37-1"><a href="lasso.html#cb37-1" aria-hidden="true" tabindex="-1"></a><span class="cf">for</span>(t <span class="cf">in</span> <span class="dv">1</span><span class="sc">:</span><span class="fu">length</span>(t_oos)){ <span class="co"># Loop = rebal. dates</span></span>
<span id="cb37-2"><a href="lasso.html#cb37-2" aria-hidden="true" tabindex="-1"></a> temp_data <span class="ot"><-</span> returns <span class="sc">%>%</span> <span class="co"># Data for weights</span></span>
<span id="cb37-3"><a href="lasso.html#cb37-3" aria-hidden="true" tabindex="-1"></a> <span class="fu">filter</span>(date <span class="sc"><</span> t_oos[t]) <span class="sc">%>%</span> <span class="co"># Expand. window</span></span>
<span id="cb37-4"><a href="lasso.html#cb37-4" aria-hidden="true" tabindex="-1"></a> dplyr<span class="sc">::</span><span class="fu">select</span>(<span class="sc">-</span>date) <span class="sc">%>%</span></span>
<span id="cb37-5"><a href="lasso.html#cb37-5" aria-hidden="true" tabindex="-1"></a> <span class="fu">as.matrix</span>() </span>
<span id="cb37-6"><a href="lasso.html#cb37-6" aria-hidden="true" tabindex="-1"></a> realised_returns <span class="ot"><-</span> returns <span class="sc">%>%</span> <span class="co"># OOS returns</span></span>
<span id="cb37-7"><a href="lasso.html#cb37-7" aria-hidden="true" tabindex="-1"></a> <span class="fu">filter</span>(date <span class="sc">==</span> t_oos[t]) <span class="sc">%>%</span> </span>
<span id="cb37-8"><a href="lasso.html#cb37-8" aria-hidden="true" tabindex="-1"></a> dplyr<span class="sc">::</span><span class="fu">select</span>(<span class="sc">-</span>date)</span>
<span id="cb37-9"><a href="lasso.html#cb37-9" aria-hidden="true" tabindex="-1"></a> <span class="cf">for</span>(j <span class="cf">in</span> <span class="dv">1</span><span class="sc">:</span>nb_port){ <span class="co"># Loop over strats</span></span>
<span id="cb37-10"><a href="lasso.html#cb37-10" aria-hidden="true" tabindex="-1"></a> portf_weights[t,j,] <span class="ot"><-</span> <span class="fu">weights_multi</span>(temp_data, j, <span class="fl">0.1</span>, <span class="fl">0.1</span>) <span class="co"># Hard-coded params!</span></span>
<span id="cb37-11"><a href="lasso.html#cb37-11" aria-hidden="true" tabindex="-1"></a> portf_returns[t,j] <span class="ot"><-</span> <span class="fu">sum</span>(portf_weights[t,j,] <span class="sc">*</span> realised_returns) <span class="co"># Portf. returns</span></span>
<span id="cb37-12"><a href="lasso.html#cb37-12" aria-hidden="true" tabindex="-1"></a> }</span>
<span id="cb37-13"><a href="lasso.html#cb37-13" aria-hidden="true" tabindex="-1"></a>}</span>
<span id="cb37-14"><a href="lasso.html#cb37-14" aria-hidden="true" tabindex="-1"></a><span class="fu">colnames</span>(portf_returns) <span class="ot"><-</span> <span class="fu">c</span>(<span class="st">"EW"</span>, <span class="st">"MV"</span>, <span class="st">"Sparse"</span>) <span class="co"># Colnames</span></span>
<span id="cb37-15"><a href="lasso.html#cb37-15" aria-hidden="true" tabindex="-1"></a><span class="fu">apply</span>(portf_returns, <span class="dv">2</span>, sd) <span class="co"># Portfolio volatilities (monthly scale)</span></span></code></pre></div>
<pre><code>## EW MV Sparse
## 0.04180422 0.03350424 0.02672169</code></pre>
<p></p>
<p>The aim of the sparse hedging restrictions is to provide a better estimate of the covariance structure of assets so that the estimation of minimum variance portfolio weights is more accurate. From the above exercise, we see that the monthly volatility is indeed reduced when building covariance matrices based on sparse hedging relationships. This is not the case if we use the shrunk sample covariance matrix because there is probably too much noise in the estimates of correlations between assets. Working with daily returns would likely improve the quality of the estimates. But the above backtest shows that the penalized methodology performs well even when the number of observations (dates) is small compared to the number of assets.</p>
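<p>To relate these monthly figures to more usual annual risk numbers, a simple scaling can be applied (a sketch assuming independent monthly returns).</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">sqrt(12) * apply(portf_returns, 2, sd)   # Annualized volatilities of the three strategies</code></pre></div>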
</div>
</div>
<div id="predictive-regressions" class="section level2" number="5.3">
<h2><span class="header-section-number">5.3</span> Predictive regressions</h2>
<p></p>
<div id="literature-review-and-principle" class="section level3" number="5.3.1">
<h3><span class="header-section-number">5.3.1</span> Literature review and principle</h3>
<p>The topic of predictive regressions sits on a collection of very interesting articles. One influential contribution is <span class="citation"><a href="solutions-to-exercises.html#ref-stambaugh1999predictive" role="doc-biblioref">Stambaugh</a> (<a href="solutions-to-exercises.html#ref-stambaugh1999predictive" role="doc-biblioref">1999</a>)</span>, where the author shows the perils of regressions in which the independent variables are autocorrelated. In this case, the usual OLS estimate is biased and must therefore be corrected. The results have since then been extended in numerous directions (see <span class="citation"><a href="solutions-to-exercises.html#ref-campbell2006efficient" role="doc-biblioref">Campbell and Yogo</a> (<a href="solutions-to-exercises.html#ref-campbell2006efficient" role="doc-biblioref">2006</a>)</span> and <span class="citation"><a href="solutions-to-exercises.html#ref-hjalmarsson2011new" role="doc-biblioref">Hjalmarsson</a> (<a href="solutions-to-exercises.html#ref-hjalmarsson2011new" role="doc-biblioref">2011</a>)</span>, the survey in <span class="citation"><a href="solutions-to-exercises.html#ref-gonzalo2018predictive" role="doc-biblioref">Gonzalo and Pitarakis</a> (<a href="solutions-to-exercises.html#ref-gonzalo2018predictive" role="doc-biblioref">2018</a>)</span> and, more recently, the study of <span class="citation"><a href="solutions-to-exercises.html#ref-xu2020testing" role="doc-biblioref">Xu</a> (<a href="solutions-to-exercises.html#ref-xu2020testing" role="doc-biblioref">2020</a>)</span> on predictability over multiple horizons).</p>
<p>A second important topic pertains to the time-dependence of the coefficients in predictive regressions. One contribution in this direction is <span class="citation"><a href="solutions-to-exercises.html#ref-dangl2012predictive" role="doc-biblioref">Dangl and Halling</a> (<a href="solutions-to-exercises.html#ref-dangl2012predictive" role="doc-biblioref">2012</a>)</span>, where coefficients are estimated via a Bayesian procedure. More recently <span class="citation"><a href="solutions-to-exercises.html#ref-kelly2019characteristics" role="doc-biblioref">Kelly, Pruitt, and Su</a> (<a href="solutions-to-exercises.html#ref-kelly2019characteristics" role="doc-biblioref">2019</a>)</span> use time-dependent factor loadings to model the cross-section of stock returns. The time-varying nature of coefficients of predictive regressions is further documented by <span class="citation"><a href="solutions-to-exercises.html#ref-henkel2011time" role="doc-biblioref">Henkel, Martin, and Nardari</a> (<a href="solutions-to-exercises.html#ref-henkel2011time" role="doc-biblioref">2011</a>)</span> for short term returns. Lastly, <span class="citation"><a href="solutions-to-exercises.html#ref-farmer2019pockets" role="doc-biblioref">Farmer, Schmidt, and Timmermann</a> (<a href="solutions-to-exercises.html#ref-farmer2019pockets" role="doc-biblioref">2019</a>)</span> introduce the concept of pockets of predictability: assets or markets experience different phases; in some stages, they are predictable and in some others, they aren’t. Pockets are measured both by the number of days that a <em>t</em>-statistic is above a particular threshold and by the magnitude of the <span class="math inline">\(R^2\)</span> over the considered period. Formal statistical tests are developed by <span class="citation"><a href="solutions-to-exercises.html#ref-demetrescu2020testing" role="doc-biblioref">Demetrescu et al.</a> (<a href="solutions-to-exercises.html#ref-demetrescu2020testing" role="doc-biblioref">2020</a>)</span>.</p>
<p>The introduction of penalization within predictive regressions goes back at least to <span class="citation"><a href="solutions-to-exercises.html#ref-rapach2013international" role="doc-biblioref">D. E. Rapach, Strauss, and Zhou</a> (<a href="solutions-to-exercises.html#ref-rapach2013international" role="doc-biblioref">2013</a>)</span>, where they are used to assess lead-lag relationships between US markets and other international stock exchanges. More recently, <span class="citation"><a href="solutions-to-exercises.html#ref-chinco2019sparse" role="doc-biblioref">Alexander Chinco, Clark-Joseph, and Ye</a> (<a href="solutions-to-exercises.html#ref-chinco2019sparse" role="doc-biblioref">2019</a>)</span> use LASSO regressions to forecast high frequency returns based on past returns (in the cross-section) at various horizons. They report statistically significant gains. <span class="citation"><a href="solutions-to-exercises.html#ref-han2018firm" role="doc-biblioref">Han et al.</a> (<a href="solutions-to-exercises.html#ref-han2018firm" role="doc-biblioref">2019</a>)</span> and <span class="citation"><a href="solutions-to-exercises.html#ref-rapach2019time" role="doc-biblioref">D. Rapach and Zhou</a> (<a href="solutions-to-exercises.html#ref-rapach2019time" role="doc-biblioref">2019</a>)</span> use LASSO and elasticnet regressions (respectively) to improve forecast combinations and single out the characteristics that matter when explaining stock returns.</p>
<p>These contributions underline the relevance of the overlap between predictive regressions and penalized regressions. In simple machine-learning based asset pricing, we often seek to build models such as that of Equation <a href="factor.html#eq:genML">(3.6)</a>. If we stick to a linear relationship and add penalization terms, then the model becomes:
<span class="math display">\[r_{t+1,n} = \alpha_n + \sum_{k=1}^K\beta_n^kf^k_{t,n}+\epsilon_{t+1,n}, \quad \text{s.t.} \quad (1-\alpha)\sum_{j=1}^J |\beta_j| +\alpha\sum_{j=1}^J \beta_j^2< \theta\]</span>
where we use <span class="math inline">\(f^k_{t,n}\)</span> or <span class="math inline">\(x_{t,n}^k\)</span> interchangeably and <span class="math inline">\(\theta\)</span> is some penalization intensity. Again, one of the aims of the regularization is to generate more robust estimates. If the patterns extracted hold out of sample, then
<span class="math display">\[\hat{r}_{t+1,n} = \hat{\alpha}_n + \sum_{k=1}^K\hat{\beta}_n^kf^k_{t,n},\]</span>
will be a relatively reliable proxy of future performance.</p>
</div>
<div id="code-and-results" class="section level3" number="5.3.2">
<h3><span class="header-section-number">5.3.2</span> Code and results</h3>
<p>Given the form of our dataset, implementing penalized predictive regressions is easy.</p>
<div class="sourceCode" id="cb39"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb39-1"><a href="lasso.html#cb39-1" aria-hidden="true" tabindex="-1"></a>y_penalized_train <span class="ot"><-</span> training_sample<span class="sc">$</span>R1M_Usd <span class="co"># Dependent variable</span></span>
<span id="cb39-2"><a href="lasso.html#cb39-2" aria-hidden="true" tabindex="-1"></a>x_penalized_train <span class="ot"><-</span> training_sample <span class="sc">%>%</span> <span class="co"># Predictors</span></span>
<span id="cb39-3"><a href="lasso.html#cb39-3" aria-hidden="true" tabindex="-1"></a> dplyr<span class="sc">::</span><span class="fu">select</span>(<span class="fu">all_of</span>(features)) <span class="sc">%>%</span> <span class="fu">as.matrix</span>() </span>
<span id="cb39-4"><a href="lasso.html#cb39-4" aria-hidden="true" tabindex="-1"></a>fit_pen_pred <span class="ot"><-</span> <span class="fu">glmnet</span>(x_penalized_train, y_penalized_train, <span class="co"># Model</span></span>
<span id="cb39-5"><a href="lasso.html#cb39-5" aria-hidden="true" tabindex="-1"></a> <span class="at">alpha =</span> <span class="fl">0.1</span>, <span class="at">lambda =</span> <span class="fl">0.1</span>)</span></code></pre></div>
<p></p>
<p>We then report two key performance measures: the mean squared error and the hit ratio, which is the proportion of times that the prediction guesses the sign of the return correctly. A detailed account of metrics is given later in the book (Chapter <a href="backtest.html#backtest">12</a>).</p>
<div class="sourceCode" id="cb40"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb40-1"><a href="lasso.html#cb40-1" aria-hidden="true" tabindex="-1"></a>x_penalized_test <span class="ot"><-</span> testing_sample <span class="sc">%>%</span> <span class="co"># Predictors</span></span>
<span id="cb40-2"><a href="lasso.html#cb40-2" aria-hidden="true" tabindex="-1"></a> dplyr<span class="sc">::</span><span class="fu">select</span>(<span class="fu">all_of</span>(features)) <span class="sc">%>%</span> <span class="fu">as.matrix</span>() </span>
<span id="cb40-3"><a href="lasso.html#cb40-3" aria-hidden="true" tabindex="-1"></a><span class="fu">mean</span>((<span class="fu">predict</span>(fit_pen_pred, x_penalized_test) <span class="sc">-</span> testing_sample<span class="sc">$</span>R1M_Usd)<span class="sc">^</span><span class="dv">2</span>) <span class="co"># MSE</span></span></code></pre></div>
<pre><code>## [1] 0.03699696</code></pre>
<div class="sourceCode" id="cb42"><pre class="sourceCode r"><code class="sourceCode r"><span id="cb42-1"><a href="lasso.html#cb42-1" aria-hidden="true" tabindex="-1"></a><span class="fu">mean</span>(<span class="fu">predict</span>(fit_pen_pred, x_penalized_test) <span class="sc">*</span> testing_sample<span class="sc">$</span>R1M_Usd <span class="sc">></span> <span class="dv">0</span>) <span class="co"># Hit ratio</span></span></code></pre></div>
<pre><code>## [1] 0.5460346</code></pre>
<p></p>
<p>From an investor’s standpoint, the MSE (or even the mean absolute error) is hard to interpret because it does not map easily into an intuitive financial indicator. From this perspective, the hit ratio is more natural: it gives the proportion of predictions with the correct sign. If the investor goes long on positive signals and short on negative ones, the hit ratio is the proportion of ‘correct’ bets (positions that move in the expected direction). The natural benchmark is 50%, but because of transaction costs, a hit ratio of around 51% is probably not enough to generate profits. The value of 0.546 obtained above can thus be deemed a reasonably good, though not very impressive, hit ratio.</p>
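<p>To get a rough sense of whether a hit ratio of this order could be obtained by pure chance, one simple (if crude) check is a binomial test of the sign predictions against the 50% benchmark. Below is a minimal sketch that re-uses the objects defined above; it is only indicative, since it ignores the cross-sectional and temporal dependence between observations.</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">hit <- predict(fit_pen_pred, x_penalized_test) * testing_sample$R1M_Usd > 0 # TRUE when the sign is correct
binom.test(sum(hit), length(hit), p = 0.5)                                   # H0: hit ratio equal to 50%</code></pre></div>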
</div>
</div>
<div id="coding-exercise" class="section level2" number="5.4">
<h2><span class="header-section-number">5.4</span> Coding exercise</h2>
<p>On the test sample, evaluate the impact of the two elastic net parameters on out-of-sample accuracy.</p>
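<p>One possible way to structure the answer is sketched below: it loops over a small, arbitrary grid of <code>alpha</code> and <code>lambda</code> values, refits the model on the training sample each time, and stores the out-of-sample MSE and hit ratio. The grid values are merely illustrative and should be adapted.</p>
<div class="sourceCode"><pre class="sourceCode r"><code class="sourceCode r">grid <- expand.grid(alpha = c(0, 0.25, 0.5, 0.75, 1),      # Mixing parameter (0 = ridge, 1 = LASSO in glmnet)
                    lambda = c(0.001, 0.01, 0.1))           # Penalization intensities (arbitrary values)
grid$mse <- NA                                              # Placeholder for the mean squared errors
grid$hit <- NA                                              # Placeholder for the hit ratios
for (i in 1:nrow(grid)) {
    fit <- glmnet(x_penalized_train, y_penalized_train,     # Same training data as above
                  alpha = grid$alpha[i], lambda = grid$lambda[i])
    pred <- predict(fit, x_penalized_test)                  # Out-of-sample predictions
    grid$mse[i] <- mean((pred - testing_sample$R1M_Usd)^2)  # Mean squared error
    grid$hit[i] <- mean(pred * testing_sample$R1M_Usd > 0)  # Hit ratio
}
grid                                                        # Accuracy measures across the grid</code></pre></div>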
</div>
</div>
</section>
</div>
</div>
</div>
<a href="Data.html" class="navigation navigation-prev " aria-label="Previous page"><i class="fa fa-angle-left"></i></a>
<a href="trees.html" class="navigation navigation-next " aria-label="Next page"><i class="fa fa-angle-right"></i></a>
</div>
</div>
<script src="libs/gitbook-2.6.7/js/app.min.js"></script>
<script src="libs/gitbook-2.6.7/js/lunr.js"></script>
<script src="libs/gitbook-2.6.7/js/clipboard.min.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-search.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-sharing.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-fontsettings.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-bookdown.js"></script>
<script src="libs/gitbook-2.6.7/js/jquery.highlight.js"></script>
<script src="libs/gitbook-2.6.7/js/plugin-clipboard.js"></script>
<script>
gitbook.require(["gitbook"], function(gitbook) {
gitbook.start({
"sharing": {
"github": false,
"facebook": false,
"twitter": true,
"linkedin": true,
"weibo": false,
"instapaper": false,
"vk": false,
"all": ["facebook", "twitter", "linkedin", "weibo", "instapaper"]
},
"fontsettings": {
"theme": "white",
"family": "sans",
"size": 2
},
"edit": null,
"history": {
"link": null,
"text": null
},
"view": {
"link": null,
"text": null
},
"download": null,
"toc": {
"collapse": "section",
"scroll_highlight": true
},
"toolbar": {
"position": "fixed",
"download": false
},
"search": true,
"info": true
});
});
</script>
<!-- dynamically load mathjax for compatibility with self-contained -->
<script>
(function () {
var script = document.createElement("script");
script.type = "text/javascript";
var src = "true";
if (src === "" || src === "true") src = "https://mathjax.rstudio.com/latest/MathJax.js?config=TeX-MML-AM_CHTML";
if (location.protocol !== "file:")
if (/^https?:/.test(src))
src = src.replace(/^https?:/, '');
script.src = src;
document.getElementsByTagName("head")[0].appendChild(script);
})();
</script>
</body>
</html>