<h3 class="title-container">Expanding the HAWQ System</h3>
<div class="content">
<!-- Python script replaces main content -->
<div id ="main"><div style="visibility:hidden; height:2px;">Pivotal Product Documentation : Expanding the HAWQ System</div><div class="wiki-content group" id="main-content">
This chapter provides information on adding additional resources to an existing HAWQ system to scale performance.

**Contents**

- Planning the HAWQ Expansion
- System Expansion Overview
  - System Expansion Checklist
  - Planning New Hardware Platforms
  - Planning Initialization of New Segments
  - Increasing Segments Per Host
  - About the Expansion Schema
  - Planning Table Redistribution
  - Redistributing Append-Only and Compressed Tables
  - Redistributing Tables with User-Defined Data Types
  - Redistributing Partitioned Tables
- Preparing and Adding Nodes
  - Adding New Nodes to the Trusted Host Environment
  - Validating Disk I/O and Memory Bandwidth
  - Integrating New Hardware into the System
- Installing HAWQ Components on the New Segments
  - Installing PL/R and pgcrypto after Expansion
  - Installing PL/Java after Expansion
- Installing MADlib on Newly Added Nodes
- Initializing New Segments
  - Creating an Input File for System Expansion
  - Running gpexpand to Initialize New Segments
  - Rolling Back a Failed Expansion Setup
- Redistributing Tables
  - Monitoring Table Redistribution
- Removing the Expansion Schema
</div></p><h2 id="ExpandingtheHAWQSystem-PlanningtheHAWQExpansion">Planning the HAWQ Expansion </h2><p align="LEFT">Careful planning is critical to the success of a system expansion operation. By thoroughly preparing all new hardware and carefully planning all the steps of the expansion procedure, you can minimize risk and down time for the HAWQ database. For performance-related considerations when expanding large-scale systems, see "Planning Table Redistribution".</p><p>This section provides an overview and a checklist for the system expansion process.</p><h2 id="ExpandingtheHAWQSystem-SystemExpansionOverview">System Expansion Overview</h2><p>System expansion consists of three phases:</p><ul><li>Adding and testing new hardware platforms</li><li>Initializing new segments</li><li>Redistributing tables</li></ul><p><strong>Adding and testing new hardware</strong> — You can refer to the general considerations for deploying new hardware described in "Planning New Hardware Platforms". For more information on hardware platforms, consult Pivotal platform engineers. After the new hardware platforms are provisioned and networked, you must run performance tests using Pivotal HAWQ utilities.</p><p><strong>Initializing new segments</strong> — Once the HAWQ Database binaries are installed on new hardware, you must initialize the new segments using gpexpand (not gpinitsystem). In this process, the utility creates a data and metadata directory and copies all the metadata from the existing segments to the new segments, capturing metadata for each user data table in an expansion schema for status tracking. After this process has completed successfully, the expansion operation is committed, and cannot be reversed.</p><p>These operations are performed with the system offline. The gpexpand utility will shut down the database during initialization if you have not already done so.</p><p><strong>Redistributing tables</strong> — As part of the initialization process, gpexpand nullifies hash distribution policies (except for the parent tables of a partitioned table) and sets the distribution policy for all tables to random distribution. This action is performed on all tables in all existing databases in the HAWQ Database instance.</p> <div class="aui-message warning shadowed information-macro">
<p class="title">Note</p>
<span class="aui-icon icon-warning">Icon</span>
<div class="message-content">
<p>Nullifying original distribution policies means marking the distribution policies of all the user tables to<em> Random</em>. It does NOT involve any data movement or distribution. Physical movement of the data happens only when the final redistribution according to the original policy is performed.</p>
</div>
</div>
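If you want to observe the policy change, it is visible in the catalog. As a rough, optional check (this assumes the GPDB 4.2-style gp_distribution_policy catalog table that HAWQ derives from, where randomly distributed tables carry a NULL attrnums entry; this query is not part of the documented procedure):

```
# Hypothetical spot check: list tables currently marked randomly distributed.
$ psql -d database_name -c \
  "SELECT localoid::regclass FROM gp_distribution_policy WHERE attrnums IS NULL;"
```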
Users can continue to access HAWQ after initialization is complete and the system is back online, although systems that rely heavily on hash distribution of tables may see some performance degradation. During this process, normal operations such as ETL jobs, user queries, and reporting can continue, although users might experience slower response times.

To complete the system expansion, you must run gpexpand to redistribute the data tables across the newly added segments. Depending on the size and scale of your system, this might be accomplished in a single session during low-use hours, or you might need to divide the process into batches over an extended period. Each table or partition is unavailable for read and write operations while it is being redistributed. As each table is successfully redistributed across the new segments according to its distribution key (if any), database performance should incrementally improve until it meets and then exceeds pre-expansion levels.

In a typical expansion, you run the gpexpand utility four times, with different options:

- To interactively create an expansion input file:

```
gpexpand -f hosts_file
```

- To initialize segments and create the expansion schema:

```
gpexpand -i input_file -D database_name
```

- To redistribute tables:

```
gpexpand -d duration
```

- To remove the expansion schema:

```
gpexpand -c
```
On large systems that require multiple redistribution sessions, you may need to run gpexpand several more times to complete the expansion. For more information, see "Planning Table Redistribution".

### System Expansion Checklist

This checklist provides a quick overview of the steps required for a system expansion.

**Online pre-expansion preparation** (perform these tasks while the system is up and available):

- [ ] Devise and execute a plan for ordering, building, and networking the new hardware platforms.
- [ ] Devise a database expansion plan: map the number of segments per host, schedule the offline period for testing performance and creating the expansion schema, and schedule the intervals for table redistribution.
- [ ] Install the HAWQ database binaries on the new hosts.
- [ ] Copy SSH keys to the new hosts (gpssh-exkeys).
- [ ] Validate the operating system environment of the new hardware (gpcheck).
- [ ] Validate disk I/O and memory bandwidth of the new hardware (gpcheckperf).
- [ ] Validate that the master data directory has no huge files under pg_log or gpperfmon/data (see the example check after this list).
- [ ] Prepare an expansion input file (gpexpand).
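The pg_log/gpperfmon check can be scripted. A minimal sketch, assuming $MASTER_DATA_DIRECTORY points at the master data directory and treating 1 GB as the threshold (both assumptions, not part of the checklist itself):

```
# List files over 1 GB under pg_log and gpperfmon/data on the master.
$ find $MASTER_DATA_DIRECTORY/pg_log $MASTER_DATA_DIRECTORY/gpperfmon/data \
    -type f -size +1G -exec ls -lh {} \;
```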
**Offline expansion tasks:**

- [ ] Validate the operating system environment of the combined existing and new hardware (gpcheck).
- [ ] Validate disk I/O and memory bandwidth of the combined existing and new hardware (gpcheckperf).
- [ ] Reinstall the HAWQ components.
- [ ] Initialize the new segments into the array and create the expansion schema (gpexpand -i input_file).
- [ ] Stop any automated snapshots or other processes that consume disk space.

**Online expansion tasks:**

- [ ] Redistribute the tables through the expanded system (gpexpand).
- [ ] Remove the expansion schema (gpexpand -c).
- [ ] Run ANALYZE to update distribution statistics, either during the expansion (gpexpand -a) or after it (ANALYZE).

> **Notes:** If you encounter problems during new segment initialization, you cannot use gp_dump to restore the system. You can ONLY roll back a failed expansion operation; once a setup operation is complete and the expansion is committed, you cannot roll back.
<p><span style="font-size: medium;"> </span></p><h3 id="ExpandingtheHAWQSystem-PlanningNewHardwarePlatforms"><span style="font-size: medium;"> <span style="font-size: medium;"> <span style="font-size: medium;">Planning New Hardware Platforms</span> </span> </span></h3><p>Careful preparation of new hardware for system expansion is extremely important. Deliberate and thorough deployment of compatible hardware can greatly minimize the risk of issues developing later in the system expansion process.</p><p align="LEFT">Pivotal recommends the following:</p><ul><li>All new segment hosts for the expanded HAWQ Database array should have hardware resources and configurations matching those of the existing hosts. </li><li>You work with HAWQ Platform Engineering <span style="font-size: medium;"> </span>prior to making a hardware purchase decision to expand the HAWQ Database.</li></ul><p>The steps to plan and set up new hardware platforms will vary greatly for each unique deployment. Some of the possible considerations include:</p><ul><li>Preparing the physical space for the new hardware. Consider cooling, power supply, and other physical factors.</li><li>Determining the physical networking and cabling required to connect the new and existing hardware.</li><li>Mapping the existing IP address spaces and developing a networking plan for the expanded system.</li><li>Capturing the system configuration (users, profiles, NICs, etc.) from existing hardware to list it in detail for ordering the new hardware.</li><li>Creating a custom build plan for deploying hardware with the desired configuration in the particular site and environment.</li></ul><p>After selecting and adding new hardware to your network environment, make sure you perform the burn-in tasks described in "Verifying OS Settings".</p><h3 id="ExpandingtheHAWQSystem-PlanningInitializationofNewSegments">Planning Initialization of New Segments</h3><p>Expanding the HAWQ Database requires a limited period of system down time. During this time period, you must run gpexpand to initialize new segments into the array and create an expansion schema.</p><p>The time required will depend on the number of schema objects in the HAWQ system, as well as other factors related to hardware performance.</p> <div class="aui-message warning shadowed information-macro">
<p class="title">Note</p>
<span class="aui-icon icon-warning">Icon</span>
<div class="message-content">
<p>After you begin initializing new segments, you can no longer restore the system using gp_dump files created for the per-expansion system. When initialization is successfully completed, the expansion is committed and cannot be rolled back.</p>
</div>
</div>
<h3 id="ExpandingtheHAWQSystem-IncreasingSegmentsPerHost">Increasing Segments Per Host</h3><p>By default, new hosts are initialized with the same number of segments as existing hosts. Optionally, you can increase the number of segments per host, or add new segments only to existing hosts.</p><p>For example, if existing hosts currently have two segments per host, you can use gpexpand to initialize two additional segments on existing hosts (for a total of four), and four new segments on new hosts.</p> <div class="aui-message warning shadowed information-macro">
<p class="title">Note</p>
<span class="aui-icon icon-warning">Icon</span>
<div class="message-content">
<p>When you are adding new segments on existing nodes or hosts, you do not need to perform pre-expansion tasks such as HAWQ binary deployment, copying ssh keys, and other tasks because these hosts have already been configured. You must make sure that these hosts have enough resources such as OS resources, and memory to manage new segments.</p>
</div>
</div>
The interactive process for creating an expansion input file prompts for this option, and the input file format also allows you to specify new segment directories manually. For more information, see "Creating an Input File for System Expansion".

### About the Expansion Schema

At initialization time, gpexpand creates an expansion schema. If you do not specify a particular database at initialization time (gpexpand -D), the schema is created in the database indicated by the PGDATABASE environment variable.

The expansion schema stores metadata for each table in the system so that its status can be tracked throughout the expansion process. The schema consists of two tables and a view for tracking the progress of an expansion operation:

- gpexpand.status
- gpexpand.status_detail
- gpexpand.expansion_progress

### Planning Table Redistribution

Table redistribution is performed with the system online. For many HAWQ systems, it can be completed in a single gpexpand session scheduled during a low-use period. Larger systems may require multiple sessions, with the order of table redistribution set so as to minimize the performance impact.

Pivotal recommends completing the table redistribution in one session if your database size and design permit it.

> **Important:** HDFS manages the disk space of your HAWQ system. Before redistributing tables, verify that your Hadoop cluster has enough free disk space to temporarily hold a copy of your largest table, taking HDFS replication (3x by default) into account. Each table is unavailable for read and write operations while gpexpand is redistributing it among the segments.
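While a redistribution session runs, the expansion schema objects listed above can be queried for progress. For example (run in the database where the expansion schema was created):

```
# Check overall redistribution progress via the expansion schema view.
$ psql -d database_name -c "SELECT * FROM gpexpand.expansion_progress;"
```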
The performance impact of table redistribution depends on the size, storage type, and partitioning design of a table. Redistributing a table with gpexpand takes approximately as long as a CREATE TABLE AS SELECT operation on that table. When redistributing a terabyte-scale fact table, the expansion utility can use a significant portion of the available system resources, which can affect the performance of queries and other database workloads.

> **Important:** You may encounter the following issue after performing these tasks:
>
> 1. Upgrading the HAWQ cluster from 1.1.x to 1.2.x.
> 2. Running gpexpand.
>
> During the distribution phase, gpexpand logs the error:
>
> ```
> [TIMESTAMP] gpexpand:[MASTER_HOST]:[USER]-[ERROR]:-Table template1.pg_catalog.pg_remote_credentials failed to expand: error 'ERROR: permission denied: "pg_remote_credentials" is a system catalog' in 'ALTER TABLE ONLY "pg_catalog"."pg_remote_credentials" SET WITH(REORGANIZE=TRUE) DISTRIBUTED BY ("rcowner")'
> ```
>
> This happens because gpexpand tries to distribute a catalog table; it is a known issue. Barring any other errors, the upgraded cluster is fully operational and the expansion is successful. You can confirm this by connecting to the database and executing the following SQL command:
>
> ```
> SELECT dbname, fq_name, status FROM gpexpand.logical_status;
> ```
>
> All associated tables except pg_catalog.pg_remote_credentials will show the status "COMPLETED". This issue does not occur on a clean install of a 1.2.x cluster.
<p> </p><h3 id="ExpandingtheHAWQSystem-RedistributingAppend-OnlyandCompressedTables">Redistributing Append-Only and Compressed Tables</h3><p>Append-only and compressed append-only tables are redistributed by gpexpand at different rates. The CPU capacity required to compress and decompress data tends to increase the impact on system performance. For similar-sized tables with similar-sized data, you may find overall performance differences, such as zlib-compressed append-only tables expanding at a significantly slower rate than uncompressed append-only tables (which can be potentially up to 80% slower).</p><h3 id="ExpandingtheHAWQSystem-RedistributingTableswithUser-DefinedDataTypes">Redistributing Tables with User-Defined Data Types</h3><p>Certain sequences of alter operations on tables could render such tables unalterable from a redistribution perspective. gpexpand does not support redistribution of unalterable tables. For example, if you have a table initially created with a column of user-defined types and the column is subsequently dropped, this table may qualify as unalterable. If gpexpand reports a table as unalterable, you need to redistribute the table manually. To do this, create a new table matching the schema of the unalterable table and execute the following statement: </p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: sql; gutter: false" style="font-size:12px;">INSERT INTO <new_table>
SELECT * FROM <unalterable table>;</pre>
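A minimal sketch of the full manual workflow, with hypothetical table names (sales stands in for the unalterable table); verify row counts before dropping the original, and recreate any dependent objects and grants:

```
psql -d database_name <<'SQL'
CREATE TABLE sales_new (LIKE sales);        -- new table matching the schema
INSERT INTO sales_new SELECT * FROM sales;  -- rewrites the data across all segments
DROP TABLE sales;
ALTER TABLE sales_new RENAME TO sales;
SQL
```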
</div></div><h3 id="ExpandingtheHAWQSystem-RedistributingPartitionedTables">Redistributing Partitioned Tables</h3><p>Because the expansion utility can process a large table partition-by-partition, an efficient partition design reduces the performance impact of table redistribution. Only the child tables of a partitioned table are set to a random distribution policy, and only one child partition table is unavailable during redistribution.</p><h2 id="ExpandingtheHAWQSystem-PreparingandAddingNodes">Preparing and Adding Nodes</h2><p>To prepare new system nodes for expansion, install the HAWQ software binaries, exchange the required SSH keys and run performance tests. Pivotal recommends running performance tests at least twice: first on the new nodes only, and then on both the new and existing nodes together. The second set of tests must be run with the system offline, to prevent user activity from distorting test results.</p><p align="LEFT">Beyond these general guidelines, Pivotal recommends running performance tests any time that the networking of nodes is modified, or for any special conditions in the system environment. For example, if you plan to run the expanded system on two network clusters, run the performance tests on each cluster.</p><p>This rest of this section describes how to run the HAWQ administrative utilities to verify that your new nodes are ready for integration into the existing HAWQ system.</p><h3 id="ExpandingtheHAWQSystem-AddingNewNodestotheTrustedHostEnvironment">Adding New Nodes to the Trusted Host Environment</h3><p>New nodes must exchange SSH keys with the existing nodes to allow HAWQ administrative utilities to connect to all segments without a password prompt.</p><p>Pivotal recommends performing the key exchange process twice: once as root (for administration convenience) and once as the gpadmin user (required for the HAWQ management utilities). Perform the following tasks in this order:</p><ol><li>Exchange SSH keys as root.</li><li>Create the gpadmin user.</li><li>Exchange SSH keys as the gpadmin user.</li></ol><p><strong>Exchange SSH keys as root</strong></p><ol><li><p>Create two separate host list files: one that has all of the existing host names in your HAWQ array, and one that has all of the new expansion hosts. For existing hosts, you can use the same host file that you used for the initial setup of SSH keys in the system.<br/>The files should include all hosts (master, backup master and segment hosts) and list one host name per line. If using a multi-NIC configuration, make sure to exchange SSH keys using all of the configured host names for a given host. Make sure there are no blank lines or extra spaces. For example:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">mdw OR masterhost
sdw1-1 seghost1
sdw1-2 seghost2
sdw1-3 seghost3
sdw1-4
sdw2-1
sdw2-2
sdw2-3
sdw2-4
sdw3-1
sdw3-2
sdw3-3
sdw3-4</pre>
2. Log in as root on the master host, and source the greenplum_path.sh file from your HAWQ installation:

```
$ su -
# source /usr/local/hawq/greenplum_path.sh
```

3. Run the gpssh-exkeys utility, referencing both host list files. For example:

```
# gpssh-exkeys -e /home/gpadmin/existing_hosts_file -x /home/gpadmin/new_hosts_file
```

4. gpssh-exkeys checks the remote hosts and performs the key exchange between all hosts. Enter the root user password when prompted. For example:

```
***Enter password for root@hostname: <root_password>
```
</div></div></li></ol><p align="LEFT"><strong>Create the gpadmin</strong> <strong>user</strong></p><ol><li><p>Use gpssh to create the gpadmin user on all of the new segment hosts (if the gpadmin user does not exist already). Use the list of new hosts that you created for the key exchange. For example:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;"># gpssh -f new_hosts_file '/usr/sbin/useradd gpadmin -d /home/gpadmin -s /bin/bash'</pre>
</div></div></li><li><p>Set the new gpadmin user’s password. On Linux, you can do this on all segment hosts at once by using gpssh. For example:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;"># gpssh -f new_hosts_file 'echo gpadmin_password | passwd gpadmin --stdin'</pre>
</div></div><p><br/>You must log in to each segment host and set the gpadmin user’s password on each host. For example:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;"># ssh segment_hostname
# passwd gpadmin
# New password: <gpadmin_password>
# Retype new password: <gpadmin_password></pre>
</div></div></li><li><p>Verify that the gpadmin user has been created by searching for its home directory:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;"># gpssh -f new_hosts_file ls -l /home</pre>
**Exchange SSH keys as the gpadmin user**

Log in as gpadmin, and run the gpssh-exkeys utility, referencing both host list files. For example:

```
$ gpssh-exkeys -e /home/gpadmin/existing_hosts_file -x /home/gpadmin/new_hosts_file
```

gpssh-exkeys checks the remote hosts and performs the key exchange between all hosts. Enter the gpadmin user password when prompted. For example:

```
***Enter password for gpadmin@hostname: <gpadmin_password>
```

### Verifying OS Settings

Use the gpcheck utility to verify that all the new hosts in your array have the correct OS settings for running the HAWQ software.

**To run gpcheck**

1. Log in on the master host as the user who will run your HAWQ system (for example, gpadmin):

```
$ su - gpadmin
```

2. Run the gpcheck utility using your host file for the new hosts. For example:

```
$ gpcheck -f new_hosts_file
```
### Validating Disk I/O and Memory Bandwidth

Use the gpcheckperf utility to test disk I/O and memory bandwidth.

**To run gpcheckperf**

1. Run the gpcheckperf utility using the host file for the new hosts. Use the -d option to specify the file systems to test on each host (you must have write access to these directories). For example:

```
$ gpcheckperf -f new_hosts_file -d /data1 -d /data2 -v
```

2. The utility may take a while to run, as it copies very large files between the hosts. When it finishes, it prints the summary results for the Disk Write, Disk Read, and Stream tests.

If your network is divided into subnets, repeat this procedure with a separate host file for each subnet.

### Integrating New Hardware into the System

Before initializing the system with the new segments, repeat the performance tests on all nodes, new and existing. Shut down the system and run the same tests using host files that include *all* nodes:

- Verifying OS Settings
- Validating Disk I/O and Memory Bandwidth

Because user activity can skew the results, you must shut down HAWQ (gpstop) before running these tests.

## Installing HAWQ Components on the New Segments

This topic describes how to install HAWQ components on the new segments created by running gpexpand.

### Installing PL/R and pgcrypto after Expansion

If the PL/R and pgcrypto packages are already installed on the existing segments, use the following instructions to install them on the expanded segments:

1. Ensure that the HAWQ binaries are installed on all new segments.
2. Run gpssh-exkeys to set up passwordless SSH across the cluster.
3. Untar each package, using the same versions installed on the existing cluster.

For PL/R:
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">mkdir plr
mv plr*.tgz plr
cd plr
tar -xzf plr*.tgz</pre>
</div></div><p>For pgcrypto :</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">mkdir pgcrypto
mv pgcrypto.tgz pgcrypto
cd pgcrypto
tar -xzf pgcrypto.tgz</pre>
</div></div></li><li class="im"><p>Ensure that the hostfile only lists new hostnames, each on a new line.</p></li><li class="im"><p>Run the install in expand mode:</p><p>For PL/R:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">./plr_install.sh -f ~/hostfile -x</pre>
</div></div><p> For pgcrypto:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">./pgcrypto_install.sh -f ~/hostfile -x</pre>
</div></div></li></ol><h3 id="ExpandingtheHAWQSystem-InstallingPL/JavaafterExpansion">Installing PL/Java after Expansion</h3><p>If you have already installed PL/Java on existing segments, use the following instructions to install these packages on the expanded segments:</p><p>These instructions assume that you have a precompiled build of PL/Java from Pivotal.</p><ol><li>Ensure that $JAVA_HOME variable is set to the same path on master and on all segments.</li><li><p>Extract the files from the PL/Java package.</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">mkdir pljava
mv pljava*.tgz pljava
cd pljava
tar -xzf pljava.tgz</pre>
3. Run the installer:

```
./pljava_install.sh -f ~/hosts.txt
```

where ~/hosts.txt is a text file containing the host names of the currently active segment hosts in the HAWQ deployment, one host name per line.

4. Restart HAWQ:

```
source $GPHOME/greenplum_path.sh
gpstop -ar
```

5. Add the PL/Java class of configuration variables:

```
gpconfig -c custom_variable_classes -v \'pljava\'
```

If custom_variable_classes is already defined, add pljava to the comma-separated list.

6. Run the CREATE LANGUAGE command:

```
psql -d <dbname> -c "CREATE LANGUAGE pljava"
```
#### Installing custom JARs

1. Copy the JAR file to $GPHOME/lib/postgresql/java on the master host.
2. Copy the JAR file to the same location on the segments, using gpscp from the master:

```
cd $GPHOME/lib/postgresql/java
gpscp -f ~/hosts.txt myfunc.jar =:$GPHOME/lib/postgresql/java/
```

3. Set pljava_classpath to include the newly copied JAR file.
4. From a psql session, execute the following:

```
set pljava_classpath='myfunc.jar';
```
This setting is in effect for the current psql session only. To apply it to all sessions, use gpconfig -c pljava_classpath -v \'myfunc.jar\'.

## Installing MADlib on Newly Added Nodes

The following steps assume that MADlib was installed and running before the new nodes were added. If MADlib is not installed, install it using the instructions in the HAWQ Installation Guide.

1. Download the MADlib RPM.
2. Make sure the HAWQ binaries are installed properly on all master and segment nodes in your cluster.
3. Make sure the host file lists all of the new segment nodes.
4. Run the following command:

```
hawq_install.sh -r <RPM_FILEPATH> -f <HOSTFILE>
```

5. Complete the process by initializing the new segments, as described in the next section.

## Initializing New Segments

Use the gpexpand utility to initialize the new segments, create the expansion schema, and set a system-wide random distribution policy for the database. gpexpand performs these tasks by default the first time you run it with a valid input file on the HAWQ master. On subsequent runs, it detects that an expansion schema exists and performs table redistribution instead.

### Creating an Input File for System Expansion

To begin the expansion, gpexpand requires an input file containing information about the new segments and hosts. If you run gpexpand without specifying an input file, the utility starts an interactive interview that collects the required information and automatically creates the input file for you.

If you create the input file through the interactive interview, you can optionally specify a file containing the list of expansion hosts. If your platform or command shell limits the length of the host name list you can enter at the interview prompt, specifying the hosts with gpexpand -f (as shown below) may be mandatory.

#### Creating an input file in Interactive Mode

Before running gpexpand to create an input file in interactive mode, make sure you have the following information:

- Number of new hosts
- New host names (or a hosts file)
- Number of segments to add per host, if any

The utility automatically generates an input file based on this information and on the dbid, content ID, and data directory values stored in gp_segment_configuration and pg_filespace, and saves the file in the current directory.

**To create an input file in interactive mode**

1. Log in on the master host as the user who will run your HAWQ system (for example, gpadmin).
2. Run gpexpand. The utility displays messages about preparing for an expansion operation and prompts you to quit or continue. Optionally, specify a hosts file using -f. For example:
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">$ gpexpand -f /home/gpadmin/new_hosts_file</pre>
</div></div></li><li>At the prompt, select Y to continue.</li><li><p>Enter a comma-separated list of the hostnames of the new expansion hosts. If you specified a hosts file using -f, go to the next step. Your list should appear as follows:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">> sdw5, sdw6, sdw7, sdw8</pre>
</div></div> <div class="aui-message warning shadowed information-macro">
<p class="title">Note</p>
<span class="aui-icon icon-warning">Icon</span>
<div class="message-content">
<p>To add segments to existing hosts only, enter a blank line at this prompt. Do not specify localhost or any existing host name.</p>
</div>
</div>
</li><li>Enter the number of new segments to add, if any. By default, new hosts are initialized with the same number of segments as the existing hosts. Optionally, you can increase the number of segments per host. For example, if existing hosts currently have two segments each, entering a value of 2 will initialize two additional segments on the existing hosts, and four new segments on new hosts.</li><li><p>If you are adding new segments, enter the metadata path for each new segment.</p><p>After you have entered all required information, the utility generates an input file and saves it in the current directory. For example:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">gpexpand_inputfile_yyyymmdd_145134</pre>
</div></div><p align="LEFT">If your system has shared filesystem filespaces, gpexpand expects a filespace configuration file (<em>input_file_name </em> <span style="font-size: small;"> </span>.fs) to exist in the same directory as your expansion configuration file. See User-defined Filespaces and gpexpand for more information.</p></li></ol><h4 id="ExpandingtheHAWQSystem-User-definedFilespacesandgpexpand">User-defined Filespaces and gpexpand</h4><p>This topic describes two scenarios:</p><ul><li>HAWQ with no User-defined Filespaces</li><li>HAWQ with a User-defined Filespace</li></ul><p><strong>HAWQ with no User-defined Filespaces</strong></p><p>When you initialize a new HAWQ cluster, it has 2 filespaces by default:<em> pg_system</em> and <em>dfs_system</em> (Lookup system tables pg_filespace & pg_filespace_entry).</p><ul><li><em>pg_system</em> stores all the metadata used by the Master and the segments. This is a local filesystem path that corresponds to that segment.</li><li><em>dfs_system</em> stores all the user data. Unlike <em>pg_system,</em> this is a shared filespace and is a path under HDFS.</li></ul><p>Since HAWQ has these two default filespaces, the expansion utility expects corresponding filespaces for the new segments. gpexpand requests local filesystem paths for pg_system filespace, but auto-generates paths for shared filespace paths, to maintain consistency of paths between all the segment data directories</p><p><strong>HAWQ with a User-defined Filespace</strong></p><p>This means that one or more filespaces, other than the default, have been defined in the existing HAWQ system.</p><p>You can use the gpfilespace utility to add filespaces to your HAWQ system. User-defined filespaces always have a shared path. </p><p>Therefore, if you have one or more user-defined filespaces in your HAWQ system, gpexpand requests local filesystem paths for pg_system filespace, but auto-generates paths for shared filespace paths so that it can maintain consistency of paths between all the segment data directories.</p><h4 id="ExpandingtheHAWQSystem-ExpansionInputFileFormat">Expansion Input File Format</h4><p>You can create your own input file in the required format. Unless you have special needs for your expansion scenario, Pivotal recommends creating the input file using the interactive interview process.</p><p>The format for the expansion input.fs file is:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">filespaceOrder=filespace1_name :filespace2_name : ...
dbid:/path/for/filespace1 :/path/for/filespace2 : ...
dbid:/path/for/filespace1 :/path/for/filespace2 : ...
...</pre>
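</div></div><p>For illustration, a hypothetical <em>input_file_name</em>.fs file for two new segments (dbids 11 and 12) and a single user-defined filespace named fastdisk might look like this (the filespace name and paths are placeholders, not values from your system):</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">filespaceOrder=fastdisk
11:/data/fastdisk/seg11
12:/data/fastdisk/seg12</pre>
</div></div><p>To review the filespaces and per-segment locations already defined in your system, you can query the catalog. A sketch, assuming the Greenplum-style catalog columns <em>fsname</em>, <em>fsedbid</em>, and <em>fselocation</em>:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">=# select f.fsname, e.fsedbid, e.fselocation
     from pg_filespace f join pg_filespace_entry e on f.oid = e.fsefsoid;</pre>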
</div></div><p align="LEFT">An expansion input file in this format requires the following information for each new segment:</p><p align="LEFT"><strong>Table: Input file format</strong></p><div class="table-wrap"><table class="confluenceTable"><tbody><tr><th class="confluenceTh">Parameter</th><th class="confluenceTh">Values</th><th class="confluenceTh">Description</th></tr><tr><td class="confluenceTd">hostname</td><td class="confluenceTd">hostname</td><td class="confluenceTd">Hostname for the segment host</td></tr><tr><td class="confluenceTd">port</td><td class="confluenceTd">An available port number</td><td class="confluenceTd"><p align="LEFT">Database listener port for the segment, incremented on the existing segment <em>port </em> <span style="font-size: xx-small;"> </span>base number.</p></td></tr><tr><td class="confluenceTd">fselocation</td><td class="confluenceTd">Directory name</td><td class="confluenceTd"><p align="LEFT">The data directory (filespace) location for a segment as per the pg_filespace_entry system catalog. </p></td></tr><tr><td class="confluenceTd">dbid</td><td class="confluenceTd"><p align="LEFT">Integer. Must not conflict with existing <em>dbid </em> <span style="font-size: xx-small;"> </span>values.</p></td><td class="confluenceTd"><p align="LEFT">Database ID for the segment. The values you enter should be incremented sequentially from existing<em>dbid </em> <span style="font-size: xx-small;"> </span>values shown in the system catalog gp_segment_configuration. For example, to add four nodes to an existing ten-segment array with <em>dbid </em> <span style="font-size: xx-small;"> </span>values of 1-10, list new <em>dbid </em> <span style="font-size: xx-small;"> </span>values of 11, 12, 13 and 14.</p></td></tr><tr><td class="confluenceTd">content</td><td class="confluenceTd"><p align="LEFT">Integer. Must not conflict with existing <em>content </em> <span style="font-size: xx-small;"> </span>values.</p></td><td class="confluenceTd"><p align="LEFT">The content ID of the segment. A primary segment and its mirror should have the same content ID, incremented sequentially from existing values. For more information, see <em>content </em> <span style="font-size: xx-small;"> </span>in the reference for gp_segment_configuration.</p></td></tr><tr><td class="confluenceTd">preferred_role</td><td class="confluenceTd">p</td><td class="confluenceTd"><p align="LEFT">p (primary) is the only option.</p></td></tr></tbody></table></div><h3 id="ExpandingtheHAWQSystem-RunninggpexpandtoInitializeNewSegments">Running gpexpand to Initialize New Segments</h3><p align="LEFT">After you have created an input file, run gpexpand to initialize new segments. The utility will automatically stop HAWQ for the time required to initialize the segments, <span style="background-color: transparent;line-height: 1.4285715;">then restarts the system when finished.</span></p><p align="LEFT"><strong>To run gpexpand with an input file</strong></p><ol><li>Log in on the master host as the user running your HAWQ system (for example, gpadmin).</li><li><p>Run the gpexpand utility, specifying the input file with -i.</p><p> </p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">$ gpexpand -i input_file -D database1</pre>
</div></div><p><br/>The utility detects if there is an existing expansion schema for the HAWQ system. If there is an existing schema, you must remove it with gpexpand -c before beginning a new expansion operation. See "Removing the Expansion Schema". When the new segments are initialized and the expansion schema is successfully created, the utility prints a success message and exits.</p></li></ol><p align="LEFT">When the initialization process is complete, you can connect to HAWQ and view the expansion schema. The schema resides in the database you specified with -D, or in the database specified by the PGDATABASE environment variable. For more information, see "About the Expansion Schema".</p><h3 id="ExpandingtheHAWQSystem-RollingBackaFailedExpansionSetup">Rolling Back a Failed Expansion Setup</h3><p align="LEFT">You can roll back a failed expansion setup operation by using the command gpexpand -r | --rollback. However, this command is only allowed in a failure scenario. Once a setup operation has completed successfully, the expansion is committed, and you cannot roll back.</p><p>To roll back a failed expansion setup, use the following command, specifying the database that contains the expansion schema:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">gpexpand --rollback -D database_name</pre>
</div></div><h2 id="ExpandingtheHAWQSystem-RedistributingTables">Redistributing Tables</h2><p align="LEFT">After successfully creating an expansion schema, you can bring HAWQ back online and redistribute tables across the entire array. You can redistribute tables with gpexpand at specified intervals, targeting low-use hours when the utility’s CPU usage and table locks will have the least impact on database operations. Also, you can rank tables to ensure that the largest or most critical tables are redistributed in your preferred order.</p><p align="LEFT">While the redistribution of tables is underway:</p><ul><li><p>Any new tables or partitions created will be distributed across all segments exactly as they would be under normal operating conditions.</p></li><li><p>Queries will use all segments, even though the relevant data may not yet have been redistributed to the tables on the new segments.</p></li></ul><p>The table or partition currently being redistributed will be locked and unavailable for read or write operations. When its redistribution is completed, normal operations resume.</p><p> </p> <div class="aui-message warning shadowed information-macro">
<span class="aui-icon icon-warning">Icon</span>
<div class="message-content">
<p>gpexpand does not support redistribution of unalterable tables. Some sequences of alter operations on tables could render those tables unalterable for redistribution. For example, if you create a table with a column of user-defined types, then subsequently drop the column, this table may become unalterable. As a workaround, if gpexpand reports a table as unalterable, you need to redistribute the table manually. To do this, create a new table matching the schema of the unalterable table and execute the following command:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">insert into <new_table> select * from <unalterable table>;</pre>
</div></div>
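<p>A minimal sketch of the full workaround, using a hypothetical unalterable table named sales_old_schema (all names here are placeholders; CREATE TABLE ... LIKE copies the column definitions):</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">-- create an empty copy with the same column definitions
create table sales_copy (like sales_old_schema);
-- copy the data; the new table is distributed across all segments
insert into sales_copy select * from sales_old_schema;
-- after verifying the copy, the old table can be dropped or renamed</pre>
</div></div>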
</div>
</div>
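<p>To rank tables before starting a redistribution session, update the rank values in the expansion schema. A hedged sketch, assuming <em>gpexpand.status_detail</em> carries a <em>rank</em> column as in Greenplum Database, where lower values are redistributed first (the table names are placeholders):</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">=# update gpexpand.status_detail set rank = 1 where fq_name = 'public.sales';
=# update gpexpand.status_detail set rank = 2 where fq_name = 'public.customers';</pre>
</div></div>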
<p> <strong>To redistribute tables with gpexpand</strong></p><ol><li>Log in on the master host as the user who will be running your HAWQ system (for example, gpadmin).</li><li><p>Run the gpexpand utility. Optionally, you can use either the -d or -e option to define the time period for the expansion session. For example, to run the utility for a maximum of 60 consecutive hours:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">$ gpexpand -d 60:00:00</pre>
</div></div><p>The utility redistributes tables until the last table in the schema is successfully marked completed, or until the specified duration or end time is reached. Each time a session is started or finished, the utility updates the status and updated time in <em>gpexpand.status</em>.</p></li></ol><h3 id="ExpandingtheHAWQSystem-MonitoringTableRedistribution">Monitoring Table Redistribution</h3><p align="LEFT">At any time during the process of redistributing tables, you can query the expansion schema. The view <em>gpexpand.expansion_progress</em> provides a summary of the current progress, including calculations of the estimated rate of table redistribution and estimated time to completion. The table <em>gpexpand.status_detail</em> can be queried for per-table status information.</p><h4 id="ExpandingtheHAWQSystem-ViewingExpansionStatus">Viewing Expansion Status</h4><p align="LEFT">Because the estimates in <em>gpexpand.expansion_progress</em> are based on the rates achieved for each table, the view cannot calculate an accurate estimate until the first table has completed. Calculations are restarted each time you re-run gpexpand to start a new table redistribution session.</p><p align="LEFT">To monitor progress by querying <em>gpexpand.expansion_progress</em>, connect to HAWQ using psql or another supported client. Query <em>gpexpand.expansion_progress</em> with a command like the following:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">=# select * from gpexpand.expansion_progress;
name | value
------------------------------+-----------------------
Bytes Left | 5534842880
Bytes Done | 142475264
Estimated Expansion Rate | 680.75667095996092 MB/s
Estimated Time to Completion | 00:01:01.008047
Tables Expanded | 4
Tables Left | 4
(6 rows)</pre>
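</div></div><p>You can also review the history of expansion sessions recorded in <em>gpexpand.status</em>. A sketch, assuming the table carries <em>status</em> and <em>updated</em> columns as in Greenplum's expansion schema:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">=# select status, updated from gpexpand.status order by updated;</pre>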
</div></div><h4 id="ExpandingtheHAWQSystem-ViewingTableStatus">Viewing Table Status</h4><p align="LEFT">The table <em>gpexpand.status_detail </em> <span style="font-size: medium;"> </span>stores status, last updated time, and other useful information about each table in the schema. To monitor the status of a particular table by querying <em>gpexpand.status_detail</em>, connect to HAWQ using psql or another supported client. Query <em>gpexpand.status_detail </em> <span style="font-size: medium;"> </span>with a command similar to the following:</p><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">=> SELECT status, expansion_started, source_bytes FROM gpexpand.status_detail WHERE fq_name = ‘public.sales’;
status | expansion_started | source_bytes
-----------+----------------------------+------------------------------------
COMPLETED | 2009-02-20 10:54:10.043869 | 4929748992
(1 row)</pre>
</div></div><h2 id="ExpandingtheHAWQSystem-RemovingtheExpansionSchema">Removing the Expansion Schema</h2><p align="LEFT">The expansion schema can safely be removed after the expansion operation is completed and verified. To run another expansion operation on a HAWQ system, you must first remove the existing expansion schema.</p><p align="LEFT"><strong>To remove the expansion schema</strong></p><ol><li>Log in on the master host as the user who will be running your HAWQ system (for example, gpadmin).</li><li>Run the gpexpand utility with the -c option. For example:</li></ol><div class="code panel pdl" style="border-width: 1px;"><div class="codeContent panelContent pdl">
<pre class="theme: Confluence; brush: java; gutter: false" style="font-size:12px;">$ gpexpand -c </pre>
</div></div>
</div></div>
</div><!-- end of content-->
</div><!-- end of container -->
</div><!--end of container-fluid-->
</div><!--end of main-wrap-->
<div class="site-footer desktop-only">
<div class="container-fluid">
<div class="site-footer-links">
<span class="version"><a href='/'>Pivotal Documentation</a></span>
<span>©
<script>
var d = new Date();
document.write(d.getFullYear());
</script>
<a href='http://gopivotal.com'>Pivotal Software</a> Inc. All Rights Reserved.
</span>
</div>
</div>
</div>
<script type="text/javascript">
(function() {
var didInit = false;
function initMunchkin() {
if(didInit === false) {
didInit = true;
Munchkin.init('625-IUJ-009');
}
}
var s = document.createElement('script');
s.type = 'text/javascript';
s.async = true;
s.src = document.location.protocol + '//munchkin.marketo.net/munchkin.js';
s.onreadystatechange = function() {
if (this.readyState == 'complete' || this.readyState == 'loaded') {
initMunchkin();
}
};
s.onload = initMunchkin;
document.getElementsByTagName('head')[0].appendChild(s);
})();
</script>
</div><!--end of viewport-->
<div id="scrim"></div>
</body>
</html>