# Copyright 2022 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarking SQLServer with the Hammerdb benchmark.
This benchmark uses Windows as the OS for both the database server and the
HammerDB client(s).
"""

from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sql_engine_utils
from perfkitbenchmarker.windows_packages import hammerdb

FLAGS = flags.FLAGS

BENCHMARK_NAME = 'hammerdbcli'
BENCHMARK_CONFIG = """
hammerdbcli:
  description: Runs hammerdb against specified databases.
  relational_db:
    engine: sqlserver
    db_spec:
      GCP:
        machine_type:
          cpus: 4
          memory: 7680MiB
        zone: us-central1-c
      AWS:
        machine_type: db.m5.xlarge
        zone: us-west-1a
      Azure:
        machine_type:
          compute_units: 500
        zone: eastus
    db_disk_spec:
      GCP:
        disk_size: 500
        disk_type: pd-ssd
        num_striped_disks: 1
        mount_point: /scratch
      AWS:
        disk_size: 500
        disk_type: gp2
        num_striped_disks: 1
        mount_point: /scratch
      Azure:
        disk_size: 500
        disk_type: Premium_LRS
        num_striped_disks: 1
        mount_point: /scratch
    vm_groups:
      servers:
        os_type: windows2022_desktop_sqlserver_2019_standard
        vm_spec:
          GCP:
            machine_type: n2-standard-4
            zone: us-central1-c
            boot_disk_size: 50
            boot_disk_type: pd-ssd
          AWS:
            machine_type: m6i.xlarge
            zone: us-east-1a
          Azure:
            machine_type: Standard_D4s_v5
            zone: eastus
            boot_disk_type: Premium_LRS
        disk_spec:
          GCP:
            disk_size: 500
            disk_type: pd-ssd
            num_striped_disks: 1
            mount_point: /scratch
          AWS:
            disk_size: 500
            disk_type: gp2
            num_striped_disks: 1
            mount_point: /scratch
          Azure:
            disk_size: 500
            disk_type: Premium_LRS
            num_striped_disks: 1
            mount_point: /scratch
      clients:
        os_type: windows2022_desktop
        vm_spec:
          GCP:
            machine_type: n2-standard-16
            zone: us-central1-c
            boot_disk_size: 50
            boot_disk_type: pd-ssd
          AWS:
            machine_type: m6i.4xlarge
            zone: us-east-1a
          Azure:
            machine_type: Standard_D16s_v5
            zone: eastus
            boot_disk_type: Premium_LRS
        disk_spec:
          GCP:
            disk_size: 500
            disk_type: pd-ssd
            num_striped_disks: 1
            mount_point: /scratch
          AWS:
            disk_size: 500
            disk_type: gp2
            num_striped_disks: 1
            mount_point: /scratch
          Azure:
            disk_size: 500
            disk_type: Premium_LRS
            num_striped_disks: 1
            mount_point: /scratch
"""


def GetConfig(user_config):
  """Load and return benchmark config.

  Args:
    user_config: user supplied configuration (flags and config file)

  Returns:
    loaded benchmark configuration
  """
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  if FLAGS.db_high_availability:
    # We need two additional VMs for the SQL HA deployment: one to act as the
    # second node in the SQL cluster and one to act as a domain controller.
    config['relational_db']['vm_groups']['servers']['vm_count'] = 3
    if FLAGS.db_high_availability_type == 'FCIMW':
      config['relational_db']['vm_groups']['servers']['disk_count'] = 0
  return config
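
# A sketch of the effective 'servers' group after the high-availability
# override above (illustrative, not tool output): the three VMs are the
# primary SQL node, the second cluster node, and the domain controller.
#
#   relational_db:
#     vm_groups:
#       servers:
#         vm_count: 3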


def CheckPrerequisites(_):
  """Verifies that benchmark flags are correct."""
  if hammerdb.HAMMERDB_OPTIMIZED_SERVER_CONFIGURATION.value not in [
      hammerdb.NON_OPTIMIZED,
      hammerdb.MINIMUM_RECOVERY,
  ]:
    raise errors.Setup.InvalidFlagConfigurationError(
        'Only the non-optimized and minimum-recovery settings of'
        ' hammerdbcli_optimized_server_configuration are implemented.'
    )


def Prepare(benchmark_spec):
  """Prepare the benchmark by installing dependencies.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.
  """
  relational_db = benchmark_spec.relational_db
  vm = relational_db.client_vm

  num_cpus = None
  if hasattr(relational_db, 'server_vm'):
    server_vm = relational_db.server_vm
    num_cpus = server_vm.NumCpusForBenchmark()
  hammerdb.SetDefaultConfig(num_cpus)
  vm.Install('hammerdb')

  is_azure = FLAGS.cloud == 'Azure' and FLAGS.use_managed_db
  if is_azure and hammerdb.HAMMERDB_SCRIPT.value == 'tpc_c':
    # Create the database first; only Azure requires the database to exist
    # before the HammerDB schema build.
    relational_db.client_vm_query_tools.IssueSqlCommand('CREATE DATABASE tpcc;')

  hammerdb.SetupConfig(
      vm,
      sql_engine_utils.SQLSERVER,
      hammerdb.HAMMERDB_SCRIPT.value,
      relational_db.endpoint,
      relational_db.port,
      relational_db.spec.database_password,
      relational_db.spec.database_username,
      is_azure,
  )

  # SQL Server exhibits better performance when restarted after the prepare
  # step.
  if FLAGS.hammerdbcli_restart_before_run:
    relational_db.RestartDatabase()


def SetMinimumRecover(relational_db):
  """Change SQL Server settings to make TPM numbers stable."""
  # https://www.mssqltips.com/sqlservertip/4541/adjust-targetrecoverytime-to-reduce-sql-server-io-spikes/
  # Raise the target recovery time so checkpoint activity does not cause I/O
  # spikes during the measured run.
  relational_db.client_vm_query_tools.IssueSqlCommand(
      'ALTER DATABASE tpcc SET TARGET_RECOVERY_TIME = 12000 SECONDS;'
  )
  # Disable automatic statistics updates during the run.
  relational_db.client_vm_query_tools.IssueSqlCommand(
      'ALTER DATABASE tpcc SET AUTO_UPDATE_STATISTICS OFF;'
  )
  # Limit the maximum degree of parallelism to a single scheduler.
  relational_db.client_vm_query_tools.IssueSqlCommand(
      'ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = 1;'
  )
  relational_db.client_vm_query_tools.IssueSqlCommand(
      'ALTER DATABASE [tpcc] SET DELAYED_DURABILITY = DISABLED WITH NO_WAIT;'
  )
  # Pre-grow the data file so autogrowth does not happen mid-run.
  relational_db.client_vm_query_tools.IssueSqlCommand(
      "ALTER DATABASE [tpcc] MODIFY FILE ( NAME = N'tpcc', SIZE = 500 GB,"
      ' FILEGROWTH = 10%);'
  )
  # Shrink the log file, then grow it back to a fixed size (64000 MB) so the
  # run starts from a known log layout.
  relational_db.client_vm_query_tools.IssueSqlCommand(
      "dbcc shrinkfile('tpcc_log',truncateonly)"
  )
  relational_db.client_vm_query_tools.IssueSqlCommand(
      "alter database tpcc modify file (name='tpcc_log', size=64000)"
  )

  # Verify the settings changed.
  relational_db.client_vm_query_tools.IssueSqlCommand("dbcc loginfo('tpcc')")
  relational_db.client_vm_query_tools.IssueSqlCommand(
      'SELECT name,target_recovery_time_in_seconds FROM sys.databases;'
  )


def Run(benchmark_spec):
  """Run the benchmark.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
      required to run the benchmark.

  Returns:
    A list of sample.Sample instances.
  """
  client_vms = benchmark_spec.vm_groups['clients']
  relational_db = benchmark_spec.relational_db
  if (
      hammerdb.HAMMERDB_OPTIMIZED_SERVER_CONFIGURATION.value
      == hammerdb.MINIMUM_RECOVERY
  ):
    SetMinimumRecover(relational_db)

  samples = hammerdb.Run(
      client_vms[0],
      sql_engine_utils.SQLSERVER,
      hammerdb.HAMMERDB_SCRIPT.value,
      timeout=None,
  )

  metadata = GetMetadata()
  for sample in samples:
    sample.metadata.update(metadata)
  return samples


def GetMetadata():
  """Returns benchmark metadata with inapplicable hammerdb keys removed."""
  metadata = hammerdb.GetMetadata(sql_engine_utils.SQLSERVER)
  # No reason to support multiple runs in a single benchmark run yet.
  metadata.pop('hammerdbcli_num_run', None)
  # The columnar engine is not applicable to SQL Server.
  metadata.pop('hammerdbcli_load_tpch_tables_to_columnar_engine', None)
  return metadata


def Cleanup(_):
  """No custom cleanup as the VMs are destroyed after the test."""
  pass