Skip to content

Commit

Permalink
Merge pull request GoogleCloudPlatform#2684 from SMU-ATT-Center-for-Virtualization:netperf_mss
Browse files Browse the repository at this point in the history

PiperOrigin-RevId: 353351782
  • Loading branch information
copybara-github committed Jan 23, 2021
2 parents eab04e5 + 0359911 commit 44dd2d4
Show file tree
Hide file tree
Showing 4 changed files with 70 additions and 83 deletions.
71 changes: 33 additions & 38 deletions perfkitbenchmarker/linux_benchmarks/netperf_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,18 +159,17 @@ def Prepare(benchmark_spec):
if vm_util.ShouldRunOnExternalIpAddress():
# Open all of the command and data ports
vms[1].AllowPort(PORT_START, PORT_START + num_streams * 2 - 1)
netserver_cmd = ('for i in $(seq {port_start} 2 {port_end}); do '
'{netserver_path} -p $i & done').format(
port_start=PORT_START,
port_end=PORT_START + num_streams * 2 - 1,
netserver_path=netperf.NETSERVER_PATH)

port_end = PORT_START + num_streams * 2 - 1
netserver_cmd = (f'for i in $(seq {PORT_START} 2 {port_end}); do '
f'{netperf.NETSERVER_PATH} -p $i & done')
vms[1].RemoteCommand(netserver_cmd)

# Copy remote test script to client
path = data.ResourcePath(os.path.join(REMOTE_SCRIPTS_DIR, REMOTE_SCRIPT))
logging.info('Uploading %s to %s', path, vms[0])
vms[0].PushFile(path, REMOTE_SCRIPT)
vms[0].RemoteCommand('sudo chmod 777 %s' % REMOTE_SCRIPT)
vms[0].RemoteCommand(f'sudo chmod 777 {REMOTE_SCRIPT}')


def _SetupHostFirewall(benchmark_spec):
Expand Down Expand Up @@ -366,7 +365,7 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
# -I specifies the confidence % and width - here 99% confidence that the true
# value is within +/- 2.5% of the reported value
# -i specifies the maximum and minimum number of iterations.
confidence = ('-I 99,5 -i {0},3'.format(FLAGS.netperf_max_iter)
confidence = (f'-I 99,5 -i {FLAGS.netperf_max_iter},3'
if FLAGS.netperf_max_iter else '')
verbosity = '-v2 ' if enable_latency_histograms else ''

Expand All @@ -377,40 +376,36 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
'sending_thread_count': num_streams,
'max_iter': FLAGS.netperf_max_iter or 1}

netperf_cmd = ('{netperf_path} -p {{command_port}} -j {verbosity} '
'-t {benchmark_name} -H {server_ip} -l {length} {confidence}'
netperf_cmd = (f'{netperf.NETPERF_PATH} '
f'-p {{command_port}} '
f'-j {verbosity} '
f'-t {benchmark_name} '
f'-H {server_ip} '
f'-l {FLAGS.netperf_test_length} {confidence}'
' -- '
'-P ,{{data_port}} '
'-o {output_selector}').format(
netperf_path=netperf.NETPERF_PATH,
benchmark_name=benchmark_name,
server_ip=server_ip,
length=FLAGS.netperf_test_length,
output_selector=OUTPUT_SELECTOR,
confidence=confidence,
verbosity=verbosity)
f'-P ,{{data_port}} '
f'-o {OUTPUT_SELECTOR}')

if benchmark_name.upper() == 'UDP_STREAM':
netperf_cmd += (' -R 1 -m {send_size} -M {send_size} '.format(
send_size=FLAGS.netperf_udp_stream_send_size_in_bytes))
metadata[
'netperf_send_size_in_bytes'] = FLAGS.netperf_udp_stream_send_size_in_bytes
send_size = FLAGS.netperf_udp_stream_send_size_in_bytes
netperf_cmd += f' -R 1 -m {send_size} -M {send_size} '
metadata['netperf_send_size_in_bytes'] = (
FLAGS.netperf_udp_stream_send_size_in_bytes)

elif benchmark_name.upper() == 'TCP_STREAM':
netperf_cmd += (' -m {send_size} -M {send_size} '.format(
send_size=FLAGS.netperf_tcp_stream_send_size_in_bytes))
metadata[
'netperf_send_size_in_bytes'] = FLAGS.netperf_tcp_stream_send_size_in_bytes
send_size = FLAGS.netperf_tcp_stream_send_size_in_bytes
netperf_cmd += f' -m {send_size} -M {send_size} '
metadata['netperf_send_size_in_bytes'] = (
FLAGS.netperf_tcp_stream_send_size_in_bytes)

if FLAGS.netperf_thinktime != 0:
netperf_cmd += (' -X {thinktime},{thinktime_array_size},'
'{thinktime_run_length} ').format(
thinktime=FLAGS.netperf_thinktime,
thinktime_array_size=FLAGS.netperf_thinktime_array_size,
thinktime_run_length=FLAGS.netperf_thinktime_run_length)
netperf_cmd += (' -X '
f'{FLAGS.netperf_thinktime},'
f'{FLAGS.netperf_thinktime_array_size},'
f'{FLAGS.netperf_thinktime_run_length} ')

if FLAGS.netperf_mss and 'TCP' in benchmark_name.upper():
netperf_cmd += (' -G {mss}b'.format(mss=FLAGS.netperf_mss))
netperf_cmd += f' -G {FLAGS.netperf_mss}b'
metadata['netperf_mss_requested'] = FLAGS.netperf_mss

# Run all of the netperf processes and collect their stdout
Expand All @@ -421,8 +416,8 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
# complete
remote_cmd_timeout = \
FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300
remote_cmd = ('./%s --netperf_cmd="%s" --num_streams=%s --port_start=%s' %
(REMOTE_SCRIPT, netperf_cmd, num_streams, PORT_START))
remote_cmd = (f'./{REMOTE_SCRIPT} --netperf_cmd="{netperf_cmd}" '
f'--num_streams={num_streams} --port_start={PORT_START}')
remote_stdout, _ = vm.RobustRemoteCommand(remote_cmd, should_log=True,
timeout=remote_cmd_timeout)

Expand Down Expand Up @@ -461,7 +456,7 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
# Create samples for throughput stats
for stat, value in throughput_stats.items():
samples.append(
sample.Sample('%s_Throughput_%s' % (benchmark_name, stat),
sample.Sample(f'{benchmark_name}_Throughput_{stat}',
float(value),
throughput_unit, metadata))
if enable_latency_histograms:
Expand All @@ -473,13 +468,13 @@ def RunNetperf(vm, benchmark_name, server_ip, num_streams):
hist_metadata = {'histogram': json.dumps(latency_histogram)}
hist_metadata.update(metadata)
samples.append(sample.Sample(
'%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
f'{benchmark_name}_Latency_Histogram', 0, 'us', hist_metadata))
# Calculate stats on aggregate latency histogram
latency_stats = _HistogramStatsCalculator(latency_histogram, [50, 90, 99])
# Create samples for the latency stats
for stat, value in latency_stats.items():
samples.append(
sample.Sample('%s_Latency_%s' % (benchmark_name, stat),
sample.Sample(f'{benchmark_name}_Latency_{stat}',
float(value),
'us', metadata))
return samples
Expand Down Expand Up @@ -541,4 +536,4 @@ def Cleanup(benchmark_spec):
"""
vms = benchmark_spec.vms
vms[1].RemoteCommand('sudo killall netserver')
vms[0].RemoteCommand('sudo rm -rf %s' % REMOTE_SCRIPT)
vms[0].RemoteCommand(f'sudo rm -rf {REMOTE_SCRIPT}')
45 changes: 20 additions & 25 deletions perfkitbenchmarker/linux_benchmarks/netperf_pps_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runs plain netperf aggregate script runemomniaggdemo.sh to test packets
per second
"""Runs plain netperf aggregate script runemomniaggdemo.sh to test packets/sec.
docs:
https://hewlettpackard.github.io/netperf/doc/netperf.html
Expand Down Expand Up @@ -69,10 +68,10 @@ def PrepareNetperfAggregate(vm):
vm.Install('python_rrdtool')
vm.Install('netperf')

PORT_END = PORT_START
port_end = PORT_START

if vm_util.ShouldRunOnExternalIpAddress():
vm.AllowPort(PORT_START, PORT_END)
vm.AllowPort(PORT_START, port_end)

netserver_cmd = ('{netserver_path} -p {port_start}').format(
port_start=PORT_START,
Expand Down Expand Up @@ -100,7 +99,6 @@ def ParseNetperfAggregateOutput(stdout):
Args:
stdout: the stdout of the netperf process
metadata: metadata for any sample.Sample objects we create
Returns:
A tuple containing (throughput_sample, latency_samples, latency_histogram)
Expand Down Expand Up @@ -137,40 +135,37 @@ def RunNetperfAggregate(vm, server_ips):
Args:
vm: The VM that the netperf TCP_RR benchmark will be run upon.
benchmark_name: The netperf benchmark to run, see the documentation.
server_ip: A machine that is running netserver.
num_streams: The number of netperf client threads to run.
server_ips: Machines that are running netserver.
Returns:
A sample.Sample object with the result.
"""

# setup remote hosts file
vm.RemoteCommand('cd %s && rm remote_hosts' % (netperf.NETPERF_EXAMPLE_DIR))
vm.RemoteCommand(f'cd {netperf.NETPERF_EXAMPLE_DIR} && rm remote_hosts')
ip_num = 0
for ip in server_ips:
vm.RemoteCommand("cd %s && echo 'REMOTE_HOSTS[%d]=%s' >> remote_hosts"
% (netperf.NETPERF_EXAMPLE_DIR, ip_num, ip))
vm.RemoteCommand(f"echo 'REMOTE_HOSTS[{ip_num}]={ip}' >> "
f"{netperf.NETPERF_EXAMPLE_DIR}/remote_hosts")
ip_num += 1

vm.RemoteCommand("cd %s && echo 'NUM_REMOTE_HOSTS=%d' >> remote_hosts"
% (netperf.NETPERF_EXAMPLE_DIR, len(server_ips)))
vm.RemoteCommand('cd %s && export PATH=$PATH:.'
% (netperf.NETPERF_EXAMPLE_DIR))
vm.RemoteCommand(f"echo 'NUM_REMOTE_HOSTS={len(server_ips)}' >> "
f"{netperf.NETPERF_EXAMPLE_DIR}/remote_hosts")

# allow script to be executed and run script
vm.RemoteCommand(
'cd %s && export PATH=$PATH:. && chmod '
'+x runemomniaggdemo.sh && '
'./runemomniaggdemo.sh' % (netperf.NETPERF_EXAMPLE_DIR),
f'cd {netperf.NETPERF_EXAMPLE_DIR} && '
'export PATH=$PATH:. && '
'chmod +x runemomniaggdemo.sh && '
'./runemomniaggdemo.sh',
ignore_failure=True,
should_log=True,
login_shell=False,
timeout=1200)

# print out netperf_tps.log to log
stdout_1, stderr_1 = vm.RemoteCommand(
'cd %s && cat netperf_tps.log' % (netperf.NETPERF_EXAMPLE_DIR),
f'cat {netperf.NETPERF_EXAMPLE_DIR}/netperf_tps.log',
ignore_failure=True,
should_log=True,
login_shell=False,
Expand All @@ -181,8 +176,8 @@ def RunNetperfAggregate(vm, server_ips):

# do post processing step
proc_stdout, _ = vm.RemoteCommand(
'cd %s && ./post_proc.py '
'--intervals netperf_tps.log' % (netperf.NETPERF_EXAMPLE_DIR),
f'cd {netperf.NETPERF_EXAMPLE_DIR} && ./post_proc.py '
'--intervals netperf_tps.log',
ignore_failure=True)

samples = ParseNetperfAggregateOutput(proc_stdout)
Expand Down Expand Up @@ -217,13 +212,13 @@ def Run(benchmark_spec):
results.extend(external_ip_results)

# check if all server vms internal ips are reachable
runInternal = True
run_internal = True
for tmp_vm in server_vms:
if not vm_util.ShouldRunOnInternalIpAddress(client_vm, tmp_vm):
runInternal = False
run_internal = False
break

if runInternal:
if run_internal:
server_ips = list((vm.internal_ip for vm in server_vms))
internal_ip_results = RunNetperfAggregate(client_vm, server_ips)

Expand All @@ -243,4 +238,4 @@ def Cleanup(benchmark_spec):
"""
vms = benchmark_spec.vms
for vm in vms:
vms.RemoteCommand('sudo killall netserver')
vm.RemoteCommand('sudo killall netserver')
6 changes: 3 additions & 3 deletions perfkitbenchmarker/linux_benchmarks/ping_benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,8 @@ def Prepare(benchmark_spec): # pylint: disable=unused-argument
"""
if len(benchmark_spec.vms) != 2:
raise ValueError(
'Ping benchmark requires exactly two machines, found {0}'
.format(len(benchmark_spec.vms)))
'Ping benchmark requires exactly two machines, '
f'found {len(benchmark_spec.vms)}')
if vm_util.ShouldRunOnExternalIpAddress():
vms = benchmark_spec.vms
for vm in vms:
Expand Down Expand Up @@ -111,7 +111,7 @@ def _RunPing(sending_vm, receiving_vm, receiving_ip, ip_type):
return []

logging.info('Ping results (ip_type = %s):', ip_type)
ping_cmd = 'ping -c 100 %s' % receiving_ip
ping_cmd = f'ping -c 100 {receiving_ip}'
stdout, _ = sending_vm.RemoteCommand(ping_cmd, should_log=True)
stats = re.findall('([0-9]*\\.[0-9]*)', stdout.splitlines()[-1])
assert len(stats) == len(METRICS), stats
Expand Down
31 changes: 14 additions & 17 deletions perfkitbenchmarker/linux_packages/netperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,8 @@
'benchmark produces.')
FLAGS = flags.FLAGS
NETPERF_TAR = 'netperf-2.7.0.tar.gz'
NETPERF_URL = 'https://github.com/HewlettPackard/netperf/archive/%s' % (
NETPERF_TAR)
NETPERF_DIR = '%s/netperf-netperf-2.7.0' % linux_packages.INSTALL_DIR
NETPERF_URL = f'https://github.com/HewlettPackard/netperf/archive/{NETPERF_TAR}'
NETPERF_DIR = f'{linux_packages.INSTALL_DIR}/netperf-netperf-2.7.0'

NETPERF_SRC_DIR = NETPERF_DIR + '/src'
NETSERVER_PATH = NETPERF_SRC_DIR + '/netserver'
Expand All @@ -50,25 +49,23 @@ def _Install(vm):
vm.Install('build_tools')

_CopyTar(vm)
vm.RemoteCommand('cd %s && tar xvzf %s' %
(linux_packages.INSTALL_DIR, NETPERF_TAR))
vm.RemoteCommand(f'cd {linux_packages.INSTALL_DIR} && tar xvzf {NETPERF_TAR}')
# Modify netperf to print out all buckets in its histogram rather than
# aggregating, edit runemomniaggdemo script, and apply fix to
# allow it to compile with --enable-demo flag correctly
vm.PushDataFile('netperf.patch', NETLIB_PATCH)

vm.RemoteCommand('cd %s && patch -l -p1 < netperf.patch' %
NETPERF_DIR)
vm.RemoteCommand(f'cd {NETPERF_DIR} && patch -l -p1 < netperf.patch')

vm.RemoteCommand('cd %s && CFLAGS=-DHIST_NUM_OF_BUCKET=%s '
'./configure --enable-burst '
'--enable-demo --enable-histogram '
'&& make && sudo make install' %
(NETPERF_DIR, FLAGS.netperf_histogram_buckets))
vm.RemoteCommand(
f'cd {NETPERF_DIR} && '
f'CFLAGS=-DHIST_NUM_OF_BUCKET={FLAGS.netperf_histogram_buckets} '
'./configure --enable-burst '
'--enable-demo --enable-histogram '
'&& make && sudo make install')

vm.RemoteCommand('cd %s && chmod +x runemomniaggdemo.sh'
'&& chmod +x find_max_burst.sh'
% (NETPERF_EXAMPLE_DIR))
vm.RemoteCommand(f'cd {NETPERF_EXAMPLE_DIR} && chmod +x runemomniaggdemo.sh'
'&& chmod +x find_max_burst.sh')

# Set keepalive to a low value to ensure that the control connection
# is not closed by the cloud networking infrastructure.
Expand Down Expand Up @@ -98,8 +95,8 @@ def _CopyTar(vm):
vm.PushDataFile(NETPERF_TAR, remote_path=(linux_packages.INSTALL_DIR + '/'))
except data.ResourceNotFound:
vm.Install('curl')
vm.RemoteCommand('curl %s -L -o %s/%s' %
(NETPERF_URL, linux_packages.INSTALL_DIR, NETPERF_TAR))
vm.RemoteCommand(
f'curl {NETPERF_URL} -L -o {linux_packages.INSTALL_DIR}/{NETPERF_TAR}')


def YumInstall(vm):
Expand Down

0 comments on commit 44dd2d4

Please sign in to comment.