diff --git a/.github/workflows/build-trace-tools.yml b/.github/workflows/build-trace-tools.yml
new file mode 100644
index 000000000..955c0c908
--- /dev/null
+++ b/.github/workflows/build-trace-tools.yml
@@ -0,0 +1,19 @@
+name: Build the tracing tools
+
+on:
+  workflow_call:
+
+jobs:
+  run:
+    strategy:
+      matrix:
+        platform: [ubuntu-latest, macos-latest, windows-latest]
+    runs-on: ${{ matrix.platform }}
+
+    steps:
+      - name: Check out reactor-c repository
+        uses: actions/checkout@v3
+      - name: Build the tracing utils
+        working-directory: ./util/tracing
+        run: make
+        shell: bash
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fd78c1471..a59cc4bcf 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,6 +22,9 @@ jobs:
   build-rti:
     uses: lf-lang/reactor-c/.github/workflows/build-rti.yml@main
 
+  build-trace-tools:
+    uses: lf-lang/reactor-c/.github/workflows/build-trace-tools.yml@move-tracing-utils
+
   fetch-lf:
     uses: lf-lang/lingua-franca/.github/workflows/extract-ref.yml@master
     with:
diff --git a/util/tracing/Makefile b/util/tracing/Makefile
new file mode 100644
index 000000000..201f65584
--- /dev/null
+++ b/util/tracing/Makefile
@@ -0,0 +1,37 @@
+# Makefile for utilities that convert Lingua Franca trace files
+# into other formats.
+# @author: Edward A. Lee
+REACTOR_C=../../
+CC=gcc
+CFLAGS=	-I$(REACTOR_C)/include/core/ \
+	-I$(REACTOR_C)/include/core/modal_models \
+	-I$(REACTOR_C)/include/core/platform \
+	-I$(REACTOR_C)/include/core/utils \
+	-DLF_UNTHREADED=1 \
+	-Wall
+DEPS=
+LIBS=-lcurl
+
+INSTALL_PREFIX ?= /usr/local/bin
+
+%.o: %.c $(DEPS)
+	$(CC) -c -o $@ $< $(CFLAGS)
+
+trace_to_csv: trace_to_csv.o trace_util.o
+	$(CC) -o trace_to_csv trace_to_csv.o trace_util.o
+
+trace_to_chrome: trace_to_chrome.o trace_util.o
+	$(CC) -o trace_to_chrome trace_to_chrome.o trace_util.o
+
+trace_to_influxdb: trace_to_influxdb.o trace_util.o
+	$(CC) -o trace_to_influxdb trace_to_influxdb.o trace_util.o $(LIBS)
+
+install: trace_to_csv trace_to_chrome trace_to_influxdb
+	mv trace_to_csv $(INSTALL_PREFIX)
+	mv trace_to_chrome $(INSTALL_PREFIX)
+	mv trace_to_influxdb $(INSTALL_PREFIX)
+	ln -f -s launch-fedsd.sh $(INSTALL_PREFIX)/fedsd
+	chmod +x launch-fedsd.sh
+
+clean:
+	rm -f *.o
diff --git a/util/tracing/README.md b/util/tracing/README.md
new file mode 100644
index 000000000..2e9f051a3
--- /dev/null
+++ b/util/tracing/README.md
@@ -0,0 +1,30 @@
+## util/tracing
+
+This directory contains the source code for standalone executables that post-process
+tracing data created by the tracing function in Lingua Franca.
+
+Utilities for visualizing the data are contained in the [visualization](visualization/README.md)
+directory.
+
+* trace\_to\_csv: Creates a comma-separated values text file from a binary trace file.
+  The resulting file is suitable for analyzing in spreadsheet programs such as Excel.
+
+* trace\_to\_chrome: Creates a JSON file suitable for importing into Chrome's trace
+  visualizer. Point Chrome to chrome://tracing/ and load the resulting file.
+
+* trace\_to\_influxdb: A preliminary implementation that takes a binary trace file
+  and uploads its data into [InfluxDB](https://en.wikipedia.org/wiki/InfluxDB).
+
+* fedsd: A utility that converts trace files from a federated execution into sequence diagrams
+  showing the interactions between federates and the RTI.
+
+## Installing
+
+```
+  sudo make install
+```
+This installs the tracing executables to `/usr/local/bin`. To install them to a different location, use the `INSTALL_PREFIX` variable, e.g.
+ +``` + make install INSTALL_PREFIX=~/.local/bin +``` diff --git a/util/tracing/influxdb.h b/util/tracing/influxdb.h new file mode 100644 index 000000000..5d423d633 --- /dev/null +++ b/util/tracing/influxdb.h @@ -0,0 +1,447 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + Usage: + send_udp/post_http(c, + INFLUX_MEAS("foo"), + INFLUX_TAG("k", "v"), INFLUX_TAG("k2", "v2"), + INFLUX_F_STR("s", "string"), INFLUX_F_FLT("f", 28.39, 2), + + INFLUX_MEAS("bar"), + INFLUX_F_INT("i", 1048576), INFLUX_F_BOL("b", 1), + INFLUX_TS(1512722735522840439), + + INFLUX_END); + + **NOTICE**: For best performance you should sort tags by key before sending them to the database. + The sort should match the results from the [Go bytes.Compare function](https://golang.org/pkg/bytes/#Compare). + */ + +#define INFLUX_MEAS(m) IF_TYPE_MEAS, (m) +#define INFLUX_TAG(k, v) IF_TYPE_TAG, (k), (v) +#define INFLUX_F_STR(k, v) IF_TYPE_FIELD_STRING, (k), (v) +#define INFLUX_F_FLT(k, v, p) IF_TYPE_FIELD_FLOAT, (k), (double)(v), (int)(p) +#define INFLUX_F_INT(k, v) IF_TYPE_FIELD_INTEGER, (k), (long long)(v) +#define INFLUX_F_BOL(k, v) IF_TYPE_FIELD_BOOLEAN, (k), ((v) ? 1 : 0) +#define INFLUX_TS(ts) IF_TYPE_TIMESTAMP, (long long)(ts) +#define INFLUX_END IF_TYPE_ARG_END + +typedef struct _influx_client_t +{ + char* host; + int port; + char* db; // http only + char* usr; // http only [optional for auth] + char* pwd; // http only [optional for auth] + char* token; // http only +} influx_client_t; + +typedef struct _influx_v2_client_t +{ + char* host; + int port; + char* org; + char* bucket; + char* precision; + char* usr; // http only [optional for auth] + char* pwd; // http only [optional for auth] + char* token; // http only +} influx_v2_client_t; + +int format_line(char **buf, int *len, size_t used, ...); +int post_http(influx_client_t* c, ...); +int send_udp(influx_client_t* c, ...); +int post_curl(influx_v2_client_t* c, ...); + +#define IF_TYPE_ARG_END 0 +#define IF_TYPE_MEAS 1 +#define IF_TYPE_TAG 2 +#define IF_TYPE_FIELD_STRING 3 +#define IF_TYPE_FIELD_FLOAT 4 +#define IF_TYPE_FIELD_INTEGER 5 +#define IF_TYPE_FIELD_BOOLEAN 6 +#define IF_TYPE_TIMESTAMP 7 + +int _escaped_append(char** dest, size_t* len, size_t* used, const char* src, const char* escape_seq); +int _begin_line(char **buf); +int _format_line(char** buf, va_list ap); +int _format_line2(char** buf, va_list ap, size_t *, size_t); +int post_http_send_line(influx_client_t *c, char *buf, int len); +int send_udp_line(influx_client_t* c, char *line, int len); + +int post_http_send_line(influx_client_t *c, char *buf, int len) +{ + int sock = -1 , ret_code = 0, content_length = 0; + struct sockaddr_in addr; + struct iovec iv[2]; + char ch; + + iv[1].iov_base = buf; + iv[1].iov_len = len; + + if(!(iv[0].iov_base = (char*)malloc(len = 0x800))) { + free(iv[1].iov_base); + return -2; + } + + for(;;) { + iv[0].iov_len = snprintf((char*)iv[0].iov_base, len, + "POST /write?db=%s&u=%s&p=%s HTTP/1.1\r\n" + "Host: %s\r\n" + "Accept: application/json\r\n" + "Content-type: text/plain\r\n" + "Authorization: Token %s\r\n" + "Content-Length: %zd\r\n" + "\r\n", // Final blank line is needed. + c->db, c->usr ? c->usr : "", c->pwd ? c->pwd : "", c->host, c->token ? 
c->token : "", iv[1].iov_len); + if((int)iv[0].iov_len >= len && !(iv[0].iov_base = (char*)realloc(iv[0].iov_base, len *= 2))) { + free(iv[1].iov_base); + free(iv[0].iov_base); + return -3; + } + else + break; + } + + fprintf(stderr, "influxdb-c::post_http: iv[0] = '%s'\n", (char *)iv[0].iov_base); + fprintf(stderr, "influxdb-c::post_http: iv[1] = '%s'\n", (char *)iv[1].iov_base); + + addr.sin_family = AF_INET; + addr.sin_port = htons(c->port); + // EAL: Rather than just an IP address, allow a hostname, like "localhost" + struct hostent* resolved_host = gethostbyname(c->host); + if (!resolved_host) { + free(iv[1].iov_base); + free(iv[0].iov_base); + return -4; + } + memcpy(&addr.sin_addr, resolved_host->h_addr_list[0], resolved_host->h_length); + /* + if((addr.sin_addr.s_addr = inet_addr(resolved_host->h_addr)) == INADDR_NONE) { + free(iv[1].iov_base); + free(iv[0].iov_base); + return -4; + } + */ + + if((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + free(iv[1].iov_base); + free(iv[0].iov_base); + return -5; + } + + if(connect(sock, (struct sockaddr*)(&addr), sizeof(addr)) < 0) { + ret_code = -6; + goto END; + } + + if(writev(sock, iv, 2) < (int)(iv[0].iov_len + iv[1].iov_len)) { + ret_code = -7; + goto END; + } + iv[0].iov_len = len; + +#define _GET_NEXT_CHAR() (ch = (len >= (int)iv[0].iov_len && \ + (iv[0].iov_len = recv(sock, iv[0].iov_base, iv[0].iov_len, len = 0)) == (size_t)(-1) ? \ + 0 : *((char*)iv[0].iov_base + len++))) +#define _LOOP_NEXT(statement) for(;;) { if(!(_GET_NEXT_CHAR())) { ret_code = -8; goto END; } statement } +#define _UNTIL(c) _LOOP_NEXT( if(ch == c) break; ) +#define _GET_NUMBER(n) _LOOP_NEXT( if(ch >= '0' && ch <= '9') n = n * 10 + (ch - '0'); else break; ) +#define _(c) if((_GET_NEXT_CHAR()) != c) break; + + _UNTIL(' ')_GET_NUMBER(ret_code) + for(;;) { + _UNTIL('\n') + switch(_GET_NEXT_CHAR()) { + case 'C':_('o')_('n')_('t')_('e')_('n')_('t')_('-') + _('L')_('e')_('n')_('g')_('t')_('h')_(':')_(' ') + _GET_NUMBER(content_length) + break; + case '\r':_('\n') + while(content_length-- > 0 && _GET_NEXT_CHAR());// printf("%c", ch); + goto END; + } + if(!ch) { + ret_code = -10; + goto END; + } + } + ret_code = -11; +END: + close(sock); + free(iv[0].iov_base); + free(iv[1].iov_base); + return ret_code / 100 == 2 ? 0 : ret_code; +} +#undef _GET_NEXT_CHAR +#undef _LOOP_NEXT +#undef _UNTIL +#undef _GET_NUMBER +#undef _ + +int post_http(influx_client_t* c, ...) +{ + va_list ap; + char *line = NULL; + int ret_code = 0, len = 0; + + va_start(ap, c); + len = _format_line((char**)&line, ap); + va_end(ap); + if(len < 0) + return -1; + + ret_code = post_http_send_line(c, line, len); + + return ret_code; +} + +int send_udp_line(influx_client_t* c, char *line, int len) +{ + int sock = -1, ret = 0; + struct sockaddr_in addr; + + addr.sin_family = AF_INET; + addr.sin_port = htons(c->port); + if((addr.sin_addr.s_addr = inet_addr(c->host)) == INADDR_NONE) { + ret = -2; + goto END; + } + + if((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + ret = -3; + goto END; + } + + if(sendto(sock, line, len, 0, (struct sockaddr *)&addr, sizeof(addr)) < len) + ret = -4; + +END: + if (sock >= 0) { + close(sock); + } + return ret; +} + +int send_udp(influx_client_t* c, ...) +{ + int ret = 0, len; + va_list ap; + char* line = NULL; + + va_start(ap, c); + len = _format_line(&line, ap); + va_end(ap); + if(len < 0) + return -1; + + ret = send_udp_line(c, line, len); + + free(line); + return ret; +} + +int post_curl(influx_v2_client_t* c, ...) 
+{ + va_list ap; + char *data = NULL; + int len = 0; + va_start(ap, c); + len = _format_line((char**)&data, ap); + va_end(ap); + + CURL *curl; + + /* In windows, this will init the winsock stuff */ + curl_global_init(CURL_GLOBAL_ALL); + CURLcode res; + + /* get a curl handle */ + curl = curl_easy_init(); + if(!curl) { + return CURLE_FAILED_INIT; + } + + char* url_string = (char*)malloc(len); + snprintf(url_string, len, + "http://%s:%d/api/v2/write?org=%s&bucket=%s&precision=%s", + c->host ? c->host: "localhost", c->port ? c->port : 8086, c->org, c->bucket, c->precision ? c->precision : "ns"); + + curl_easy_setopt(curl, CURLOPT_URL, url_string); + free(url_string); + + char* token_string = (char*)malloc(120*sizeof(char)); + sprintf(token_string, "Authorization: Token %s", c->token); + + struct curl_slist *list = NULL; + list = curl_slist_append(list, token_string); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, list); + free(token_string); + + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data); + curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcurl-agent/1.0"); + + /* Perform the request, res will get the return code */ + res = curl_easy_perform(curl); + /* Check for errors */ + if(res != CURLE_OK){ + fprintf(stderr, "curl_easy_perform() failed: %s\n", + curl_easy_strerror(res)); + } + + free(data); + curl_easy_cleanup(curl); + curl_global_cleanup(); + return res; +} + +int format_line(char **buf, int *len, size_t used, ...) +{ + va_list ap; + va_start(ap, used); + used = _format_line2(buf, ap, (size_t *)len, used); + va_end(ap); + if(*len < 0) + return -1; + else + return used; +} + +int _begin_line(char **buf) +{ + int len = 0x100; + if(!(*buf = (char*)malloc(len))) + return -1; + return len; +} + +int _format_line(char** buf, va_list ap) +{ + size_t len = 0; + *buf = NULL; + return _format_line2(buf, ap, &len, 0); +} + +int _format_line2(char** buf, va_list ap, size_t *_len, size_t used) +{ +#define _APPEND(fmter...) \ + for(;;) {\ + if((written = snprintf(*buf + used, len - used, ##fmter)) < 0)\ + goto FAIL;\ + if(used + written >= len && !(*buf = (char*)realloc(*buf, len *= 2)))\ + return -1;\ + else {\ + used += written;\ + break;\ + }\ + } + + size_t len = *_len; + int written = 0, type = 0, last_type = 0; + unsigned long long i = 0; + double d = 0.0; + + if (*buf == NULL) { + len = _begin_line(buf); + used = 0; + } + + type = va_arg(ap, int); + while(type != IF_TYPE_ARG_END) { + if(type >= IF_TYPE_TAG && type <= IF_TYPE_FIELD_BOOLEAN) { + if(last_type < IF_TYPE_MEAS || last_type > (type == IF_TYPE_TAG ? IF_TYPE_TAG : IF_TYPE_FIELD_BOOLEAN)) + goto FAIL; + _APPEND("%c", (last_type <= IF_TYPE_TAG && type > IF_TYPE_TAG) ? ' ' : ','); + if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= ")) + return -2; + _APPEND("="); + } + switch(type) { + case IF_TYPE_MEAS: + if(last_type) + _APPEND("\n"); + if(last_type && last_type <= IF_TYPE_TAG) + goto FAIL; + if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ", ")) + return -3; + break; + case IF_TYPE_TAG: + if(_escaped_append(buf, &len, &used, va_arg(ap, char*), ",= ")) + return -4; + break; + case IF_TYPE_FIELD_STRING: + _APPEND("\""); + if(_escaped_append(buf, &len, &used, va_arg(ap, char*), "\"")) + return -5; + _APPEND("\""); + break; + case IF_TYPE_FIELD_FLOAT: + d = va_arg(ap, double); + i = va_arg(ap, int); + _APPEND("%.*lf", (int)i, d); + break; + case IF_TYPE_FIELD_INTEGER: + i = va_arg(ap, long long); + _APPEND("%lldi", i); + break; + case IF_TYPE_FIELD_BOOLEAN: + i = va_arg(ap, int); + _APPEND("%c", i ? 
't' : 'f'); + break; + case IF_TYPE_TIMESTAMP: + if(last_type < IF_TYPE_FIELD_STRING || last_type > IF_TYPE_FIELD_BOOLEAN) + goto FAIL; + i = va_arg(ap, long long); + _APPEND(" %lld", i); + break; + default: + goto FAIL; + } + last_type = type; + type = va_arg(ap, int); + } + _APPEND("\n"); + if(last_type <= IF_TYPE_TAG) + goto FAIL; + *_len = len; + return used; +FAIL: + free(*buf); + *buf = NULL; + return -1; +} +#undef _APPEND + +int _escaped_append(char** dest, size_t* len, size_t* used, const char* src, const char* escape_seq) +{ + size_t i = 0; + + for(;;) { + if((i = strcspn(src, escape_seq)) > 0) { + if(*used + i > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2))) + return -1; + strncpy(*dest + *used, src, i); + *used += i; + src += i; + } + if(*src) { + if(*used + 2 > *len && !(*dest = (char*)realloc(*dest, (*len) *= 2))) + return -2; + (*dest)[(*used)++] = '\\'; + (*dest)[(*used)++] = *src++; + } + else + return 0; + } + return 0; +} diff --git a/util/tracing/launch-fedsd.sh b/util/tracing/launch-fedsd.sh new file mode 100755 index 000000000..341e67332 --- /dev/null +++ b/util/tracing/launch-fedsd.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +#============================================================================ +# Description: Visualize federated trace data for RTI-federate interactions. +# Authors: Chadlia Jerad +# Edward A. Lee +# Usage: Usage: fedsd -r [rti.csv] -f [fed.csv ...] +#============================================================================ + +#============================================================================ +# Preamble +#============================================================================ + +# Copied from build.sh FIXME: How to avoid copying + +# Find the directory in which this script resides in a way that is compatible +# with MacOS, which has a `readlink` implementation that does not support the +# necessary `-f` flag to canonicalize by following every symlink in every +# component of the given name recursively. +# This solution, adapted from an example written by Geoff Nixon, is POSIX- +# compliant and robust to symbolic links. If a chain of more than 1000 links +# is encountered, we return. +find_dir() ( + start_dir=$PWD + cd "$(dirname "$1")" + link=$(readlink "$(basename "$1")") + count=0 + while [ "${link}" ]; do + if [[ "${count}" -lt 1000 ]]; then + cd "$(dirname "${link}")" + link=$(readlink "$(basename "$1")") + ((count++)) + else + return + fi + done + real_path="$PWD/$(basename "$1")" + cd "${start_dir}" + echo `dirname "${real_path}"` +) + +# Report fatal error and exit. +function fatal_error() { + 1>&2 echo -e "\e[1mfedsd: \e[31mfatal error: \e[0m$1" + exit 1 +} + +abs_path="$(find_dir "$0")" + +if [[ "${abs_path}" ]]; then + base=`dirname $(dirname ${abs_path})` +else + fatal_error "Unable to determine absolute path to $0." 
+fi + +# Get the lft files +lft_files_list=$@ + +if [ -z "$lft_files_list" ] +then + echo "Usage: fedsd [lft files]" + exit 1 +fi + +# Initialize variables +csv_files_list='' +extension='.csv' +rti_csv_file='' + +# Iterate over the lft file list to: +# - First, transform into csv +# - Second, construct the csv fiel name +# - Then construct the csv file list +# The csv file list does include the rti, it is put in a separate variable +for each_lft_file in $lft_files_list + do + # Tranform to csv + ${base}/bin/trace_to_csv $each_lft_file + # Get the file name + csv=${each_lft_file%.*} + if [ $csv == 'rti' ] + then + # Set the rti csv file + rti_csv_file='rti.csv' + else + # Construct the csv file name and add it to the list + csv_files_list="$csv$extension $csv_files_list" + fi + done + +# echo $lft_files_list +# echo $rti_csv_file +# echo $csv_files_list + +# FIXME: Check that python3 is in the path. +if [ ! -z $rti_csv_file ] +then + python3 "${base}/util/tracing/visualization/fedsd.py" "-f" $csv_files_list +else + echo Building the communication diagram for the following trace files: $lft_files_list in trace_svg.html + python3 "${base}/util/tracing/visualization/fedsd.py" "-r" "$rti_csv_file" "-f" $csv_files_list +fi diff --git a/util/tracing/trace_to_chrome.c b/util/tracing/trace_to_chrome.c new file mode 100644 index 000000000..8e0c29dba --- /dev/null +++ b/util/tracing/trace_to_chrome.c @@ -0,0 +1,452 @@ +/** + * @file + * @author Edward A. Lee + * + * @section LICENSE +Copyright (c) 2020, The University of California at Berkeley + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + * @section DESCRIPTION + * Standalone program to convert a Lingua Franca trace file to a JSON file suitable + * for viewing in Chrome's event visualizer. To visualize the resulting file, + * point your chrome browser to chrome://tracing/ and the load the .json file. + */ +#define LF_TRACE +#include +#include +#include "reactor.h" +#include "trace.h" +#include "trace_util.h" + +#define PID_FOR_USER_EVENT 1000000 // Assumes no more than a million reactors. +#define PID_FOR_WORKER_WAIT 0 // Use 1000001 to show in separate trace. +#define PID_FOR_WORKER_ADVANCING_TIME 0 // Use 1000002 to show in separate trace. +#define PID_FOR_UNKNOWN_EVENT 2000000 + +/** Maximum thread ID seen. 
*/ +int max_thread_id = 0; + +/** File containing the trace binary data. */ +FILE* trace_file = NULL; + +/** File for writing the output data. */ +FILE* output_file = NULL; + +/** + * Print a usage message. + */ +void usage() { + printf("\nUsage: trace_to_chrome [options] trace_file (with or without .lft extension)\n"); + printf("Options: \n"); + printf(" -p, --physical\n"); + printf(" Use only physical time, not logical time, for all horizontal axes.\n"); + printf("\n"); +} + +/** Maximum reaction number encountered. */ +int max_reaction_number = 0; + +/** Indicator to plot vs. physical time only. */ +bool physical_time_only = false; + +/** + * Read a trace in the specified file and write it to the specified json file. + * @param trace_file An open trace file. + * @param output_file An open output .json file. + * @return The number of records read or 0 upon seeing an EOF. + */ +size_t read_and_write_trace(FILE* trace_file, FILE* output_file) { + int trace_length = read_trace(trace_file); + if (trace_length == 0) return 0; + // Write each line. + for (int i = 0; i < trace_length; i++) { + char* reaction_name = "\"UNKNOWN\""; + + // Ignore federated trace events. + if (trace[i].event_type > federated) continue; + + if (trace[i].dst_id >= 0) { + reaction_name = (char*)malloc(4); + snprintf(reaction_name, 4, "%d", trace[i].dst_id); + } + // printf("DEBUG: Reactor's self struct pointer: %p\n", trace[i].pointer); + int reactor_index; + char* reactor_name = get_object_description(trace[i].pointer, &reactor_index); + if (reactor_name == NULL) { + if (trace[i].event_type == worker_wait_starts || trace[i].event_type == worker_wait_ends) { + reactor_name = "WAIT"; + } else if (trace[i].event_type == scheduler_advancing_time_starts + || trace[i].event_type == scheduler_advancing_time_starts) { + reactor_name = "ADVANCE TIME"; + } else { + reactor_name = "NO REACTOR"; + } + } + // Default name is the reactor name. + char* name = reactor_name; + + int trigger_index; + char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_index); + if (trigger_name == NULL) { + trigger_name = "NONE"; + } + // By default, the timestamp used in the trace is the elapsed + // physical time in microseconds. But for schedule_called events, + // it will instead be the logical time at which the action or timer + // is to be scheduled. + interval_t elapsed_physical_time = (trace[i].physical_time - start_time)/1000; + interval_t timestamp = elapsed_physical_time; + interval_t elapsed_logical_time = (trace[i].logical_time - start_time)/1000; + + if (elapsed_physical_time < 0) { + fprintf(stderr, "WARNING: Negative elapsed physical time %lld. Skipping trace entry.\n", elapsed_physical_time); + continue; + } + if (elapsed_logical_time < 0) { + fprintf(stderr, "WARNING: Negative elapsed logical time %lld. Skipping trace entry.\n", elapsed_logical_time); + continue; + } + + // Default thread id is the worker number. + int thread_id = trace[i].src_id; + + char* args; + asprintf(&args, "{" + "\"reaction\": %s," // reaction number. + "\"logical time\": %lld," // logical time. + "\"physical time\": %lld," // physical time. + "\"microstep\": %d" // microstep. 
+ "}", + reaction_name, + elapsed_logical_time, + elapsed_physical_time, + trace[i].microstep + ); + char* phase; + int pid; + switch(trace[i].event_type) { + case reaction_starts: + phase = "B"; + pid = 0; // Process 0 will be named "Execution" + break; + case reaction_ends: + phase = "E"; + pid = 0; // Process 0 will be named "Execution" + break; + case schedule_called: + phase = "i"; + pid = reactor_index + 1; // One pid per reactor. + if (!physical_time_only) { + timestamp = elapsed_logical_time + trace[i].extra_delay/1000; + } + thread_id = trigger_index; + name = trigger_name; + break; + case user_event: + pid = PID_FOR_USER_EVENT; + phase= "i"; + if (!physical_time_only) { + timestamp = elapsed_logical_time; + } + thread_id = reactor_index; + break; + case user_value: + pid = PID_FOR_USER_EVENT; + phase= "C"; + if (!physical_time_only) { + timestamp = elapsed_logical_time; + } + thread_id = reactor_index; + free(args); + asprintf(&args, "{\"value\": %lld}", trace[i].extra_delay); + break; + case worker_wait_starts: + pid = PID_FOR_WORKER_WAIT; + phase = "B"; + break; + case worker_wait_ends: + pid = PID_FOR_WORKER_WAIT; + phase = "E"; + break; + case scheduler_advancing_time_starts: + pid = PID_FOR_WORKER_ADVANCING_TIME; + phase = "B"; + break; + case scheduler_advancing_time_ends: + pid = PID_FOR_WORKER_ADVANCING_TIME; + phase = "E"; + break; + default: + fprintf(stderr, "WARNING: Unrecognized event type %d: %s\n", + trace[i].event_type, trace_event_names[trace[i].event_type]); + pid = PID_FOR_UNKNOWN_EVENT; + phase = "i"; + } + fprintf(output_file, "{" + "\"name\": \"%s\", " // name is the reactor or trigger name. + "\"cat\": \"%s\", " // category is the type of event. + "\"ph\": \"%s\", " // phase is "B" (begin), "E" (end), or "X" (complete). + "\"tid\": %d, " // thread ID. + "\"pid\": %d, " // process ID is required. + "\"ts\": %lld, " // timestamp in microseconds + "\"args\": %s" // additional arguments from above. + "},\n", + name, + trace_event_names[trace[i].event_type], + phase, + thread_id, + pid, + timestamp, + args + ); + free(args); + + if (trace[i].src_id > max_thread_id) { + max_thread_id = trace[i].src_id; + } + // If the event is reaction_starts and physical_time_only is not set, + // then also generate an instantaneous + // event to be shown in the reactor's section, along with timers and actions. + if (trace[i].event_type == reaction_starts && !physical_time_only) { + phase = "i"; + pid = reactor_index + 1; + reaction_name = (char*)malloc(4); + char name[13]; + snprintf(name, 13, "reaction %d", trace[i].dst_id); + + // NOTE: If the reactor has more than 1024 timers and actions, then + // there will be a collision of thread IDs here. + thread_id = 1024 + trace[i].dst_id; + if (trace[i].dst_id > max_reaction_number) { + max_reaction_number = trace[i].dst_id; + } + + fprintf(output_file, "{" + "\"name\": \"%s\", " // name is the reactor or trigger name. + "\"cat\": \"%s\", " // category is the type of event. + "\"ph\": \"%s\", " // phase is "B" (begin), "E" (end), or "X" (complete). + "\"tid\": %d, " // thread ID. + "\"pid\": %d, " // process ID is required. + "\"ts\": %lld, " // timestamp in microseconds + "\"args\": {" + "\"microstep\": %d, " // microstep. + "\"physical time\": %lld" // physical time. + "}},\n", + name, + "Reaction", + phase, + thread_id, + pid, + elapsed_logical_time, + trace[i].microstep, + elapsed_physical_time + ); + } + } + return trace_length; +} + +/** + * Write metadata events, which provide names in the renderer. 
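+ * For example, the entry that names the main thread is emitted as a JSON record of the form
+ * {"name": "thread_name", "ph": "M", "pid": 0, "tid": 0, "args": {"name": "Main thread"}}.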
+ * @param output_file An open output .json file. + */ +void write_metadata_events(FILE* output_file) { + // Thread 0 is the main thread. + fprintf(output_file, "{" + "\"name\": \"thread_name\", " + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": 0, " + "\"tid\": 0, " + "\"args\": {" + "\"name\": \"Main thread\"" + "}},\n" + ); + + // Name the worker threads. + for (int i = 1; i <= max_thread_id; i++) { + fprintf(output_file, "{" + "\"name\": \"thread_name\", " + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": 0, " + "\"tid\": %d, " + "\"args\": {" + "\"name\": \"Worker %d\"" + "}},\n", + i, i + ); + fprintf(output_file, "{" + "\"name\": \"thread_name\", " + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " + "\"tid\": %d, " + "\"args\": {" + "\"name\": \"Worker %d\"" + "}},\n", + PID_FOR_WORKER_WAIT, i, i + ); + fprintf(output_file, "{" + "\"name\": \"thread_name\", " + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " + "\"tid\": %d, " + "\"args\": {" + "\"name\": \"Worker %d\"" + "}},\n", + PID_FOR_WORKER_ADVANCING_TIME, i, i + ); + } + + // Name reactions for each reactor. + for (int reactor_index = 1; reactor_index <= object_table_size; reactor_index++) { + for (int reaction_number = 0; reaction_number <= max_reaction_number; reaction_number++) { + fprintf(output_file, "{" + "\"name\": \"thread_name\", " + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " + "\"tid\": %d, " + "\"args\": {" + "\"name\": \"Reaction %d\"" + "}},\n", + reactor_index, reaction_number + 1024, reaction_number + ); + } + } + + // Write the reactor names for the logical timelines. + for (int i = 0; i < object_table_size; i++) { + if (object_table[i].type == trace_trigger) { + // We need the reactor index (not the name) to set the pid. + int reactor_index; + get_object_description(object_table[i].pointer, &reactor_index); + fprintf(output_file, "{" + "\"name\": \"thread_name\", " // metadata for thread name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to identify by reactor. + "\"tid\": %d," // The "thread" to label with action or timer name. + "\"args\": {" + "\"name\": \"Trigger %s\"" + "}},\n", + reactor_index + 1, // Offset of 1 prevents collision with Execution. + i, + object_table[i].description); + } else if (object_table[i].type == trace_reactor) { + fprintf(output_file, "{" + "\"name\": \"process_name\", " // metadata for process name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to label as reactor. + "\"args\": {" + "\"name\": \"Reactor %s reactions, actions, and timers in logical time\"" + "}},\n", + i + 1, // Offset of 1 prevents collision with Execution. + object_table[i].description); + } else if (object_table[i].type == trace_user) { + fprintf(output_file, "{" + "\"name\": \"thread_name\", " // metadata for thread name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to label as reactor. + "\"tid\": %d," // The "thread" to label with action or timer name. + "\"args\": {" + "\"name\": \"%s\"" + "}},\n", + PID_FOR_USER_EVENT, + i, // This is the index in the object table. + object_table[i].description); + } + } + // Name the "process" for "Execution" + fprintf(output_file, "{" + "\"name\": \"process_name\", " // metadata for process name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": 0, " // the "process" to label "Execution". 
+ "\"args\": {" + "\"name\": \"Execution of %s\"" + "}},\n", + top_level); + // Name the "process" for "Worker Waiting" if the PID is not the main execution one. + if (PID_FOR_WORKER_WAIT > 0) { + fprintf(output_file, "{" + "\"name\": \"process_name\", " // metadata for process name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to label "Workers waiting for reaction queue". + "\"args\": {" + "\"name\": \"Workers waiting for reaction queue\"" + "}},\n", + PID_FOR_WORKER_WAIT); + } + // Name the "process" for "Worker advancing time" if the PID is not the main execution one. + if (PID_FOR_WORKER_ADVANCING_TIME > 0) { + fprintf(output_file, "{" + "\"name\": \"process_name\", " // metadata for process name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to label "Workers waiting for reaction queue". + "\"args\": {" + "\"name\": \"Workers advancing time\"" + "}},\n", + PID_FOR_WORKER_ADVANCING_TIME); + } + // Name the "process" for "User Events" + // Last metadata entry lacks a comma. + fprintf(output_file, "{" + "\"name\": \"process_name\", " // metadata for process name. + "\"ph\": \"M\", " // mark as metadata. + "\"pid\": %d, " // the "process" to label "User events". + "\"args\": {" + "\"name\": \"User events in %s, shown in physical time:\"" + "}}\n", + PID_FOR_USER_EVENT, top_level); +} + +int main(int argc, char* argv[]) { + char* filename = NULL; + for (int i = 1; i < argc; i++) { + if (strncmp(argv[i], "-p", 2) == 0 || strncmp(argv[i], "--physical", 10) == 0) { + physical_time_only = true; + } else if (argv[i][0] == '-') { + usage(); + return(1); + } else { + filename = argv[i]; + } + } + if (filename == NULL) { + usage(); + exit(0); + } + + // Open the trace file. + trace_file = open_file(filename, "r"); + + // Construct the name of the csv output file and open it. + char* root = root_name(filename); + char json_filename[strlen(root) + 6]; + strcpy(json_filename, root); + strcat(json_filename, ".json"); + output_file = open_file(json_filename, "w"); + + if (read_header(trace_file) >= 0) { + // Write the opening bracket into the json file. + fprintf(output_file, "{ \"traceEvents\": [\n"); + while (read_and_write_trace(trace_file, output_file) != 0) {}; + write_metadata_events(output_file); + fprintf(output_file, "]}\n"); + } +} diff --git a/util/tracing/trace_to_csv.c b/util/tracing/trace_to_csv.c new file mode 100644 index 000000000..1abf08c08 --- /dev/null +++ b/util/tracing/trace_to_csv.c @@ -0,0 +1,436 @@ +/** + * @file + * @author Edward A. Lee + * + * @section LICENSE +Copyright (c) 2020, The University of California at Berkeley + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + * @section DESCRIPTION + * Standalone program to convert a Lingua Franca trace file to a comma-separated values + * text file. + */ +#define LF_TRACE +#include +#include +#include "reactor.h" +#include "trace.h" +#include "trace_util.h" + +#define MAX_NUM_REACTIONS 64 // Maximum number of reactions reported in summary stats. +#define MAX_NUM_WORKERS 64 + +/** File containing the trace binary data. */ +FILE* trace_file = NULL; + +/** File for writing the output data. */ +FILE* output_file = NULL; + +/** File for writing summary statistics. */ +FILE* summary_file = NULL; + +/** Size of the stats table is object_table_size plus twice MAX_NUM_WORKERS. */ +int table_size; + +/** + * Print a usage message. + */ +void usage() { + printf("\nUsage: trace_to_csv [options] trace_file_root (without .lft extension)\n\n"); + /* No options yet: + printf("\nOptions: \n\n"); + printf(" -f, --fast [true | false]\n"); + printf(" Whether to wait for physical time to match logical time.\n\n"); + printf("\n\n"); + */ +} + +/** + * Struct for collecting summary statistics for reaction invocations. + */ +typedef struct reaction_stats_t { + int occurrences; + instant_t latest_start_time; + interval_t total_exec_time; + interval_t max_exec_time; + interval_t min_exec_time; +} reaction_stats_t; + +/** + * Struct for collecting summary statistics. + */ +typedef struct summary_stats_t { + trace_event_t event_type; // Use reaction_ends for reactions. + const char* description; // Description in the reaction table (e.g. reactor name). + int occurrences; // Number of occurrences of this description. + int num_reactions_seen; + reaction_stats_t reactions[MAX_NUM_REACTIONS]; +} summary_stats_t; + +/** + * Sumary stats array. This array has the same size as the + * object table. Pointer in the array will be void if there + * are no stats for the object table item. + */ +summary_stats_t** summary_stats; + +/** Largest timestamp seen. */ +instant_t latest_time = 0LL; + +/** + * Read a trace in the specified file and write it to the specified CSV file. + * @return The number of records read or 0 upon seeing an EOF. + */ +size_t read_and_write_trace() { + int trace_length = read_trace(); + if (trace_length == 0) return 0; + // Write each line. 
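+    // Each record becomes one CSV row with the columns named by the header line written in main():
+    // Event, Reactor, Source, Destination, Elapsed Logical Time, Microstep,
+    // Elapsed Physical Time, Trigger, Extra Delay.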
+ for (int i = 0; i < trace_length; i++) { + // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer); + int object_instance = -1; + char* reactor_name = get_object_description(trace[i].pointer, &object_instance); + if (reactor_name == NULL) { + reactor_name = "NO REACTOR"; + } + int trigger_instance = -1; + char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance); + if (trigger_name == NULL) { + trigger_name = "NO TRIGGER"; + } + fprintf(output_file, "%s, %s, %d, %d, %lld, %d, %lld, %s, %lld\n", + trace_event_names[trace[i].event_type], + reactor_name, + trace[i].src_id, + trace[i].dst_id, + trace[i].logical_time - start_time, + trace[i].microstep, + trace[i].physical_time - start_time, + trigger_name, + trace[i].extra_delay + ); + // Update summary statistics. + if (trace[i].physical_time > latest_time) { + latest_time = trace[i].physical_time; + } + if (object_instance >= 0 && summary_stats[NUM_EVENT_TYPES + object_instance] == NULL) { + summary_stats[NUM_EVENT_TYPES + object_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t)); + } + if (trigger_instance >= 0 && summary_stats[NUM_EVENT_TYPES + trigger_instance] == NULL) { + summary_stats[NUM_EVENT_TYPES + trigger_instance] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t)); + } + + summary_stats_t* stats = NULL; + interval_t exec_time; + reaction_stats_t* rstats; + int index; + + // Count of event type. + if (summary_stats[trace[i].event_type] == NULL) { + summary_stats[trace[i].event_type] = (summary_stats_t*)calloc(1, sizeof(summary_stats_t)); + } + summary_stats[trace[i].event_type]->event_type = trace[i].event_type; + summary_stats[trace[i].event_type]->description = trace_event_names[trace[i].event_type]; + summary_stats[trace[i].event_type]->occurrences++; + + switch(trace[i].event_type) { + case reaction_starts: + case reaction_ends: + // This code relies on the mutual exclusion of reactions in a reactor + // and the ordering of reaction_starts and reaction_ends events. + if (trace[i].dst_id >= MAX_NUM_REACTIONS) { + fprintf(stderr, "WARNING: Too many reactions. Not all will be shown in summary file.\n"); + continue; + } + stats = summary_stats[NUM_EVENT_TYPES + object_instance]; + stats->description = reactor_name; + if (trace[i].dst_id >= stats->num_reactions_seen) { + stats->num_reactions_seen = trace[i].dst_id + 1; + } + rstats = &stats->reactions[trace[i].dst_id]; + if (trace[i].event_type == reaction_starts) { + rstats->latest_start_time = trace[i].physical_time; + } else { + rstats->occurrences++; + exec_time = trace[i].physical_time - rstats->latest_start_time; + rstats->latest_start_time = 0LL; + rstats->total_exec_time += exec_time; + if (exec_time > rstats->max_exec_time) { + rstats->max_exec_time = exec_time; + } + if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) { + rstats->min_exec_time = exec_time; + } + } + break; + case schedule_called: + if (trigger_instance < 0) { + // No trigger. Do not report. + continue; + } + stats = summary_stats[NUM_EVENT_TYPES + trigger_instance]; + stats->description = trigger_name; + break; + case user_event: + // Although these are not exec times and not reactions, + // commandeer the first entry in the reactions array to track values. + stats = summary_stats[NUM_EVENT_TYPES + object_instance]; + stats->description = reactor_name; + break; + case user_value: + // Although these are not exec times and not reactions, + // commandeer the first entry in the reactions array to track values. 
+ stats = summary_stats[NUM_EVENT_TYPES + object_instance]; + stats->description = reactor_name; + rstats = &stats->reactions[0]; + rstats->occurrences++; + // User values are stored in the "extra_delay" field, which is an interval_t. + interval_t value = trace[i].extra_delay; + rstats->total_exec_time += value; + if (value > rstats->max_exec_time) { + rstats->max_exec_time = value; + } + if (value < rstats->min_exec_time || rstats->min_exec_time == 0LL) { + rstats->min_exec_time = value; + } + break; + case worker_wait_starts: + case worker_wait_ends: + case scheduler_advancing_time_starts: + case scheduler_advancing_time_ends: + // Use the reactions array to store data. + // There will be two entries per worker, one for waits on the + // reaction queue and one for waits while advancing time. + index = trace[i].src_id * 2; + // Even numbered indices are used for waits on reaction queue. + // Odd numbered indices for waits for time advancement. + if (trace[i].event_type == scheduler_advancing_time_starts + || trace[i].event_type == scheduler_advancing_time_ends) { + index++; + } + if (object_table_size + index >= table_size) { + fprintf(stderr, "WARNING: Too many workers. Not all will be shown in summary file.\n"); + continue; + } + stats = summary_stats[NUM_EVENT_TYPES + object_table_size + index]; + if (stats == NULL) { + stats = (summary_stats_t*)calloc(1, sizeof(summary_stats_t)); + summary_stats[NUM_EVENT_TYPES + object_table_size + index] = stats; + } + // num_reactions_seen here will be used to store the number of + // entries in the reactions array, which is twice the number of workers. + if (index >= stats->num_reactions_seen) { + stats->num_reactions_seen = index; + } + rstats = &stats->reactions[index]; + if (trace[i].event_type == worker_wait_starts + || trace[i].event_type == scheduler_advancing_time_starts + ) { + rstats->latest_start_time = trace[i].physical_time; + } else { + rstats->occurrences++; + exec_time = trace[i].physical_time - rstats->latest_start_time; + rstats->latest_start_time = 0LL; + rstats->total_exec_time += exec_time; + if (exec_time > rstats->max_exec_time) { + rstats->max_exec_time = exec_time; + } + if (exec_time < rstats->min_exec_time || rstats->min_exec_time == 0LL) { + rstats->min_exec_time = exec_time; + } + } + break; + default: + // No special summary statistics for the rest. + break; + } + // Common stats across event types. + if (stats != NULL) { + stats->occurrences++; + stats->event_type = trace[i].event_type; + } + } + return trace_length; +} + +/** + * Write the summary file. + */ +void write_summary_file() { + // Overall stats. + fprintf(summary_file, "Start time:, %lld\n", start_time); + fprintf(summary_file, "End time:, %lld\n", latest_time); + fprintf(summary_file, "Total time:, %lld\n", latest_time - start_time); + + fprintf(summary_file, "\nTotal Event Occurrences\n"); + for (int i = 0; i < NUM_EVENT_TYPES; i++) { + summary_stats_t* stats = summary_stats[i]; + if (stats != NULL) { + fprintf(summary_file, "%s, %d\n", + stats->description, + stats->occurrences + ); + } + } + + // First pass looks for reaction invocations. + // First print a header. 
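+    // The "Pct Total Time" column reports each reaction's total execution time as a
+    // percentage of the overall trace span (latest_time - start_time), and "Avg Time"
+    // is the total execution time divided by the number of occurrences.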
+ fprintf(summary_file, "\nReaction Executions\n"); + fprintf(summary_file, "Reactor, Reaction, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n"); + for (int i = NUM_EVENT_TYPES; i < table_size; i++) { + summary_stats_t* stats = summary_stats[i]; + if (stats != NULL && stats->num_reactions_seen > 0) { + for (int j = 0; j < stats->num_reactions_seen; j++) { + reaction_stats_t* rstats = &stats->reactions[j]; + if (rstats->occurrences > 0) { + fprintf(summary_file, "%s, %d, %d, %lld, %f, %lld, %lld, %lld\n", + stats->description, + j, // Reaction number. + rstats->occurrences, + rstats->total_exec_time, + rstats->total_exec_time * 100.0 / (latest_time - start_time), + rstats->total_exec_time / rstats->occurrences, + rstats->max_exec_time, + rstats->min_exec_time + ); + } + } + } + } + + // Next pass looks for calls to schedule. + bool first = true; + for (int i = NUM_EVENT_TYPES; i < table_size; i++) { + summary_stats_t* stats = summary_stats[i]; + if (stats != NULL && stats->event_type == schedule_called && stats->occurrences > 0) { + if (first) { + first = false; + fprintf(summary_file, "\nSchedule calls\n"); + fprintf(summary_file, "Trigger, Occurrences\n"); + } + fprintf(summary_file, "%s, %d\n", stats->description, stats->occurrences); + } + } + + // Next pass looks for user-defined events. + first = true; + for (int i = NUM_EVENT_TYPES; i < table_size; i++) { + summary_stats_t* stats = summary_stats[i]; + if (stats != NULL + && (stats->event_type == user_event || stats->event_type == user_value) + && stats->occurrences > 0) { + if (first) { + first = false; + fprintf(summary_file, "\nUser events\n"); + fprintf(summary_file, "Description, Occurrences, Total Value, Avg Value, Max Value, Min Value\n"); + } + fprintf(summary_file, "%s, %d", stats->description, stats->occurrences); + if (stats->event_type == user_value && stats->reactions[0].occurrences > 0) { + // This assumes that the first "reactions" entry has been comandeered for this data. + fprintf(summary_file, ", %lld, %lld, %lld, %lld\n", + stats->reactions[0].total_exec_time, + stats->reactions[0].total_exec_time / stats->reactions[0].occurrences, + stats->reactions[0].max_exec_time, + stats->reactions[0].min_exec_time + ); + } else { + fprintf(summary_file, "\n"); + } + } + } + + // Next pass looks for wait events. + first = true; + for (int i = NUM_EVENT_TYPES; i < table_size; i++) { + summary_stats_t* stats = summary_stats[i]; + if (stats != NULL && ( + stats->event_type == worker_wait_ends + || stats->event_type == scheduler_advancing_time_ends) + ) { + if (first) { + first = false; + fprintf(summary_file, "\nWorkers Waiting\n"); + fprintf(summary_file, "Worker, Waiting On, Occurrences, Total Time, Pct Total Time, Avg Time, Max Time, Min Time\n"); + } + char* waitee = "reaction queue"; + if (stats->event_type == scheduler_advancing_time_ends + || stats->event_type == scheduler_advancing_time_starts) { + waitee = "advancing time"; + } + for (int j = 0; j <= stats->num_reactions_seen; j++) { + reaction_stats_t* rstats = &stats->reactions[j]; + if (rstats->occurrences > 0) { + fprintf(summary_file, "%d, %s, %d, %lld, %f, %lld, %lld, %lld\n", + j / 2, + waitee, + rstats->occurrences, + rstats->total_exec_time, + rstats->total_exec_time * 100.0 / (latest_time - start_time), + rstats->total_exec_time / rstats->occurrences, + rstats->max_exec_time, + rstats->min_exec_time + ); + } + } + } + } +} + +int main(int argc, char* argv[]) { + if (argc != 2) { + usage(); + exit(0); + } + // Open the trace file. 
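+    // Note that open_file() prints an error message and exits if the file cannot be
+    // opened, and it records the file so that termination() closes it on exit.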
+ trace_file = open_file(argv[1], "r"); + if (trace_file == NULL) exit(1); + + // Construct the name of the csv output file and open it. + char* root = root_name(argv[1]); + char csv_filename[strlen(root) + 5]; + strcpy(csv_filename, root); + strcat(csv_filename, ".csv"); + output_file = open_file(csv_filename, "w"); + if (output_file == NULL) exit(1); + + // Construct the name of the summary output file and open it. + char summary_filename[strlen(root) + 13]; + strcpy(summary_filename, root); + strcat(summary_filename, "_summary.csv"); + summary_file = open_file(summary_filename, "w"); + if (summary_file == NULL) exit(1); + + free(root); + + if (read_header() >= 0) { + // Allocate an array for summary statistics. + table_size = NUM_EVENT_TYPES + object_table_size + (MAX_NUM_WORKERS * 2); + summary_stats = (summary_stats_t**)calloc(table_size, sizeof(summary_stats_t*)); + + // Write a header line into the CSV file. + fprintf(output_file, "Event, Reactor, Source, Destination, Elapsed Logical Time, Microstep, Elapsed Physical Time, Trigger, Extra Delay\n"); + while (read_and_write_trace() != 0) {}; + + write_summary_file(); + + // File closing is handled by termination function. + } +} diff --git a/util/tracing/trace_to_influxdb.c b/util/tracing/trace_to_influxdb.c new file mode 100644 index 000000000..4f7f0ff85 --- /dev/null +++ b/util/tracing/trace_to_influxdb.c @@ -0,0 +1,279 @@ +/** + * @file + * @author Edward A. Lee + * @author Ravi Akella + * + * @section LICENSE +Copyright (c) 2021, The University of California at Berkeley + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + * @section DESCRIPTION + * + * Standalone program to send a Lingua Franca trace file to InfluxDB. + * InfluxDB is a database server to which data can be posted using HTTP + * or sent as a UDP datagram. + * + * ## Compiling this Program + * + * To compile this program, simply do this in this source directory: + * ``` + * sudo make install + * ``` + * This will place an executable program `trace_to_influxdb` in the directory `usr/local/bin`. 
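 *
 * If you prefer a different installation directory, the Makefile also accepts an
 * `INSTALL_PREFIX` variable (see the README in this directory), e.g.:
 * ```
 * make install INSTALL_PREFIX=~/.local/bin
 * ```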
+ * + * ## Setting up InfluxDB + * + * To set up InfluxDB, see: + * + * [https://docs.influxdata.com/influxdb/v2.0/get-started/](https://docs.influxdata.com/influxdb/v2.0/get-started/) + * + * If you have previously installed InfluxDB and you want a fresh start, do this: + * ```shell + * rm -rf ~/.influxdbv2/ + * ps aux | grep nflux + * ``` + * The second command will report any InfluxDB processes that are running. Kill them with + * ```shell + * kill -9 PID + * ``` + * where 'PID' is replaced with whatever process ID(s) are reported by the `ps` command above. + * + * To start an InfluxDB server on localhost with port 8087: + * ```shell + * influxd --http-bind-address :8087 --reporting-disabled + * ``` + * The 'reporting-disabled' option simply disables notifications to the InfluxDB mother ship. + * + * You then need to set up at least one user, organization, and bucket. You can do this by pointing your browser to + * ``` + * http://localhost:8087 + * ``` + * The browser will walk you through the process of creating a user, password, organization, and initial bucket. E.g.: + * ``` + * User: eal superSecretPassword + * Organization: iCyPhy + * Bucket: test + * ``` + * The UI in the browser will then give you the options Quick Start or Advanced, either of which you can select. + * If you select "Data" on the left, you can browse Buckets to verify that your test bucket was created. + * + * ## Uploading Trace Data to InfluxDB + * + * First, generate a trace file by setting a target parameter in a Lingua Franca program: + * ``` + * target C { + * tracing: true + * }; + * ``` + * Then, when you run this program, a binary file with extension `.lft` will be created. + * + * In your browser, in the InfluxDB UI, select Data on the left, then select the Tokens tab. + * Select a token and copy the token string to clipboard. It will looks something like this: + * ``` + * N1mK4b7z29YuWrWG_rBRJF3owaXjPA6gBVOgGG3eStS_zbESHTYJgfJWHB2JA_y3-BMYlMPVa05ccLVA1S770A== + * ``` + * Then, invoke the conversion program as follows: + * ```shell + * trace_to_influxdb Filename.lft \ + * --token N1mK4b7z29YuWrWG_rBRJF3owaXjPA6gBVOgGG3eStS_zbESHTYJgfJWHB2JA_y3-BMYlMPVa05ccLVA1S770A== + * ``` + * where 'Filename' and the token are replaced with your values. + * This will upload the trace data to InfluxDB. + * + * You can also specify the following command-line options: + * * -h, --host: The host name running InfluxDB. If not given, this defaults to "localhost". + * * -p, --port: The port for accessing InfluxDB. This defaults to 8086. If you used 8087, as shown above, then you have to give this option. + * + * The data can then be viewed in the InfluxDB browser, or you can configure an external + * tool such as Grafana to visualize it (see https://grafana.com/docs/grafana/latest/datasources/influxdb/). + */ +#define LF_TRACE +#include +#include "reactor.h" +#include "trace.h" +#include "trace_util.h" +#include "influxdb.h" + +#define MAX_NUM_REACTIONS 64 // Maximum number of reactions reported in summary stats. +#define MAX_NUM_WORKERS 64 + +/** File containing the trace binary data. */ +FILE* trace_file = NULL; + +/** Struct identifying the influx client. */ +influx_client_t influx_client; +influx_v2_client_t influx_v2_client; +/** + * Print a usage message. 
+ */ +void usage() { + printf("\nUsage: trace_to_influxdb [options] trace_file [options]\n\n"); + printf("\nOptions: \n\n"); + printf(" -t, --token TOKEN\n"); + printf(" The token for access to InfluxDB (required argument).\n\n"); + printf(" -h, --host HOSTNAME\n"); + printf(" The host name for access to InfluxDB (default is 'localhost').\n\n"); + printf(" -p, --port PORT\n"); + printf(" The port for access to InfluxDB (default is 8086).\n\n"); + printf(" -o, --ort ORGANIZATION\n"); + printf(" The organization for access to InfluxDB (default is 'iCyPhy').\n\n"); + printf(" -b, --bucket BUCKET\n"); + printf(" The bucket into which to put the data (default is 'test').\n\n"); + printf("\n\n"); +} + +/** Largest timestamp seen. */ +instant_t latest_time = 0LL; + +/** + * Read a trace in the specified file and write it to the specified CSV file. + * @return The number of records read or 0 upon seeing an EOF. + */ +size_t read_and_write_trace() { + int trace_length = read_trace(); + if (trace_length == 0) return 0; + // Write each line. + for (int i = 0; i < trace_length; i++) { + + // Ignore federated traces. + if (trace[i].event_type > federated) continue; + + char* reaction_name = "none"; + if (trace[i].dst_id >= 0) { + reaction_name = (char*)malloc(4); + snprintf(reaction_name, 4, "%d", trace[i].dst_id); + } + // printf("DEBUG: reactor self struct pointer: %p\n", trace[i].pointer); + int object_instance = -1; + char* reactor_name = get_object_description(trace[i].pointer, &object_instance); + if (reactor_name == NULL) { + reactor_name = "NO REACTOR"; + } + int trigger_instance = -1; + char* trigger_name = get_trigger_name(trace[i].trigger, &trigger_instance); + if (trigger_name == NULL) { + trigger_name = "NO TRIGGER"; + } + // FIXME: Treating physical time as the timestamp. + // Do we want this to optionally be logical time? + // FIXME: What is the difference between a TAG and F_STR (presumably, Field String)? + // Presumably, the HTTP post is formatted as a "line protocol" command. See: + // https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/ + int response_code = post_curl(&influx_v2_client, + INFLUX_MEAS(trace_event_names[trace[i].event_type]), + INFLUX_TAG("Reactor", reactor_name), + INFLUX_TAG("Reaction", reaction_name), + INFLUX_F_INT("Worker", trace[i].src_id), + INFLUX_F_INT("Logical Time", trace[i].logical_time), + INFLUX_F_INT("Microstep", trace[i].microstep), + INFLUX_F_STR("Trigger Name", trigger_name), + INFLUX_F_INT("Extra Delay", trace[i].extra_delay), + INFLUX_TS(trace[i].physical_time), + INFLUX_END + ); + if (response_code != 0) { + fprintf(stderr, "****** response code: %d\n", response_code); + return 0; + } + } + return trace_length; +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + usage(); + exit(0); + } + // Defaults. 
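+    // These defaults match the option defaults documented in usage(); each can be
+    // overridden on the command line with --token, --host, --port, --org, or --bucket.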
+ influx_v2_client.token = NULL; + influx_v2_client.host = "localhost"; + influx_v2_client.port = 8086; + influx_v2_client.org = "iCyPhy"; + influx_v2_client.bucket = "test"; + + char* filename = NULL; + + for (int i = 1; i < argc; i++) { + if (strcmp("-t", argv[i]) == 0 || strcmp("--token", argv[i]) == 0) { + if (i++ == argc - 1) { + usage(); + fprintf(stderr, "No token specified.\n"); + exit(1); + } + influx_v2_client.token = argv[i]; + } else if (strcmp("-h", argv[i]) == 0 || strcmp("--host", argv[i]) == 0) { + if (i++ == argc - 1) { + usage(); + fprintf(stderr, "No host specified.\n"); + exit(1); + } + influx_v2_client.host = argv[i]; + } else if (strcmp("-p", argv[i]) == 0 || strcmp("--port", argv[i]) == 0) { + if (i++ == argc - 1) { + usage(); + fprintf(stderr, "No port specified.\n"); + exit(1); + } + influx_v2_client.port = atoi(argv[i]); + if (influx_v2_client.port == 0) { + fprintf(stderr, "Invalid port: %s.\n", argv[i]); + } + } else if (strcmp("-o", argv[i]) == 0 || strcmp("--org", argv[i]) == 0) { + if (i++ == argc - 1) { + usage(); + fprintf(stderr, "No organization specified.\n"); + exit(1); + } + influx_v2_client.org = argv[i]; + } else if (strcmp("-b", argv[i]) == 0 || strcmp("--bucket", argv[i]) == 0) { + if (i++ == argc - 1) { + usage(); + fprintf(stderr, "No bucket specified.\n"); + exit(1); + } + influx_v2_client.bucket = argv[i]; + } else { + // Must be the filename. + filename = argv[i]; + } + } + if (influx_v2_client.token == NULL) { + fprintf(stderr, "No token specified.\n"); + exit(1); + } + if (filename == NULL) { + fprintf(stderr, "No trace file specified.\n"); + exit(1); + } + + // Open the trace file. + trace_file = open_file(filename, "r"); + + if (read_header() >= 0) { + size_t num_records = 0, result; + while ((result = read_and_write_trace()) != 0) { + num_records = result; + }; + printf("***** %zu records written to InfluxDB.\n", num_records); + // File closing is handled by termination function. + } +} diff --git a/util/tracing/trace_util.c b/util/tracing/trace_util.c new file mode 100644 index 000000000..551ec9447 --- /dev/null +++ b/util/tracing/trace_util.c @@ -0,0 +1,295 @@ +/** + * @file + * @author Edward A. Lee + * + * @section LICENSE +Copyright (c) 2020, The University of California at Berkeley + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+ * @section DESCRIPTION
+ * Common utilities used in converting Lingua Franca trace files
+ * into other formats.
+ */
+#define LF_TRACE
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "reactor.h"
+#include "trace.h"
+#include "trace_util.h"
+
+/** Buffer for reading object descriptions. Size limit is BUFFER_SIZE bytes. */
+char buffer[BUFFER_SIZE];
+
+/** Buffer for reading trace records. */
+trace_record_t trace[TRACE_BUFFER_CAPACITY];
+
+/** The start time read from the trace file. */
+instant_t start_time;
+
+/** Name of the top-level reactor (first entry in symbol table). */
+char* top_level = NULL;
+
+/** Table of pointers to the self struct of a reactor. */
+// FIXME: Replace with hash table implementation.
+object_description_t* object_table;
+int object_table_size = 0;
+
+typedef struct open_file_t open_file_t;
+typedef struct open_file_t {
+ FILE* file;
+ open_file_t* next;
+} open_file_t;
+open_file_t* _open_files = NULL;
+
+/**
+ * Function to be invoked upon exiting.
+ */
+void termination() {
+ // Free memory in object description table.
+ for (int i = 0; i < object_table_size; i++) {
+ free(object_table[i].description);
+ }
+ while (_open_files != NULL) {
+ fclose(_open_files->file);
+ open_file_t* tmp = _open_files->next;
+ free(_open_files);
+ _open_files = tmp;
+ }
+ printf("Done!\n");
+}
+
+const char PATH_SEPARATOR =
+#ifdef _WIN32
+ '\\';
+#else
+ '/';
+#endif
+
+char* root_name(const char* path) {
+ if (path == NULL) return NULL;
+
+ // Remove any path.
+ char* last_separator = strrchr(path, PATH_SEPARATOR);
+ if (last_separator != NULL) path = last_separator + 1;
+
+ // Allocate and copy name without extension.
+ char* last_period = strrchr(path, '.');
+ size_t length = (last_period == NULL) ?
+ strlen(path) : last_period - path;
+ char* result = (char*)malloc(length + 1);
+ if (result == NULL) return NULL;
+ strncpy(result, path, length);
+ result[length] = '\0';
+
+ return result;
+}
+
+FILE* open_file(const char* path, const char* mode) {
+ FILE* result = fopen(path, mode);
+ if (result == NULL) {
+ fprintf(stderr, "No file named %s.\n", path);
+ usage();
+ exit(2);
+ }
+ open_file_t* record = (open_file_t*)malloc(sizeof(open_file_t));
+ if (record == NULL) {
+ fprintf(stderr, "Out of memory.\n");
+ exit(3);
+ }
+ record->file = result;
+ record->next = _open_files;
+ _open_files = record;
+ return result;
+}
+
+/**
+ * Get the description of the object pointed to by the specified pointer.
+ * For example, this can be the name of a reactor (pointer points to
+ * the self struct) or a user-defined string.
+ * If there is no such pointer in the symbol table, return NULL.
+ * If the index argument is non-null, then put the index
+ * of the entry in the table into the int pointed to
+ * or -1 if none was found.
+ * @param pointer The pointer to an object, e.g. a self struct.
+ * @param index An optional pointer into which to write the index.
+ */
+char* get_object_description(void* pointer, int* index) {
+ // FIXME: Replace with a hash table implementation.
+ for (int i = 0; i < object_table_size; i++) {
+ if (object_table[i].pointer == pointer) {
+ if (index != NULL) {
+ *index = i;
+ }
+ return object_table[i].description;
+ }
+ }
+ if (index != NULL) {
+ // Not found. Use -1, as documented, to avoid conflating with entry 0.
+ *index = -1;
+ }
+ return NULL;
+}
+
+/**
+ * Get the trigger name for the specified pointer.
+ * If there is no such trigger, return NULL.
+ * If the index argument is non-null, then put the index
+ * of the trigger in the table into the int pointed to
+ * or -1 if none was found.
+ * @param trigger The pointer to the trigger.
+ * @param index An optional pointer into which to write the index.
+ */
+char* get_trigger_name(void* trigger, int* index) {
+ // FIXME: Replace with a hash table implementation.
+ for (int i = 0; i < object_table_size; i++) {
+ if (object_table[i].trigger == trigger && object_table[i].type == trace_trigger) {
+ if (index != NULL) {
+ *index = i;
+ }
+ return object_table[i].description;
+ }
+ }
+ if (index != NULL) {
+ // Not found. Use -1, as documented, to avoid conflating with entry 0.
+ *index = -1;
+ }
+ return NULL;
+}
+
+/**
+ * Print the table mapping traced objects to their descriptions.
+ */
+void print_table() {
+ printf("------- objects traced:\n");
+ for (int i = 0; i < object_table_size; i++) {
+ char* type;
+ if (object_table[i].type == trace_reactor) {
+ type = "reactor";
+ } else if (object_table[i].type == trace_trigger) {
+ type = "trigger";
+ } else if (object_table[i].type == trace_user) {
+ type = "user-defined";
+ } else {
+ type = "unknown type";
+ }
+ printf("pointer = %p, trigger = %p, type = %s: %s\n",
+ object_table[i].pointer,
+ object_table[i].trigger,
+ type,
+ object_table[i].description);
+ }
+ printf("-------\n");
+}
+
+/**
+ * Read header information.
+ * @return The number of objects in the object table or -1 for failure.
+ */
+size_t read_header() {
+ // Read the start time.
+ int items_read = fread(&start_time, sizeof(instant_t), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+
+ printf("Start time is %lld.\n", start_time);
+
+ // Read the table mapping pointers to descriptions.
+ // First read its length.
+ items_read = fread(&object_table_size, sizeof(int), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+
+ printf("There are %d objects traced.\n", object_table_size);
+
+ object_table = calloc(object_table_size, sizeof(object_description_t));
+ if (object_table == NULL) {
+ fprintf(stderr, "ERROR: Memory allocation failure %d.\n", errno);
+ return -1;
+ }
+
+ // Next, read each table entry.
+ for (int i = 0; i < object_table_size; i++) {
+ void* reactor;
+ items_read = fread(&reactor, sizeof(void*), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+ object_table[i].pointer = reactor;
+
+ void* trigger;
+ items_read = fread(&trigger, sizeof(trigger_t*), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+ object_table[i].trigger = trigger;
+
+ // Next, read the type.
+ _lf_trace_object_t trace_type;
+ items_read = fread(&trace_type, sizeof(_lf_trace_object_t), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+ object_table[i].type = trace_type;
+
+ // Next, read the string description into the buffer.
+ int description_length = 0;
+ char character;
+ items_read = fread(&character, sizeof(char), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+ while(character != 0 && description_length < BUFFER_SIZE - 1) {
+ buffer[description_length++] = character;
+ items_read = fread(&character, sizeof(char), 1, trace_file);
+ if (items_read != 1) _LF_TRACE_FAILURE(trace_file);
+ }
+ // Terminate with null.
+ buffer[description_length++] = 0;
+
+ // Allocate memory to store the description.
+ object_table[i].description = malloc(description_length);
+ strcpy(object_table[i].description, buffer);
+
+ if (top_level == NULL) {
+ top_level = object_table[i].description;
+ }
+ }
+ print_table();
+ return object_table_size;
+}
+
+/**
+ * Read the trace from the specified file and put it in the trace global
+ * variable. Return the length of the trace.
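+ * At most TRACE_BUFFER_CAPACITY records are read per call; a longer chunk in
+ * the file is reported as an error.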
+ * @return The number of trace record read or 0 upon seeing an EOF. + */ +int read_trace() { + // Read first the int giving the length of the trace. + int trace_length; + int items_read = fread(&trace_length, sizeof(int), 1, trace_file); + if (items_read != 1) { + if (feof(trace_file)) return 0; + fprintf(stderr, "Failed to read trace length.\n"); + exit(3); + } + if (trace_length > TRACE_BUFFER_CAPACITY) { + fprintf(stderr, "ERROR: Trace length %d exceeds capacity. File is garbled.\n", trace_length); + exit(4); + } + // printf("DEBUG: Trace of length %d being converted.\n", trace_length); + + items_read = fread(&trace, sizeof(trace_record_t), trace_length, trace_file); + if (items_read != trace_length) { + fprintf(stderr, "Failed to read trace of length %d.\n", trace_length); + exit(5); + } + return trace_length; +} diff --git a/util/tracing/trace_util.h b/util/tracing/trace_util.h new file mode 100644 index 000000000..dab1f5e98 --- /dev/null +++ b/util/tracing/trace_util.h @@ -0,0 +1,134 @@ +/** + * @file + * @author Edward A. Lee + * + * @section LICENSE +Copyright (c) 2020, The University of California at Berkeley + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF +THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + * @section DESCRIPTION + * Header file for common utilities used in converting Lingua Franca trace files + * into other formats. + */ +#define LF_TRACE +#include "reactor.h" +#include "trace.h" + +/** Macro to use when access to trace file fails. */ +#define _LF_TRACE_FAILURE(trace_file) \ + do { \ + fprintf(stderr, "ERROR: Access to trace file failed.\n"); \ + fclose(trace_file); \ + trace_file = NULL; \ + exit(1); \ + } while(0) + +/** Buffer for reading object descriptions. Size limit is BUFFER_SIZE bytes. */ +#define BUFFER_SIZE 1024 + +/** Buffer for reading trace records. */ +extern trace_record_t trace[]; + +/** File containing the trace binary data. */ +extern FILE* trace_file; + +/** File for writing the output data. */ +extern FILE* output_file; + +/** File for writing summary statistics. */ +extern FILE* summary_file; + +/** + * Print a usage message. + */ +void usage(); + +/** The start time read from the trace file. */ +extern instant_t start_time; + +/** Table of pointers to a description of the object. 
*/ +extern object_description_t* object_table; +extern int object_table_size; + +/** Name of the top-level reactor (first entry in symbol table). */ +extern char* top_level; + +/** + * @brief Return the root file name from the given path. + * Given a path to a file, this function returns a dynamically + * allocated string (which you must free) that points to the root + * filename without the preceding path and without the file extension. + * @param path The path including the full filename. + * @return The root name of the file or NULL for failure. + */ +char* root_name(const char* path); + +/** + * @brief Open the specified file for reading or writing. + * This function records the file for closing at termination. + * @param path The path to the file. + * @param mode "r" for reading and "w" for writing. + * @return A pointer to the open file or NULL for failure. + */ +FILE* open_file(const char* path, const char* mode); + +/** + * Get the description of the object pointed to by the specified pointer. + * For example, this can be the name of a reactor (pointer points to + * the self struct) or a user-defined string. + * If there is no such pointer in the symbol table, return NULL. + * If the index argument is non-null, then put the index + * of the entry in the table into the int pointed to + * or -1 if none was found. + * @param pointer The pointer to to an object, e.g. a self struct. + * @param index An optional pointer into which to write the index. + */ +char* get_object_description(void* reactor, int* index); + +/** + * Get the trigger name for the specified pointer. + * If there is no such trigger, return NULL. + * If the index argument is non-null, then put the index + * of the trigger in the table into the int pointed to + * or -1 if none was found. + * @param reactor The pointer to a self struct. + * @param index An optional pointer into which to write the index. + */ +char* get_trigger_name(void* trigger, int* index); + +/** + * Print the object to description table. + */ +void print_table(); + +/** + * Read header information. + * @return The number of objects in the object table or -1 for failure. + */ +size_t read_header(); + +/** + * Read the trace from the specified file and put it in the trace global + * variable. Return the length of the trace. + * @return The number of trace record read or 0 upon seeing an EOF. + */ +int read_trace(); diff --git a/util/tracing/visualization/.gitignore b/util/tracing/visualization/.gitignore new file mode 100644 index 000000000..c18dd8d83 --- /dev/null +++ b/util/tracing/visualization/.gitignore @@ -0,0 +1 @@ +__pycache__/ diff --git a/util/tracing/visualization/README.md b/util/tracing/visualization/README.md new file mode 100644 index 000000000..c74803663 --- /dev/null +++ b/util/tracing/visualization/README.md @@ -0,0 +1,35 @@ +# Trace sequence diagram visualizer + +`fedsd` is a utility that reports the interactions (exchanged messages) +between federates and the RTI in a sequence-diagram-like format. + +To use `fedsd`, you need to first obtain an execution trace. To do this, enable the tracing mechanism in your Lingua Franca program by setting the `tracing` target property to `true` and then compile and run the program. + +This utility starts by transforming each `.lft` file into a `.csv` file, by +internally running `trace_to_csv`. It then aggregates the data from all `.csv` +files to do the matching and draw the sequence diagram. + +# Installing +`fedsd` is installed together with the rest of the tracing tools. 
For instructions refer to `util/tracing/README.md`.
+
+
+# Running
+
+If the federation is launched using the `bash` script under `bin`, an `.lft` trace
+file will be generated for each of the federates, in addition to `rti.lft`, which
+contains the RTI trace.
+
+If, however, the federation is launched manually, then the `RTI` command should be passed the `-t` flag to make sure that it, too, has tracing enabled:
+```
+$ RTI -n -t
+```
+
+It is most convenient to launch the RTI and all federates from the same working directory so that they will all write their trace file to that directory.
+
+Once the federation has stopped executing, run `fedsd` on all generated `.lft` files:
+```
+$ fedsd *.lft
+```
+
+The output is an HTML file named `trace_svg.html` (in the current directory) that contains the sequence of interactions
+between the federates and the RTI.
diff --git a/util/tracing/visualization/fedsd.py b/util/tracing/visualization/fedsd.py
new file mode 100644
index 000000000..ce77998b5
--- /dev/null
+++ b/util/tracing/visualization/fedsd.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python3
+'''
+Define arrows:
+ (x1, y1) ==> (x2, y2), when there is a unique match (this arrow will be tilted)
+ (x1, y1) --> (x2, y2), when there is a possible match (could be not tilted)?
+If not an arrow, then a triangle with text.
+
+In the dataframe, each row will be marked with one of these values:
+ - 'arrow': draw a non-dashed arrow
+ - 'dot': draw a dot only
+ - 'marked': marked, not to be drawn
+ - 'pending': pending
+ - 'adv': for reporting logical time advancing, draw a simple dash
+'''
+
+# Styles to determine appearance:
+css_style = ' \
+'
+
+import argparse # For arguments parsing
+import pandas as pd # For csv manipulation
+from os.path import exists
+from pathlib import Path
+import math
+import fedsd_helper as fhlp
+
+# Define the arguments to pass in the command line
+parser = argparse.ArgumentParser(description='Set of the csv trace files to render.')
+parser.add_argument('-r','--rti', type=str, default="rti.csv",
+ help='RTI csv trace file.')
+parser.add_argument('-f','--federates', nargs='+', action='append',
+ help='List of the federates csv trace files.')
+
+# Events matching at the sender and receiver ends depend on whether they are tagged
+# (the elapsed logical time and microstep have to be the same) or not.
+# Set of non-tagged events (messages)
+non_tagged_messages = {'FED_ID', 'ACK', 'REJECT', 'ADR_RQ', 'ADR_AD', 'MSG', 'P2P_MSG'}
+
+def load_and_process_csv_file(csv_file) :
+ '''
+ Loads and processes the csv entries, based on the type of the actor (if RTI
+ or federate).
+
+ Args:
+ * csv_file: String file name
+ Returns:
+ * The processed dataframe.
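+ The columns of the returned dataframe are: event, self_id, partner_id,
+ logical_time, microstep, physical_time and inout.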
+ ''' + # Load tracepoints, rename the columns and clean non useful data + df = pd.read_csv(csv_file) + df.columns = ['event', 'reactor', 'self_id', 'partner_id', 'logical_time', 'microstep', 'physical_time', 't', 'ed'] + df = df.drop(columns=['reactor', 't', 'ed']) + + # Remove all the lines that do not contain communication information + # which boils up to having 'RTI' in the 'event' column + df = df[df['event'].str.contains('Sending|Receiving|Scheduler advancing time ends') == True] + + # Fix the parameters of the event 'Scheduler advancing time ends' + # We rely on the fact that the first row of the csv file cannot be the end of advancing time + id = df.iloc[-1]['self_id'] + df['self_id'] = id + df = df.astype({'self_id': 'int', 'partner_id': 'int'}) + + # Add an inout column to set the arrow direction + df['inout'] = df['event'].apply(lambda e: 'in' if 'Receiving' in e else 'out') + + # Prune event names + df['event'] = df['event'].apply(lambda e: fhlp.prune_event_name[e]) + return df + + +if __name__ == '__main__': + args = parser.parse_args() + + # The RTI and each of the federates have a fixed x coordinate. They will be + # saved in a dict + x_coor = {} + actors = [] + actors_names = {} + padding = 50 + spacing = 200 # Spacing between federates + + # Set the RTI x coordinate + x_coor[-1] = padding * 2 + actors.append(-1) + actors_names[-1] = "RTI" + + trace_df = pd.DataFrame() + + ############################################################################ + #### Federates trace processing + ############################################################################ + # Loop over the given list of federates trace files + if (args.federates) : + for fed_trace in args.federates[0]: + if (not exists(fed_trace)): + print('Warning: Trace file ' + fed_trace + ' does not exist! Will resume though') + continue + try: + fed_df = load_and_process_csv_file(fed_trace) + except Exception as e: + print(f"Warning: Problem processing trace file {fed_trace}: `{e}`") + continue + + if (not fed_df.empty): + # Get the federate id number + fed_id = fed_df.iloc[-1]['self_id'] + # Add to the list of sequence diagram actors and add the name + actors.append(fed_id) + actors_names[fed_id] = Path(fed_trace).stem + # Derive the x coordinate of the actor + x_coor[fed_id] = (padding * 2) + (spacing * (len(actors) - 1)) + fed_df['x1'] = x_coor[fed_id] + trace_df = pd.concat([trace_df, fed_df]) + fed_df = fed_df[0:0] + + + ############################################################################ + #### RTI trace processing, if any + ############################################################################ + if (exists(args.rti)): + rti_df = load_and_process_csv_file(args.rti) + rti_df['x1'] = x_coor[-1] + else: + # If there is no RTI, derive one. + # This is particularly useful for tracing enclaves + # FIXME: Currently, `fedsd` is used either for federates OR enclaves. + # As soon as there is a consensus on how to visualize federations where + # a federate has several enclves, the utility will be updated. 
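+ # Mirror the federate-side records: swap the self/partner id columns and flip
+ # the 'inout' direction, so that every Sending record gains the Receiving
+ # record it pairs with (and vice versa) when arrows are matched later.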
+ rti_df = trace_df[['event', 'self_id', 'partner_id', 'logical_time', 'microstep', 'physical_time', 'inout']].copy() + rti_df = rti_df[rti_df['event'].str.contains('AdvLT') == False] + rti_df.columns = ['event', 'partner_id', 'self_id', 'logical_time', 'microstep', 'physical_time', 'inout'] + rti_df['inout'] = rti_df['inout'].apply(lambda e: 'in' if 'out' in e else 'out') + rti_df['x1'] = rti_df['self_id'].apply(lambda e: x_coor[int(e)]) + + trace_df = pd.concat([trace_df, rti_df]) + + # Sort all traces by physical time and then reset the index + trace_df = trace_df.sort_values(by=['physical_time']) + trace_df = trace_df.reset_index(drop=True) + + # Add the Y column and initialize it with the padding value + trace_df['y1'] = math.ceil(padding * 3 / 2) # Or set a small shift + + ############################################################################ + #### Compute the 'y1' coordinates + ############################################################################ + ppt = 0 # Previous physical time + cpt = 0 # Current physical time + py = 0 # Previous y + min = 15 # Minimum spacing between events when time has not advanced. + scale = 1 # Will probably be set manually + first_pass = True + for index, row in trace_df.iterrows(): + if (not first_pass) : + cpt = row['physical_time'] + # print('cpt = '+str(cpt)+' and ppt = '+str(ppt)) + # From the email: + # Y = T_previous + min + log10(1 + (T - T_previous)*scale) + # But rather think it should be: + if (cpt != ppt) : + py = math.ceil(py + min + (1 + math.log10(cpt - ppt) * scale)) + trace_df.at[index, 'y1'] = py + + ppt = row['physical_time'] + py = trace_df.at[index, 'y1'] + first_pass = False + + ############################################################################ + #### Derive arrows that match sided communications + ############################################################################ + # Intialize all rows as pending to be matched + trace_df['arrow'] = 'pending' + trace_df['x2'] = -1 + trace_df['y2'] = -1 + + # Iterate and check possible sides + for index in trace_df.index: + # If the tracepoint is pending, proceed to look for a match + if (trace_df.at[index,'arrow'] == 'pending') : + # Look for a match only if it is not about advancing time + if (trace_df.at[index,'event'] == 'AdvLT') : + trace_df.at[index,'arrow'] = 'adv' + continue + self_id = trace_df.at[index,'self_id'] + partner_id = trace_df.at[index,'partner_id'] + event = trace_df.at[index,'event'] + logical_time = trace_df.at[index, 'logical_time'] + microstep = trace_df.at[index, 'microstep'] + inout = trace_df.at[index, 'inout'] + + # Match tracepoints + # Depends on whether the event is tagged or not + if (trace_df.at[index,'event'] not in non_tagged_messages): + matching_df = trace_df[\ + (trace_df['inout'] != inout) & \ + (trace_df['self_id'] == partner_id) & \ + (trace_df['partner_id'] == self_id) & \ + (trace_df['arrow'] == 'pending') & \ + (trace_df['event'] == event) & \ + (trace_df['logical_time'] == logical_time) & \ + (trace_df['microstep'] == microstep) \ + ] + else : + matching_df = trace_df[\ + (trace_df['inout'] != inout) & \ + (trace_df['self_id'] == partner_id) & \ + (trace_df['partner_id'] == self_id) & \ + (trace_df['arrow'] == 'pending') & \ + (trace_df['event'] == event) + ] + + if (matching_df.empty) : + # If no matching receiver, than set the arrow to 'dot', + # meaning that only a dot will be rendered + trace_df.at[index, 'arrow'] = 'dot' + else: + # If there is one or more matching rows, then consider + # the first one + matching_index 
= matching_df.index[0] + matching_row = matching_df.loc[matching_index] + if (inout == 'out'): + trace_df.at[index, 'x2'] = matching_row['x1'] + trace_df.at[index, 'y2'] = matching_row['y1'] + else: + trace_df.at[index, 'x2'] = trace_df.at[index, 'x1'] + trace_df.at[index, 'y2'] = trace_df.at[index, 'y1'] + trace_df.at[index, 'x1'] = matching_row['x1'] + trace_df.at[index, 'y1'] = matching_row['y1'] + + # Mark it, so not to consider it anymore + trace_df.at[matching_index, 'arrow'] = 'marked' + trace_df.at[index, 'arrow'] = 'arrow' + + ############################################################################ + #### Write to svg file + ############################################################################ + svg_width = padding * 2 + (len(actors) - 1) * spacing + padding * 2 + 200 + svg_height = padding + trace_df.iloc[-1]['y1'] + + with open('trace_svg.html', 'w', encoding='utf-8') as f: + # Print header + f.write('\n') + f.write('\n') + f.write('\n\n') + + f.write('\n') + + f.write(css_style) + + # Print the circles and the names + for key in x_coor: + title = actors_names[key] + if (key == -1): + f.write(fhlp.svg_string_comment('RTI Actor and line')) + center = 15 + else: + f.write(fhlp.svg_string_comment('Federate '+str(key)+': ' + title + ' Actor and line')) + center = 5 + f.write(fhlp.svg_string_draw_line(x_coor[key], math.ceil(padding/2), x_coor[key], svg_height, False)) + f.write('\t\n') + f.write('\t'+title+'\n') + + # Now, we need to iterate over the traces to draw the lines + f.write(fhlp.svg_string_comment('Draw interactions')) + for index, row in trace_df.iterrows(): + # For time labels, display them on the left for the RTI, right for everthing else. + anchor = 'start' + if (row['self_id'] < 0): + anchor = 'end' + + # formatted physical time. + # FIXME: Using microseconds is hardwired here. 
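+ # Assumption: physical_time is recorded in nanoseconds, so the division by
+ # 1000 below yields the microseconds mentioned in the FIXME above.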
+ physical_time = f'{int(row["physical_time"]/1000):,}' + + if (row['event'] in {'FED_ID', 'ACK', 'REJECT', 'ADR_RQ', 'ADR_AD', 'MSG', 'P2P_MSG'}): + label = row['event'] + else: + label = row['event'] + '(' + f'{int(row["logical_time"]):,}' + ', ' + str(row['microstep']) + ')' + + if (row['arrow'] == 'arrow'): + f.write(fhlp.svg_string_draw_arrow(row['x1'], row['y1'], row['x2'], row['y2'], label, row['event'])) + if (row['inout'] in 'in'): + f.write(fhlp.svg_string_draw_side_label(row['x2'], row['y2'], physical_time, anchor)) + else: + f.write(fhlp.svg_string_draw_side_label(row['x1'], row['y1'], physical_time, anchor)) + elif (row['arrow'] == 'dot'): + if (row['inout'] == 'in'): + label = "(in) from " + str(row['partner_id']) + ' ' + label + else : + label = "(out) to " + str(row['partner_id']) + ' ' + label + + if (anchor == 'end'): + f.write(fhlp.svg_string_draw_side_label(row['x1'], row['y1'], physical_time, anchor)) + f.write(fhlp.svg_string_draw_dot(row['x1'], row['y1'], label)) + else: + f.write(fhlp.svg_string_draw_dot_with_time(row['x1'], row['y1'], physical_time, label)) + + elif (row['arrow'] == 'marked'): + f.write(fhlp.svg_string_draw_side_label(row['x1'], row['y1'], physical_time, anchor)) + + elif (row['arrow'] == 'adv'): + f.write(fhlp.svg_string_draw_adv(row['x1'], row['y1'], label)) + + f.write('\n\n\n') + + # Print footer + f.write('\n') + f.write('\n') + + # Write to a csv file, just to double check + trace_df.to_csv('all.csv', index=True) diff --git a/util/tracing/visualization/fedsd_helper.py b/util/tracing/visualization/fedsd_helper.py new file mode 100644 index 000000000..37b339669 --- /dev/null +++ b/util/tracing/visualization/fedsd_helper.py @@ -0,0 +1,255 @@ +import math + +# Disctionary for pruning event names. Usefule for tracepoint matching and +# communication rendering +prune_event_name = { + "Sending ACK": "ACK", + "Sending TIMESTAMP": "TIMESTAMP", + "Sending NET": "NET", + "Sending LTC": "LTC", + "Sending STOP_REQ": "STOP_REQ", + "Sending STOP_REQ_REP": "STOP_REQ_REP", + "Sending STOP_GRN": "STOP_GRN", + "Sending FED_ID": "FED_ID", + "Sending PTAG": "PTAG", + "Sending TAG": "TAG", + "Sending REJECT": "REJECT", + "Sending RESIGN": "RESIGN", + "Sending PORT_ABS": "ABS", + "Sending CLOSE_RQ": "CLOSE_RQ", + "Sending TAGGED_MSG": "T_MSG", + "Sending P2P_TAGGED_MSG": "P2P_T_MSG", + "Sending MSG": "MSG", + "Sending P2P_MSG": "P2P_MSG", + "Sending ADR_AD": "ADR_AD", + "Sending ADR_QR": "ADR_QR", + "Receiving ACK": "ACK", + "Receiving TIMESTAMP": "TIMESTAMP", + "Receiving NET": "NET", + "Receiving LTC": "LTC", + "Receiving STOP_REQ": "STOP_REQ", + "Receiving STOP_REQ_REP": "STOP_REQ_REP", + "Receiving STOP_GRN": "STOP_GRN", + "Receiving FED_ID": "FED_ID", + "Receiving PTAG": "PTAG", + "Receiving TAG": "TAG", + "Receiving REJECT": "REJECT", + "Receiving RESIGN": "RESIGN", + "Receiving PORT_ABS": "ABS", + "Receiving CLOSE_RQ": "CLOSE_RQ", + "Receiving TAGGED_MSG": "T_MSG", + "Receiving P2P_TAGGED_MSG": "P2P_T_MSG", + "Receiving MSG": "MSG", + "Receiving P2P_MSG": "P2P_MSG", + "Receiving ADR_AD": "ADR_AD", + "Receiving ADR_QR": "ADR_QR", + "Receiving UNIDENTIFIED": "UNIDENTIFIED", + "Scheduler advancing time ends": "AdvLT" +} + +prune_event_name.setdefault(" ", "UNIDENTIFIED") + +################################################################################ +### Routines to write to csv file +################################################################################ + +def svg_string_draw_line(x1, y1, x2, y2, type=''): + ''' + Constructs the svg html string 
to draw a line from (x1, y1) to (x2, y2). + + Args: + * x1: Int X coordinate of the source point + * y1: Int Y coordinate of the source point + * x2: Int X coordinate of the sink point + * y2: Int Y coordinate of the sink point + * type: The type of the message (for styling) + Returns: + * String: the svg string of the lineĀ© + ''' + str_line = '\t\n' + return str_line + + +def svg_string_draw_arrow_head(x1, y1, x2, y2, type='') : + ''' + Constructs the svg html string to draw the arrow end + + Args: + * x1: Int X coordinate of the source point + * y1: Int Y coordinate of the source point + * x2: Int X coordinate of the sink point + * y2: Int Y coordinate of the sink point + * type: The type (for styling) + Returns: + * String: the svg string of the triangle + ''' + + if (y2 != y1): + rotation = - math.ceil(math.atan((x2-x1)/(y2-y1)) * 180 / 3.14) - 90 + else: + if (x1 > x2): + rotation = 0 + else: + rotation = - 180 + + style = '' + if (type): + style = ' class="'+type+'"' + + str_line = '' + if (x1 > x2) : + str_line = '\t\n' + else : + str_line = '\t\n' + + return str_line + + +def svg_string_draw_label(x1, y1, x2, y2, label) : + ''' + Computes the rotation angle of the text and then constructs the svg string. + + Args: + * x1: Int X coordinate of the source point + * y1: Int Y coordinate of the source point + * x2: Int X coordinate of the sink point + * y2: Int Y coordinate of the sink point + * label: The label to draw + Returns: + * String: the svg string of the text + ''' + # FIXME: Need further improvement, based of the position of the arrows + # FIXME: Rotation value is not that accurate. + if (x2 < x1) : + # Left-going arrow. + if (y2 != y1): + rotation = - math.ceil(math.atan((x2-x1)/(y2-y1)) * 180 / 3.14) - 90 + else: + rotation = 0 + + str_line = '\t'+label+'\n' + else : + # Right-going arrow. + if (y2 != y1): + rotation = - math.ceil(math.atan((x1-x2)/(y1-y2)) * 180 / 3.14) + 90 + else: + rotation = 0 + str_line = '\t'+label+'\n' + #print('rot = '+str(rotation)+' x1='+str(x1)+' y1='+str(y1)+' x2='+str(x2)+' y2='+str(y2)) + return str_line + + +def svg_string_draw_arrow(x1, y1, x2, y2, label, type=''): + ''' + Constructs the svg html string to draw the arrow from (x1, y1) to (x2, y2). + The arrow end is constructed, together with the label + + Args: + * x1: Int X coordinate of the source point + * y1: Int Y coordinate of the source point + * x2: Int X coordinate of the sink point + * y2: Int Y coordinate of the sink point + * label: String Label to draw on top of the arrow + * type: The type of the message + Returns: + * String: the svg string of the arrow + ''' + str_line1 = svg_string_draw_line(x1, y1, x2, y2, type) + str_line2 = svg_string_draw_arrow_head(x1, y1, x2, y2, type) + str_line3 = svg_string_draw_label(x1, y1, x2, y2, label) + return str_line1 + str_line2 + str_line3 + +def svg_string_draw_side_label(x, y, label, anchor="start") : + ''' + Put a label to the right of the x, y point, + unless x is small, in which case put it to the left. + + Args: + * x: Int X coordinate of the source point + * y: Int Y coordinate of the source point + * label: Label to put by the point. + * anchor: One of "start", "middle", or "end" to specify the text-anchor. + Returns: + * String: the svg string of the text + ''' + offset = 5 + if (anchor == 'end'): + offset = -5 + elif (anchor == 'middle'): + offset = 0 + str_line = '\t'+label+'\n' + + return str_line + +def svg_string_comment(comment): + ''' + Constructs the svg html string to write a comment into an svg file. 
+ + Args: + * comment: String Comment to add + Returns: + * String: the svg string of the comment + ''' + str_line = '\n\t\n' + return str_line + + +def svg_string_draw_dot(x, y, label) : + ''' + Constructs the svg html string to draw at a dot. + + Args: + * x: Int X coordinate of the dot + * y: Int Y coordinate of the dot + * label: String to draw + Returns: + * String: the svg string of the triangle + ''' + str_line = '' + str_line = '\t\n' + str_line = str_line + '\t'+label+'\n' + return str_line + +def svg_string_draw_dot_with_time(x, y, time, label) : + ''' + Constructs the svg html string to draw at a dot with a prefixed physical time. + + Args: + * x: Int X coordinate of the dot + * y: Int Y coordinate of the dot + * time: The time + * label: String to draw + Returns: + * String: the svg string of the triangle + ''' + str_line = '' + str_line = '\t\n' + str_line = str_line + '\t '+time+': '+label+'\n' + return str_line + +def svg_string_draw_adv(x, y, label) : + ''' + Constructs the svg html string to draw at a dash, meaning that logical time is advancing there. + + Args: + * x: Int X coordinate of the dash + * y: Int Y coordinate of the dash + * label: String to draw + Returns: + * String: the svg string of the triangle + ''' + str_line1 = svg_string_draw_line(x-5, y, x+5, y, "ADV") + str_line2 = svg_string_draw_side_label(x, y, label) + return str_line1 + str_line2
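A note on the vertical layout that `fedsd.py` computes before drawing: events are stacked down the page, and the gap between two consecutive events grows only logarithmically with the physical-time difference between them, so long idle periods do not stretch the diagram. The snippet below is a minimal sketch of that spacing rule as a standalone function; the names `next_y` and `min_gap` are illustrative and not part of the utility.

```
import math

def next_y(prev_y, prev_time, cur_time, min_gap=15, scale=1):
    """Y coordinate of the next event, log-compressing large physical-time gaps."""
    if cur_time == prev_time:
        # Events at the same physical time share a y coordinate.
        return prev_y
    return math.ceil(prev_y + min_gap + (1 + math.log10(cur_time - prev_time) * scale))

# Example: events at t = 0 ns, 1,000 ns, and 1,000,000 ns.
y = 75  # roughly the initial padding used for the first event
for t_prev, t_cur in [(0, 1_000), (1_000, 1_000_000)]:
    y = next_y(y, t_prev, t_cur)
    print(t_cur, y)  # prints 1000 94, then 1000000 116
```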