diff --git a/.gitignore b/.gitignore
index 742de2ac4..b8237101d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@
/docs/api
**/.vscode/
**/build/
-**/lib/
**/.DS_Store
/core/federated/RTI/build/
/cmake-build-debug/
diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt
index 8eec31eb8..e4a9f1b6c 100644
--- a/core/CMakeLists.txt
+++ b/core/CMakeLists.txt
@@ -1,17 +1,7 @@
set(CORE_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/..)
-if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
- set(CMAKE_SYSTEM_VERSION 10.0)
- message("Using Windows SDK version ${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}")
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52")
- list(APPEND REACTORC_COMPILE_DEFS PLATFORM_NRF52)
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
- list(APPEND REACTORC_COMPILE_DEFS PLATFORM_ZEPHYR)
- set(PLATFORM_ZEPHYR true)
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040")
- list(APPEND REACTORC_COMPILE_DEFS PLATFORM_RP2040)
-endif()
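+# lf_utils.cmake defines lf_enable_compiler_warnings(), which is applied to the reactor-c target below.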
+include(${LF_ROOT}/core/lf_utils.cmake)
# Get the general common sources for reactor-c
list(APPEND GENERAL_SOURCES tag.c clock.c port.c mixed_radix.c reactor_common.c lf_token.c environment.c)
@@ -50,23 +40,18 @@ endif()
# Include sources from subdirectories
include(utils/CMakeLists.txt)
+
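+# Only include the modal-model sources when the program uses modal reactors.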
+if (DEFINED MODAL_REACTORS)
include(modal_models/CMakeLists.txt)
+endif()
# Print sources used for compilation
list(JOIN REACTORC_SOURCES ", " PRINTABLE_SOURCE_LIST)
message(STATUS "Including the following sources: " ${PRINTABLE_SOURCE_LIST})
-# Create the reactor-c library. If we are targeting Zephyr we have to use the
-# Zephyr Cmake extension to create the library and add the sources.
-if(PLATFORM_ZEPHYR)
- message("--- Building Zephyr library")
- zephyr_library_named(reactor-c)
- zephyr_library_sources(${REACTORC_SOURCES})
- zephyr_library_link_libraries(kernel)
-else()
- add_library(reactor-c)
- target_sources(reactor-c PRIVATE ${REACTORC_SOURCES})
-endif()
+add_library(reactor-c)
+target_sources(reactor-c PRIVATE ${REACTORC_SOURCES})
+lf_enable_compiler_warnings(reactor-c)
if (DEFINED LF_TRACE)
include(${LF_ROOT}/trace/api/CMakeLists.txt)
@@ -98,9 +83,6 @@ include(${LF_ROOT}/platform/impl/CMakeLists.txt)
target_link_libraries(reactor-c PUBLIC lf::platform-api)
target_link_libraries(reactor-c PRIVATE lf::platform-impl)
-# Apply compile definitions to the reactor-c library.
-target_compile_definitions(reactor-c PUBLIC ${REACTORC_COMPILE_DEFS})
-
target_include_directories(reactor-c PUBLIC ../include)
target_include_directories(reactor-c PUBLIC ../include/core)
target_include_directories(reactor-c PUBLIC ../include/core/federated)
@@ -134,14 +116,18 @@ if(DEFINED _LF_CLOCK_SYNC_ON)
endif()
endif()
-# Link with thread library, unless we are on the Zephyr platform.
-if(NOT DEFINED LF_SINGLE_THREADED OR DEFINED LF_TRACE)
- if (NOT PLATFORM_ZEPHYR)
- find_package(Threads REQUIRED)
- target_link_libraries(reactor-c PUBLIC Threads::Threads)
- endif()
+# Unless specified otherwise, default the initial event queue and reaction queue sizes to 10.
+if (NOT DEFINED INITIAL_EVENT_QUEUE_SIZE)
+ set(INITIAL_EVENT_QUEUE_SIZE 10)
+endif()
+if (NOT DEFINED INITIAL_REACT_QUEUE_SIZE)
+ set(INITIAL_REACT_QUEUE_SIZE 10)
endif()
+target_compile_definitions(reactor-c PRIVATE INITIAL_EVENT_QUEUE_SIZE=${INITIAL_EVENT_QUEUE_SIZE})
+target_compile_definitions(reactor-c PRIVATE INITIAL_REACT_QUEUE_SIZE=${INITIAL_REACT_QUEUE_SIZE})
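+# The platform define (e.g., PLATFORM_ZEPHYR) is derived directly from CMAKE_SYSTEM_NAME.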
+target_compile_definitions(reactor-c PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME})
+
# Macro for translating a command-line argument into compile definition for
# reactor-c lib
macro(define X)
@@ -151,12 +137,6 @@ macro(define X)
endif(DEFINED ${X})
endmacro()
-# FIXME: May want these to be application dependent, hence passed as
-# parameters to Cmake.
-target_compile_definitions(reactor-c PRIVATE INITIAL_EVENT_QUEUE_SIZE=10)
-target_compile_definitions(reactor-c PRIVATE INITIAL_REACT_QUEUE_SIZE=10)
-target_compile_definitions(reactor-c PUBLIC PLATFORM_${CMAKE_SYSTEM_NAME})
-
# Search and apply all possible compile definitions
message(STATUS "Applying preprocessor definitions...")
define(_LF_CLOCK_SYNC_ATTENUATION)
diff --git a/core/environment.c b/core/environment.c
index 7189090e4..4523c4721 100644
--- a/core/environment.c
+++ b/core/environment.c
@@ -38,6 +38,31 @@
#include "scheduler.h"
#endif
+//////////////////
+// Local functions, not intended for use outside this file.
+
+/**
+ * @brief Callback function to determine whether two events have the same trigger.
+ * This function is used by the event queue and the recycle queue.
+ * @param event1 A pointer to an event.
+ * @param event2 A pointer to an event.
+ * @return 1 if the triggers are identical, 0 otherwise.
+ */
+static int event_matches(void* event1, void* event2) {
+ return (((event_t*)event1)->trigger == ((event_t*)event2)->trigger);
+}
+
+/**
+ * @brief Callback function to print information about an event.
+ * This function is used by the event queue and the recycle queue.
+ * @param event A pointer to an event.
+ */
+static void print_event(void* event) {
+ event_t* e = (event_t*)event;
+ LF_PRINT_DEBUG("tag: " PRINTF_TAG ", trigger: %p, token: %p", e->base.tag.time, e->base.tag.microstep,
+ (void*)e->trigger, (void*)e->token);
+}
+
/**
* @brief Initialize the threaded part of the environment struct.
*/
@@ -53,9 +78,12 @@ static void environment_init_threaded(environment_t* env, int num_workers) {
LF_MUTEX_INIT(&env->mutex);
LF_COND_INIT(&env->event_q_changed, &env->mutex);
LF_COND_INIT(&env->global_tag_barrier_requestors_reached_zero, &env->mutex);
-
+#else
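+  // Cast unused parameters to void to avoid unused-parameter warnings in the single-threaded build.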
+ (void)env;
+ (void)num_workers;
#endif
}
+
/**
* @brief Initialize the single-threaded-specific parts of the environment struct.
*/
@@ -67,6 +95,8 @@ static void environment_init_single_threaded(environment_t* env) {
env->reaction_q = pqueue_init(INITIAL_REACT_QUEUE_SIZE, in_reverse_order, get_reaction_index, get_reaction_position,
set_reaction_position, reaction_matches, print_reaction);
+#else
+ (void)env;
#endif
}
@@ -97,6 +127,10 @@ static void environment_init_modes(environment_t* env, int num_modes, int num_st
} else {
env->modes = NULL;
}
+#else
+ (void)env;
+ (void)num_modes;
+ (void)num_state_resets;
#endif
}
@@ -113,31 +147,26 @@ static void environment_init_federated(environment_t* env, int num_is_present_fi
env->_lf_intended_tag_fields = NULL;
env->_lf_intended_tag_fields_size = 0;
}
+#else
+ (void)env;
+ (void)num_is_present_fields;
#endif
}
-void environment_init_tags(environment_t* env, instant_t start_time, interval_t duration) {
- env->current_tag = (tag_t){.time = start_time, .microstep = 0u};
-
- tag_t stop_tag = FOREVER_TAG_INITIALIZER;
- if (duration >= 0LL) {
- // A duration has been specified. Calculate the stop time.
- stop_tag.time = env->current_tag.time + duration;
- stop_tag.microstep = 0;
- }
- env->stop_tag = stop_tag;
-}
-
static void environment_free_threaded(environment_t* env) {
#if !defined(LF_SINGLE_THREADED)
free(env->thread_ids);
lf_sched_free(env->scheduler);
+#else
+ (void)env;
#endif
}
static void environment_free_single_threaded(environment_t* env) {
#ifdef LF_SINGLE_THREADED
pqueue_free(env->reaction_q);
+#else
+ (void)env;
#endif
}
@@ -148,15 +177,22 @@ static void environment_free_modes(environment_t* env) {
free(env->modes->state_resets);
free(env->modes);
}
+#else
+ (void)env;
#endif
}
static void environment_free_federated(environment_t* env) {
#ifdef FEDERATED_DECENTRALIZED
free(env->_lf_intended_tag_fields);
+#else
+ (void)env;
#endif
}
+//////////////////
+// Functions defined in environment.h.
+
void environment_free(environment_t* env) {
free(env->name);
free(env->timer_triggers);
@@ -165,9 +201,8 @@ void environment_free(environment_t* env) {
free(env->reset_reactions);
free(env->is_present_fields);
free(env->is_present_fields_abbreviated);
- pqueue_free(env->event_q);
- pqueue_free(env->recycle_q);
- pqueue_free(env->next_q);
+ pqueue_tag_free(env->event_q);
+ pqueue_tag_free(env->recycle_q);
environment_free_threaded(env);
environment_free_single_threaded(env);
@@ -175,10 +210,23 @@ void environment_free(environment_t* env) {
environment_free_federated(env);
}
+void environment_init_tags(environment_t* env, instant_t start_time, interval_t duration) {
+ env->current_tag = (tag_t){.time = start_time, .microstep = 0u};
+
+ tag_t stop_tag = FOREVER_TAG_INITIALIZER;
+ if (duration >= 0LL) {
+ // A duration has been specified. Calculate the stop time.
+ stop_tag.time = env->current_tag.time + duration;
+ stop_tag.microstep = 0;
+ }
+ env->stop_tag = stop_tag;
+}
+
int environment_init(environment_t* env, const char* name, int id, int num_workers, int num_timers,
int num_startup_reactions, int num_shutdown_reactions, int num_reset_reactions,
int num_is_present_fields, int num_modes, int num_state_resets, int num_watchdogs,
const char* trace_file_name) {
+ (void)trace_file_name; // Will be used with future enclave support.
env->name = malloc(strlen(name) + 1); // +1 for the null terminator
LF_ASSERT_NON_NULL(env->name);
@@ -241,12 +289,9 @@ int environment_init(environment_t* env, const char* name, int id, int num_worke
env->_lf_handle = 1;
// Initialize our priority queues.
- env->event_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_reverse_order, get_event_time, get_event_position,
- set_event_position, event_matches, print_event);
- env->recycle_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, get_event_position,
- set_event_position, event_matches, print_event);
- env->next_q = pqueue_init(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, get_event_time, get_event_position,
- set_event_position, event_matches, print_event);
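+  // The event queue is ordered by tag. The recycle queue needs no particular order,
+  // and the separate next_q is no longer needed because each event now carries a full tag.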
+ env->event_q = pqueue_tag_init_customize(INITIAL_EVENT_QUEUE_SIZE, pqueue_tag_compare, event_matches, print_event);
+ env->recycle_q =
+ pqueue_tag_init_customize(INITIAL_EVENT_QUEUE_SIZE, in_no_particular_order, event_matches, print_event);
// Initialize functionality depending on target properties.
environment_init_threaded(env, num_workers);
diff --git a/core/federated/RTI/main.c b/core/federated/RTI/main.c
index 294dd1f2f..d95bcb597 100644
--- a/core/federated/RTI/main.c
+++ b/core/federated/RTI/main.c
@@ -225,7 +225,7 @@ int process_args(int argc, const char* argv[]) {
}
i++;
long num_federates = strtol(argv[i], NULL, 10);
- if (num_federates == 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) {
+ if (num_federates <= 0L || num_federates == LONG_MAX || num_federates == LONG_MIN) {
lf_print_error("--number_of_federates needs a valid positive integer argument.");
usage(argc, argv);
return 0;
@@ -272,11 +272,6 @@ int process_args(int argc, const char* argv[]) {
return 0;
}
}
- if (rti.base.number_of_scheduling_nodes == 0) {
- lf_print_error("--number_of_federates needs a valid positive integer argument.");
- usage(argc, argv);
- return 0;
- }
return 1;
}
int main(int argc, const char* argv[]) {
diff --git a/core/federated/RTI/rti.Dockerfile b/core/federated/RTI/rti.Dockerfile
index 7b30bfa2d..e70e34584 100644
--- a/core/federated/RTI/rti.Dockerfile
+++ b/core/federated/RTI/rti.Dockerfile
@@ -1,7 +1,6 @@
# Docker file for building the image of the rti
FROM alpine:latest
-COPY core /lingua-franca/core
-COPY include /lingua-franca/include
+COPY . /lingua-franca
WORKDIR /lingua-franca/core/federated/RTI
RUN set -ex && apk add --no-cache gcc musl-dev cmake make && \
mkdir container && \
diff --git a/core/federated/RTI/rti_remote.c b/core/federated/RTI/rti_remote.c
index 69eabbde6..9fbb440b7 100644
--- a/core/federated/RTI/rti_remote.c
+++ b/core/federated/RTI/rti_remote.c
@@ -280,27 +280,7 @@ void send_downstream_next_event_tag(scheduling_node_t* e, tag_t tag) {
encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)]));
if (rti_remote->base.tracing_enabled) {
- tracepoint_rti_to_federate(rti_remote->base.trace, send_DNET, e->id, &tag);
- }
- if (write_to_socket(((federate_info_t*)e)->socket, message_length, buffer)) {
- lf_print_error("RTI failed to send downstream next event tag to federate %d.", e->id);
- e->state = NOT_CONNECTED;
- } else {
- e->last_DNET = tag;
- LF_PRINT_LOG("RTI sent to federate %d the Downstream Next Event Tag (DNET) " PRINTF_TAG ".", e->id,
- tag.time - start_time, tag.microstep);
- }
-}
-
-void send_downstream_next_event_tag(scheduling_node_t* e, tag_t tag) {
- size_t message_length = 1 + sizeof(int64_t) + sizeof(uint32_t);
- unsigned char buffer[message_length];
- buffer[0] = MSG_TYPE_DOWNSTREAM_NEXT_EVENT_TAG;
- encode_int64(tag.time, &(buffer[1]));
- encode_int32((int32_t)tag.microstep, &(buffer[1 + sizeof(int64_t)]));
-
- if (rti_remote->base.tracing_enabled) {
- tracepoint_rti_to_federate(rti_remote->base.trace, send_DNET, e->id, &tag);
+ tracepoint_rti_to_federate(send_DNET, e->id, &tag);
}
if (write_to_socket(((federate_info_t*)e)->socket, message_length, buffer)) {
lf_print_error("RTI failed to send downstream next event tag to federate %d.", e->id);
@@ -931,11 +911,6 @@ void* clock_synchronization_thread(void* noargs) {
}
// Initiate a clock synchronization every rti->clock_sync_period_ns
- // Initiate a clock synchronization every rti->clock_sync_period_ns
- struct timespec sleep_time = {(time_t)rti_remote->clock_sync_period_ns / BILLION,
- rti_remote->clock_sync_period_ns % BILLION};
- struct timespec remaining_time;
-
bool any_federates_connected = true;
while (any_federates_connected) {
// Sleep
@@ -1382,6 +1357,7 @@ static int receive_connection_information(int* socket_id, uint16_t fed_id) {
unsigned char* connections_info_body = NULL;
if (connections_info_body_size > 0) {
connections_info_body = (unsigned char*)malloc(connections_info_body_size);
+ LF_ASSERT_NON_NULL(connections_info_body);
read_from_socket_fail_on_error(socket_id, connections_info_body_size, connections_info_body, NULL,
"RTI failed to read MSG_TYPE_NEIGHBOR_STRUCTURE message body from federate %d.",
fed_id);
diff --git a/core/federated/clock-sync.c b/core/federated/clock-sync.c
index ddd845d6f..577d1104a 100644
--- a/core/federated/clock-sync.c
+++ b/core/federated/clock-sync.c
@@ -438,6 +438,7 @@ void handle_T4_clock_sync_message(unsigned char* buffer, int socket, instant_t r
* Thread that listens for UDP inputs from the RTI.
*/
void* listen_to_rti_UDP_thread(void* args) {
+ (void)args;
initialize_lf_thread_id();
// Listen for UDP messages from the RTI.
// The only expected messages are T1 and T4, which have
@@ -468,12 +469,12 @@ void* listen_to_rti_UDP_thread(void* args) {
if (bytes > 0) {
bytes_read += bytes;
}
- } while ((errno == EAGAIN || errno == EWOULDBLOCK) && bytes_read < message_size);
+ } while ((errno == EAGAIN || errno == EWOULDBLOCK) && bytes_read < (ssize_t)message_size);
// Get local physical time before doing anything else.
instant_t receive_time = lf_time_physical();
- if (bytes_read < message_size) {
+ if (bytes_read < (ssize_t)message_size) {
// Either the socket has closed or the RTI has sent EOF.
// Exit the thread to halt clock synchronization.
lf_print_error("Clock sync: UDP socket to RTI is broken: %s. Clock sync is now disabled.", strerror(errno));
@@ -533,9 +534,9 @@ void clock_sync_remove_offset(instant_t* t) { *t -= (_lf_clock_sync_offset + _lf
void clock_sync_set_constant_bias(interval_t offset) { _lf_clock_sync_constant_bias = offset; }
#else
-void clock_sync_apply_offset(instant_t* t) {}
-void clock_sync_remove_offset(instant_t* t) {}
-void clock_sync_set_constant_bias(interval_t offset) {}
+void clock_sync_apply_offset(instant_t* t) { (void)t; }
+void clock_sync_remove_offset(instant_t* t) { (void)t; }
+void clock_sync_set_constant_bias(interval_t offset) { (void)offset; }
#endif
/**
@@ -550,6 +551,8 @@ int create_clock_sync_thread(lf_thread_t* thread_id) {
#ifdef _LF_CLOCK_SYNC_ON
// One for UDP messages if clock synchronization is enabled for this federate
return lf_thread_create(thread_id, listen_to_rti_UDP_thread, NULL);
+#else
+ (void)thread_id;
#endif // _LF_CLOCK_SYNC_ON
return 0;
}
diff --git a/core/federated/federate.c b/core/federated/federate.c
index 6ba3e7cc9..a96f98598 100644
--- a/core/federated/federate.c
+++ b/core/federated/federate.c
@@ -87,11 +87,11 @@ federate_instance_t _fed = {.socket_TCP_RTI = -1,
.is_last_TAG_provisional = false,
.has_upstream = false,
.has_downstream = false,
- .last_skipped_LTC = (tag_t){.time = NEVER, .microstep = 0u},
- .last_DNET = (tag_t){.time = NEVER, .microstep = 0u},
+ .last_skipped_LTC = {.time = NEVER, .microstep = 0u},
+ .last_DNET = {.time = NEVER, .microstep = 0u},
.received_stop_request_from_rti = false,
- .last_sent_LTC = (tag_t){.time = NEVER, .microstep = 0u},
- .last_sent_NET = (tag_t){.time = NEVER, .microstep = 0u},
+ .last_sent_LTC = {.time = NEVER, .microstep = 0u},
+ .last_sent_NET = {.time = NEVER, .microstep = 0u},
.min_delay_from_physical_action_to_federate_output = NEVER};
federation_metadata_t federation_metadata = {
@@ -186,7 +186,7 @@ extern size_t staa_lst_size;
* @return A pointer to an action struct or null if the ID is out of range.
*/
static lf_action_base_t* action_for_port(int port_id) {
- if (port_id >= 0 && port_id < _lf_action_table_size) {
+ if (port_id >= 0 && ((size_t)port_id) < _lf_action_table_size) {
return _lf_action_table[port_id];
}
lf_print_error_and_exit("Invalid port ID: %d", port_id);
@@ -208,7 +208,7 @@ static lf_action_base_t* action_for_port(int port_id) {
static void update_last_known_status_on_input_ports(tag_t tag) {
LF_PRINT_DEBUG("In update_last_known_status_on_input ports.");
bool notify = false;
- for (int i = 0; i < _lf_action_table_size; i++) {
+ for (size_t i = 0; i < _lf_action_table_size; i++) {
lf_action_base_t* input_port_action = _lf_action_table[i];
// This is called when a TAG is received.
// But it is possible for an input port to have received already
@@ -217,7 +217,7 @@ static void update_last_known_status_on_input_ports(tag_t tag) {
// is in the future and should not be rolled back. So in that case,
// we do not update the last known status tag.
if (lf_tag_compare(tag, input_port_action->trigger->last_known_status_tag) >= 0) {
- LF_PRINT_DEBUG("Updating the last known status tag of port %d from " PRINTF_TAG " to " PRINTF_TAG ".", i,
+ LF_PRINT_DEBUG("Updating the last known status tag of port %zu from " PRINTF_TAG " to " PRINTF_TAG ".", i,
input_port_action->trigger->last_known_status_tag.time - lf_time_start(),
input_port_action->trigger->last_known_status_tag.microstep, tag.time - lf_time_start(),
tag.microstep);
@@ -438,7 +438,7 @@ static void close_inbound_socket(int fed_id, int flag) {
* @param intended_tag The intended tag.
*/
static bool handle_message_now(environment_t* env, trigger_t* trigger, tag_t intended_tag) {
- return trigger->reactions[0]->index >= max_level_allowed_to_advance &&
+ return trigger->reactions[0]->index >= ((index_t)max_level_allowed_to_advance) &&
lf_tag_compare(intended_tag, lf_tag(env)) == 0 && lf_tag_compare(intended_tag, trigger->last_tag) > 0 &&
lf_tag_compare(intended_tag, trigger->last_known_status_tag) > 0 && env->execution_started &&
!trigger->is_physical;
@@ -453,6 +453,7 @@ static bool handle_message_now(environment_t* env, trigger_t* trigger, tag_t int
* @return 0 for success, -1 for failure.
*/
static int handle_message(int* socket, int fed_id) {
+ (void)fed_id;
// Read the header.
size_t bytes_to_read = sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t);
unsigned char buffer[bytes_to_read];
@@ -1048,7 +1049,7 @@ static void handle_tag_advance_grant(void) {
*/
static bool a_port_is_unknown(staa_t* staa_elem) {
bool do_wait = false;
- for (int j = 0; j < staa_elem->num_actions; ++j) {
+ for (size_t j = 0; j < staa_elem->num_actions; ++j) {
if (staa_elem->actions[j]->trigger->status == unknown) {
do_wait = true;
break;
@@ -1056,20 +1057,21 @@ static bool a_port_is_unknown(staa_t* staa_elem) {
}
return do_wait;
}
-#endif
/**
* @brief Return the port ID of the port associated with the given action.
* @return The port ID or -1 if there is no match.
*/
static int id_of_action(lf_action_base_t* input_port_action) {
- for (int i = 0; i < _lf_action_table_size; i++) {
+ for (size_t i = 0; i < _lf_action_table_size; i++) {
if (_lf_action_table[i] == input_port_action)
return i;
}
return -1;
}
+#endif
+
/**
* @brief Thread handling setting the known absent status of input ports.
* For the code-generated array of staa offsets `staa_lst`, which is sorted by STAA offset,
@@ -1079,18 +1081,19 @@ static int id_of_action(lf_action_base_t* input_port_action) {
*/
#ifdef FEDERATED_DECENTRALIZED
static void* update_ports_from_staa_offsets(void* args) {
+ (void)args;
initialize_lf_thread_id();
if (staa_lst_size == 0)
return NULL; // Nothing to do.
// NOTE: Using only the top-level environment, which is the one that deals with network
// input ports.
environment_t* env;
- int num_envs = _lf_get_environments(&env);
+ _lf_get_environments(&env);
LF_MUTEX_LOCK(&env->mutex);
while (1) {
LF_PRINT_DEBUG("**** (update thread) starting");
tag_t tag_when_started_waiting = lf_tag(env);
- for (int i = 0; i < staa_lst_size; ++i) {
+ for (size_t i = 0; i < staa_lst_size; ++i) {
staa_t* staa_elem = staa_lst[i];
// The staa_elem is adjusted in the code generator to have subtracted the delay on the connection.
// The list is sorted in increasing order of adjusted STAA offsets.
@@ -1113,7 +1116,7 @@ static void* update_ports_from_staa_offsets(void* args) {
}
while (a_port_is_unknown(staa_elem)) {
LF_PRINT_DEBUG("**** (update thread) waiting until: " PRINTF_TIME, wait_until_time - lf_time_start());
- if (wait_until(env, wait_until_time, &lf_port_status_changed)) {
+ if (wait_until(wait_until_time, &lf_port_status_changed)) {
if (lf_tag_compare(lf_tag(env), tag_when_started_waiting) != 0) {
break;
}
@@ -1125,7 +1128,7 @@ static void* update_ports_from_staa_offsets(void* args) {
lf_time_start());
*/
- for (int j = 0; j < staa_elem->num_actions; ++j) {
+ for (size_t j = 0; j < staa_elem->num_actions; ++j) {
lf_action_base_t* input_port_action = staa_elem->actions[j];
if (input_port_action->trigger->status == unknown) {
input_port_action->trigger->status = absent;
@@ -1157,7 +1160,7 @@ static void* update_ports_from_staa_offsets(void* args) {
// it would be huge mistake to enter the wait for a new tag below because the
// program will freeze. First, check whether any ports are unknown:
bool port_unkonwn = false;
- for (int i = 0; i < staa_lst_size; ++i) {
+ for (size_t i = 0; i < staa_lst_size; ++i) {
staa_t* staa_elem = staa_lst[i];
if (a_port_is_unknown(staa_elem)) {
port_unkonwn = true;
@@ -1254,9 +1257,6 @@ static void handle_provisional_tag_advance_grant() {
// (which it should be). Do not do this if the federate has not fully
// started yet.
- instant_t dummy_event_time = PTAG.time;
- microstep_t dummy_event_relative_microstep = PTAG.microstep;
-
if (lf_tag_compare(env->current_tag, PTAG) == 0) {
// The current tag can equal the PTAG if we are at the start time
// or if this federate has been able to advance time to the current
@@ -1280,22 +1280,18 @@ static void handle_provisional_tag_advance_grant() {
// Nothing more to do.
LF_MUTEX_UNLOCK(&env->mutex);
return;
- } else if (PTAG.time == env->current_tag.time) {
- // We now know env->current_tag < PTAG, but the times are equal.
- // Adjust the microstep for scheduling the dummy event.
- dummy_event_relative_microstep -= env->current_tag.microstep;
}
// We now know env->current_tag < PTAG.
- if (dummy_event_time != FOREVER) {
- // Schedule a dummy event at the specified time and (relative) microstep.
+ if (PTAG.time != FOREVER) {
+ // Schedule a dummy event at the specified tag.
LF_PRINT_DEBUG("At tag " PRINTF_TAG ", inserting into the event queue a dummy event "
- "with time " PRINTF_TIME " and (relative) microstep " PRINTF_MICROSTEP ".",
- env->current_tag.time - start_time, env->current_tag.microstep, dummy_event_time - start_time,
- dummy_event_relative_microstep);
- // Dummy event points to a NULL trigger and NULL real event.
- event_t* dummy = _lf_create_dummy_events(env, NULL, dummy_event_time, NULL, dummy_event_relative_microstep);
- pqueue_insert(env->event_q, dummy);
+ "with time " PRINTF_TIME " and microstep " PRINTF_MICROSTEP ".",
+ env->current_tag.time - start_time, env->current_tag.microstep, PTAG.time - start_time,
+ PTAG.microstep);
+ // Dummy event points to a NULL trigger.
+ event_t* dummy = _lf_create_dummy_events(env, PTAG);
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)dummy);
}
LF_MUTEX_UNLOCK(&env->mutex);
@@ -1450,32 +1446,7 @@ static void handle_downstream_next_event_tag() {
tag_t DNET = extract_tag(buffer);
// Trace the event when tracing is enabled
- tracepoint_federate_from_rti(_fed.trace, receive_DNET, _lf_my_fed_id, &DNET);
-
- LF_PRINT_LOG("Received Downstream Next Event Tag (DNET): " PRINTF_TAG ".", DNET.time - start_time, DNET.microstep);
-
- _fed.last_DNET = DNET;
-
- if (lf_tag_compare(_fed.last_skipped_LTC, NEVER_TAG) != 0 &&
- lf_tag_compare(_fed.last_skipped_LTC, _fed.last_DNET) >= 0) {
- send_tag(MSG_TYPE_LATEST_TAG_COMPLETE, _fed.last_skipped_LTC);
- _fed.last_skipped_LTC = NEVER_TAG;
- }
-}
-
-/**
- * Handle a downstream next event tag (DNET) message from the RTI.
- * FIXME: Use this tag to eliminate unncessary LTC or NET messages.
- */
-static void handle_downstream_next_event_tag() {
- size_t bytes_to_read = sizeof(instant_t) + sizeof(microstep_t);
- unsigned char buffer[bytes_to_read];
- read_from_socket_fail_on_error(&_fed.socket_TCP_RTI, bytes_to_read, buffer, NULL,
- "Failed to read downstream next event tag from RTI.");
- tag_t DNET = extract_tag(buffer);
-
- // Trace the event when tracing is enabled
- tracepoint_federate_from_rti(_fed.trace, receive_DNET, _lf_my_fed_id, &DNET);
+ tracepoint_federate_from_rti(receive_DNET, _lf_my_fed_id, &DNET);
LF_PRINT_LOG("Received Downstream Next Event Tag (DNET): " PRINTF_TAG ".", DNET.time - start_time, DNET.microstep);
@@ -1491,7 +1462,7 @@ static void handle_downstream_next_event_tag() {
/**
* Send a resign signal to the RTI.
*/
-static void send_resign_signal(environment_t* env) {
+static void send_resign_signal() {
size_t bytes_to_write = 1;
unsigned char buffer[bytes_to_write];
buffer[0] = MSG_TYPE_RESIGN;
@@ -1505,7 +1476,7 @@ static void send_resign_signal(environment_t* env) {
/**
* Send a failed signal to the RTI.
*/
-static void send_failed_signal(environment_t* env) {
+static void send_failed_signal() {
size_t bytes_to_write = 1;
unsigned char buffer[bytes_to_write];
buffer[0] = MSG_TYPE_FAILED;
@@ -1528,6 +1499,7 @@ static void handle_rti_failed_message(void) { exit(1); }
* @param args Ignored
*/
static void* listen_to_rti_TCP(void* args) {
+ (void)args;
initialize_lf_thread_id();
// Buffer for incoming messages.
// This does not constrain the message size
@@ -1678,10 +1650,10 @@ void lf_terminate_execution(environment_t* env) {
if (_fed.socket_TCP_RTI >= 0) {
if (_lf_normal_termination) {
tracepoint_federate_to_rti(send_RESIGN, _lf_my_fed_id, &env->current_tag);
- send_resign_signal(env);
+ send_resign_signal();
} else {
tracepoint_federate_to_rti(send_FAILED, _lf_my_fed_id, &env->current_tag);
- send_failed_signal(env);
+ send_failed_signal();
}
}
@@ -1711,7 +1683,7 @@ void lf_terminate_execution(environment_t* env) {
if (_fed.number_of_inbound_p2p_connections > 0 && _fed.inbound_socket_listeners != NULL) {
LF_PRINT_LOG("Waiting for %zu threads listening for incoming messages to exit.",
_fed.number_of_inbound_p2p_connections);
- for (int i = 0; i < _fed.number_of_inbound_p2p_connections; i++) {
+ for (size_t i = 0; i < _fed.number_of_inbound_p2p_connections; i++) {
// Ignoring errors here.
lf_thread_join(_fed.inbound_socket_listeners[i], NULL);
}
@@ -1736,7 +1708,6 @@ void lf_terminate_execution(environment_t* env) {
void lf_connect_to_federate(uint16_t remote_federate_id) {
int result = -1;
- int count_retries = 0;
// Ask the RTI for port number of the remote federate.
// The buffer is used for both sending and receiving replies.
@@ -1744,7 +1715,7 @@ void lf_connect_to_federate(uint16_t remote_federate_id) {
unsigned char buffer[sizeof(int32_t) + INET_ADDRSTRLEN + 1];
int port = -1;
struct in_addr host_ip_addr;
- int count_tries = 0;
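+  // Record the physical start time so that connection attempts can time out after CONNECT_TIMEOUT.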
+ instant_t start_connect = lf_time_physical();
while (port == -1 && !_lf_termination_executed) {
buffer[0] = MSG_TYPE_ADDRESS_QUERY;
// NOTE: Sending messages in little endian.
@@ -1782,7 +1753,7 @@ void lf_connect_to_federate(uint16_t remote_federate_id) {
// remote federate has not yet sent an MSG_TYPE_ADDRESS_ADVERTISEMENT message to the RTI.
// Sleep for some time before retrying.
if (port == -1) {
- if (count_tries++ >= CONNECT_MAX_RETRIES) {
+ if (CHECK_TIMEOUT(start_connect, CONNECT_TIMEOUT)) {
lf_print_error_and_exit("TIMEOUT obtaining IP/port for federate %d from the RTI.", remote_federate_id);
}
// Wait ADDRESS_QUERY_RETRY_INTERVAL nanoseconds.
@@ -1803,8 +1774,8 @@ void lf_connect_to_federate(uint16_t remote_federate_id) {
LF_PRINT_LOG("Received address %s port %d for federate %d from RTI.", hostname, uport, remote_federate_id);
#endif
- // Iterate until we either successfully connect or exceed the number of
- // attempts given by CONNECT_MAX_RETRIES.
+  // Iterate until we either successfully connect or exceed CONNECT_TIMEOUT.
+ start_connect = lf_time_physical();
int socket_id = -1;
while (result < 0 && !_lf_termination_executed) {
// Create an IPv4 socket for TCP (not UDP) communication over IP (0).
@@ -1830,12 +1801,11 @@ void lf_connect_to_federate(uint16_t remote_federate_id) {
// Note that this should not really happen since the remote federate should be
// accepting socket connections. But possibly it will be busy (in process of accepting
// another socket connection?). Hence, we retry.
- count_retries++;
- if (count_retries > CONNECT_MAX_RETRIES) {
- // If the remote federate is not accepting the connection after CONNECT_MAX_RETRIES
+ if (CHECK_TIMEOUT(start_connect, CONNECT_TIMEOUT)) {
+ // If the remote federate is not accepting the connection after CONNECT_TIMEOUT
// treat it as a soft error condition and return.
- lf_print_error("Failed to connect to federate %d after %d retries. Giving up.", remote_federate_id,
- CONNECT_MAX_RETRIES);
+ lf_print_error("Failed to connect to federate %d with timeout: " PRINTF_TIME ". Giving up.", remote_federate_id,
+ CONNECT_TIMEOUT);
return;
}
lf_print_warning("Could not connect to federate %d. Will try again every" PRINTF_TIME "nanoseconds.\n",
@@ -1917,10 +1887,10 @@ void lf_connect_to_rti(const char* hostname, int port) {
_fed.socket_TCP_RTI = create_real_time_tcp_socket_errexit();
int result = -1;
- int count_retries = 0;
struct addrinfo* res = NULL;
- while (count_retries++ < CONNECT_MAX_RETRIES && !_lf_termination_executed) {
+ instant_t start_connect = lf_time_physical();
+ while (!CHECK_TIMEOUT(start_connect, CONNECT_TIMEOUT) && !_lf_termination_executed) {
if (res != NULL) {
// This is a repeated attempt.
if (_fed.socket_TCP_RTI >= 0)
@@ -1942,7 +1912,7 @@ void lf_connect_to_rti(const char* hostname, int port) {
// Reconstruct the address info.
rti_address(hostname, uport, &res);
}
- lf_print("Trying RTI again on port %d (attempt %d).", uport, count_retries);
+ lf_print("Trying RTI again on port %d.", uport);
} else {
// This is the first attempt.
rti_address(hostname, uport, &res);
@@ -2033,7 +2003,7 @@ void lf_connect_to_rti(const char* hostname, int port) {
}
}
if (result < 0) {
- lf_print_error_and_exit("Failed to connect to RTI after %d tries.", CONNECT_MAX_RETRIES);
+ lf_print_error_and_exit("Failed to connect to RTI with timeout: " PRINTF_TIME, CONNECT_TIMEOUT);
}
freeaddrinfo(res); /* No longer needed */
@@ -2137,7 +2107,7 @@ void lf_enqueue_port_absent_reactions(environment_t* env) {
LF_PRINT_DEBUG("No port absent reactions.");
return;
}
- for (int i = 0; i < num_port_absent_reactions; i++) {
+ for (size_t i = 0; i < num_port_absent_reactions; i++) {
reaction_t* reaction = port_absent_reaction[i];
if (reaction && reaction->status == inactive) {
LF_PRINT_DEBUG("Inserting port absent reaction on reaction queue.");
@@ -2147,9 +2117,8 @@ void lf_enqueue_port_absent_reactions(environment_t* env) {
}
void* lf_handle_p2p_connections_from_federates(void* env_arg) {
- assert(env_arg);
- environment_t* env = (environment_t*)env_arg;
- int received_federates = 0;
+ LF_ASSERT_NON_NULL(env_arg);
+ size_t received_federates = 0;
// Allocate memory to store thread IDs.
_fed.inbound_socket_listeners = (lf_thread_t*)calloc(_fed.number_of_inbound_p2p_connections, sizeof(lf_thread_t));
while (received_federates < _fed.number_of_inbound_p2p_connections && !_lf_termination_executed) {
@@ -2280,7 +2249,6 @@ void lf_latest_tag_complete(tag_t tag_to_send) {
}
parse_rti_code_t lf_parse_rti_addr(const char* rti_addr) {
- bool has_host = false, has_port = false, has_user = false;
rti_addr_info_t rti_addr_info = {0};
extract_rti_addr_info(rti_addr, &rti_addr_info);
if (!rti_addr_info.has_host && !rti_addr_info.has_port && !rti_addr_info.has_user) {
@@ -2288,8 +2256,8 @@ parse_rti_code_t lf_parse_rti_addr(const char* rti_addr) {
}
if (rti_addr_info.has_host) {
if (validate_host(rti_addr_info.rti_host_str)) {
- char* rti_host = (char*)calloc(256, sizeof(char));
- strncpy(rti_host, rti_addr_info.rti_host_str, 255);
+ char* rti_host = (char*)calloc(257, sizeof(char));
+ strncpy(rti_host, rti_addr_info.rti_host_str, 256);
federation_metadata.rti_host = rti_host;
} else {
return INVALID_HOST;
@@ -2304,8 +2272,8 @@ parse_rti_code_t lf_parse_rti_addr(const char* rti_addr) {
}
if (rti_addr_info.has_user) {
if (validate_user(rti_addr_info.rti_user_str)) {
- char* rti_user = (char*)calloc(256, sizeof(char));
- strncpy(rti_user, rti_addr_info.rti_user_str, 255);
+ char* rti_user = (char*)calloc(257, sizeof(char));
+ strncpy(rti_user, rti_addr_info.rti_user_str, 256);
federation_metadata.rti_user = rti_user;
} else {
return INVALID_USER;
@@ -2318,7 +2286,7 @@ void lf_reset_status_fields_on_input_port_triggers() {
environment_t* env;
_lf_get_environments(&env);
tag_t now = lf_tag(env);
- for (int i = 0; i < _lf_action_table_size; i++) {
+ for (size_t i = 0; i < _lf_action_table_size; i++) {
if (lf_tag_compare(_lf_action_table[i]->trigger->last_known_status_tag, now) >= 0) {
set_network_port_status(i, absent); // Default may be overriden to become present.
} else {
@@ -2475,8 +2443,9 @@ tag_t lf_send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply)
// Create a dummy event that will force this federate to advance time and subsequently
// enable progress for downstream federates. Increment the time by ADVANCE_MESSAGE_INTERVAL
// to prevent too frequent dummy events.
- event_t* dummy = _lf_create_dummy_events(env, NULL, tag.time + ADVANCE_MESSAGE_INTERVAL, NULL, 0);
- pqueue_insert(env->event_q, dummy);
+ tag_t dummy_event_tag = (tag_t){.time = tag.time + ADVANCE_MESSAGE_INTERVAL, .microstep = tag.microstep};
+ event_t* dummy = _lf_create_dummy_events(env, dummy_event_tag);
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)dummy);
}
LF_PRINT_DEBUG("Inserted a dummy event for logical time " PRINTF_TIME ".", tag.time - lf_time_start());
@@ -2654,11 +2623,10 @@ int lf_send_tagged_message(environment_t* env, interval_t additional_delay, int
int* socket;
if (message_type == MSG_TYPE_P2P_TAGGED_MESSAGE) {
socket = &_fed.sockets_for_outbound_p2p_connections[federate];
-    tracepoint_federate_to_federate(_fed.trace, send_P2P_TAGGED_MSG, _lf_my_fed_id, federate,
-                                    &current_message_intended_tag);
+    tracepoint_federate_to_federate(send_P2P_TAGGED_MSG, _lf_my_fed_id, federate, &current_message_intended_tag);
} else {
socket = &_fed.socket_TCP_RTI;
-    tracepoint_federate_to_rti(_fed.trace, send_TAGGED_MSG, _lf_my_fed_id, &current_message_intended_tag);
+    tracepoint_federate_to_rti(send_TAGGED_MSG, _lf_my_fed_id, &current_message_intended_tag);
}
if (lf_tag_compare(_fed.last_DNET, current_message_intended_tag) > 0) {
@@ -2727,6 +2695,8 @@ bool lf_update_max_level(tag_t tag, bool is_provisional) {
int prev_max_level_allowed_to_advance = max_level_allowed_to_advance;
max_level_allowed_to_advance = INT_MAX;
#ifdef FEDERATED_DECENTRALIZED
+ (void)tag;
+ (void)is_provisional;
size_t action_table_size = _lf_action_table_size;
lf_action_base_t** action_table = _lf_action_table;
#else
@@ -2742,7 +2712,7 @@ bool lf_update_max_level(tag_t tag, bool is_provisional) {
size_t action_table_size = _lf_zero_delay_cycle_action_table_size;
lf_action_base_t** action_table = _lf_zero_delay_cycle_action_table;
#endif // FEDERATED_DECENTRALIZED
- for (int i = 0; i < action_table_size; i++) {
+ for (size_t i = 0; i < action_table_size; i++) {
lf_action_base_t* input_port_action = action_table[i];
#ifdef FEDERATED_DECENTRALIZED
// In decentralized execution, if the current_tag is close enough to the
diff --git a/core/federated/network/net_util.c b/core/federated/network/net_util.c
index c6b3b57a6..3ab04c5a2 100644
--- a/core/federated/network/net_util.c
+++ b/core/federated/network/net_util.c
@@ -165,7 +165,6 @@ int write_to_socket(int socket, size_t num_bytes, unsigned char* buffer) {
return -1;
}
ssize_t bytes_written = 0;
- va_list args;
while (bytes_written < (ssize_t)num_bytes) {
ssize_t more = write(socket, buffer + bytes_written, num_bytes - (size_t)bytes_written);
if (more <= 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)) {
@@ -414,10 +413,6 @@ void extract_header(unsigned char* buffer, uint16_t* port_id, uint16_t* federate
// The next four bytes are the message length.
uint32_t local_length_signed = extract_uint32(&(buffer[sizeof(uint16_t) + sizeof(uint16_t)]));
- if (local_length_signed < 0) {
- lf_print_error_and_exit("Received an invalid message length (%d) from federate %d.", local_length_signed,
- *federate_id);
- }
*length = (size_t)local_length_signed;
// printf("DEBUG: Federate receiving message to port %d to federate %d of length %d.\n", port_id, federate_id,
@@ -492,7 +487,7 @@ bool validate_user(const char* user) {
return match_regex(user, username_regex);
}
-bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int max_len, int min_len,
+bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, size_t max_len, size_t min_len,
const char* err_msg) {
size_t size = group.rm_eo - group.rm_so;
if (size > max_len || size < min_len) {
@@ -505,7 +500,7 @@ bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int
}
bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti_addr_flags, regmatch_t* group_array,
- int* gids, int* max_lens, int* min_lens, const char** err_msgs) {
+ int* gids, size_t* max_lens, size_t* min_lens, const char** err_msgs) {
for (int i = 0; i < 3; i++) {
if (group_array[gids[i]].rm_so != -1) {
if (!extract_match_group(rti_addr, rti_addr_strs[i], group_array[gids[i]], max_lens[i], min_lens[i],
@@ -527,8 +522,8 @@ void extract_rti_addr_info(const char* rti_addr, rti_addr_info_t* rti_addr_info)
int gids[3] = {user_gid, host_gid, port_gid};
char* rti_addr_strs[3] = {rti_addr_info->rti_user_str, rti_addr_info->rti_host_str, rti_addr_info->rti_port_str};
bool* rti_addr_flags[3] = {&rti_addr_info->has_user, &rti_addr_info->has_host, &rti_addr_info->has_port};
- int max_lens[3] = {255, 255, 5};
- int min_lens[3] = {1, 1, 1};
+ size_t max_lens[3] = {255, 255, 5};
+ size_t min_lens[3] = {1, 1, 1};
const char* err_msgs[3] = {"User name must be between 1 to 255 characters long.",
"Host must be between 1 to 255 characters long.",
"Port must be between 1 to 5 characters long."};
@@ -543,7 +538,7 @@ void extract_rti_addr_info(const char* rti_addr, rti_addr_info_t* rti_addr_info)
  if (regexec(&regex_compiled, rti_addr, max_groups, group_array, 0) == 0) {
// Check for matched username. group_array[0] is the entire matched string.
- for (int i = 1; i < max_groups; i++) {
+ for (size_t i = 1; i < max_groups; i++) {
// Annoyingly, the rm_so and rm_eo fields are long long on some platforms and int on others.
// To suppress warnings, cast to long long
LF_PRINT_DEBUG("runtime rti_addr regex: so: %lld eo: %lld\n", (long long)group_array[i].rm_so,
diff --git a/core/lf_token.c b/core/lf_token.c
index 4e13b9e6a..48913f85b 100644
--- a/core/lf_token.c
+++ b/core/lf_token.c
@@ -76,7 +76,7 @@ static lf_token_t* _lf_writable_copy_locked(lf_port_base_t* port) {
lf_token_t* token = port->tmplt.token;
if (token == NULL)
return NULL;
- LF_PRINT_DEBUG("lf_writable_copy: Requesting writable copy of token %p with reference count %zu.", token,
+ LF_PRINT_DEBUG("lf_writable_copy: Requesting writable copy of token %p with reference count %zu.", (void*)token,
token->ref_count);
if (port->num_destinations == 1 && token->ref_count == 1) {
LF_PRINT_DEBUG("lf_writable_copy: Avoided copy because there "
@@ -170,13 +170,13 @@ token_freed _lf_free_token(lf_token_t* token) {
}
if (hashset_num_items(_lf_token_recycling_bin) < _LF_TOKEN_RECYCLING_BIN_SIZE_LIMIT) {
// Recycle instead of freeing.
- LF_PRINT_DEBUG("_lf_free_token: Putting token on the recycling bin: %p", token);
+ LF_PRINT_DEBUG("_lf_free_token: Putting token on the recycling bin: %p", (void*)token);
if (!hashset_add(_lf_token_recycling_bin, token)) {
- lf_print_warning("Putting token %p on the recycling bin, but it is already there!", token);
+ lf_print_warning("Putting token %p on the recycling bin, but it is already there!", (void*)token);
}
} else {
// Recycling bin is full.
- LF_PRINT_DEBUG("_lf_free_token: Freeing allocated memory for token: %p", token);
+ LF_PRINT_DEBUG("_lf_free_token: Freeing allocated memory for token: %p", (void*)token);
free(token);
}
#if !defined NDEBUG
@@ -197,7 +197,7 @@ lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length) {
if (hashset_iterator_next(iterator) >= 0) {
result = hashset_iterator_value(iterator);
hashset_remove(_lf_token_recycling_bin, result);
- LF_PRINT_DEBUG("_lf_new_token: Retrieved token from the recycling bin: %p", result);
+ LF_PRINT_DEBUG("_lf_new_token: Retrieved token from the recycling bin: %p", (void*)result);
}
free(iterator);
}
@@ -212,7 +212,7 @@ lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length) {
if (result == NULL) {
// Nothing found on the recycle bin.
result = (lf_token_t*)calloc(1, sizeof(lf_token_t));
- LF_PRINT_DEBUG("_lf_new_token: Allocated memory for token: %p", result);
+ LF_PRINT_DEBUG("_lf_new_token: Allocated memory for token: %p", (void*)result);
}
result->type = type;
result->length = length;
@@ -224,7 +224,7 @@ lf_token_t* _lf_new_token(token_type_t* type, void* value, size_t length) {
lf_token_t* _lf_get_token(token_template_t* tmplt) {
if (tmplt->token != NULL) {
if (tmplt->token->ref_count == 1) {
- LF_PRINT_DEBUG("_lf_get_token: Reusing template token: %p with ref_count %zu", tmplt->token,
+ LF_PRINT_DEBUG("_lf_get_token: Reusing template token: %p with ref_count %zu", (void*)tmplt->token,
tmplt->token->ref_count);
// Free any previous value in the token.
_lf_free_token_value(tmplt->token);
@@ -268,7 +268,8 @@ void _lf_initialize_template(token_template_t* tmplt, size_t element_size) {
lf_token_t* _lf_initialize_token_with_value(token_template_t* tmplt, void* value, size_t length) {
assert(tmplt != NULL);
- LF_PRINT_DEBUG("_lf_initialize_token_with_value: template %p, value %p", tmplt, value);
+ LF_PRINT_DEBUG("_lf_initialize_token_with_value: template %p, value %p", (void*)tmplt, value);
+
lf_token_t* result = _lf_get_token(tmplt);
result->value = value;
// Count allocations to issue a warning if this is never freed.
@@ -322,14 +323,15 @@ void _lf_free_all_tokens() {
void _lf_replace_template_token(token_template_t* tmplt, lf_token_t* newtoken) {
assert(tmplt != NULL);
- LF_PRINT_DEBUG("_lf_replace_template_token: template: %p newtoken: %p.", tmplt, newtoken);
+ LF_PRINT_DEBUG("_lf_replace_template_token: template: %p newtoken: %p.", (void*)tmplt, (void*)newtoken);
if (tmplt->token != newtoken) {
if (tmplt->token != NULL) {
_lf_done_using(tmplt->token);
}
if (newtoken != NULL) {
newtoken->ref_count++;
- LF_PRINT_DEBUG("_lf_replace_template_token: Incremented ref_count of %p to %zu.", newtoken, newtoken->ref_count);
+ LF_PRINT_DEBUG("_lf_replace_template_token: Incremented ref_count of %p to %zu.", (void*)newtoken,
+ newtoken->ref_count);
}
tmplt->token = newtoken;
}
@@ -339,16 +341,16 @@ token_freed _lf_done_using(lf_token_t* token) {
if (token == NULL) {
return NOT_FREED;
}
- LF_PRINT_DEBUG("_lf_done_using: token = %p, ref_count = %zu.", token, token->ref_count);
+ LF_PRINT_DEBUG("_lf_done_using: token = %p, ref_count = %zu.", (void*)token, token->ref_count);
if (token->ref_count == 0) {
- lf_print_warning("Token being freed that has already been freed: %p", token);
+ lf_print_warning("Token being freed that has already been freed: %p", (void*)token);
return NOT_FREED;
}
token->ref_count--;
return _lf_free_token(token);
}
-void _lf_free_token_copies(struct environment_t* env) {
+void _lf_free_token_copies() {
while (_lf_tokens_allocated_in_reactions != NULL) {
lf_token_t* next = _lf_tokens_allocated_in_reactions->next;
_lf_done_using(_lf_tokens_allocated_in_reactions);
diff --git a/core/lf_utils.cmake b/core/lf_utils.cmake
new file mode 100644
index 000000000..5b6b35921
--- /dev/null
+++ b/core/lf_utils.cmake
@@ -0,0 +1,11 @@
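+# Enable compiler warnings on the given target, selected per platform.
+# Usage: lf_enable_compiler_warnings(<target>), as done for the reactor-c target in core/CMakeLists.txt.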
+function(lf_enable_compiler_warnings target)
+ if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ target_compile_options(${target} PRIVATE -Wall -Wextra -Wpedantic -Werror)
+ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+ target_compile_options(${target} PRIVATE /W4)
+ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
+ target_compile_options(${target} PRIVATE -Wall)
+ else()
+ target_compile_options(${target} PRIVATE -Wall)
+ endif()
+endfunction()
\ No newline at end of file
diff --git a/core/modal_models/modes.c b/core/modal_models/modes.c
index e6e6f5d95..922d02f9e 100644
--- a/core/modal_models/modes.c
+++ b/core/modal_models/modes.c
@@ -56,8 +56,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Forward declaration of functions and variables supplied by reactor_common.c
void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_number);
-event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next,
- microstep_t offset);
+event_t* _lf_create_dummy_events(environment_t* env, tag_t tag);
// ----------------------------------------------------------------------------
@@ -400,28 +399,17 @@ void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[]
event->trigger != NULL) { // History transition to a different mode
// Remaining time that the event would have been waiting before mode was left
instant_t local_remaining_delay =
- event->time -
+ event->base.tag.time -
(state->next_mode->deactivation_time != 0 ? state->next_mode->deactivation_time : lf_time_start());
tag_t current_logical_tag = env->current_tag;
// Reschedule event with original local delay
LF_PRINT_DEBUG("Modes: Re-enqueuing event with a suspended delay of " PRINTF_TIME
" (previous TTH: " PRINTF_TIME ", Mode suspended at: " PRINTF_TIME ").",
- local_remaining_delay, event->time, state->next_mode->deactivation_time);
+ local_remaining_delay, event->base.tag.time, state->next_mode->deactivation_time);
tag_t schedule_tag = {.time = current_logical_tag.time + local_remaining_delay,
.microstep = (local_remaining_delay == 0 ? current_logical_tag.microstep + 1 : 0)};
_lf_schedule_at_tag(env, event->trigger, schedule_tag, event->token);
-
- // Also schedule events stacked up in super dense time.
- event_t* e = event;
- while (e->next != NULL) {
- schedule_tag.microstep++;
- _lf_schedule_at_tag(env, e->next->trigger, schedule_tag, e->next->token);
- event_t* tmp = e->next;
- e = tmp->next;
- // A fresh event was created by schedule, hence, recycle old one
- lf_recycle_event(env, tmp);
- }
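+          // Events formerly chained in superdense time via event->next are now separate entries
+          // in the tag-ordered event queue, so no additional rescheduling is needed here.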
}
// A fresh event was created by schedule, hence, recycle old one
lf_recycle_event(env, event);
@@ -490,7 +478,7 @@ void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[]
// Retract all events from the event queue that are associated with now inactive modes
if (env->event_q != NULL) {
- size_t q_size = pqueue_size(env->event_q);
+ size_t q_size = pqueue_tag_size(env->event_q);
if (q_size > 0) {
event_t** delayed_removal = (event_t**)calloc(q_size, sizeof(event_t*));
size_t delayed_removal_count = 0;
@@ -509,7 +497,7 @@ void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[]
LF_PRINT_DEBUG("Modes: Pulling %zu events from the event queue to suspend them. %d events are now suspended.",
delayed_removal_count, _lf_suspended_events_num);
for (size_t i = 0; i < delayed_removal_count; i++) {
- pqueue_remove(env->event_q, delayed_removal[i]);
+ pqueue_tag_remove(env->event_q, (pqueue_tag_element_t*)(delayed_removal[i]));
}
free(delayed_removal);
@@ -519,7 +507,8 @@ void _lf_process_mode_changes(environment_t* env, reactor_mode_state_t* states[]
if (env->modes->triggered_reactions_request) {
// Insert a dummy event in the event queue for the next microstep to make
// sure startup/reset reactions (if any) are triggered as soon as possible.
- pqueue_insert(env->event_q, _lf_create_dummy_events(env, NULL, env->current_tag.time, NULL, 1));
+ tag_t dummy_event_tag = (tag_t){.time = env->current_tag.time, .microstep = 1};
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)_lf_create_dummy_events(env, dummy_event_tag));
}
}
}
diff --git a/core/reactor.c b/core/reactor.c
index 8bf3b9460..00df9e07f 100644
--- a/core/reactor.c
+++ b/core/reactor.c
@@ -27,9 +27,18 @@
extern instant_t start_time;
int lf_thread_id() { return 0; }
-int lf_mutex_unlock(lf_mutex_t* mutex) { return 0; }
-int lf_mutex_init(lf_mutex_t* mutex) { return 0; }
-int lf_mutex_lock(lf_mutex_t* mutex) { return 0; }
+int lf_mutex_unlock(lf_mutex_t* mutex) {
+ (void)mutex;
+ return 0;
+}
+int lf_mutex_init(lf_mutex_t* mutex) {
+ (void)mutex;
+ return 0;
+}
+int lf_mutex_lock(lf_mutex_t* mutex) {
+ (void)mutex;
+ return 0;
+}
// Defined in reactor_common.c:
extern bool fast;
@@ -83,11 +92,13 @@ void lf_print_snapshot(environment_t* env) {
}
#else // NDEBUG
void lf_print_snapshot(environment_t* env) {
+ (void)env;
// Do nothing.
}
#endif // NDEBUG
void _lf_trigger_reaction(environment_t* env, reaction_t* reaction, int worker_number) {
+ (void)worker_number;
assert(env != GLOBAL_ENVIRONMENT);
#ifdef MODAL_REACTORS
@@ -209,7 +220,7 @@ int next(environment_t* env) {
// Enter the critical section and do not leave until we have
// determined which tag to commit to and start invoking reactions for.
LF_CRITICAL_SECTION_ENTER(env);
- event_t* event = (event_t*)pqueue_peek(env->event_q);
+ event_t* event = (event_t*)pqueue_tag_peek(env->event_q);
// pqueue_dump(event_q, event_q->prt);
// If there is no next event and -keepalive has been specified
// on the command line, then we will wait the maximum time possible.
@@ -220,13 +231,7 @@ int next(environment_t* env) {
lf_set_stop_tag(env, (tag_t){.time = env->current_tag.time, .microstep = env->current_tag.microstep + 1});
}
} else {
- next_tag.time = event->time;
- // Deduce the microstep
- if (next_tag.time == env->current_tag.time) {
- next_tag.microstep = env->current_tag.microstep + 1;
- } else {
- next_tag.microstep = 0;
- }
+ next_tag = event->base.tag;
}
if (lf_is_tag_after_stop_tag(env, next_tag)) {
@@ -234,10 +239,10 @@ int next(environment_t* env) {
next_tag = env->stop_tag;
}
- LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time);
+ LF_PRINT_LOG("Next event (elapsed) tag is " PRINTF_TAG ".", next_tag.time - start_time, next_tag.microstep);
// Wait until physical time >= event.time.
int finished_sleep = wait_until(env, next_tag.time);
- LF_PRINT_LOG("Next event (elapsed) time is " PRINTF_TIME ".", next_tag.time - start_time);
+ LF_PRINT_LOG("Next event (elapsed) tag is " PRINTF_TAG ".", next_tag.time - start_time, next_tag.microstep);
if (finished_sleep != 0) {
LF_PRINT_DEBUG("***** wait_until was interrupted.");
// Sleep was interrupted. This could happen when a physical action
@@ -247,10 +252,10 @@ int next(environment_t* env) {
LF_CRITICAL_SECTION_EXIT(env);
return 1;
}
- // Advance current time to match that of the first event on the queue.
+ // Advance current tag to match that of the first event on the queue.
// We can now leave the critical section. Any events that will be added
// to the queue asynchronously will have a later tag than the current one.
- _lf_advance_logical_time(env, next_tag.time);
+ _lf_advance_tag(env, next_tag);
// Trigger shutdown reactions if appropriate.
if (lf_tag_compare(env->current_tag, env->stop_tag) >= 0) {
@@ -273,8 +278,7 @@ int next(environment_t* env) {
void lf_request_stop(void) {
// There is only one enclave, so get its environment.
environment_t* env;
- int num_environments = _lf_get_environments(&env);
- assert(num_environments == 1);
+ _lf_get_environments(&env);
tag_t new_stop_tag;
new_stop_tag.time = env->current_tag.time;
@@ -368,9 +372,18 @@ int lf_reactor_c_main(int argc, const char* argv[]) {
* @brief Notify of new event by calling the single-threaded platform API
* @param env Environment in which we are executing.
*/
-int lf_notify_of_event(environment_t* env) { return _lf_single_threaded_notify_of_event(); }
+int lf_notify_of_event(environment_t* env) {
+ (void)env;
+ return _lf_single_threaded_notify_of_event();
+}
-int lf_critical_section_enter(environment_t* env) { return lf_disable_interrupts_nested(); }
+int lf_critical_section_enter(environment_t* env) {
+ (void)env;
+ return lf_disable_interrupts_nested();
+}
-int lf_critical_section_exit(environment_t* env) { return lf_enable_interrupts_nested(); }
+int lf_critical_section_exit(environment_t* env) {
+ (void)env;
+ return lf_enable_interrupts_nested();
+}
#endif
diff --git a/core/reactor_common.c b/core/reactor_common.c
index 6fc3d3824..33e5582f5 100644
--- a/core/reactor_common.c
+++ b/core/reactor_common.c
@@ -119,7 +119,7 @@ void lf_free(struct allocation_record_t** head) {
LF_PRINT_DEBUG("Freeing memory at %p", record->allocated);
free(record->allocated);
struct allocation_record_t* tmp = record->next;
- LF_PRINT_DEBUG("Freeing allocation record at %p", record);
+ LF_PRINT_DEBUG("Freeing allocation record at %p", (void*)record);
free(record);
record = tmp;
}
@@ -171,7 +171,7 @@ void _lf_start_time_step(environment_t* env) {
LF_PRINT_LOG("--------- Start time step at tag " PRINTF_TAG ".", env->current_tag.time - start_time,
env->current_tag.microstep);
// Handle dynamically created tokens for mutable inputs.
- _lf_free_token_copies(env);
+ _lf_free_token_copies();
bool** is_present_fields = env->is_present_fields_abbreviated;
int size = env->is_present_fields_abbreviated_size;
@@ -199,7 +199,7 @@ void _lf_start_time_step(environment_t* env) {
#ifdef FEDERATED
// If the environment is the top-level one, we have some work to do.
environment_t* envs;
- int num_envs = _lf_get_environments(&envs);
+ _lf_get_environments(&envs);
if (env == envs) {
// This is the top-level environment.
@@ -230,19 +230,15 @@ void _lf_pop_events(environment_t* env) {
_lf_handle_mode_triggered_reactions(env);
#endif
- event_t* event = (event_t*)pqueue_peek(env->event_q);
- while (event != NULL && event->time == env->current_tag.time) {
- event = (event_t*)pqueue_pop(env->event_q);
+ event_t* event = (event_t*)pqueue_tag_peek(env->event_q);
+ while (event != NULL && lf_tag_compare(event->base.tag, env->current_tag) == 0) {
+ event = (event_t*)pqueue_tag_pop(env->event_q);
- if (event->is_dummy) {
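+    // A dummy event is identified by a NULL trigger.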
+ if (event->trigger == NULL) {
LF_PRINT_DEBUG("Popped dummy event from the event queue.");
- if (event->next != NULL) {
- LF_PRINT_DEBUG("Putting event from the event queue for the next microstep.");
- pqueue_insert(env->next_q, event->next);
- }
lf_recycle_event(env, event);
// Peek at the next event in the event queue.
- event = (event_t*)pqueue_peek(env->event_q);
+ event = (event_t*)pqueue_tag_peek(env->event_q);
continue;
}
@@ -278,7 +274,7 @@ void _lf_pop_events(environment_t* env) {
reaction->is_STP_violated = true;
LF_PRINT_LOG("Trigger %p has violated the reaction's STP offset. Intended tag: " PRINTF_TAG
". Current tag: " PRINTF_TAG,
- event->trigger, event->intended_tag.time - start_time, event->intended_tag.microstep,
+ (void*)event->trigger, event->intended_tag.time - start_time, event->intended_tag.microstep,
env->current_tag.time - start_time, env->current_tag.microstep);
// Need to update the last_known_status_tag of the port because otherwise,
// the MLAA could get stuck, causing the program to lock up.
@@ -328,31 +324,17 @@ void _lf_pop_events(environment_t* env) {
// Mark the trigger present.
event->trigger->status = present;
- // If this event points to a next event, insert it into the next queue.
- if (event->next != NULL) {
- // Insert the next event into the next queue.
- pqueue_insert(env->next_q, event->next);
- }
-
lf_recycle_event(env, event);
// Peek at the next event in the event queue.
- event = (event_t*)pqueue_peek(env->event_q);
+ event = (event_t*)pqueue_tag_peek(env->event_q);
};
-
- LF_PRINT_DEBUG("There are %zu events deferred to the next microstep.", pqueue_size(env->next_q));
-
- // After populating the reaction queue, see if there are things on the
- // next queue to put back into the event queue.
- while (pqueue_peek(env->next_q) != NULL) {
- pqueue_insert(env->event_q, pqueue_pop(env->next_q));
- }
}
event_t* lf_get_new_event(environment_t* env) {
assert(env != GLOBAL_ENVIRONMENT);
// Recycle event_t structs, if possible.
- event_t* e = (event_t*)pqueue_pop(env->recycle_q);
+ event_t* e = (event_t*)pqueue_tag_pop(env->recycle_q);
if (e == NULL) {
e = (event_t*)calloc(1, sizeof(struct event_t));
if (e == NULL)
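To make the reworked popping loop easier to follow, here is a reduced, runnable sketch of the pattern it now uses: events are ordered by complete tags, a NULL trigger marks a dummy event, and everything whose tag equals the current tag is drained in one pass. The types and the array-backed "queue" are simplified stand-ins, not the runtime's definitions.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef int64_t instant_t;
typedef uint32_t microstep_t;
typedef struct { instant_t time; microstep_t microstep; } tag_t;

/* Simplified stand-in for event_t: the tag doubles as the queue priority and
 * a NULL trigger marks a dummy event. */
typedef struct { tag_t tag; const char* trigger; } event_t;

static int tag_compare(tag_t a, tag_t b) {
  if (a.time != b.time) return a.time < b.time ? -1 : 1;
  if (a.microstep != b.microstep) return a.microstep < b.microstep ? -1 : 1;
  return 0;
}

int main(void) {
  /* Stand-in for the event queue, already sorted by tag. */
  event_t queue[] = {{{10, 0}, "timer"}, {{10, 0}, NULL}, {{20, 0}, "action"}};
  size_t head = 0, n = sizeof(queue) / sizeof(queue[0]);
  tag_t current = {10, 0};

  /* Drain every event whose tag equals the current tag. */
  while (head < n && tag_compare(queue[head].tag, current) == 0) {
    event_t* ev = &queue[head++];
    if (ev->trigger == NULL)
      printf("popped a dummy event\n");   /* nothing to trigger */
    else
      printf("triggering %s\n", ev->trigger);
  }
  return 0;
}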
@@ -376,7 +358,7 @@ void _lf_initialize_timer(environment_t* env, trigger_t* timer) {
// && (timer->offset != 0 || timer->period != 0)) {
event_t* e = lf_get_new_event(env);
e->trigger = timer;
- e->time = lf_time_logical(env) + timer->offset;
+ e->base.tag = (tag_t){.time = lf_time_logical(env) + timer->offset, .microstep = 0};
_lf_add_suspended_event(e);
return;
}
@@ -401,9 +383,9 @@ void _lf_initialize_timer(environment_t* env, trigger_t* timer) {
// Recycle event_t structs, if possible.
event_t* e = lf_get_new_event(env);
e->trigger = timer;
- e->time = lf_time_logical(env) + delay;
+ e->base.tag = (tag_t){.time = lf_time_logical(env) + delay, .microstep = 0};
// NOTE: No lock is being held. Assuming this only happens at startup.
- pqueue_insert(env->event_q, e);
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
tracepoint_schedule(env, timer, delay); // Trace even though schedule is not called.
}
@@ -462,38 +444,21 @@ void _lf_trigger_shutdown_reactions(environment_t* env) {
void lf_recycle_event(environment_t* env, event_t* e) {
assert(env != GLOBAL_ENVIRONMENT);
- e->time = 0LL;
+ e->base.tag = (tag_t){.time = 0LL, .microstep = 0};
e->trigger = NULL;
- e->pos = 0;
e->token = NULL;
- e->is_dummy = false;
#ifdef FEDERATED_DECENTRALIZED
e->intended_tag = (tag_t){.time = NEVER, .microstep = 0u};
#endif
- e->next = NULL;
- pqueue_insert(env->recycle_q, e);
+ pqueue_tag_insert(env->recycle_q, (pqueue_tag_element_t*)e);
}
-event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next,
- microstep_t offset) {
- event_t* first_dummy = lf_get_new_event(env);
- event_t* dummy = first_dummy;
- dummy->time = time;
- dummy->is_dummy = true;
- dummy->trigger = trigger;
- while (offset > 0) {
- if (offset == 1) {
- dummy->next = next;
- break;
- }
- dummy->next = lf_get_new_event(env);
- dummy = dummy->next;
- dummy->time = time;
- dummy->is_dummy = true;
- dummy->trigger = trigger;
- offset--;
- }
- return first_dummy;
+event_t* _lf_create_dummy_events(environment_t* env, tag_t tag) {
+ event_t* dummy = lf_get_new_event(env);
+ dummy->base.tag = tag;
+
+ dummy->trigger = NULL;
+ return dummy;
}
void lf_replace_token(event_t* event, lf_token_t* token) {
@@ -519,7 +484,7 @@ trigger_handle_t _lf_schedule_at_tag(environment_t* env, trigger_t* trigger, tag
// Increment the reference count of the token.
if (token != NULL) {
token->ref_count++;
- LF_PRINT_DEBUG("_lf_schedule_at_tag: Incremented ref_count of %p to %zu.", token, token->ref_count);
+ LF_PRINT_DEBUG("_lf_schedule_at_tag: Incremented ref_count of %p to %zu.", (void*)token, token->ref_count);
}
// Do not schedule events if the tag is after the stop tag
@@ -531,7 +496,7 @@ trigger_handle_t _lf_schedule_at_tag(environment_t* env, trigger_t* trigger, tag
event_t* e = lf_get_new_event(env);
// Set the event time
- e->time = tag.time;
+ e->base.tag = tag;
tracepoint_schedule(env, trigger, tag.time - current_logical_tag.time);
@@ -547,133 +512,37 @@ trigger_handle_t _lf_schedule_at_tag(environment_t* env, trigger_t* trigger, tag
e->intended_tag = trigger->intended_tag;
#endif
- event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, e);
+ event_t* found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)e);
if (found != NULL) {
- if (tag.microstep == 0u) {
- // The microstep is 0, which means that the event is being scheduled
- // at a future time and at the beginning of the skip list of events
- // at that time.
- // In case the event is a dummy event
- // convert it to a real event.
- found->is_dummy = false;
- switch (trigger->policy) {
- case drop:
- if (found->token != token) {
- _lf_done_using(token);
- }
- lf_recycle_event(env, e);
- return (0);
- break;
- case replace:
- // Replace the payload of the event at the head with our
- // current payload.
- lf_replace_token(found, token);
+ switch (trigger->policy) {
+ case drop:
+ if (found->token != token) {
+ _lf_done_using(token);
+ }
+ lf_recycle_event(env, e);
+ return (0);
+ break;
+ case replace:
+ // Replace the payload of the event at the head with our
+ // current payload.
+ lf_replace_token(found, token);
+ lf_recycle_event(env, e);
+ return 0;
+ break;
+ default:
+ // Adding a microstep to the original
+ // intended tag.
+ tag.microstep++;
+ e->base.tag = tag;
+ if (lf_is_tag_after_stop_tag(env, (tag_t){.time = tag.time, .microstep = tag.microstep})) {
+ // Scheduling e will incur a microstep after the stop tag,
+ // which is illegal.
lf_recycle_event(env, e);
return 0;
- break;
- default:
- // Adding a microstep to the original
- // intended tag.
- if (lf_is_tag_after_stop_tag(env, (tag_t){.time = found->time, .microstep = 1})) {
- // Scheduling e will incur a microstep after the stop tag,
- // which is illegal.
- lf_recycle_event(env, e);
- return 0;
- }
- if (found->next != NULL) {
- lf_print_error("_lf_schedule_at_tag: in-order contract violated.");
- return -1;
- }
- found->next = e;
- }
- } else {
- // We are requesting a microstep greater than 0
- // where there is already an event for this trigger on the event queue.
- // That event may itself be a dummy event for a real event that is
- // also at a microstep greater than 0.
- // We have to insert our event into the chain or append it
- // to the end of the chain, depending on which microstep is lesser.
- microstep_t microstep_of_found = 0;
- if (tag.time == current_logical_tag.time) {
- // This is a situation where the head of the queue
- // is an event with microstep == current_microstep + 1
- // which should be reflected in our steps calculation.
- microstep_of_found += current_logical_tag.microstep + 1; // Indicating that
- // the found event
- // is at this microstep.
}
- // Follow the chain of events until the right point
- // to insert the new event.
- while (microstep_of_found < tag.microstep - 1) {
- if (found->next == NULL) {
- // The chain stops short of where we want to be.
- // If it exactly one microstep short of where we want to be,
- // then we don't need a dummy. Otherwise, we do.
- microstep_t undershot_by = (tag.microstep - 1) - microstep_of_found;
- if (undershot_by > 0) {
- found->next = _lf_create_dummy_events(env, trigger, tag.time, e, undershot_by);
- } else {
- found->next = e;
- }
- return 1;
- }
- found = found->next;
- microstep_of_found++;
- }
- // At this point, microstep_of_found == tag.microstep - 1.
- if (found->next == NULL) {
- found->next = e;
- } else {
- switch (trigger->policy) {
- case drop:
- if (found->next->token != token) {
- _lf_done_using(token);
- }
- lf_recycle_event(env, e);
- return 0;
- break;
- case replace:
- // Replace the payload of the event at the head with our
- // current payload.
- lf_replace_token(found->next, token);
- lf_recycle_event(env, e);
- return 0;
- break;
- default:
- // Adding a microstep to the original
- // intended tag.
- if (lf_is_tag_after_stop_tag(env, (tag_t){.time = found->time, .microstep = microstep_of_found + 1})) {
- // Scheduling e will incur a microstep at timeout,
- // which is illegal.
- lf_recycle_event(env, e);
- return 0;
- }
- if (found->next->next != NULL) {
- lf_print_error("_lf_schedule_at_tag: in-order contract violated.");
- return -1;
- }
- found->next->next = e;
- }
- }
- }
- } else {
- // No existing event queued.
- microstep_t relative_microstep = tag.microstep;
- if (tag.time == current_logical_tag.time) {
- relative_microstep -= current_logical_tag.microstep;
- }
- if ((tag.time == current_logical_tag.time && relative_microstep == 1 && env->execution_started) ||
- tag.microstep == 0) {
- // Do not need a dummy event if we are scheduling at 1 microstep
- // in the future at current time or at microstep 0 in a future time.
- // Note that if execution hasn't started, then we have to insert dummy events.
- pqueue_insert(env->event_q, e);
- } else {
- // Create a dummy event. Insert it into the queue, and let its next
- // pointer point to the actual event.
- pqueue_insert(env->event_q, _lf_create_dummy_events(env, trigger, tag.time, e, relative_microstep));
}
}
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
trigger_handle_t return_value = env->_lf_handle++;
if (env->_lf_handle < 0) {
env->_lf_handle = 1;
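The replacement above collapses the old microstep chain walking into a single policy check followed by an ordinary tag-queue insert. A distilled, runnable sketch of that decision logic, with hypothetical names standing in for the runtime's trigger policy and stop-tag check:

#include <stdio.h>
#include <stdint.h>

typedef int64_t instant_t;
typedef uint32_t microstep_t;
typedef struct { instant_t time; microstep_t microstep; } tag_t;

/* Hypothetical stand-in for the trigger's spacing policy. */
typedef enum { POLICY_DROP, POLICY_REPLACE, POLICY_DEFER } policy_t;

/* Returns 1 if the new event should still be inserted (possibly bumped by one
 * microstep), or 0 if it was dropped, merged, or would land past the stop tag. */
static int resolve_same_tag_conflict(policy_t policy, tag_t* tag, tag_t stop_tag) {
  switch (policy) {
  case POLICY_DROP:
    return 0;          /* discard the new payload */
  case POLICY_REPLACE:
    return 0;          /* overwrite the existing event's payload instead */
  default:
    tag->microstep++;  /* defer the new event by one microstep */
    if (tag->time > stop_tag.time ||
        (tag->time == stop_tag.time && tag->microstep > stop_tag.microstep))
      return 0;        /* scheduling past the stop tag is illegal */
    return 1;
  }
}

int main(void) {
  tag_t tag = {100, 0}, stop = {200, 0};
  int insert = resolve_same_tag_conflict(POLICY_DEFER, &tag, stop);
  printf("insert=%d at microstep %u\n", insert, (unsigned)tag.microstep);
  return 0;
}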
@@ -757,36 +626,37 @@ trigger_handle_t _lf_insert_reactions_for_trigger(environment_t* env, trigger_t*
return 1;
}
-void _lf_advance_logical_time(environment_t* env, instant_t next_time) {
+void _lf_advance_tag(environment_t* env, tag_t next_tag) {
assert(env != GLOBAL_ENVIRONMENT);
-// FIXME: The following checks that _lf_advance_logical_time()
+// FIXME: The following checks that _lf_advance_tag()
// is being called correctly. Namely, check if logical time
// is being pushed past the head of the event queue. This should
-// never happen if _lf_advance_logical_time() is called correctly.
+// never happen if _lf_advance_tag() is called correctly.
// This is commented out because it will add considerable overhead
// to the ordinary execution of LF programs. Instead, there might
// be a need for a target property that enables these kinds of logic
// assertions for development purposes only.
#ifndef NDEBUG
- event_t* next_event = (event_t*)pqueue_peek(env->event_q);
+ event_t* next_event = (event_t*)pqueue_tag_peek(env->event_q);
if (next_event != NULL) {
- if (next_time > next_event->time) {
- lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move time to " PRINTF_TIME ", which is "
- "past the head of the event queue, " PRINTF_TIME ".",
- next_time - start_time, next_event->time - start_time);
+ if (lf_tag_compare(next_tag, next_event->base.tag) > 0) {
+ lf_print_error_and_exit("_lf_advance_tag(): Attempted to move tag to " PRINTF_TAG ", which is "
+ "past the head of the event queue, " PRINTF_TAG ".",
+ next_tag.time - start_time, next_tag.microstep, next_event->base.tag.time - start_time,
+ next_event->base.tag.microstep);
}
}
#endif
- if (env->current_tag.time < next_time) {
- env->current_tag.time = next_time;
- env->current_tag.microstep = 0;
- } else if (env->current_tag.time == next_time) {
- env->current_tag.microstep++;
+ if (lf_tag_compare(env->current_tag, next_tag) < 0) {
+ env->current_tag = next_tag;
} else {
- lf_print_error_and_exit("_lf_advance_logical_time(): Attempted to move tag back in time.");
+ lf_print_error_and_exit("_lf_advance_tag(): Attempted to move (elapsed) tag to " PRINTF_TAG ", which is "
+ "earlier than or equal to the (elapsed) current tag, " PRINTF_TAG ".",
+ next_tag.time - start_time, next_tag.microstep, env->current_tag.time - start_time,
+ env->current_tag.microstep);
}
- LF_PRINT_LOG("Advanced (elapsed) tag to " PRINTF_TAG " at physical time " PRINTF_TIME, next_time - start_time,
+ LF_PRINT_LOG("Advanced (elapsed) tag to " PRINTF_TAG " at physical time " PRINTF_TIME, next_tag.time - start_time,
env->current_tag.microstep, lf_time_physical_elapsed());
}
@@ -854,7 +724,7 @@ void schedule_output_reactions(environment_t* env, reaction_t* reaction, int wor
for (int j = 0; j < reaction->triggered_sizes[i]; j++) {
trigger_t* trigger = triggerArray[j];
if (trigger != NULL) {
- LF_PRINT_DEBUG("Trigger %p lists %d reactions.", trigger, trigger->number_of_reactions);
+ LF_PRINT_DEBUG("Trigger %p lists %d reactions.", (void*)trigger, trigger->number_of_reactions);
for (int k = 0; k < trigger->number_of_reactions; k++) {
reaction_t* downstream_reaction = trigger->reactions[k];
#ifdef FEDERATED_DECENTRALIZED // Only pass down tardiness for federated LF programs
@@ -1202,11 +1072,11 @@ void initialize_global(void) {
_lf_count_token_allocations = 0;
#endif
- environment_t* envs;
- int num_envs = _lf_get_environments(&envs);
#if defined(LF_SINGLE_THREADED)
int max_threads_tracing = 1;
#else
+ environment_t* envs;
+ int num_envs = _lf_get_environments(&envs);
int max_threads_tracing = envs[0].num_workers * num_envs + 1; // add 1 for the main thread
#endif
#if defined(FEDERATED)
@@ -1269,12 +1139,12 @@ void termination(void) {
_lf_terminate_modal_reactors(&env[i]);
#endif
// If the event queue still has events on it, report that.
- if (env[i].event_q != NULL && pqueue_size(env[i].event_q) > 0) {
+ if (env[i].event_q != NULL && pqueue_tag_size(env[i].event_q) > 0) {
lf_print_warning("---- There are %zu unprocessed future events on the event queue.",
- pqueue_size(env[i].event_q));
- event_t* event = (event_t*)pqueue_peek(env[i].event_q);
- interval_t event_time = event->time - start_time;
- lf_print_warning("---- The first future event has timestamp " PRINTF_TIME " after start time.", event_time);
+ pqueue_tag_size(env[i].event_q));
+ event_t* event = (event_t*)pqueue_tag_peek(env[i].event_q);
+ lf_print_warning("---- The first future event has timestamp " PRINTF_TAG " after start tag.",
+ event->base.tag.time - start_time, event->base.tag.microstep);
}
// Print elapsed times.
// If these are negative, then the program failed to start up.
@@ -1329,7 +1199,7 @@ void termination(void) {
}
index_t lf_combine_deadline_and_level(interval_t deadline, int level) {
- if (deadline > ULLONG_MAX >> 16)
+ if (deadline > (interval_t)(ULLONG_MAX >> 16))
return ((ULLONG_MAX >> 16) << 16) | level;
else
return (deadline << 16) | level;
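For reference, the cast added above keeps both sides of the comparison signed. A small stand-alone illustration of the packing this function performs (48 bits of deadline above 16 bits of level), using local type aliases rather than the runtime's headers:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

typedef int64_t interval_t;
typedef uint64_t index_t;

/* Pack a deadline into the upper 48 bits and a level into the lower 16 bits.
 * Deadlines too large to fit are clamped; the cast keeps the comparison
 * between two signed values, avoiding a signed/unsigned warning. */
static index_t combine_deadline_and_level(interval_t deadline, int level) {
  if (deadline > (interval_t)(ULLONG_MAX >> 16))
    return ((ULLONG_MAX >> 16) << 16) | (index_t)level;
  return ((index_t)deadline << 16) | (index_t)level;
}

int main(void) {
  printf("0x%016llx\n", (unsigned long long)combine_deadline_and_level(1000, 3));
  return 0;
}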
diff --git a/core/tag.c b/core/tag.c
index b45b67acc..9bb35933f 100644
--- a/core/tag.c
+++ b/core/tag.c
@@ -106,7 +106,7 @@ instant_t lf_time_logical(void* env) {
interval_t lf_time_logical_elapsed(void* env) { return lf_time_logical(env) - start_time; }
instant_t lf_time_physical(void) {
- instant_t now, last_read_local;
+ instant_t now;
// Get the current clock value
LF_ASSERTN(lf_clock_gettime(&now), "Failed to read physical clock.");
return now;
@@ -194,7 +194,7 @@ size_t lf_readable_time(char* buffer, instant_t time) {
}
size_t printed = lf_comma_separated_time(buffer, time);
buffer += printed;
- snprintf(buffer, 3, " %s", units);
+ snprintf(buffer, 4, " %s", units);
buffer += strlen(units) + 1;
}
return (buffer - original_buffer);
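The off-by-one fixed above is easy to reproduce in isolation: snprintf's size argument includes the terminating NUL, so writing a space plus a two-character unit needs four bytes. A tiny sketch (the unit string is just an example, not necessarily what lf_readable_time emits):

#include <stdio.h>
#include <string.h>

int main(void) {
  char buffer[16];
  const char* units = "ms";   /* example unit string, for illustration only */
  /* The size argument of snprintf counts the terminating NUL, so " %s" with a
   * two-character unit needs 4 bytes: ' ', 'm', 's', '\0'. A size of 3 would
   * silently drop the final character. */
  snprintf(buffer, 4, " %s", units);
  printf("[%s] length %zu\n", buffer, strlen(buffer));
  return 0;
}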
diff --git a/core/threaded/reactor_threaded.c b/core/threaded/reactor_threaded.c
index 3419c3483..57f888fc2 100644
--- a/core/threaded/reactor_threaded.c
+++ b/core/threaded/reactor_threaded.c
@@ -184,7 +184,7 @@ void lf_set_present(lf_port_base_t* port) {
// Support for sparse destination multiports.
if (port->sparse_record && port->destination_channel >= 0 && port->sparse_record->size >= 0) {
- int next = lf_atomic_fetch_add32(&port->sparse_record->size, 1);
+ size_t next = (size_t)lf_atomic_fetch_add32(&port->sparse_record->size, 1);
if (next >= port->sparse_record->capacity) {
// Buffer is full. Have to revert to the classic iteration.
port->sparse_record->size = -1;
@@ -220,7 +220,7 @@ void lf_set_present(lf_port_base_t* port) {
* the stop time, if one was specified. Return true if the full wait time
* was reached.
*/
-bool wait_until(environment_t* env, instant_t logical_time, lf_cond_t* condition) {
+bool wait_until(instant_t logical_time, lf_cond_t* condition) {
LF_PRINT_DEBUG("-------- Waiting until physical time matches logical time " PRINTF_TIME, logical_time);
interval_t wait_until_time = logical_time;
#ifdef FEDERATED_DECENTRALIZED // Only apply the STA if coordination is decentralized
@@ -276,24 +276,18 @@ tag_t get_next_event_tag(environment_t* env) {
assert(env != GLOBAL_ENVIRONMENT);
// Peek at the earliest event in the event queue.
- event_t* event = (event_t*)pqueue_peek(env->event_q);
+ event_t* event = (event_t*)pqueue_tag_peek(env->event_q);
tag_t next_tag = FOREVER_TAG;
if (event != NULL) {
// There is an event in the event queue.
- if (event->time < env->current_tag.time) {
- lf_print_error_and_exit("get_next_event_tag(): Earliest event on the event queue (" PRINTF_TIME ") is "
- "earlier than the current time (" PRINTF_TIME ").",
- event->time - start_time, env->current_tag.time - start_time);
+ if (lf_tag_compare(event->base.tag, env->current_tag) < 0) {
+ lf_print_error_and_exit("get_next_event_tag(): Earliest event on the event queue (" PRINTF_TAG ") is "
+ "earlier than the current tag (" PRINTF_TAG ").",
+ event->base.tag.time - start_time, event->base.tag.microstep,
+ env->current_tag.time - start_time, env->current_tag.microstep);
}
- next_tag.time = event->time;
- if (next_tag.time == env->current_tag.time) {
- LF_PRINT_DEBUG("Earliest event matches current time. Incrementing microstep. Event is dummy: %d.",
- event->is_dummy);
- next_tag.microstep = env->current_tag.microstep + 1;
- } else {
- next_tag.microstep = 0;
- }
+ next_tag = event->base.tag;
}
// If a timeout tag was given, adjust the next_tag from the
@@ -302,7 +296,7 @@ tag_t get_next_event_tag(environment_t* env) {
next_tag = env->stop_tag;
}
LF_PRINT_LOG("Earliest event on the event queue (or stop time if empty) is " PRINTF_TAG ". Event queue has size %zu.",
- next_tag.time - start_time, next_tag.microstep, pqueue_size(env->event_q));
+ next_tag.time - start_time, next_tag.microstep, pqueue_tag_size(env->event_q));
return next_tag;
}
@@ -326,6 +320,8 @@ tag_t send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply) {
#elif defined(LF_ENCLAVES)
return rti_next_event_tag_locked(env->enclave_info, tag);
#else
+ (void)env;
+ (void)wait_for_reply;
return tag;
#endif
}
@@ -408,7 +404,7 @@ void _lf_next_locked(environment_t* env) {
// behavior with centralized coordination as with unfederated execution.
#else // not FEDERATED_CENTRALIZED nor LF_ENCLAVES
- if (pqueue_peek(env->event_q) == NULL && !keepalive_specified) {
+ if (pqueue_tag_peek(env->event_q) == NULL && !keepalive_specified) {
// There is no event on the event queue and keepalive is false.
// No event in the queue
// keepalive is not set so we should stop.
@@ -425,7 +421,7 @@ void _lf_next_locked(environment_t* env) {
// This can be interrupted if a physical action triggers (e.g., a message
// arrives from an upstream federate or a local physical action triggers).
LF_PRINT_LOG("Waiting until elapsed time " PRINTF_TIME ".", (next_tag.time - start_time));
- while (!wait_until(env, next_tag.time, &env->event_q_changed)) {
+ while (!wait_until(next_tag.time, &env->event_q_changed)) {
LF_PRINT_DEBUG("_lf_next_locked(): Wait until time interrupted.");
// Sleep was interrupted. Check for a new next_event.
// The interruption could also have been due to a call to lf_request_stop().
@@ -476,7 +472,7 @@ void _lf_next_locked(environment_t* env) {
}
// At this point, finally, we have an event to process.
- _lf_advance_logical_time(env, next_tag.time);
+ _lf_advance_tag(env, next_tag);
_lf_start_time_step(env);
@@ -486,7 +482,7 @@ void _lf_next_locked(environment_t* env) {
_lf_trigger_shutdown_reactions(env);
}
- // Pop all events from event_q with timestamp equal to env->current_tag.time,
+ // Pop all events from event_q with tag equal to env->current_tag,
// extract all the reactions triggered by these events, and
// stick them into the reaction queue.
_lf_pop_events(env);
@@ -647,7 +643,7 @@ void _lf_initialize_start_tag(environment_t* env) {
// Here we wait until the start time and also release the environment mutex.
// this means that the other worker threads will be allowed to start. We need
// this to avoid potential deadlock in federated startup.
- while (!wait_until(env, start_time + lf_fed_STA_offset, &env->event_q_changed)) {
+ while (!wait_until(start_time + lf_fed_STA_offset, &env->event_q_changed)) {
};
LF_PRINT_DEBUG("Done waiting for start time + STA offset " PRINTF_TIME ".", start_time + lf_fed_STA_offset);
LF_PRINT_DEBUG("Physical time is ahead of current time by " PRINTF_TIME ". This should be close to the STA offset.",
@@ -857,6 +853,8 @@ void _lf_worker_invoke_reaction(environment_t* env, int worker_number, reaction_
void try_advance_level(environment_t* env, volatile size_t* next_reaction_level) {
#ifdef FEDERATED
lf_stall_advance_level_federation(env, *next_reaction_level);
+#else
+ (void)env;
#endif
if (*next_reaction_level < SIZE_MAX)
*next_reaction_level += 1;
@@ -971,13 +969,15 @@ void lf_print_snapshot(environment_t* env) {
LF_PRINT_DEBUG("Pending:");
// pqueue_dump(reaction_q, print_reaction); FIXME: reaction_q is not
// accessible here
- LF_PRINT_DEBUG("Event queue size: %zu. Contents:", pqueue_size(env->event_q));
- pqueue_dump(env->event_q, print_reaction);
+ LF_PRINT_DEBUG("Event queue size: %zu. Contents:", pqueue_tag_size(env->event_q));
+ // Dump the contents of the event queue.
+ pqueue_tag_dump(env->event_q);
LF_PRINT_DEBUG(">>> END Snapshot");
}
}
#else // NDEBUG
void lf_print_snapshot(environment_t* env) {
+ (void)env;
// Do nothing.
}
#endif // NDEBUG
@@ -987,7 +987,7 @@ void start_threads(environment_t* env) {
assert(env != GLOBAL_ENVIRONMENT);
LF_PRINT_LOG("Starting %u worker threads in environment", env->num_workers);
- for (unsigned int i = 0; i < env->num_workers; i++) {
+ for (int i = 0; i < env->num_workers; i++) {
if (lf_thread_create(&env->thread_ids[i], worker, env) != 0) {
lf_print_error_and_exit("Could not start thread-%u", i);
}
diff --git a/core/threaded/scheduler_GEDF_NP.c b/core/threaded/scheduler_GEDF_NP.c
index 0f3d971d5..d590adecb 100644
--- a/core/threaded/scheduler_GEDF_NP.c
+++ b/core/threaded/scheduler_GEDF_NP.c
@@ -155,7 +155,8 @@ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) {
void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) {
// Increment the number of idle workers by 1 and check if this is the last
// worker thread to become idle.
- if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == scheduler->number_of_workers) {
+ if (((size_t)lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1)) ==
+ scheduler->number_of_workers) {
// Last thread to go idle
LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number);
// Call on the scheduler to distribute work or advance tag.
@@ -279,6 +280,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
* @param done_reaction The reaction that is done.
*/
void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
+ (void)worker_number;
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
}
@@ -301,6 +303,7 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction
* worker number does not make sense (e.g., the caller is not a worker thread).
*/
void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
+ (void)worker_number;
if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
return;
}
diff --git a/core/threaded/scheduler_NP.c b/core/threaded/scheduler_NP.c
index 630464dd6..01b510477 100644
--- a/core/threaded/scheduler_NP.c
+++ b/core/threaded/scheduler_NP.c
@@ -92,7 +92,7 @@ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) {
(void*)((reaction_t***)scheduler->triggered_reactions)[scheduler->next_reaction_level - 1];
LF_PRINT_DEBUG("Start of rxn queue at %zu is %p", scheduler->next_reaction_level - 1,
- ((reaction_t**)scheduler->executing_reactions)[0]);
+ (void*)((reaction_t**)scheduler->executing_reactions)[0]);
if (((reaction_t**)scheduler->executing_reactions)[0] != NULL) {
// There is at least one reaction to execute
return 1;
@@ -109,14 +109,11 @@ int _lf_sched_distribute_ready_reactions(lf_scheduler_t* scheduler) {
*/
void _lf_sched_notify_workers(lf_scheduler_t* scheduler) {
// Calculate the number of workers that we need to wake up, which is the
- // Note: All threads are idle. Therefore, there is no need to lock the mutex
- // while accessing the index for the current level.
- size_t workers_to_awaken = LF_MIN(scheduler->number_of_idle_workers,
- scheduler->indexes[scheduler->next_reaction_level - 1 // Current
- // reaction
- // level
- // to execute.
- ]);
+ // smaller of the number of idle workers and the number of reactions enabled at this level.
+ // Note: All threads are idle. Therefore, there is no need to lock the mutex while accessing the index for the
+ // current level.
+ size_t workers_to_awaken =
+ LF_MIN(scheduler->number_of_idle_workers, (size_t)(scheduler->indexes[scheduler->next_reaction_level - 1]));
LF_PRINT_DEBUG("Scheduler: Notifying %zu workers.", workers_to_awaken);
scheduler->number_of_idle_workers -= workers_to_awaken;
@@ -188,7 +185,7 @@ void _lf_scheduler_try_advance_tag_and_distribute(lf_scheduler_t* scheduler) {
void _lf_sched_wait_for_work(lf_scheduler_t* scheduler, size_t worker_number) {
// Increment the number of idle workers by 1 and check if this is the last
// worker thread to become idle.
- if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == scheduler->number_of_workers) {
+ if (lf_atomic_add_fetch32((int32_t*)&scheduler->number_of_idle_workers, 1) == (int)scheduler->number_of_workers) {
// Last thread to go idle
LF_PRINT_DEBUG("Scheduler: Worker %zu is the last idle thread.", worker_number);
// Call on the scheduler to distribute work or advance tag.
@@ -338,6 +335,7 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
* @param done_reaction The reaction that is done.
*/
void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
+ (void)worker_number;
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&done_reaction->status, queued, inactive)) {
lf_print_error_and_exit("Unexpected reaction status: %d. Expected %d.", done_reaction->status, queued);
}
@@ -363,6 +361,8 @@ void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction
*
*/
void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
+ (void)worker_number;
+
if (reaction == NULL || !lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued)) {
return;
}
diff --git a/core/threaded/scheduler_adaptive.c b/core/threaded/scheduler_adaptive.c
index 99f50c043..4b0843028 100644
--- a/core/threaded/scheduler_adaptive.c
+++ b/core/threaded/scheduler_adaptive.c
@@ -293,13 +293,13 @@ static void worker_states_init(lf_scheduler_t* scheduler, size_t number_of_worke
worker_states->worker_conds = (lf_cond_t*)malloc(sizeof(lf_cond_t) * num_conds);
worker_states->cumsum_of_worker_group_sizes = (size_t*)calloc(num_conds, sizeof(size_t));
worker_states->mutex_held = (bool*)calloc(number_of_workers, sizeof(bool));
- for (int i = 0; i < number_of_workers; i++) {
+ for (size_t i = 0; i < number_of_workers; i++) {
worker_states->cumsum_of_worker_group_sizes[cond_of(i)]++;
}
- for (int i = 1; i < num_conds; i++) {
+ for (size_t i = 1; i < num_conds; i++) {
worker_states->cumsum_of_worker_group_sizes[i] += worker_states->cumsum_of_worker_group_sizes[i - 1];
}
- for (int i = 0; i < num_conds; i++) {
+ for (size_t i = 0; i < num_conds; i++) {
LF_COND_INIT(worker_states->worker_conds + i, &scheduler->env->mutex);
}
worker_states->num_loose_threads = scheduler->number_of_workers;
@@ -322,7 +322,7 @@ static void worker_states_free(lf_scheduler_t* scheduler) {
static void worker_states_awaken_locked(lf_scheduler_t* scheduler, size_t worker, size_t num_to_awaken) {
worker_states_t* worker_states = scheduler->custom_data->worker_states;
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
- assert(num_to_awaken <= worker_assignments->max_num_workers);
+ LF_ASSERT(num_to_awaken <= worker_assignments->max_num_workers, "Sched requested to wake too many workers");
if ((worker == 0) && (num_to_awaken <= 1)) {
worker_states->num_loose_threads = 1;
return;
@@ -339,7 +339,7 @@ static void worker_states_awaken_locked(lf_scheduler_t* scheduler, size_t worker
worker_states->num_loose_threads += worker >= worker_states->num_loose_threads;
worker_states->num_awakened = worker_states->num_loose_threads;
scheduler->custom_data->level_counter++;
- for (int cond = 0; cond <= max_cond; cond++) {
+ for (size_t cond = 0; cond <= max_cond; cond++) {
lf_cond_broadcast(worker_states->worker_conds + cond);
}
}
@@ -348,8 +348,8 @@ static void worker_states_awaken_locked(lf_scheduler_t* scheduler, size_t worker
static void worker_states_lock(lf_scheduler_t* scheduler, size_t worker) {
worker_states_t* worker_states = scheduler->custom_data->worker_states;
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
- assert(worker_states->num_loose_threads > 0);
- assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers);
+ LF_ASSERT(worker_states->num_loose_threads > 0, "Sched: No loose threads");
+ LF_ASSERT(worker_states->num_loose_threads <= worker_assignments->max_num_workers, "Sched: Too many loose threads");
size_t lt = worker_states->num_loose_threads;
if (lt > 1 || !fast) { // FIXME: Lock should be partially optimized out even when !fast
LF_MUTEX_LOCK(&scheduler->env->mutex);
@@ -377,9 +377,7 @@ static void worker_states_unlock(lf_scheduler_t* scheduler, size_t worker) {
static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler, size_t worker) {
worker_states_t* worker_states = scheduler->custom_data->worker_states;
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
- assert(worker >= 0);
- assert(worker_states->num_loose_threads > 0);
- assert(worker_assignments->num_reactions_by_worker[worker] != 1);
+ LF_ASSERT(worker_assignments->num_reactions_by_worker[worker] != 1, "Sched: Current worker not assigned");
assert(((int64_t)worker_assignments->num_reactions_by_worker[worker]) <= 0);
// Why use an atomic operation when we are supposed to be "as good as locked"? Because I took a
// shortcut, and the shortcut was imperfect.
@@ -401,8 +399,8 @@ static bool worker_states_finished_with_level_locked(lf_scheduler_t* scheduler,
static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t worker, size_t level_counter_snapshot) {
worker_states_t* worker_states = scheduler->custom_data->worker_states;
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
- assert(worker < worker_assignments->max_num_workers);
- assert(worker_states->num_loose_threads <= worker_assignments->max_num_workers);
+ LF_ASSERT(worker < worker_assignments->max_num_workers, "Sched: Invalid worker");
+ LF_ASSERT(worker_states->num_loose_threads <= worker_assignments->max_num_workers, "Sched: Too many loose threads");
if (!worker_states->mutex_held[worker]) {
LF_MUTEX_LOCK(&scheduler->env->mutex);
}
@@ -413,7 +411,8 @@ static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t wor
lf_cond_wait(worker_states->worker_conds + cond);
} while (level_counter_snapshot == scheduler->custom_data->level_counter || worker >= worker_states->num_awakened);
}
- assert(!worker_states->mutex_held[worker]); // This thread holds the mutex, but it did not report that.
+ LF_ASSERT(!worker_states->mutex_held[worker],
+ "Sched: Worker doesnt hold the mutex"); // This thread holds the mutex, but it did not report that.
LF_MUTEX_UNLOCK(&scheduler->env->mutex);
}
@@ -423,7 +422,6 @@ static void worker_states_sleep_and_unlock(lf_scheduler_t* scheduler, size_t wor
*/
static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) {
worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
- worker_states_t* worker_states = scheduler->custom_data->worker_states;
size_t max_level = worker_assignments->num_levels - 1;
while (true) {
if (worker_assignments->current_level == max_level) {
@@ -443,7 +441,7 @@ static void advance_level_and_unlock(lf_scheduler_t* scheduler, size_t worker) {
size_t total_num_reactions = get_num_reactions(scheduler);
if (total_num_reactions) {
size_t num_workers_to_awaken = LF_MIN(total_num_reactions, worker_assignments->num_workers);
- assert(num_workers_to_awaken > 0);
+ LF_ASSERT(num_workers_to_awaken > 0, "");
worker_states_awaken_locked(scheduler, worker, num_workers_to_awaken);
worker_states_unlock(scheduler, worker);
return;
@@ -476,7 +474,7 @@ static void possible_nums_workers_init(lf_scheduler_t* scheduler) {
data_collection->possible_nums_workers = (size_t*)malloc(pnw_length * sizeof(size_t));
temp = 1;
data_collection->possible_nums_workers[0] = 0;
- for (int i = 1; i < pnw_length; i++) {
+ for (size_t i = 1; i < pnw_length; i++) {
data_collection->possible_nums_workers[i] = temp;
temp *= 2;
}
@@ -492,7 +490,7 @@ static void possible_nums_workers_init(lf_scheduler_t* scheduler) {
* would like to optimize.
*/
static int get_jitter(size_t current_state, interval_t execution_time) {
- static const size_t parallelism_cost_max = 114688;
+ static const interval_t parallelism_cost_max = 114688;
// The following handles the case where the current level really is just fluff:
// No parallelism needed, no work to be done.
if (execution_time < 16384 && current_state == 1)
@@ -595,7 +593,6 @@ static size_t restrict_to_range(size_t start_inclusive, size_t end_inclusive, si
*/
static void compute_number_of_workers(lf_scheduler_t* scheduler, size_t* num_workers_by_level,
size_t* max_num_workers_by_level, bool jitter) {
- worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
data_collection_t* data_collection = scheduler->custom_data->data_collection;
for (size_t level = 0; level < data_collection->num_levels; level++) {
interval_t this_execution_time =
@@ -603,7 +600,6 @@ static void compute_number_of_workers(lf_scheduler_t* scheduler, size_t* num_wor
size_t ideal_number_of_workers;
size_t max_reasonable_num_workers = max_num_workers_by_level[level];
ideal_number_of_workers = data_collection->execution_times_argmins[level];
- int range = 1;
if (jitter) {
ideal_number_of_workers =
get_nums_workers_neighboring_state(scheduler, ideal_number_of_workers, this_execution_time);
@@ -621,7 +617,6 @@ static void compute_number_of_workers(lf_scheduler_t* scheduler, size_t* num_wor
*/
static void compute_costs(lf_scheduler_t* scheduler, size_t* num_workers_by_level) {
data_collection_t* data_collection = scheduler->custom_data->data_collection;
- worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
for (size_t level = 0; level < data_collection->num_levels; level++) {
interval_t score = data_collection->execution_times_by_num_workers_by_level[level][num_workers_by_level[level]];
if (!data_collection->execution_times_mins[level] | (score < data_collection->execution_times_mins[level]) |
@@ -640,7 +635,6 @@ static void compute_costs(lf_scheduler_t* scheduler, size_t* num_workers_by_leve
*/
static void data_collection_end_tag(lf_scheduler_t* scheduler, size_t* num_workers_by_level,
size_t* max_num_workers_by_level) {
- worker_assignments_t* worker_assignments = scheduler->custom_data->worker_assignments;
data_collection_t* data_collection = scheduler->custom_data->data_collection;
if (data_collection->collecting_data && data_collection->start_times_by_level[0]) {
compute_costs(scheduler, num_workers_by_level);
@@ -716,13 +710,13 @@ reaction_t* lf_sched_get_ready_reaction(lf_scheduler_t* scheduler, int worker_nu
}
void lf_sched_done_with_reaction(size_t worker_number, reaction_t* done_reaction) {
- assert(worker_number >= 0);
- assert(done_reaction->status != inactive);
+ (void)worker_number;
+ LF_ASSERT(done_reaction->status != inactive, "");
done_reaction->status = inactive;
}
void lf_scheduler_trigger_reaction(lf_scheduler_t* scheduler, reaction_t* reaction, int worker_number) {
- assert(worker_number >= -1);
+ LF_ASSERT(worker_number >= -1, "Sched: Invalid worker number");
if (!lf_atomic_bool_compare_and_swap32((int32_t*)&reaction->status, inactive, queued))
return;
worker_assignments_put(scheduler, reaction);
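The assert-to-LF_ASSERT conversions in this file attach a human-readable message to each check so failures are self-explanatory. A generic, stand-alone sketch of that pattern; this macro is illustrative and is not reactor-c's actual LF_ASSERT definition:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative assert-like macro that carries a message. */
#define CHECKED_ASSERT(cond, msg)                                               \
  do {                                                                          \
    if (!(cond)) {                                                              \
      fprintf(stderr, "FATAL ERROR: %s (%s:%d)\n", (msg), __FILE__, __LINE__);  \
      exit(EXIT_FAILURE);                                                       \
    }                                                                           \
  } while (0)

int main(void) {
  int num_workers_to_awaken = 4;
  CHECKED_ASSERT(num_workers_to_awaken > 0, "Sched: no workers to awaken");
  printf("assertion passed\n");
  return 0;
}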
diff --git a/core/threaded/watchdog.c b/core/threaded/watchdog.c
index 4f26f26e7..3b9a6d62c 100644
--- a/core/threaded/watchdog.c
+++ b/core/threaded/watchdog.c
@@ -43,13 +43,12 @@ void _lf_initialize_watchdogs(environment_t* env) {
* @brief Terminate all watchdog threads.
*/
void _lf_watchdog_terminate_all(environment_t* env) {
- void* thread_return;
+ void* thread_ret;
for (int i = 0; i < env->watchdogs_size; i++) {
watchdog_t* watchdog = env->watchdogs[i];
LF_MUTEX_LOCK(watchdog->base->reactor_mutex);
_lf_watchdog_terminate(watchdog);
LF_MUTEX_UNLOCK(watchdog->base->reactor_mutex);
- void* thread_ret;
lf_thread_join(watchdog->thread_id, &thread_ret);
}
}
diff --git a/core/tracepoint.c b/core/tracepoint.c
index 24b2f2434..b121be8da 100644
--- a/core/tracepoint.c
+++ b/core/tracepoint.c
@@ -61,7 +61,7 @@ int register_user_trace_event(void* self, char* description) {
}
void call_tracepoint(int event_type, void* reactor, tag_t tag, int worker, int src_id, int dst_id,
- instant_t* physical_time, trigger_t* trigger, interval_t extra_delay, bool is_interval_start) {
+ instant_t* physical_time, trigger_t* trigger, interval_t extra_delay) {
instant_t local_time;
if (physical_time == NULL) {
local_time = lf_time_physical();
@@ -97,7 +97,7 @@ void tracepoint_schedule(environment_t* env, trigger_t* trigger, interval_t extr
// This is OK because it is called only while holding the mutex lock.
// True argument specifies to record physical time as late as possible, when
// the event is already on the event queue.
- call_tracepoint(schedule_called, reactor, env->current_tag, -1, 0, 0, NULL, trigger, extra_delay, true);
+ call_tracepoint(schedule_called, reactor, env->current_tag, -1, 0, 0, NULL, trigger, extra_delay);
}
/**
@@ -119,7 +119,7 @@ void tracepoint_user_event(void* self, char* description) {
// There will be a performance hit for this.
LF_ASSERT(self, "A pointer to the self struct is needed to trace an event");
environment_t* env = ((self_base_t*)self)->environment;
- call_tracepoint(user_event, description, env->current_tag, -1, -1, -1, NULL, NULL, 0, false);
+ call_tracepoint(user_event, description, env->current_tag, -1, -1, -1, NULL, NULL, 0);
}
/**
@@ -144,7 +144,7 @@ void tracepoint_user_value(void* self, char* description, long long value) {
// because multiple reactions might be calling the same tracepoint function.
// There will be a performance hit for this.
environment_t* env = ((self_base_t*)self)->environment;
- call_tracepoint(user_value, description, env->current_tag, -1, -1, -1, NULL, NULL, value, false);
+ call_tracepoint(user_value, description, env->current_tag, -1, -1, -1, NULL, NULL, value);
}
////////////////////////////////////////////////////////////
@@ -168,8 +168,7 @@ void tracepoint_federate_to_rti(trace_event_t event_type, int fed_id, tag_t* tag
-1, // int dst_id,
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- true // is_interval_start
+ 0 // interval_t extra_delay
);
}
@@ -190,8 +189,7 @@ void tracepoint_federate_from_rti(trace_event_t event_type, int fed_id, tag_t* t
-1, // int dst_id,
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- false // is_interval_start
+ 0 // interval_t extra_delay
);
}
@@ -212,8 +210,7 @@ void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int p
partner_id, // int dst_id,
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- true // is_interval_start
+ 0 // interval_t extra_delay
);
}
@@ -234,8 +231,7 @@ void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int
partner_id, // int dst_id,
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- false // is_interval_start
+ 0 // interval_t extra_delay
);
}
#endif // FEDERATED
@@ -261,8 +257,7 @@ void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag
fed_id, // int dst_id
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- true // is_interval_start
+ 0 // interval_t extra_delay
);
}
@@ -282,8 +277,7 @@ void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* t
fed_id, // int dst_id
NULL, // instant_t* physical_time (will be generated)
NULL, // trigger_t* trigger,
- 0, // interval_t extra_delay
- false // is_interval_start
+ 0 // interval_t extra_delay
);
}
diff --git a/core/utils/CMakeLists.txt b/core/utils/CMakeLists.txt
index 7ab0db8d4..723b942c8 100644
--- a/core/utils/CMakeLists.txt
+++ b/core/utils/CMakeLists.txt
@@ -1,5 +1,8 @@
-set(UTIL_SOURCES vector.c pqueue_base.c pqueue_tag.c pqueue.c util.c lf_semaphore.c)
+set(UTIL_SOURCES vector.c pqueue_base.c pqueue_tag.c pqueue.c util.c)
+if(NOT DEFINED LF_SINGLE_THREADED)
+ list(APPEND UTIL_SOURCES lf_semaphore.c)
+endif()
list(TRANSFORM UTIL_SOURCES PREPEND utils/)
list(APPEND REACTORC_SOURCES ${UTIL_SOURCES})
diff --git a/core/utils/pqueue.c b/core/utils/pqueue.c
index e73e3ed48..b2bf05090 100644
--- a/core/utils/pqueue.c
+++ b/core/utils/pqueue.c
@@ -2,10 +2,14 @@
* @file pqueue.c
* @author Marten Lohstroh
* @author Edward A. Lee
+ * @author Byeonggil Jun
* @copyright (c) 2020-2023, The University of California at Berkeley.
* License: BSD 2-clause
*
- * @brief Priority queue definitions for the event queue and reaction queue.
+ * @brief Priority queue definitions for queues where the priority is a number that can be compared with ordinary
+ * numerical comparisons.
+ *
+ * This is used for the reaction queue. The event queue uses a `tag_t` struct for its priority, so it cannot use this.
*/
#include "low_level_platform.h"
@@ -13,32 +17,23 @@
#include "util.h"
#include "lf_types.h"
-int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { return (thiz > that); }
-
-int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { return 0; }
+int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { return (thiz > that) ? 1 : (thiz < that) ? -1 : 0; }
-int event_matches(void* event1, void* event2) { return (((event_t*)event1)->trigger == ((event_t*)event2)->trigger); }
+int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) {
+ (void)thiz;
+ (void)that;
+ return 0;
+}
int reaction_matches(void* a, void* b) { return (a == b); }
-pqueue_pri_t get_event_time(void* event) { return (pqueue_pri_t)(((event_t*)event)->time); }
-
pqueue_pri_t get_reaction_index(void* reaction) { return ((reaction_t*)reaction)->index; }
-size_t get_event_position(void* event) { return ((event_t*)event)->pos; }
-
size_t get_reaction_position(void* reaction) { return ((reaction_t*)reaction)->pos; }
-void set_event_position(void* event, size_t pos) { ((event_t*)event)->pos = pos; }
-
void set_reaction_position(void* reaction, size_t pos) { ((reaction_t*)reaction)->pos = pos; }
void print_reaction(void* reaction) {
reaction_t* r = (reaction_t*)reaction;
- LF_PRINT_DEBUG("%s: chain_id: %llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, r);
-}
-
-void print_event(void* event) {
- event_t* e = (event_t*)event;
- LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", e->time, e->trigger, e->token);
+ LF_PRINT_DEBUG("%s: chain_id: %llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, reaction);
}
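Because the comparator above now reports three outcomes instead of a boolean, the same callback can drive both heap ordering (checking for 1, i.e. strictly greater) and equality searches (checking for 0). A minimal stand-alone demonstration with a local typedef, not linked against the runtime:

#include <stdio.h>

typedef unsigned long long pqueue_pri_t;

/* Three-way comparison: 1 if thiz sorts after that, -1 if before, 0 if equal. */
static int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) {
  return (thiz > that) ? 1 : (thiz < that) ? -1 : 0;
}

int main(void) {
  pqueue_pri_t a = 7, b = 7, c = 3;
  if (in_reverse_order(a, c) == 1)
    printf("a sorts after c\n");       /* ordering check used by the heap */
  if (in_reverse_order(a, b) == 0)
    printf("a and b are equal\n");     /* equality check used when searching */
  return 0;
}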
diff --git a/core/utils/pqueue_base.c b/core/utils/pqueue_base.c
index 30d84286e..5d602e1de 100644
--- a/core/utils/pqueue_base.c
+++ b/core/utils/pqueue_base.c
@@ -29,6 +29,15 @@
* - The provided pqueue_eq_elem_f implementation is used to test and
* search for equal elements present in the queue; and
* - Removed capability to reassign priorities.
+ *
+ * Modified by Byeonggil Jun (Apr, 2024).
+ * Changes:
+ * - Made the pqueue_cmp_pri_f function do a three-way comparison
+ * rather than a two-way comparison.
+ * - The changed pqueue_cmp_pri_f function is used to check the equality of
+ * two elements in the pqueue_find_equal_same_priority function.
+ * - Removed the pqueue_find_equal function.
+ *
*/
#include
@@ -44,9 +53,9 @@
#define LF_RIGHT(i) (((i) << 1) + 1)
#define LF_PARENT(i) ((i) >> 1)
-void* find_equal(pqueue_t* q, void* e, int pos, pqueue_pri_t max) {
+static void* find_same_priority(pqueue_t* q, void* e, int pos) {
if (pos < 0) {
- lf_print_error_and_exit("find_equal() called with a negative pos index.");
+ lf_print_error_and_exit("find_same_priority() called with a negative pos index.");
}
// Stop the recursion when we've reached the end of the
@@ -59,19 +68,20 @@ void* find_equal(pqueue_t* q, void* e, int pos, pqueue_pri_t max) {
void* rval;
void* curr = q->d[pos];
- // Stop the recursion when we've surpassed the maximum priority.
- if (!curr || q->cmppri(q->getpri(curr), max)) {
+ // Stop the recursion once we've surpassed the priority of the element
+ // we're looking for.
+ if (!curr || q->cmppri(q->getpri(curr), q->getpri(e)) == 1) {
return NULL;
}
- if (q->eqelem(curr, e)) {
+ if (q->cmppri(q->getpri(curr), q->getpri(e)) == 0) {
return curr;
} else {
- rval = find_equal(q, e, LF_LEFT(pos), max);
+ rval = find_same_priority(q, e, LF_LEFT(pos));
if (rval)
return rval;
else
- return find_equal(q, e, LF_RIGHT(pos), max);
+ return find_same_priority(q, e, LF_RIGHT(pos));
}
return NULL;
}
@@ -93,11 +103,11 @@ void* find_equal_same_priority(pqueue_t* q, void* e, int pos) {
// Stop the recursion once we've surpassed the priority of the element
// we're looking for.
- if (!curr || q->cmppri(q->getpri(curr), q->getpri(e))) {
+ if (!curr || q->cmppri(q->getpri(curr), q->getpri(e)) == 1) {
return NULL;
}
- if (q->getpri(curr) == q->getpri(e) && q->eqelem(curr, e)) {
+ if (q->cmppri(q->getpri(curr), q->getpri(e)) == 0 && q->eqelem(curr, e)) {
return curr;
} else {
rval = find_equal_same_priority(q, e, LF_LEFT(pos));
@@ -157,7 +167,7 @@ static size_t maxchild(pqueue_t* q, size_t i) {
if (child_node >= q->size)
return 0;
- if ((child_node + 1) < q->size && (q->cmppri(q->getpri(q->d[child_node]), q->getpri(q->d[child_node + 1]))))
+ if ((child_node + 1) < q->size && (q->cmppri(q->getpri(q->d[child_node]), q->getpri(q->d[child_node + 1])) == 1))
child_node++; /* use right child instead of left */
return child_node;
@@ -168,7 +178,7 @@ static size_t bubble_up(pqueue_t* q, size_t i) {
void* moving_node = q->d[i];
pqueue_pri_t moving_pri = q->getpri(moving_node);
- for (parent_node = LF_PARENT(i); ((i > 1) && q->cmppri(q->getpri(q->d[parent_node]), moving_pri));
+ for (parent_node = LF_PARENT(i); ((i > 1) && q->cmppri(q->getpri(q->d[parent_node]), moving_pri) == 1);
i = parent_node, parent_node = LF_PARENT(i)) {
q->d[i] = q->d[parent_node];
q->setpos(q->d[i], i);
@@ -184,7 +194,7 @@ static void percolate_down(pqueue_t* q, size_t i) {
void* moving_node = q->d[i];
pqueue_pri_t moving_pri = q->getpri(moving_node);
- while ((child_node = maxchild(q, i)) && q->cmppri(moving_pri, q->getpri(q->d[child_node]))) {
+ while ((child_node = maxchild(q, i)) && (q->cmppri(moving_pri, q->getpri(q->d[child_node])) == 1)) {
q->d[i] = q->d[child_node];
q->setpos(q->d[i], i);
i = child_node;
@@ -194,9 +204,9 @@ static void percolate_down(pqueue_t* q, size_t i) {
q->setpos(moving_node, i);
}
-void* pqueue_find_equal_same_priority(pqueue_t* q, void* e) { return find_equal_same_priority(q, e, 1); }
+void* pqueue_find_same_priority(pqueue_t* q, void* e) { return find_same_priority(q, e, 1); }
-void* pqueue_find_equal(pqueue_t* q, void* e, pqueue_pri_t max) { return find_equal(q, e, 1, max); }
+void* pqueue_find_equal_same_priority(pqueue_t* q, void* e) { return find_equal_same_priority(q, e, 1); }
int pqueue_insert(pqueue_t* q, void* d) {
void** tmp;
@@ -227,7 +237,7 @@ int pqueue_remove(pqueue_t* q, void* d) {
return 0; // Nothing to remove
size_t posn = q->getpos(d);
q->d[posn] = q->d[--q->size];
- if (q->cmppri(q->getpri(d), q->getpri(q->d[posn])))
+ if (q->cmppri(q->getpri(d), q->getpri(q->d[posn])) == 1)
bubble_up(q, posn);
else
percolate_down(q, posn);
@@ -320,7 +330,7 @@ static int subtree_is_valid(pqueue_t* q, int pos) {
if ((size_t)left_pos < q->size) {
/* has a left child */
- if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_LEFT(pos)])))
+ if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_LEFT(pos)])) == 1)
return 0;
if (!subtree_is_valid(q, LF_LEFT(pos)))
return 0;
@@ -332,7 +342,7 @@ static int subtree_is_valid(pqueue_t* q, int pos) {
}
if ((size_t)right_pos < q->size) {
/* has a right child */
- if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_RIGHT(pos)])))
+ if (q->cmppri(q->getpri(q->d[pos]), q->getpri(q->d[LF_RIGHT(pos)])) == 1)
return 0;
if (!subtree_is_valid(q, LF_RIGHT(pos)))
return 0;
diff --git a/core/utils/pqueue_support.h b/core/utils/pqueue_support.h
deleted file mode 100644
index b7c0a08c1..000000000
--- a/core/utils/pqueue_support.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*************
-Copyright (c) 2022, The University of California at Berkeley.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
-THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
-THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-***************/
-
-/**
- * @file pqueue_support.h
- * @author Edward A. Lee
- * @author Marten Lohstroh
- * @brief Header-only support functions for pqueue.
- */
-
-#ifndef PQUEUE_SUPPORT_H
-#define PQUEUE_SUPPORT_H
-
-#include "../reactor.h"
-
-// ********** Priority Queue Support Start
-
-/**
- * Return whether the first and second argument are given in reverse order.
- */
-static int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that) { return (thiz > that); }
-
-/**
- * Return false (0) regardless of reaction order.
- */
-static int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that) { return false; }
-
-/**
- * Return whether or not the given events have matching triggers.
- */
-static int event_matches(void* next, void* curr) { return (((event_t*)next)->trigger == ((event_t*)curr)->trigger); }
-
-/**
- * Return whether or not the given reaction_t pointers
- * point to the same struct.
- */
-static int reaction_matches(void* next, void* curr) { return (next == curr); }
-
-/**
- * Report a priority equal to the time of the given event.
- * Used for sorting pointers to event_t structs in the event queue.
- */
-static pqueue_pri_t get_event_time(void* a) { return (pqueue_pri_t)(((event_t*)a)->time); }
-
-/**
- * Report a priority equal to the index of the given reaction.
- * Used for sorting pointers to reaction_t structs in the
- * blocked and executing queues.
- */
-static pqueue_pri_t get_reaction_index(void* a) { return ((reaction_t*)a)->index; }
-
-/**
- * Return the given event's position in the queue.
- */
-static size_t get_event_position(void* a) { return ((event_t*)a)->pos; }
-
-/**
- * Return the given reaction's position in the queue.
- */
-static size_t get_reaction_position(void* a) { return ((reaction_t*)a)->pos; }
-
-/**
- * Set the given event's position in the queue.
- */
-static void set_event_position(void* a, size_t pos) { ((event_t*)a)->pos = pos; }
-
-/**
- * Return the given reaction's position in the queue.
- */
-static void set_reaction_position(void* a, size_t pos) { ((reaction_t*)a)->pos = pos; }
-
-/**
- * Print some information about the given reaction.
- *
- * DEBUG function only.
- */
-static void print_reaction(void* reaction) {
- reaction_t* r = (reaction_t*)reaction;
- LF_PRINT_DEBUG("%s: chain_id:%llu, index: %llx, reaction: %p", r->name, r->chain_id, r->index, r);
-}
-
-/**
- * Print some information about the given event.
- *
- * DEBUG function only.
- */
-static void print_event(void* event) {
- event_t* e = (event_t*)event;
- LF_PRINT_DEBUG("time: " PRINTF_TIME ", trigger: %p, token: %p", e->time, e->trigger, e->token);
-}
-
-// ********** Priority Queue Support End
-#endif
diff --git a/core/utils/pqueue_tag.c b/core/utils/pqueue_tag.c
index 9406ca1ca..24899374b 100644
--- a/core/utils/pqueue_tag.c
+++ b/core/utils/pqueue_tag.c
@@ -25,17 +25,6 @@
*/
static pqueue_pri_t pqueue_tag_get_priority(void* element) { return (pqueue_pri_t)element; }
-/**
- * @brief Callback comparison function for the tag-based priority queue.
- * Return 0 if the first argument is less than second and 1 otherwise.
- * This function is of type pqueue_cmp_pri_f.
- * @param priority1 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t.
- * @param priority2 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t.
- */
-static int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2) {
- return (lf_tag_compare(((pqueue_tag_element_t*)priority1)->tag, ((pqueue_tag_element_t*)priority2)->tag) > 0);
-}
-
/**
* @brief Callback function to determine whether two elements are equivalent.
* Return 1 if the tags contained by given elements are identical, 0 otherwise.
@@ -75,13 +64,23 @@ static void pqueue_tag_print_element(void* element) {
//////////////////
// Functions defined in pqueue_tag.h.
+int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2) {
+ return (lf_tag_compare(((pqueue_tag_element_t*)priority1)->tag, ((pqueue_tag_element_t*)priority2)->tag));
+}
+
pqueue_tag_t* pqueue_tag_init(size_t initial_size) {
return (pqueue_tag_t*)pqueue_init(initial_size, pqueue_tag_compare, pqueue_tag_get_priority, pqueue_tag_get_position,
pqueue_tag_set_position, pqueue_tag_matches, pqueue_tag_print_element);
}
+pqueue_tag_t* pqueue_tag_init_customize(size_t initial_size, pqueue_cmp_pri_f cmppri, pqueue_eq_elem_f eqelem,
+ pqueue_print_entry_f prt) {
+ return (pqueue_tag_t*)pqueue_init(initial_size, cmppri, pqueue_tag_get_priority, pqueue_tag_get_position,
+ pqueue_tag_set_position, eqelem, prt);
+}
+
void pqueue_tag_free(pqueue_tag_t* q) {
- for (int i = 1; i < q->size; i++) {
+ for (size_t i = 1; i < q->size; i++) {
if (q->d[i] != NULL && ((pqueue_tag_element_t*)q->d[i])->is_dynamic) {
free(q->d[i]);
}
@@ -101,11 +100,14 @@ int pqueue_tag_insert_tag(pqueue_tag_t* q, tag_t t) {
}
pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t* q, tag_t t) {
- // Create elements on the stack. These elements are only needed during
- // the duration of this function call, so putting them on the stack is OK.
+ // Create an element on the stack. This element is only needed during
+ // the duration of this function call, so putting it on the stack is OK.
pqueue_tag_element_t element = {.tag = t, .pos = 0, .is_dynamic = false};
- pqueue_tag_element_t forever = {.tag = FOREVER_TAG, .pos = 0, .is_dynamic = false};
- return pqueue_find_equal((pqueue_t*)q, (void*)&element, (pqueue_pri_t)&forever);
+ return pqueue_find_same_priority((pqueue_t*)q, (void*)&element);
+}
+
+pqueue_tag_element_t* pqueue_tag_find_equal_same_tag(pqueue_tag_t* q, pqueue_tag_element_t* e) {
+ return pqueue_find_equal_same_priority((pqueue_t*)q, (void*)e);
}
int pqueue_tag_insert_if_no_match(pqueue_tag_t* q, tag_t t) {
@@ -149,3 +151,5 @@ void pqueue_tag_remove_up_to(pqueue_tag_t* q, tag_t t) {
head = pqueue_tag_peek_tag(q);
}
}
+
+void pqueue_tag_dump(pqueue_tag_t* q) { pqueue_dump((pqueue_t*)q, pqueue_tag_print_element); }
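The exported pqueue_tag_compare works because, in the tag-based queue, an element's "priority" is simply a pointer to the element itself; the comparator casts it back and compares the embedded tags. A simplified, self-contained illustration in which uintptr_t and local structs stand in for the runtime's pqueue_pri_t and pqueue_tag_element_t:

#include <stdio.h>
#include <stdint.h>

typedef int64_t instant_t;
typedef uint32_t microstep_t;
typedef struct { instant_t time; microstep_t microstep; } tag_t;
typedef uintptr_t pqueue_pri_t;          /* wide enough to hold a pointer here */
typedef struct { tag_t tag; } element_t; /* stand-in for pqueue_tag_element_t */

static int tag_compare(tag_t a, tag_t b) {
  if (a.time != b.time) return a.time < b.time ? -1 : 1;
  if (a.microstep != b.microstep) return a.microstep < b.microstep ? -1 : 1;
  return 0;
}

/* The "priority" handed to the queue is just the element pointer... */
static pqueue_pri_t get_priority(void* element) { return (pqueue_pri_t)element; }

/* ...so the comparator casts back and compares the tags stored inside. */
static int compare(pqueue_pri_t p1, pqueue_pri_t p2) {
  return tag_compare(((element_t*)p1)->tag, ((element_t*)p2)->tag);
}

int main(void) {
  element_t a = {{10, 0}}, b = {{10, 1}};
  printf("compare = %d\n", compare(get_priority(&a), get_priority(&b)));
  return 0;
}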
diff --git a/core/utils/util.c b/core/utils/util.c
index 554707edb..881b6dc05 100644
--- a/core/utils/util.c
+++ b/core/utils/util.c
@@ -73,20 +73,20 @@ int lf_fed_id() { return _lf_my_fed_id; }
// Declaration needed to attach attributes to suppress warnings of the form:
// "warning: function '_lf_message_print' might be a candidate for 'gnu_printf'
// format attribute [-Wsuggest-attribute=format]"
-void _lf_message_print(int is_error, const char* prefix, const char* format, va_list args, int log_level)
- ATTRIBUTE_FORMAT_PRINTF(3, 0);
+void _lf_message_print(const char* prefix, const char* format, va_list args, int log_level)
+ ATTRIBUTE_FORMAT_PRINTF(2, 0);
/**
* Print a fatal error message. Internal function.
*/
static void lf_vprint_fatal_error(const char* format, va_list args) {
- _lf_message_print(1, "FATAL ERROR: ", format, args, LOG_LEVEL_ERROR);
+ _lf_message_print("FATAL ERROR: ", format, args, LOG_LEVEL_ERROR);
}
/**
* Internal implementation of the next few reporting functions.
*/
-void _lf_message_print(int is_error, const char* prefix, const char* format, va_list args,
+void _lf_message_print(const char* prefix, const char* format, va_list args,
int log_level) { // Disable warnings about format check.
// The logging level may be set either by a LOG_LEVEL #define
// (which is code generated based on the logging target property)
@@ -151,7 +151,7 @@ void lf_print(const char* format, ...) {
va_end(args);
}
-void lf_vprint(const char* format, va_list args) { _lf_message_print(0, "", format, args, LOG_LEVEL_INFO); }
+void lf_vprint(const char* format, va_list args) { _lf_message_print("", format, args, LOG_LEVEL_INFO); }
void lf_print_log(const char* format, ...) {
va_list args;
@@ -160,7 +160,7 @@ void lf_print_log(const char* format, ...) {
va_end(args);
}
-void lf_vprint_log(const char* format, va_list args) { _lf_message_print(0, "LOG: ", format, args, LOG_LEVEL_LOG); }
+void lf_vprint_log(const char* format, va_list args) { _lf_message_print("LOG: ", format, args, LOG_LEVEL_LOG); }
void lf_print_debug(const char* format, ...) {
va_list args;
@@ -169,9 +169,7 @@ void lf_print_debug(const char* format, ...) {
va_end(args);
}
-void lf_vprint_debug(const char* format, va_list args) {
- _lf_message_print(0, "DEBUG: ", format, args, LOG_LEVEL_DEBUG);
-}
+void lf_vprint_debug(const char* format, va_list args) { _lf_message_print("DEBUG: ", format, args, LOG_LEVEL_DEBUG); }
void lf_print_error(const char* format, ...) {
va_list args;
@@ -180,9 +178,7 @@ void lf_print_error(const char* format, ...) {
va_end(args);
}
-void lf_vprint_error(const char* format, va_list args) {
- _lf_message_print(1, "ERROR: ", format, args, LOG_LEVEL_ERROR);
-}
+void lf_vprint_error(const char* format, va_list args) { _lf_message_print("ERROR: ", format, args, LOG_LEVEL_ERROR); }
void lf_print_warning(const char* format, ...) {
va_list args;
@@ -192,7 +188,7 @@ void lf_print_warning(const char* format, ...) {
}
void lf_vprint_warning(const char* format, va_list args) {
- _lf_message_print(1, "WARNING: ", format, args, LOG_LEVEL_WARNING);
+ _lf_message_print("WARNING: ", format, args, LOG_LEVEL_WARNING);
}
void lf_print_error_and_exit(const char* format, ...) {
diff --git a/include/core/environment.h b/include/core/environment.h
index 98753c6fb..a776dee95 100644
--- a/include/core/environment.h
+++ b/include/core/environment.h
@@ -73,9 +73,8 @@ typedef struct environment_t {
int id;
tag_t current_tag;
tag_t stop_tag;
- pqueue_t* event_q;
- pqueue_t* recycle_q;
- pqueue_t* next_q;
+ pqueue_tag_t* event_q;
+ pqueue_tag_t* recycle_q;
bool** is_present_fields;
int is_present_fields_size;
bool** is_present_fields_abbreviated;
diff --git a/include/core/federated/federate.h b/include/core/federated/federate.h
index e8dc4bf0d..fd7b23a3d 100644
--- a/include/core/federated/federate.h
+++ b/include/core/federated/federate.h
@@ -254,7 +254,7 @@ void lf_connect_to_federate(uint16_t);
* This will return the socket descriptor for the connection.
* If port_number is 0, then start at DEFAULT_PORT and increment
* the port number on each attempt. If an attempt fails, wait CONNECT_RETRY_INTERVAL
- * and try again. If it fails after CONNECT_MAX_RETRIES, the program exits.
+ * and try again. If the connection is not established within CONNECT_TIMEOUT, the program exits.

* If it succeeds, it sets the _fed.socket_TCP_RTI global variable to refer to
* the socket for communicating with the RTI.
* @param hostname A hostname, such as "localhost".
diff --git a/include/core/federated/network/net_common.h b/include/core/federated/network/net_common.h
index 11bbc47f3..e0edecf61 100644
--- a/include/core/federated/network/net_common.h
+++ b/include/core/federated/network/net_common.h
@@ -205,10 +205,10 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* Bound on the number of retries to connect to the RTI.
- * A federate will retry every CONNECT_RETRY_INTERVAL seconds
- * this many times before giving up.
+ * A federate will retry every CONNECT_RETRY_INTERVAL seconds until
+ * CONNECT_TIMEOUT expires.
*/
-#define CONNECT_MAX_RETRIES 100
+#define CONNECT_TIMEOUT MINUTES(1)
/**
* Maximum number of port addresses that a federate will try to connect to the RTI on.
@@ -483,7 +483,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
do { \
buffer[0] = MSG_TYPE_STOP_REQUEST; \
encode_int64(time, &(buffer[1])); \
- assert(microstep >= 0); \
encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \
} while (0)
@@ -501,7 +500,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
do { \
buffer[0] = MSG_TYPE_STOP_REQUEST_REPLY; \
encode_int64(time, &(buffer[1])); \
- assert(microstep >= 0); \
encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \
} while (0)
@@ -518,7 +516,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
do { \
buffer[0] = MSG_TYPE_STOP_GRANTED; \
encode_int64(time, &(buffer[1])); \
- assert(microstep >= 0); \
encode_int32((int32_t)microstep, &(buffer[1 + sizeof(instant_t)])); \
} while (0)
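The switch from a retry count to a time budget means the connection loop is bounded by elapsed physical time rather than a fixed number of attempts. A sketch of the idea only, not the actual federate code; try_connect_once is a hypothetical helper, and CONNECT_RETRY_INTERVAL is assumed here to be an interval_t:

#include <stdbool.h>
#include "net_common.h"          // CONNECT_TIMEOUT, CONNECT_RETRY_INTERVAL
#include "low_level_platform.h"  // lf_time_physical(), lf_sleep(); path assumed
#include "util.h"                // lf_print_error_and_exit()

extern bool try_connect_once(void); // hypothetical helper for this sketch

static void connect_with_timeout(void) {
  instant_t start = lf_time_physical();
  while (!try_connect_once()) {
    if (lf_time_physical() - start > CONNECT_TIMEOUT) {
      lf_print_error_and_exit("Failed to connect to the RTI within CONNECT_TIMEOUT.");
    }
    lf_sleep(CONNECT_RETRY_INTERVAL); // wait before the next attempt
  }
}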
diff --git a/include/core/federated/network/net_util.h b/include/core/federated/network/net_util.h
index 555c8df89..24b4782f9 100644
--- a/include/core/federated/network/net_util.h
+++ b/include/core/federated/network/net_util.h
@@ -365,7 +365,7 @@ bool validate_user(const char* user);
* Extract one match group from the rti_addr regex .
* @return true if SUCCESS, else false.
*/
-bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int max_len, int min_len,
+bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, size_t max_len, size_t min_len,
const char* err_msg);
/**
@@ -373,7 +373,7 @@ bool extract_match_group(const char* rti_addr, char* dest, regmatch_t group, int
* @return true if success, else false.
*/
bool extract_match_groups(const char* rti_addr, char** rti_addr_strs, bool** rti_addr_flags, regmatch_t* group_array,
- int* gids, int* max_lens, int* min_lens, const char** err_msgs);
+ int* gids, size_t* max_lens, size_t* min_lens, const char** err_msgs);
/**
* Extract the host, port and user from rti_addr.
diff --git a/include/core/lf_token.h b/include/core/lf_token.h
index f60fd18b5..219538dd3 100644
--- a/include/core/lf_token.h
+++ b/include/core/lf_token.h
@@ -341,7 +341,7 @@ token_freed _lf_done_using(lf_token_t* token);
* to avoid memory leaks.
* @param env Environment in which we are executing.
*/
-void _lf_free_token_copies(struct environment_t* env);
+void _lf_free_token_copies(void);
#endif /* LF_TOKEN_H */
/** @} */
diff --git a/include/core/lf_types.h b/include/core/lf_types.h
index 5598cf820..75a61e405 100644
--- a/include/core/lf_types.h
+++ b/include/core/lf_types.h
@@ -21,6 +21,7 @@
#include "modal_models/modes.h" // Modal model support
#include "utils/pqueue.h"
+#include "utils/pqueue_tag.h"
#include "lf_token.h"
#include "tag.h"
#include "vector.h"
@@ -195,15 +196,12 @@ typedef struct event_t event_t;
/** Event activation record to push onto the event queue. */
struct event_t {
- instant_t time; // Time of release.
- trigger_t* trigger; // Associated trigger, NULL if this is a dummy event.
- size_t pos; // Position in the priority queue.
- lf_token_t* token; // Pointer to the token wrapping the value.
- bool is_dummy; // Flag to indicate whether this event is merely a placeholder or an actual event.
+ pqueue_tag_element_t base; // Base element of pqueue_tag; contains the tag of release and the position in the priority queue.
+ trigger_t* trigger; // Associated trigger, NULL if this is a dummy event.
+ lf_token_t* token; // Pointer to the token wrapping the value.
#ifdef FEDERATED
tag_t intended_tag; // The intended tag.
#endif
- event_t* next; // Pointer to the next event lined up in superdense time.
};
/**
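With pqueue_tag_element_t embedded as the first member, an event's tag and queue position live in e->base, and an event_t* can be handed to the tag-based queue by casting to the base type. A sketch of the pattern under assumed names (lf_get_new_event and lf_time_logical are used elsewhere in this change set; the 5 ms delay is illustrative):

static void schedule_in_5ms(environment_t* env, trigger_t* trigger, lf_token_t* token) {
  event_t* e = lf_get_new_event(env);   // allocator used by the runtime
  e->trigger = trigger;                 // NULL would make this a dummy event
  e->token = token;                     // payload token, may be NULL
  e->base.tag = (tag_t){.time = lf_time_logical(env) + MSEC(5), .microstep = 0};
  // The cast is valid because base is the first struct member.
  pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
}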
diff --git a/include/core/reactor_common.h b/include/core/reactor_common.h
index fc1451a96..8086ed7db 100644
--- a/include/core/reactor_common.h
+++ b/include/core/reactor_common.h
@@ -195,16 +195,15 @@ void _lf_trigger_startup_reactions(environment_t* env);
void _lf_trigger_shutdown_reactions(environment_t* env);
/**
- * Create dummy events to be used as spacers in the event queue.
+ * @brief Create a dummy event with the specified tag.
+ *
+ * A dummy event is an event with no triggers that can be put on the event queue to trigger a tag advance to the
+ * specified tag.
* @param env Environment in which we are executing.
- * @param trigger The eventual event to be triggered.
- * @param time The logical time of that event.
- * @param next The event to place after the dummy events.
- * @param offset The number of dummy events to insert.
- * @return A pointer to the first dummy event.
+ * @param tag The tag of that event.
+ * @return A pointer to the dummy event.
*/
-event_t* _lf_create_dummy_events(environment_t* env, trigger_t* trigger, instant_t time, event_t* next,
- microstep_t offset);
+event_t* _lf_create_dummy_events(environment_t* env, tag_t tag);
/**
* @brief Schedule an event at a specific tag (time, microstep).
@@ -250,9 +249,9 @@ trigger_handle_t _lf_insert_reactions_for_trigger(environment_t* env, trigger_t*
* the current time, then increase the microstep. Otherwise, update the current
* time and set the microstep to zero.
* @param env The environment in which we are executing
- * @param next_time The time step to advance to.
+ * @param next_tag The tag to advance to.
*/
-void _lf_advance_logical_time(environment_t* env, instant_t next_time);
+void _lf_advance_tag(environment_t* env, tag_t next_tag);
/**
* @brief Pop all events from event_q with tag equal to current tag.
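Under the new interface a dummy event is just a tag marker, so forcing a tag advance reduces to creating one at the desired tag and putting it on the event queue. A sketch of that pattern; whether the helper or the caller performs the insertion may differ in the actual runtime:

static void mark_tag(environment_t* env, tag_t tag) {
  event_t* dummy = _lf_create_dummy_events(env, tag);
  pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)dummy);
}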
diff --git a/include/core/threaded/reactor_threaded.h b/include/core/threaded/reactor_threaded.h
index 6971cec17..96de7ac49 100644
--- a/include/core/threaded/reactor_threaded.h
+++ b/include/core/threaded/reactor_threaded.h
@@ -90,7 +90,7 @@ void _lf_decrement_tag_barrier_locked(environment_t* env);
int _lf_wait_on_tag_barrier(environment_t* env, tag_t proposed_tag);
void lf_synchronize_with_other_federates(void);
-bool wait_until(environment_t* env, instant_t logical_time_ns, lf_cond_t* condition);
+bool wait_until(instant_t logical_time_ns, lf_cond_t* condition);
tag_t get_next_event_tag(environment_t* env);
tag_t send_next_event_tag(environment_t* env, tag_t tag, bool wait_for_reply);
void _lf_next_locked(environment_t* env);
diff --git a/include/core/tracepoint.h b/include/core/tracepoint.h
index f28c59f9d..caa27f558 100644
--- a/include/core/tracepoint.h
+++ b/include/core/tracepoint.h
@@ -108,70 +108,6 @@ typedef enum {
#include "trace.h"
-/**
- * String description of event types.
- */
-static const char* trace_event_names[] = {
- "Reaction starts",
- "Reaction ends",
- "Reaction deadline missed",
- "Schedule called",
- "User-defined event",
- "User-defined valued event",
- "Worker wait starts",
- "Worker wait ends",
- "Scheduler advancing time starts",
- "Scheduler advancing time ends",
- "Federated marker",
- // Sending messages
- "Sending ACK",
- "Sending FAILED",
- "Sending TIMESTAMP",
- "Sending NET",
- "Sending LTC",
- "Sending STOP_REQ",
- "Sending STOP_REQ_REP",
- "Sending STOP_GRN",
- "Sending FED_ID",
- "Sending PTAG",
- "Sending TAG",
- "Sending REJECT",
- "Sending RESIGN",
- "Sending PORT_ABS",
- "Sending CLOSE_RQ",
- "Sending TAGGED_MSG",
- "Sending P2P_TAGGED_MSG",
- "Sending MSG",
- "Sending P2P_MSG",
- "Sending ADR_AD",
- "Sending ADR_QR",
- "Sending DNET",
- // Receiving messages
- "Receiving ACK",
- "Receiving FAILED",
- "Receiving TIMESTAMP",
- "Receiving NET",
- "Receiving LTC",
- "Receiving STOP_REQ",
- "Receiving STOP_REQ_REP",
- "Receiving STOP_GRN",
- "Receiving FED_ID",
- "Receiving PTAG",
- "Receiving TAG",
- "Receiving REJECT",
- "Receiving RESIGN",
- "Receiving PORT_ABS",
- "Receiving CLOSE_RQ",
- "Receiving TAGGED_MSG",
- "Receiving P2P_TAGGED_MSG",
- "Receiving MSG",
- "Receiving P2P_MSG",
- "Receiving ADR_AD",
- "Receiving ADR_QR",
- "Receiving DNET",
- "Receiving UNIDENTIFIED",
-};
-
/**
* @brief A trace record that gets written in binary to the trace file in the default implementation.
*/
@@ -203,7 +139,7 @@ typedef struct trace_record_t {
* argument is currently unused)
*/
void call_tracepoint(int event_type, void* reactor, tag_t tag, int worker, int src_id, int dst_id,
- instant_t* physical_time, trigger_t* trigger, interval_t extra_delay, bool is_interval_start);
+ instant_t* physical_time, trigger_t* trigger, interval_t extra_delay);
/**
* Register a trace object.
@@ -232,8 +168,7 @@ int register_user_trace_event(void* self, char* description);
* @param worker The thread number of the worker thread or 0 for single-threaded execution.
*/
#define tracepoint_reaction_starts(env, reaction, worker) \
- call_tracepoint(reaction_starts, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0, \
- true)
+ call_tracepoint(reaction_starts, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0)
/**
* Trace the end of a reaction execution.
@@ -242,8 +177,7 @@ int register_user_trace_event(void* self, char* description);
* @param worker The thread number of the worker thread or 0 for single-threaded execution.
*/
#define tracepoint_reaction_ends(env, reaction, worker) \
- call_tracepoint(reaction_ends, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0, \
- false)
+ call_tracepoint(reaction_ends, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, NULL, 0)
/**
* Trace a call to schedule.
@@ -285,7 +219,7 @@ void tracepoint_user_value(void* self, char* description, long long value);
* @param worker The thread number of the worker thread or 0 for single-threaded execution.
*/
#define tracepoint_worker_wait_starts(env, worker) \
- call_tracepoint(worker_wait_starts, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0, true)
+ call_tracepoint(worker_wait_starts, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0)
/**
* Trace the end of a worker waiting for something to change on the event or reaction queue.
@@ -293,7 +227,7 @@ void tracepoint_user_value(void* self, char* description, long long value);
* @param worker The thread number of the worker thread or 0 for single-threaded execution.
*/
#define tracepoint_worker_wait_ends(env, worker) \
- call_tracepoint(worker_wait_ends, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0, false)
+ call_tracepoint(worker_wait_ends, NULL, env->current_tag, worker, worker, -1, NULL, NULL, 0)
/**
* Trace the start of the scheduler waiting for logical time to advance or an event to
@@ -301,7 +235,7 @@ void tracepoint_user_value(void* self, char* description, long long value);
* @param trace The trace object.
*/
#define tracepoint_scheduler_advancing_time_starts(env) \
- call_tracepoint(scheduler_advancing_time_starts, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0, true);
+ call_tracepoint(scheduler_advancing_time_starts, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0);
/**
* Trace the end of the scheduler waiting for logical time to advance or an event to
@@ -309,7 +243,7 @@ void tracepoint_user_value(void* self, char* description, long long value);
* @param trace The trace object.
*/
#define tracepoint_scheduler_advancing_time_ends(env) \
- call_tracepoint(scheduler_advancing_time_ends, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0, false)
+ call_tracepoint(scheduler_advancing_time_ends, NULL, env->current_tag, -1, -1, -1, NULL, NULL, 0)
/**
* Trace the occurrence of a deadline miss.
@@ -319,7 +253,7 @@ void tracepoint_user_value(void* self, char* description, long long value);
*/
#define tracepoint_reaction_deadline_missed(env, reaction, worker) \
call_tracepoint(reaction_deadline_missed, reaction->self, env->current_tag, worker, worker, reaction->number, NULL, \
- NULL, 0, false)
+ NULL, 0)
/**
* @brief Check if the tracing library is compatible with the current version
@@ -371,10 +305,28 @@ void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int p
void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag);
#else
-#define tracepoint_federate_to_rti(...) ;
-#define tracepoint_federate_from_rti(...) ;
-#define tracepoint_federate_to_federate(...) ;
-#define tracepoint_federate_from_federate(...) ;
+static inline void tracepoint_federate_to_rti(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_from_rti(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)partner_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)partner_id;
+ (void)tag;
+}
#endif // FEDERATED
////////////////////////////////////////////////////////////
@@ -401,38 +353,115 @@ void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag
void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* tag);
#else
-#define tracepoint_rti_to_federate(...) ;
-#define tracepoint_rti_from_federate(...) ;
+static inline void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
#endif // RTI_TRACE
#else
typedef struct trace_t trace_t;
+static inline int register_user_trace_event(void* self, char* description) {
+ (void)self;
+ (void)description;
+ return 0;
+}
+static inline void tracepoint_schedule(environment_t* env, trigger_t* trigger, interval_t extra_delay) {
+ (void)env;
+ (void)trigger;
+ (void)extra_delay;
+}
+static inline void tracepoint_user_event(void* self, char* description) {
+ (void)self;
+ (void)description;
+}
+static inline void tracepoint_user_value(void* self, char* description, long long value) {
+ (void)self;
+ (void)description;
+ (void)value;
+}
+static inline void tracepoint_rti_to_federate(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_rti_from_federate(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_to_rti(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_from_rti(trace_event_t event_type, int fed_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_to_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)partner_id;
+ (void)tag;
+}
+static inline void tracepoint_federate_from_federate(trace_event_t event_type, int fed_id, int partner_id, tag_t* tag) {
+ (void)event_type;
+ (void)fed_id;
+ (void)partner_id;
+ (void)tag;
+}
+static inline void lf_tracing_global_init(char* file_name_prefix, int process_id, int max_num_local_threads) {
+ (void)file_name_prefix;
+ (void)process_id;
+ (void)max_num_local_threads;
+}
+static inline void lf_tracing_global_shutdown() {}
+static inline void lf_tracing_set_start_time(int64_t start_time) { (void)start_time; }
-// empty definition in case we compile without tracing
-#define _lf_register_trace_event(...) 1
-#define register_user_trace_event(...) 1
-#define tracepoint_reaction_starts(...)
-#define tracepoint_reaction_ends(...)
-#define tracepoint_schedule(...)
-#define tracepoint_user_event(...)
-#define tracepoint_user_value(...)
-#define tracepoint_worker_wait_starts(...)
-#define tracepoint_worker_wait_ends(...)
-#define tracepoint_scheduler_advancing_time_starts(...) ;
-#define tracepoint_scheduler_advancing_time_ends(...) ;
-#define tracepoint_reaction_deadline_missed(...) ;
-#define tracepoint_federate_to_rti(...) ;
-#define tracepoint_federate_from_rti(...) ;
-#define tracepoint_federate_to_federate(...) ;
-#define tracepoint_federate_from_federate(...) ;
-#define tracepoint_rti_to_federate(...) ;
-#define tracepoint_rti_from_federate(...) ;
-
-#define lf_tracing_register_trace_event(...) ;
-#define lf_tracing_set_start_time(...) ;
-#define tracepoint(...) ;
-#define lf_tracing_global_init(...) ;
-#define lf_tracing_global_shutdown(...) ;
+#define tracepoint_reaction_starts(env, reaction, worker) \
+ while (0) { \
+ (void)env; \
+ (void)reaction; \
+ (void)worker; \
+ }
+#define tracepoint_reaction_ends(env, reaction, worker) \
+ while (0) { \
+ (void)env; \
+ (void)reaction; \
+ (void)worker; \
+ }
+#define tracepoint_worker_wait_starts(env, worker) \
+ while (0) { \
+ (void)env; \
+ (void)worker; \
+ }
+#define tracepoint_worker_wait_ends(env, worker) \
+ while (0) { \
+ (void)env; \
+ (void)worker; \
+ }
+#define tracepoint_scheduler_advancing_time_starts(env) \
+ while (0) { \
+ (void)env; \
+ }
+#define tracepoint_scheduler_advancing_time_ends(env) \
+ while (0) { \
+ (void)env; \
+ }
+#define tracepoint_reaction_deadline_missed(env, reaction, worker) \
+ while (0) { \
+ (void)env; \
+ (void)reaction; \
+ (void)worker; \
+ }
#endif // LF_TRACE
#endif // TRACEPOINT_H
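The replacement no-op forms are chosen so that a build without tracing still "uses" every macro argument: the (void) casts silence unused-variable and unused-parameter warnings, while the while (0) body is dead code that any compiler removes. The same idea in isolation; the macro name below is illustrative, not part of the runtime:

#define EXAMPLE_TRACEPOINT(env, worker) \
  while (0) {                           \
    (void)(env);                        \
    (void)(worker);                     \
  }

static void worker_loop(void* env, int worker) {
  EXAMPLE_TRACEPOINT(env, worker); // compiles to nothing, but triggers no -Wunused warnings
}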
diff --git a/include/core/utils/impl/hashmap.h b/include/core/utils/impl/hashmap.h
index 8490e3f62..e64774887 100644
--- a/include/core/utils/impl/hashmap.h
+++ b/include/core/utils/impl/hashmap.h
@@ -125,7 +125,6 @@ void HASHMAP(free)(HASHMAP(t) * hashmap) {
void HASHMAP(put)(HASHMAP(t) * hashmap, K key, V value) {
assert(key != hashmap->nothing);
- assert(key >= 0);
HASHMAP(entry_t)* write_to = HASHMAP(get_actual_address)(hashmap, key);
write_to->key = key;
write_to->value = value;
diff --git a/include/core/utils/pqueue.h b/include/core/utils/pqueue.h
index e317acbcd..448b94462 100644
--- a/include/core/utils/pqueue.h
+++ b/include/core/utils/pqueue.h
@@ -2,10 +2,14 @@
* @file pqueue.h
* @author Marten Lohstroh
* @author Edward A. Lee
+ * @author Byeonggil Jun
* @copyright (c) 2020-2023, The University of California at Berkeley.
* License: BSD 2-clause
*
- * @brief Priority queue declarations for the event queue and reaction queue.
+ * @brief Priority queue definitions for queues where the priority is a number that can be compared with ordinary
+ * numerical comparisons.
+ *
+ * This is used for the reaction queue. The event queue uses a `tag_t` struct for its priority, so it cannot use this.
*/
#ifndef PQUEUE_H
@@ -27,13 +31,6 @@ int in_reverse_order(pqueue_pri_t thiz, pqueue_pri_t that);
*/
int in_no_particular_order(pqueue_pri_t thiz, pqueue_pri_t that);
-/**
- * Return 1 if the two events have the same trigger.
- * @param event1 A pointer to an event_t.
- * @param event2 A pointer to an event_t.
- */
-int event_matches(void* event1, void* event2);
-
/**
* Return 1 if the two arguments are identical pointers.
* @param a First argument.
@@ -41,13 +38,6 @@ int event_matches(void* event1, void* event2);
*/
int reaction_matches(void* a, void* b);
-/**
- * Report a priority equal to the time of the given event.
- * This is used for sorting pointers to event_t structs in the event queue.
- * @param a A pointer to an event_t.
- */
-pqueue_pri_t get_event_time(void* event);
-
/**
* Report a priority equal to the index of the given reaction.
* Used for sorting pointers to reaction_t structs in the
@@ -56,25 +46,12 @@ pqueue_pri_t get_event_time(void* event);
*/
pqueue_pri_t get_reaction_index(void* reaction_t);
-/**
- * Return the given event's position in the queue.
- * @param event A pointer to an event_t.
- */
-size_t get_event_position(void* event);
-
/**
* Return the given reaction's position in the queue.
* @param reaction A pointer to a reaction_t.
*/
size_t get_reaction_position(void* reaction);
-/**
- * Set the given event's position in the queue.
- * @param event A pointer to an event_t
- * @param pos The position.
- */
-void set_event_position(void* event, size_t pos);
-
/**
* Set the given reaction's position in the queue.
* @param event A pointer to a reaction_t.
@@ -89,11 +66,4 @@ void set_reaction_position(void* reaction, size_t pos);
*/
void print_reaction(void* reaction);
-/**
- * Print some information about the given event.
- * This only prints something if logging is set to DEBUG.
- * @param event A pointer to an event_t.
- */
-void print_event(void* event);
-
#endif /* PQUEUE_H */
diff --git a/include/core/utils/pqueue_base.h b/include/core/utils/pqueue_base.h
index 8c9fc8f2c..b913ab64f 100644
--- a/include/core/utils/pqueue_base.h
+++ b/include/core/utils/pqueue_base.h
@@ -30,6 +30,14 @@
* search for equal elements present in the queue; and
* - Removed capability to reassign priorities.
*
+ * Modified by Byeonggil Jun (Apr, 2024).
+ * Changes:
+ * - Made the pqueue_cmp_pri_f function do a three-way comparison
+ *   rather than a two-way comparison.
+ * - The changed pqueue_cmp_pri_f function is used to check the equality of
+ * two elements in the pqueue_find_equal_same_priority function.
+ * - Removed the pqueue_find_equal function.
+ *
* @brief Priority Queue function declarations used as a base for Lingua Franca priority queues.
*
* @{
@@ -81,7 +89,7 @@ typedef struct pqueue_t {
* @param n the initial estimate of the number of queue items for which memory
* should be preallocated
* @param cmppri The callback function to run to compare two elements
- * This callback should return 0 for 'lower' and non-zero
+ * This callback should return -1 for 'lower', 0 for 'same', and 1
* for 'higher', or vice versa if reverse priority is desired
* @param getpri the callback function to run to set a score to an element
* @param getpos the callback function to get the current element's position
@@ -140,23 +148,21 @@ void* pqueue_pop(pqueue_t* q);
void pqueue_empty_into(pqueue_t** dest, pqueue_t** src);
/**
- * Find the highest-ranking item with the same priority that matches the
- * supplied entry.
+ * Return an entry with the same priority as the specified entry or NULL if there is no such entry.
* @param q the queue
* @param e the entry to compare against
* @return NULL if no matching event has been found, otherwise the entry
*/
-void* pqueue_find_equal_same_priority(pqueue_t* q, void* e);
+void* pqueue_find_same_priority(pqueue_t* q, void* e);
/**
- * Find the highest-ranking item with priority up to and including the given
- * maximum priority that matches the supplied entry.
+ * Return an entry with the same priority (determined by `cmppri`) that matches the supplied entry (determined
+ * by `eqelem`) or `NULL` if there is no such entry.
* @param q the queue
* @param e the entry to compare against
- * @param max_priority the maximum priority to consider
* @return NULL if no matching event has been found, otherwise the entry
*/
-void* pqueue_find_equal(pqueue_t* q, void* e, pqueue_pri_t max_priority);
+void* pqueue_find_equal_same_priority(pqueue_t* q, void* e);
/**
* Remove an item from the queue.
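Under the revised contract, a comparator reports ordering with a sign rather than a boolean, which is what lets pqueue_find_equal_same_priority reuse it for equality checks. A minimal comparator for a plain numeric priority might look like this; sketch only, the runtime's own comparators are in_reverse_order for the reaction queue and pqueue_tag_compare for tags:

#include "pqueue_base.h" // for pqueue_pri_t; include path assumed

static int numeric_three_way(pqueue_pri_t a, pqueue_pri_t b) {
  if (a < b) return -1; // 'lower'
  if (a > b) return 1;  // 'higher'
  return 0;             // 'same' -- equality is now detectable by callers
}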
diff --git a/include/core/utils/pqueue_tag.h b/include/core/utils/pqueue_tag.h
index d69de5e56..e06e074be 100644
--- a/include/core/utils/pqueue_tag.h
+++ b/include/core/utils/pqueue_tag.h
@@ -1,5 +1,5 @@
/**
- * @file tag_pqueue.h
+ * @file pqueue_tag.h
* @author Byeonggil Jun
* @author Edward A. Lee
* @copyright (c) 2023, The University of California at Berkeley
@@ -60,6 +60,16 @@ typedef struct {
*/
typedef pqueue_t pqueue_tag_t;
+/**
+ * @brief Callback comparison function for the tag-based priority queue.
+ * Return -1 if the first argument is less than the second, 0 if the two arguments are the same,
+ * and 1 otherwise.
+ * This function is of type pqueue_cmp_pri_f.
+ * @param priority1 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t.
+ * @param priority2 A pointer to a pqueue_tag_element_t, cast to pqueue_pri_t.
+ */
+int pqueue_tag_compare(pqueue_pri_t priority1, pqueue_pri_t priority2);
+
/**
* @brief Create a priority queue sorted by tags.
*
@@ -69,6 +79,20 @@ typedef pqueue_t pqueue_tag_t;
*/
pqueue_tag_t* pqueue_tag_init(size_t initial_size);
+/**
+ * @brief Create a priority queue that stores elements with a particular payload.
+ *
+ * The elements of the priority queue will be of type pqueue_tag_element_t.
+ * The caller should call pqueue_tag_free() when finished with the queue.
+ * @param initial_size The initial size of the queue.
+ * @param cmppri The callback function to compare priorities.
+ * @param eqelem The callback function to check equivalence of payloads.
+ * @param prt The callback function to print elements.
+ * @return A dynamically allocated priority queue or NULL if memory allocation fails.
+ */
+pqueue_tag_t* pqueue_tag_init_customize(size_t initial_size, pqueue_cmp_pri_f cmppri, pqueue_eq_elem_f eqelem,
+ pqueue_print_entry_f prt);
+
/**
* @brief Free all memory used by the queue including elements that are marked dynamic.
*
@@ -124,6 +148,15 @@ int pqueue_tag_insert_if_no_match(pqueue_tag_t* q, tag_t t);
*/
pqueue_tag_element_t* pqueue_tag_find_with_tag(pqueue_tag_t* q, tag_t t);
+/**
+ * @brief Return an item with the same tag (`cmppri` returns 0) that matches the supplied element
+ * (`eqelem` returns non-zero) or NULL if there is none.
+ * @param q The queue.
+ * @param e The element.
+ * @return An entry with the specified tag or NULL if there isn't one.
+ */
+pqueue_tag_element_t* pqueue_tag_find_equal_same_tag(pqueue_tag_t* q, pqueue_tag_element_t* e);
+
/**
* @brief Return highest-ranking item (the one with the least tag) without removing it.
* @param q The queue.
@@ -175,4 +208,12 @@ void pqueue_tag_remove(pqueue_tag_t* q, pqueue_tag_element_t* e);
*/
void pqueue_tag_remove_up_to(pqueue_tag_t* q, tag_t t);
+/**
+ * Dump the queue and its internal structure.
+ * @internal
+ * Debug function only.
+ * @param q the queue
+ */
+void pqueue_tag_dump(pqueue_tag_t* q);
+
#endif // PQUEUE_TAG_H
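A caller that wants its own payload can embed pqueue_tag_element_t as the first member of its struct and pass its own equality and print callbacks to pqueue_tag_init_customize, reusing pqueue_tag_compare for the tag ordering. A sketch under those assumptions; the my_* names are illustrative, not part of the runtime:

typedef struct {
  pqueue_tag_element_t base; // must be first so casts to the base type are valid
  int payload;
} my_item_t;

static int my_matches(void* a, void* b) {
  return ((my_item_t*)a)->payload == ((my_item_t*)b)->payload;
}

static void my_print(void* element) {
  (void)element; // e.g. log the tag and payload; omitted in this sketch
}

static pqueue_tag_t* make_custom_queue(void) {
  return pqueue_tag_init_customize(10, pqueue_tag_compare, my_matches, my_print);
}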
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index f0b2c18bc..0b805b7e4 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -1,3 +1,8 @@
+set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/..)
+include(${LF_ROOT}/core/lf_utils.cmake)
+
add_library(lib schedule.c)
target_link_libraries(lib PRIVATE lf::low-level-platform-api)
target_link_libraries(lib PRIVATE lf::logging-api)
+
+lf_enable_compiler_warnings(lib)
\ No newline at end of file
diff --git a/lib/schedule.c b/lib/schedule.c
index 645bb41ec..5aa9fd528 100644
--- a/lib/schedule.c
+++ b/lib/schedule.c
@@ -45,12 +45,6 @@ trigger_handle_t lf_schedule_token(void* action, interval_t extra_delay, lf_toke
}
trigger_handle_t lf_schedule_copy(void* action, interval_t offset, void* value, size_t length) {
- if (length < 0) {
- lf_print_error("schedule_copy():"
- " Ignoring request to copy a value with a negative length (%zu).",
- length);
- return -1;
- }
if (value == NULL) {
return lf_schedule_token(action, offset, NULL);
}
@@ -129,13 +123,13 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
extra_delay = 0LL;
}
- LF_PRINT_DEBUG("lf_schedule_trigger: scheduling trigger %p with delay " PRINTF_TIME " and token %p.", trigger,
- extra_delay, token);
+ LF_PRINT_DEBUG("lf_schedule_trigger: scheduling trigger %p with delay " PRINTF_TIME " and token %p.", (void*)trigger,
+ extra_delay, (void*)token);
// Increment the reference count of the token.
if (token != NULL) {
token->ref_count++;
- LF_PRINT_DEBUG("lf_schedule_trigger: Incremented ref_count of %p to %zu.", token, token->ref_count);
+ LF_PRINT_DEBUG("lf_schedule_trigger: Incremented ref_count of %p to %zu.", (void*)token, token->ref_count);
}
// The trigger argument could be null, meaning that nothing is triggered.
@@ -155,17 +149,14 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
if (!trigger->is_timer) {
delay += trigger->offset;
}
- tag_t intended_tag = (tag_t){.time = env->current_tag.time + delay, .microstep = 0};
+ tag_t intended_tag = lf_delay_tag(env->current_tag, delay);
- LF_PRINT_DEBUG("lf_schedule_trigger: env->current_tag.time = " PRINTF_TIME ". Total logical delay = " PRINTF_TIME "",
- env->current_tag.time, delay);
+ LF_PRINT_DEBUG("lf_schedule_trigger: env->current_tag = " PRINTF_TAG ". Total logical delay = " PRINTF_TIME "",
+ env->current_tag.time, env->current_tag.microstep, delay);
interval_t min_spacing = trigger->period;
event_t* e = lf_get_new_event(env);
- // Initialize the next pointer.
- e->next = NULL;
-
// Set the payload.
e->token = token;
@@ -179,6 +170,7 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
if (trigger->is_physical) {
// Get the current physical time and assign it as the intended time.
intended_tag.time = lf_time_physical() + delay;
+ intended_tag.microstep = 0;
} else {
// FIXME: We need to verify that we are executing within a reaction?
// See reactor_threaded.
@@ -204,18 +196,17 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
e->intended_tag = trigger->intended_tag;
#endif
- // Check for conflicts (a queued event with the same trigger and time).
+ // Check for conflicts (a queued event with the same trigger and tag).
if (min_spacing <= 0) {
// No minimum spacing defined.
- e->time = intended_tag.time;
- event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, e);
+ e->base.tag = intended_tag;
+ event_t* found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)e);
// Check for conflicts. Let events pile up in super dense time.
if (found != NULL) {
- intended_tag.microstep++;
- // Skip to the last node in the linked list.
- while (found->next != NULL) {
- found = found->next;
+ while (found != NULL) {
intended_tag.microstep++;
+ e->base.tag = intended_tag;
+ found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)e);
}
if (lf_is_tag_after_stop_tag(env, intended_tag)) {
LF_PRINT_DEBUG("Attempt to schedule an event after stop_tag was rejected.");
@@ -224,9 +215,8 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
lf_recycle_event(env, e);
return 0;
}
- // Hook the event into the list.
- found->next = e;
trigger->last_tag = intended_tag;
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
return (0); // FIXME: return value
}
// If there are not conflicts, schedule as usual. If intended time is
@@ -254,14 +244,13 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
return (0);
case replace:
LF_PRINT_DEBUG("Policy is replace. Replacing the previous event.");
- // If the event with the previous time is still on the event
+ // If the event with the previous tag is still on the event
// queue, then replace the token. To find this event, we have
// to construct a dummy event_t struct.
event_t* dummy = lf_get_new_event(env);
- dummy->next = NULL;
dummy->trigger = trigger;
- dummy->time = trigger->last_tag.time;
- event_t* found = (event_t*)pqueue_find_equal_same_priority(env->event_q, dummy);
+ dummy->base.tag = trigger->last_tag;
+ event_t* found = (event_t*)pqueue_tag_find_equal_same_tag(env->event_q, (pqueue_tag_element_t*)dummy);
if (found != NULL) {
// Recycle the existing token and the new event
@@ -298,16 +287,20 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
intended_tag.time = env->current_tag.time;
}
#endif
+ if (lf_tag_compare(intended_tag, env->current_tag) == 0) {
+ // Increment microstep.
+ intended_tag.microstep++;
+ }
// Set the tag of the event.
- e->time = intended_tag.time;
-
- // Do not schedule events if if the event time is past the stop time
- // (current microsteps are checked earlier).
- LF_PRINT_DEBUG("Comparing event with elapsed time " PRINTF_TIME " against stop time " PRINTF_TIME ".",
- e->time - lf_time_start(), env->stop_tag.time - lf_time_start());
- if (e->time > env->stop_tag.time) {
- LF_PRINT_DEBUG("lf_schedule_trigger: event time is past the timeout. Discarding event.");
+ e->base.tag = intended_tag;
+
+ // Do not schedule events if the event time is past the stop tag.
+ LF_PRINT_DEBUG("Comparing event with elapsed tag " PRINTF_TAG " against stop tag " PRINTF_TAG ".",
+ e->base.tag.time - lf_time_start(), e->base.tag.microstep, env->stop_tag.time - lf_time_start(),
+ env->stop_tag.microstep);
+ if (lf_is_tag_after_stop_tag(env, intended_tag)) {
+ LF_PRINT_DEBUG("lf_schedule_trigger: event tag is past the timeout. Discarding event.");
_lf_done_using(token);
lf_recycle_event(env, e);
return (0);
@@ -318,16 +311,11 @@ trigger_handle_t lf_schedule_trigger(environment_t* env, trigger_t* trigger, int
trigger->last_tag = intended_tag;
// Queue the event.
- // NOTE: There is no need for an explicit microstep because
- // when this is called, all events at the current tag
- // (time and microstep) have been pulled from the queue,
- // and any new events added at this tag will go into the reaction_q
- // rather than the event_q, so anything put in the event_q with this
- // same time will automatically be executed at the next microstep.
- LF_PRINT_LOG("Inserting event in the event queue with elapsed time " PRINTF_TIME ".", e->time - lf_time_start());
- pqueue_insert(env->event_q, e);
-
- tracepoint_schedule(env, trigger, e->time - env->current_tag.time);
+ LF_PRINT_LOG("Inserting event in the event queue with elapsed tag " PRINTF_TAG ".",
+ e->base.tag.time - lf_time_start(), e->base.tag.microstep);
+ pqueue_tag_insert(env->event_q, (pqueue_tag_element_t*)e);
+
+ tracepoint_schedule(env, trigger, e->base.tag.time - env->current_tag.time);
// FIXME: make a record of handle and implement unschedule.
// NOTE: Rather than wrapping around to get a negative number,
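The switch to lf_delay_tag above matters because that helper already handles the superdense-time case: as documented in tag.c (to the best of my reading), a zero logical delay advances only the microstep, while a positive delay advances time and resets the microstep. Illustrative values only:

static void delay_tag_examples(void) {
  tag_t now = {.time = 1000, .microstep = 2};
  tag_t same_time = lf_delay_tag(now, 0); // expected {1000, 3}: microstep bump only
  tag_t later = lf_delay_tag(now, 500);   // expected {1500, 0}: time advances, microstep resets
  (void)same_time;
  (void)later;
}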
diff --git a/lingua-franca-ref.txt b/lingua-franca-ref.txt
index 1f7391f92..8b25206ff 100644
--- a/lingua-franca-ref.txt
+++ b/lingua-franca-ref.txt
@@ -1 +1 @@
-master
+master
\ No newline at end of file
diff --git a/logging/api/logging_macros.h b/logging/api/logging_macros.h
index 73939f576..6f7ea1eba 100644
--- a/logging/api/logging_macros.h
+++ b/logging/api/logging_macros.h
@@ -76,7 +76,7 @@
#if defined(NDEBUG)
#define LF_ASSERT(condition, format, ...) (void)(condition)
#define LF_ASSERTN(condition, format, ...) (void)(condition)
-#define LF_ASSERT_NON_NULL(pointer)
+#define LF_ASSERT_NON_NULL(pointer) (void)(pointer)
#else
#define LF_ASSERT(condition, format, ...) \
do { \
diff --git a/low_level_platform/api/low_level_platform.h b/low_level_platform/api/low_level_platform.h
index e37a166a2..2867aa0f4 100644
--- a/low_level_platform/api/low_level_platform.h
+++ b/low_level_platform/api/low_level_platform.h
@@ -74,10 +74,14 @@ int lf_critical_section_exit(environment_t* env);
#define LF_TIMEOUT 1
+// Worker priorities range from 0 to 99 where 99 is the highest priority.
+#define LF_SCHED_MAX_PRIORITY 99
+#define LF_SCHED_MIN_PRIORITY 0
+
// To support the single-threaded runtime, we need the following functions. They
// are not required by the threaded runtime and is thus hidden behind a #ifdef.
#if defined(LF_SINGLE_THREADED)
-typedef void lf_mutex_t;
+typedef void* lf_mutex_t;
/**
* @brief Disable interrupts with support for nested calls
* @return 0 on success
@@ -107,12 +111,16 @@ int lf_mutex_lock(lf_mutex_t* mutex);
*/
int lf_available_cores();
+/**
+ * @brief Return the lf_thread_t of the calling thread.
+ */
+lf_thread_t lf_thread_self();
+
/**
* Create a new thread, starting with execution of lf_thread
* getting passed arguments. The new handle is stored in thread_id.
*
* @return 0 on success, platform-specific error number otherwise.
- *
*/
int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments);
@@ -132,6 +140,54 @@ int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* argum
*/
int lf_thread_join(lf_thread_t thread, void** thread_return);
+/**
+ * @brief The thread scheduling policies.
+ */
+typedef enum {
+ LF_SCHED_FAIR, // Non-real-time scheduling policy. Corresponds to SCHED_OTHER.
+ LF_SCHED_TIMESLICE, // Real-time, time-slicing priority-based policy. Corresponds to SCHED_RR.
+ LF_SCHED_PRIORITY, // Real-time, priority-only based scheduling. Corresponds to SCHED_FIFO.
+} lf_scheduling_policy_type_t;
+
+typedef struct {
+ lf_scheduling_policy_type_t policy; // The scheduling policy
+ int priority; // The priority, if applicable
+ interval_t time_slice; // The time-slice allocated, if applicable.
+} lf_scheduling_policy_t;
+
+/**
+ * @brief Pin a thread to a specific CPU.
+ *
+ * @param thread The thread
+ * @param cpu_number the CPU ID
+ * @return 0 on success, platform-specific error number otherwise.
+ */
+int lf_thread_set_cpu(lf_thread_t thread, int cpu_number);
+
+/**
+ * @brief Set the priority of a thread.
+ * Priority ranges from 0 to 99 where a higher
+ * number indicates higher priority. Setting the priority of a thread only
+ * makes sense if the thread is scheduled with LF_SCHED_TIMESLICE or LF_SCHED_PRIORITY.
+ *
+ * @param thread The thread.
+ * @param priority The priority.
+ * @return int 0 on success, platform-specific error otherwise
+ */
+int lf_thread_set_priority(lf_thread_t thread, int priority);
+
+/**
+ * @brief Set the scheduling policy of a thread. This is based on the scheduling
+ * concept from Linux explained here: https://man7.org/linux/man-pages/man7/sched.7.html
+ * A scheduling policy is specific to a thread/worker. We have three policies:
+ * LF_SCHED_PRIORITY which corresponds to SCHED_FIFO on Linux.
+ * LF_SCHED_TIMESLICE which corresponds to SCHED_RR on Linux.
+ * LF_SCHED_FAIR which corresponds to SCHED_OTHER on Linux.
+ *
+ * @return int 0 on success, platform-specific error number otherwise.
+ */
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy);
+
/**
* Initialize a mutex.
*
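The new scheduling hooks compose in the obvious way: fill in a policy struct, then apply it to a thread handle. A hedged sketch of requesting real-time, priority-only scheduling for the calling thread; the error handling and the chosen priority are illustrative:

static void make_me_realtime(void) {
  lf_scheduling_policy_t policy = {
      .policy = LF_SCHED_PRIORITY,
      .priority = LF_SCHED_MAX_PRIORITY,
      .time_slice = 0 // only meaningful for LF_SCHED_TIMESLICE
  };
  if (lf_thread_set_scheduling_policy(lf_thread_self(), &policy) != 0) {
    lf_print_warning("Could not set the scheduling policy; falling back to the platform default.");
  }
}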
diff --git a/low_level_platform/api/platform/lf_C11_threads_support.h b/low_level_platform/api/platform/lf_C11_threads_support.h
deleted file mode 100644
index 64a25797f..000000000
--- a/low_level_platform/api/platform/lf_C11_threads_support.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* C11 threads support for the C target of Lingua Franca. */
-
-/*************
-Copyright (c) 2019, The University of California at Berkeley.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
-THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
-THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-***************/
-
-/** \file if_c11_threads_support.c
- * C11 threads support for the C target of Lingua Franca.
- *
- * @author{Soroush Bateni }
- */
-#ifndef LF_C11_THREADS_SUPPORT_H
-#define LF_C11_THREADS_SUPPORT_H
-
-#include
-
-typedef mtx_t lf_mutex_t;
-typedef struct {
- lf_mutex_t* mutex;
- cnd_t condition;
-} lf_cond_t;
-typedef thrd_t lf_thread_t;
-
-#endif
diff --git a/low_level_platform/api/platform/lf_POSIX_threads_support.h b/low_level_platform/api/platform/lf_POSIX_threads_support.h
index d27e7a16f..340fc3e88 100644
--- a/low_level_platform/api/platform/lf_POSIX_threads_support.h
+++ b/low_level_platform/api/platform/lf_POSIX_threads_support.h
@@ -44,4 +44,4 @@ typedef struct {
} lf_cond_t;
typedef pthread_t lf_thread_t;
-#endif
\ No newline at end of file
+#endif
diff --git a/low_level_platform/api/platform/lf_linux_support.h b/low_level_platform/api/platform/lf_linux_support.h
index 18f68b2aa..cdeb17969 100644
--- a/low_level_platform/api/platform/lf_linux_support.h
+++ b/low_level_platform/api/platform/lf_linux_support.h
@@ -40,12 +40,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "lf_tag_64_32.h"
#if !defined LF_SINGLE_THREADED
-#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
-// (Not C++11 or later) or no threads support
#include "lf_POSIX_threads_support.h"
-#else
-#include "lf_C11_threads_support.h"
-#endif
#endif
#if !defined(_POSIX_TIMERS) || _POSIX_TIMERS <= 0
diff --git a/low_level_platform/api/platform/lf_macos_support.h b/low_level_platform/api/platform/lf_macos_support.h
index 357729f08..e0f4cbf4b 100644
--- a/low_level_platform/api/platform/lf_macos_support.h
+++ b/low_level_platform/api/platform/lf_macos_support.h
@@ -38,12 +38,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "lf_tag_64_32.h"
#if !defined LF_SINGLE_THREADED
-#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
-// (Not C++11 or later) or no threads support
#include "lf_POSIX_threads_support.h"
-#else
-#include "lf_C11_threads_support.h"
-#endif
#endif
#endif // LF_MACOS_SUPPORT_H
diff --git a/low_level_platform/api/platform/lf_nrf52_support.h b/low_level_platform/api/platform/lf_nrf52_support.h
index b93edaf8e..18613b2e0 100644
--- a/low_level_platform/api/platform/lf_nrf52_support.h
+++ b/low_level_platform/api/platform/lf_nrf52_support.h
@@ -48,7 +48,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
* No mutex or condition variable needed for single threaded NRF platforms
*/
-typedef void lf_mutex_t;
+typedef void* lf_mutex_t;
typedef void _lf_cond_var_t;
#endif // LF_nRF52832_SUPPORT_H
diff --git a/low_level_platform/api/platform/lf_platform_util.h b/low_level_platform/api/platform/lf_platform_util.h
new file mode 100644
index 000000000..0874f4cdf
--- /dev/null
+++ b/low_level_platform/api/platform/lf_platform_util.h
@@ -0,0 +1,8 @@
+#ifndef LF_PLATFORM_UTIL_H
+#define LF_PLATFORM_UTIL_H
+/**
+ * @brief Maps a priority into a destination priority range.
+ */
+int map_priorities(int priority, int dest_min, int dest_max);
+
+#endif
\ No newline at end of file
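One plausible reading of map_priorities is a linear rescale from the portable range [LF_SCHED_MIN_PRIORITY, LF_SCHED_MAX_PRIORITY] into a platform-specific range. The sketch below is an assumption about its behavior, not the actual implementation in lf_platform_util.c, which may also validate its inputs:

static int map_priorities_sketch(int priority, int dest_min, int dest_max) {
  // Linearly map priority from [LF_SCHED_MIN_PRIORITY, LF_SCHED_MAX_PRIORITY]
  // onto [dest_min, dest_max] using integer arithmetic.
  return dest_min + (priority - LF_SCHED_MIN_PRIORITY) * (dest_max - dest_min) /
                        (LF_SCHED_MAX_PRIORITY - LF_SCHED_MIN_PRIORITY);
}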
diff --git a/low_level_platform/api/platform/lf_zephyr_board_support.h b/low_level_platform/api/platform/lf_zephyr_board_support.h
index 77834e985..2b6b77e09 100644
--- a/low_level_platform/api/platform/lf_zephyr_board_support.h
+++ b/low_level_platform/api/platform/lf_zephyr_board_support.h
@@ -35,54 +35,35 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define LF_ZEPHYR_THREAD_PRIORITY_DEFAULT 5
#define LF_ZEPHYR_STACK_SIZE_DEFAULT 2048
-// Unless the user explicitly asks for the kernel clock, then we use a counter
-// clock because it is more precise.
-#if !defined(LF_ZEPHYR_CLOCK_KERNEL)
+#if defined(LF_ZEPHYR_CLOCK_COUNTER)
#if defined(CONFIG_SOC_FAMILY_NRF)
-#define LF_ZEPHYR_CLOCK_COUNTER
#define LF_TIMER DT_NODELABEL(timer1)
#define LF_WAKEUP_OVERHEAD_US 100
#define LF_MIN_SLEEP_US 10
#define LF_RUNTIME_OVERHEAD_US 19
#elif defined(CONFIG_BOARD_ATSAMD20_XPRO)
#define LF_TIMER DT_NODELABEL(tc4)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_SOC_FAMILY_SAM)
#define LF_TIMER DT_NODELABEL(tc0)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_COUNTER_MICROCHIP_MCP7940N)
-#define LF_ZEPHYR_CLOCK_COUNTER
#define LF_TIMER DT_NODELABEL(extrtc0)
#elif defined(CONFIG_COUNTER_RTC0)
-#define LF_ZEPHYR_CLOCK_COUNTER
#define LF_TIMER DT_NODELABEL(rtc0)
#elif defined(CONFIG_COUNTER_RTC_STM32)
#define LF_TIMER DT_INST(0, st_stm32_rtc)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_COUNTER_XLNX_AXI_TIMER)
#define LF_TIMER DT_INST(0, xlnx_xps_timer_1_00_a)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_COUNTER_TMR_ESP32)
#define LF_TIMER DT_NODELABEL(timer0)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_COUNTER_MCUX_CTIMER)
#define LF_TIMER DT_NODELABEL(ctimer0)
-#define LF_ZEPHYR_CLOCK_COUNTER
#elif defined(CONFIG_SOC_MIMXRT1176_CM7)
#define LF_TIMER DT_NODELABEL(gpt2)
-#define LF_ZEPHYR_CLOCK_COUNTER
#else
// This board does not have support for the counter clock. If the user
// explicitly asked for this clock, then throw an error.
-#if defined(LF_ZEPHYR_CLOCK_COUNTER)
#error "LF_ZEPHYR_CLOCK_COUNTER was requested but it is not supported by the board"
-#else
-#define LF_ZEPHYR_CLOCK_KERNEL
-#endif
-#endif // BOARD
#endif
-
-#if defined(LF_ZEPHYR_CLOCK_COUNTER)
#ifndef LF_WAKEUP_OVERHEAD_US
#define LF_WAKEUP_OVERHEAD_US 0
#endif
@@ -98,10 +79,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef LF_TIMER_ALARM_CHANNEL
#define LF_TIMER_ALARM_CHANNEL 0
#endif
-#else
-#if !defined(LF_ZEPHYR_CLOCK_KERNEL)
-#error Neither hi-res nor lo-res clock specified
#endif
-#endif // LF_ZEPHYR_CLOCK_COUNTER
#endif
diff --git a/low_level_platform/api/platform/lf_zephyr_support.h b/low_level_platform/api/platform/lf_zephyr_support.h
index 49172eb21..0f7ab6b4d 100644
--- a/low_level_platform/api/platform/lf_zephyr_support.h
+++ b/low_level_platform/api/platform/lf_zephyr_support.h
@@ -49,29 +49,6 @@ typedef struct {
} lf_cond_t;
typedef struct k_thread* lf_thread_t;
-/**
- * @brief Add `value` to `*ptr` and return original value of `*ptr`
- */
-int _zephyr_atomic_fetch_add(int* ptr, int value);
-/**
- * @brief Add `value` to `*ptr` and return new updated value of `*ptr`
- */
-int _zephyr_atomic_add_fetch(int* ptr, int value);
-
-/**
- * @brief Compare and swap for boolaen value.
- * If `*ptr` is equal to `value` then overwrite it
- * with `newval`. If not do nothing. Retruns true on overwrite.
- */
-bool _zephyr_bool_compare_and_swap(bool* ptr, bool value, bool newval);
-
-/**
- * @brief Compare and swap for integers. If `*ptr` is equal
- * to `value`, it is updated to `newval`. The function returns
- * the original value of `*ptr`.
- */
-int _zephyr_val32_compare_and_swap(uint32_t* ptr, int value, int newval);
-
#endif // !LF_SINGLE_THREADED
#endif // LF_ZEPHYR_SUPPORT_H
diff --git a/low_level_platform/impl/CMakeLists.txt b/low_level_platform/impl/CMakeLists.txt
index 7641dd663..5f6244664 100644
--- a/low_level_platform/impl/CMakeLists.txt
+++ b/low_level_platform/impl/CMakeLists.txt
@@ -1,32 +1,70 @@
# Check which system we are running on to select the correct platform support
# file and assign the file's path to LF_PLATFORM_FILE
set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../..)
-set(LF_LOW_LEVEL_PLATFORM_FILES
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_syscall_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_linux_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_macos_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_windows_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_nrf52_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_counter.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_kernel.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_rp2040_support.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_windows.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c
- ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c
- ${CMAKE_CURRENT_LIST_DIR}/src/platform_internal.c
-)
-if(PLATFORM_ZEPHYR)
- message("--- Building Zephyr library")
+include(${LF_ROOT}/core/lf_utils.cmake)
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+ set(CMAKE_SYSTEM_VERSION 10.0)
+ message("Using Windows SDK version ${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_windows_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_windows.c
+ )
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_linux_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c
+ )
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_unix_clock_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_macos_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_gcc_clang.c
+ )
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Nrf52")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_nrf52_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c
+ )
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_counter.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_zephyr_clock_kernel.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c
+ )
+elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040")
+ set(LF_LOW_LEVEL_PLATFORM_FILES
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_rp2040_support.c
+ ${CMAKE_CURRENT_LIST_DIR}/src/lf_atomic_irq.c
+ )
+else()
+ message(FATAL_ERROR "Your platform is not supported! The C target supports Linux, MacOS, Windows, Zephyr, Nrf52 and RP2040.")
+endif()
+
+list(APPEND LF_LOW_LEVEL_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/src/lf_platform_util.c)
+
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
+ if(${LF_ZEPHYR_CLOCK_COUNTER})
+ message(STATUS "Building Zephyr library with Counter clock ")
+ else()
+ message(STATUS "Building Zephyr library with Kernel clock ")
+ endif()
zephyr_library_named(lf-low-level-platform-impl)
zephyr_library_sources(${LF_LOW_LEVEL_PLATFORM_FILES})
zephyr_library_link_libraries(kernel)
else()
-message("--- Building non-Zephyr library")
add_library(lf-low-level-platform-impl STATIC ${LF_LOW_LEVEL_PLATFORM_FILES})
+ # Link the platform to a threading library
+ if(NOT DEFINED LF_SINGLE_THREADED OR DEFINED LF_TRACE)
+ find_package(Threads REQUIRED)
+ target_link_libraries(lf-low-level-platform-impl PRIVATE Threads::Threads)
+ endif()
endif()
+
add_library(lf::low-level-platform-impl ALIAS lf-low-level-platform-impl)
+lf_enable_compiler_warnings(lf-low-level-platform-impl)
target_link_libraries(lf-low-level-platform-impl PRIVATE lf::low-level-platform-api)
target_link_libraries(lf-low-level-platform-impl PUBLIC lf-logging-api)
@@ -45,3 +83,4 @@ low_level_platform_define(MODAL_REACTORS)
low_level_platform_define(USER_THREADS)
low_level_platform_define(NUMBER_OF_WORKERS)
low_level_platform_define(NUMBER_OF_WATCHDOGS)
+low_level_platform_define(LF_ZEPHYR_CLOCK_COUNTER)
diff --git a/low_level_platform/impl/Platform.cmake b/low_level_platform/impl/Platform.cmake
deleted file mode 100644
index cc6042c7c..000000000
--- a/low_level_platform/impl/Platform.cmake
+++ /dev/null
@@ -1,18 +0,0 @@
-# Check which system we are running on to select the correct platform support
-# file and assign the file's path to LF_PLATFORM_FILE
-if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
- set(LF_PLATFORM_FILE lf_linux_support.c)
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Darwin")
- set(LF_PLATFORM_FILE lf_macos_support.c)
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
- set(LF_PLATFORM_FILE lf_windows_support.c)
- set(CMAKE_SYSTEM_VERSION 10.0)
- message("Using Windows SDK version ${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION}")
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
- set(LF_PLATFORM_FILE lf_zephyr_support.c)
-elseif(${CMAKE_SYSTEM_NAME} STREQUAL "Rp2040")
- message("Using pico-sdk for RP2040 target")
- set(LF_PLATFORM_FILE lf_rp2040_support.c)
-else()
- message(FATAL_ERROR "Your platform is not supported! The C target supports Linux, MacOS and Windows.")
-endif()
diff --git a/low_level_platform/impl/src/lf_C11_threads_support.c b/low_level_platform/impl/src/lf_C11_threads_support.c
deleted file mode 100644
index 527ce28d3..000000000
--- a/low_level_platform/impl/src/lf_C11_threads_support.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#if !defined(LF_SINGLE_THREADED) && !defined(PLATFORM_ARDUINO)
-#include "low_level_platform.h"
-#include "platform/lf_C11_threads_support.h"
-#include <threads.h>
-#include <stdlib.h>
-#include <stdint.h> // For fixed-width integral types
-
-int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
- return thrd_create((thrd_t*)thread, (thrd_start_t)lf_thread, arguments);
-}
-
-int lf_thread_join(lf_thread_t thread, void** thread_return) {
- // thrd_join wants the second argument to be an int* rather than a void**
- return thrd_join((thrd_t)thread, (int*)thread_return);
-}
-
-int lf_mutex_init(lf_mutex_t* mutex) {
- // Set up a timed and recursive mutex (default behavior)
- return mtx_init((mtx_t*)mutex, mtx_timed | mtx_recursive);
-}
-
-int lf_mutex_lock(lf_mutex_t* mutex) { return mtx_lock((mtx_t*)mutex); }
-
-int lf_mutex_unlock(lf_mutex_t* mutex) { return mtx_unlock((mtx_t*)mutex); }
-
-int lf_cond_init(lf_cond_t* cond, lf_mutex_t* mutex) {
- cond->mutex = mutex;
- return cnd_init((cnd_t*)&cond->condition);
-}
-
-int lf_cond_broadcast(lf_cond_t* cond) { return cnd_broadcast((cnd_t*)&cond->condition); }
-
-int lf_cond_signal(lf_cond_t* cond) { return cnd_signal((cnd_t*)&cond->condition); }
-
-int lf_cond_wait(lf_cond_t* cond) { return cnd_wait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex); }
-
-int _lf_cond_timedwait(lf_cond_t* cond, instant_t wakeup_time) {
- struct timespec timespec_absolute_time = {.tv_sec = wakeup_time / BILLION, .tv_nsec = wakeup_time % BILLION};
-
- int return_value = cnd_timedwait((cnd_t*)&cond->condition, (mtx_t*)cond->mutex, &timespec_absolute_time);
-
- switch (return_value) {
- case thrd_timedout:
- return_value = LF_TIMEOUT;
- break;
-
- default:
- break;
- }
- return return_value;
-}
-#endif
diff --git a/low_level_platform/impl/src/lf_POSIX_threads_support.c b/low_level_platform/impl/src/lf_POSIX_threads_support.c
index 57f3a6811..255f38255 100644
--- a/low_level_platform/impl/src/lf_POSIX_threads_support.c
+++ b/low_level_platform/impl/src/lf_POSIX_threads_support.c
@@ -6,11 +6,16 @@
#include <pthread.h>
#include <errno.h>
#include <stdint.h> // For fixed-width integral types
+#include <unistd.h>
+
+int lf_available_cores() { return (int)sysconf(_SC_NPROCESSORS_ONLN); }
int lf_thread_create(lf_thread_t* thread, void* (*lf_thread)(void*), void* arguments) {
return pthread_create((pthread_t*)thread, NULL, lf_thread, arguments);
}
+lf_thread_t lf_thread_self() { return pthread_self(); }
+
int lf_thread_join(lf_thread_t thread, void** thread_return) { return pthread_join((pthread_t)thread, thread_return); }
int lf_mutex_init(lf_mutex_t* mutex) {
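A small usage sketch for the two functions added here; the clamping helper is hypothetical and only shows how a caller might consume `lf_available_cores()`:

```c
// Hypothetical caller; only the extern declaration corresponds to the
// function added above.
extern int lf_available_cores(void);

// Clamp a requested worker count to the number of online cores.
int clamp_workers(int requested) {
  int cores = lf_available_cores(); // sysconf(_SC_NPROCESSORS_ONLN) on POSIX
  if (cores <= 0) {
    return requested; // fall back if the query is unavailable
  }
  return (requested > 0 && requested < cores) ? requested : cores;
}
```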
diff --git a/low_level_platform/impl/src/lf_linux_support.c b/low_level_platform/impl/src/lf_linux_support.c
index 3edf8d7ea..716d6c1c9 100644
--- a/low_level_platform/impl/src/lf_linux_support.c
+++ b/low_level_platform/impl/src/lf_linux_support.c
@@ -25,30 +25,105 @@ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************/
/**
* @brief Platform support for the Linux operating system.
*
* @author{Soroush Bateni }
* @author{Marten Lohstroh }
+ * @author{Erling Jellum }
*/
+#define _GNU_SOURCE // Needed to get access to Linux thread-scheduling API
#include "platform/lf_linux_support.h"
+#include "platform/lf_platform_util.h"
#include "low_level_platform.h"
+#include "platform/lf_unix_clock_support.h"
+
#if defined LF_SINGLE_THREADED
#include "lf_os_single_threaded_support.c"
-#endif
-
-#if !defined LF_SINGLE_THREADED
-#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
-// (Not C++11 or later) or no threads support
-#include "lf_POSIX_threads_support.c"
#else
-#include "lf_C11_threads_support.c"
-#endif
-#endif
+#include "lf_POSIX_threads_support.c"
-#include "platform/lf_unix_clock_support.h"
+int lf_thread_set_cpu(lf_thread_t thread, int cpu_number) {
+ // First verify that we have num_cores>cpu_number
+ if (lf_available_cores() <= cpu_number) {
+ return -1;
+ }
+
+ // Create a CPU-set consisting of only the desired CPU
+ cpu_set_t cpu_set;
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpu_number, &cpu_set);
+
+ return pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set);
+}
+
+int lf_thread_set_priority(lf_thread_t thread, int priority) {
+ int posix_policy, min_pri, max_pri, final_priority;
+ struct sched_param schedparam;
+
+ if (priority > LF_SCHED_MAX_PRIORITY || priority < LF_SCHED_MIN_PRIORITY) {
+ return -1;
+ }
+
+ // Get the current scheduling policy
+ if (pthread_getschedparam(thread, &posix_policy, &schedparam) != 0) {
+ return -1;
+ }
+
+ min_pri = sched_get_priority_min(posix_policy);
+ max_pri = sched_get_priority_max(posix_policy);
+ if (min_pri == -1 || max_pri == -1) {
+ return -1;
+ }
+
+ final_priority = map_priorities(priority, min_pri, max_pri);
+ if (final_priority < 0) {
+ return -1;
+ }
+
+ return pthread_setschedprio(thread, final_priority);
+}
+
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) {
+ int posix_policy;
+ struct sched_param schedparam;
+
+ // Get the current scheduling policy
+ if (pthread_getschedparam(thread, &posix_policy, &schedparam) != 0) {
+ return -1;
+ }
+
+ // Update the policy
+ switch (policy->policy) {
+ case LF_SCHED_FAIR:
+ posix_policy = SCHED_OTHER;
+ break;
+ case LF_SCHED_TIMESLICE:
+ posix_policy = SCHED_RR;
+ break;
+ case LF_SCHED_PRIORITY:
+ posix_policy = SCHED_FIFO;
+ break;
+ default:
+ return -1;
+ break;
+ }
+
+ // Write it back
+ if (pthread_setschedparam(thread, posix_policy, &schedparam) != 0) {
+ return -3;
+ }
+
+ // Set the priority
+ if (lf_thread_set_priority(thread, policy->priority) != 0) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
int lf_sleep(interval_t sleep_duration) {
const struct timespec tp = convert_ns_to_timespec(sleep_duration);
@@ -57,6 +132,7 @@ int lf_sleep(interval_t sleep_duration) {
}
int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
+ (void)env;
interval_t sleep_duration = wakeup_time - lf_time_physical();
if (sleep_duration <= 0) {
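A hypothetical caller-side sketch of the new Linux scheduling functions; the helper below is not part of the runtime and assumes the declarations from low_level_platform.h (`lf_thread_t`, `lf_scheduling_policy_t`, and the `LF_SCHED_*` constants used above):

```c
#include "low_level_platform.h"

// Hypothetical setup helper: pin a worker to one CPU and give it a
// fixed-priority (SCHED_FIFO) policy via the functions defined above.
static int make_worker_realtime(lf_thread_t worker, int cpu, int lf_priority) {
  if (lf_thread_set_cpu(worker, cpu) != 0) {
    return -1; // cpu out of range or affinity call failed
  }
  lf_scheduling_policy_t policy;
  policy.policy = LF_SCHED_PRIORITY; // mapped to SCHED_FIFO above
  policy.priority = lf_priority;     // within [LF_SCHED_MIN_PRIORITY, LF_SCHED_MAX_PRIORITY]
  return lf_thread_set_scheduling_policy(worker, &policy);
}
```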
diff --git a/low_level_platform/impl/src/lf_macos_support.c b/low_level_platform/impl/src/lf_macos_support.c
index 54fcfd296..d6b59c4c9 100644
--- a/low_level_platform/impl/src/lf_macos_support.c
+++ b/low_level_platform/impl/src/lf_macos_support.c
@@ -36,15 +36,17 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined LF_SINGLE_THREADED
#include "lf_os_single_threaded_support.c"
-#endif
-
-#if !defined LF_SINGLE_THREADED
-#if __STDC_VERSION__ < 201112L || defined(__STDC_NO_THREADS__)
-// (Not C++11 or later) or no threads support
-#include "lf_POSIX_threads_support.c"
#else
-#include "lf_C11_threads_support.c"
-#endif
+#include "lf_POSIX_threads_support.c"
+
+/**
+ * Real-time scheduling API not implemented for macOS.
+ */
+int lf_thread_set_cpu(lf_thread_t thread, int cpu_number) { return -1; }
+
+int lf_thread_set_priority(lf_thread_t thread, int priority) { return -1; }
+
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { return -1; }
#endif
#include "platform/lf_unix_clock_support.h"
@@ -57,6 +59,7 @@ int lf_sleep(interval_t sleep_duration) {
}
int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup_time) {
+ (void)env;
interval_t sleep_duration = wakeup_time - lf_time_physical();
if (sleep_duration <= 0) {
diff --git a/low_level_platform/impl/src/lf_platform_util.c b/low_level_platform/impl/src/lf_platform_util.c
new file mode 100644
index 000000000..317e696bd
--- /dev/null
+++ b/low_level_platform/impl/src/lf_platform_util.c
@@ -0,0 +1,25 @@
+#include "low_level_platform.h"
+#include "platform/lf_platform_util.h"
+
+int map_priorities(int priority, int dest_min, int dest_max) {
+ // Check if priority is within the legal range
+ if (priority < LF_SCHED_MIN_PRIORITY || priority > LF_SCHED_MAX_PRIORITY) {
+ return -1;
+ }
+
+ // Perform the linear mapping
+ return dest_min +
+ ((dest_max - dest_min) / (LF_SCHED_MAX_PRIORITY - LF_SCHED_MIN_PRIORITY)) * (priority - LF_SCHED_MIN_PRIORITY);
+}
+
+#ifndef PLATFORM_ZEPHYR // on Zephyr, this is handled separately
+#ifndef LF_SINGLE_THREADED
+static int _lf_worker_thread_count = 0;
+
+static thread_local int lf_thread_id_var = -1;
+
+int lf_thread_id() { return lf_thread_id_var; }
+
+void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
+#endif
+#endif
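To make the linear mapping concrete, here is a standalone check with placeholder bounds (the real `LF_SCHED_MIN_PRIORITY`/`LF_SCHED_MAX_PRIORITY` values are defined elsewhere and may differ); note that the arithmetic is integer, so the scale factor is truncated:

```c
#include <stdio.h>

// Placeholder bounds, for illustration only.
#define LF_SCHED_MIN_PRIORITY 0
#define LF_SCHED_MAX_PRIORITY 10

// Copy of the mapping above, reproduced so this sketch is self-contained.
static int map_priorities(int priority, int dest_min, int dest_max) {
  if (priority < LF_SCHED_MIN_PRIORITY || priority > LF_SCHED_MAX_PRIORITY) {
    return -1;
  }
  return dest_min +
         ((dest_max - dest_min) / (LF_SCHED_MAX_PRIORITY - LF_SCHED_MIN_PRIORITY)) *
             (priority - LF_SCHED_MIN_PRIORITY);
}

int main(void) {
  printf("%d\n", map_priorities(0, 0, 100));  // 0
  printf("%d\n", map_priorities(5, 0, 100));  // 50
  printf("%d\n", map_priorities(10, 0, 100)); // 100
  printf("%d\n", map_priorities(11, 0, 100)); // -1 (out of range)
  return 0;
}
```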
diff --git a/low_level_platform/impl/src/lf_unix_syscall_support.c b/low_level_platform/impl/src/lf_unix_syscall_support.c
deleted file mode 100644
index 992824c33..000000000
--- a/low_level_platform/impl/src/lf_unix_syscall_support.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#if defined(PLATFORM_Linux) || defined(PLATFORM_Darwin)
-/**
- * @file lf_unix_syscall_support.c
- * @author Soroush Bateni (soroush@utdallas.edu)
- * @brief Platform support for syscalls in Unix-like systems.
- * @version 0.1
- * @date 2022-03-09
- *
- * @copyright Copyright (c) 2022 The University of Texas at Dallas
- *
- */
-
-#include <unistd.h>
-
-int lf_available_cores() { return (int)sysconf(_SC_NPROCESSORS_ONLN); }
-#endif
diff --git a/low_level_platform/impl/src/lf_windows_support.c b/low_level_platform/impl/src/lf_windows_support.c
index 1cdadc43c..a0f1fe4dc 100644
--- a/low_level_platform/impl/src/lf_windows_support.c
+++ b/low_level_platform/impl/src/lf_windows_support.c
@@ -187,6 +187,15 @@ int lf_thread_join(lf_thread_t thread, void** thread_return) {
return 0;
}
+/**
+ * Real-time scheduling API not implemented for Windows.
+ */
+int lf_thread_set_cpu(lf_thread_t thread, int cpu_number) { return -1; }
+
+int lf_thread_set_priority(lf_thread_t thread, int priority) { return -1; }
+
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) { return -1; }
+
int lf_mutex_init(_lf_critical_section_t* critical_section) {
// Set up a recursive mutex
InitializeCriticalSection((PCRITICAL_SECTION)critical_section);
diff --git a/low_level_platform/impl/src/lf_zephyr_clock_counter.c b/low_level_platform/impl/src/lf_zephyr_clock_counter.c
index fcb285d44..42f3de0e7 100644
--- a/low_level_platform/impl/src/lf_zephyr_clock_counter.c
+++ b/low_level_platform/impl/src/lf_zephyr_clock_counter.c
@@ -24,7 +24,6 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************/
-
/**
* @brief This implements the timing-related platform API on top of the Zephyr
* Counter API. The Counter API is a generic interface to a timer peripheral. It
@@ -43,7 +42,6 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
static int64_t epoch_duration_nsec;
static int64_t epoch_duration_usec;
-static uint32_t counter_max_ticks;
static volatile int64_t last_epoch_nsec = 0;
static uint32_t counter_freq;
static volatile bool async_event = false;
diff --git a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
index 183eebbbe..8c1f5ac1a 100644
--- a/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
+++ b/low_level_platform/impl/src/lf_zephyr_clock_kernel.c
@@ -1,6 +1,6 @@
#if defined(PLATFORM_ZEPHYR)
#include "platform/lf_zephyr_board_support.h"
-#if defined(LF_ZEPHYR_CLOCK_KERNEL)
+#if !defined(LF_ZEPHYR_CLOCK_COUNTER)
/*************
Copyright (c) 2023, Norwegian University of Science and Technology.
@@ -41,52 +41,62 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "low_level_platform.h"
#include "logging_macros.h"
-static int64_t epoch_duration_nsec;
-static volatile int64_t last_epoch_nsec = 0;
+// Convert Zephyr ticks into an interval_t. According to the Zephyr docs, the
+// tick rate is 100 Hz for QEMU emulations and normally a power of 10.
+#if CONFIG_SYS_CLOCK_TICKS_PER_SEC == 100
+#define TICKS_TO_NSEC(ticks) MSEC(10 * ticks)
+#elif CONFIG_SYS_CLOCK_TICKS_PER_SEC == 1000
+#define TICKS_TO_NSEC(ticks) MSEC(ticks)
+#elif CONFIG_SYS_CLOCK_TICKS_PER_SEC == 10000
+#define TICKS_TO_NSEC(ticks) USEC(100 * ticks)
+#elif CONFIG_SYS_CLOCK_TICKS_PER_SEC == 100000
+#define TICKS_TO_NSEC(ticks) USEC(10 * ticks)
+#elif CONFIG_SYS_CLOCK_TICKS_PER_SEC == 1000000
+#define TICKS_TO_NSEC(ticks) USEC(1 * ticks)
+#elif CONFIG_SYS_CLOCK_TICKS_PER_SEC == 10000000
+#define TICKS_TO_NSEC(ticks) NSEC(100 * ticks)
+#else
+#define TICKS_TO_NSEC(ticks) ((SECONDS(1) / CONFIG_SYS_CLOCK_TICKS_PER_SEC) * ticks)
+#endif
+
static uint32_t timer_freq;
static volatile bool async_event = false;
+// Statically create and initialize the semaphore used for sleeping.
+K_SEM_DEFINE(sleeping_sem, 0, 1)
+
void _lf_initialize_clock() {
- timer_freq = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
- LF_PRINT_LOG("--- Using LF Zephyr Kernel Clock with a frequency of %u Hz\n", timer_freq);
- last_epoch_nsec = 0;
- epoch_duration_nsec = ((1LL << 32) * SECONDS(1)) / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
+ timer_freq = CONFIG_SYS_CLOCK_TICKS_PER_SEC;
+ lf_print("--- Using LF Zephyr Kernel Clock with a frequency of %u Hz", timer_freq);
}
-/**
- * Detect wraps by storing the previous clock readout. When a clock readout is
- * less than the previous we have had a wrap. This only works of `_lf_clock_gettime`
- * is invoked at least once per epoch.
- */
+/** Uses Zephyr's monotonically increasing uptime count. */
int _lf_clock_gettime(instant_t* t) {
- static uint32_t last_read_cycles = 0;
- uint32_t now_cycles = k_cycle_get_32();
- if (now_cycles < last_read_cycles) {
- last_epoch_nsec += epoch_duration_nsec;
- }
- *t = (SECOND(1) / CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) * now_cycles + last_epoch_nsec;
- last_read_cycles = now_cycles;
+ interval_t uptime = k_uptime_ticks();
+ *t = TICKS_TO_NSEC(uptime);
return 0;
}
-/**
- * Interruptable sleep is implemented using busy-waiting.
- */
+/** Interruptable sleep is implemented by taking a semaphore with a timeout. */
int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
async_event = false;
+ interval_t duration = wakeup - lf_time_physical();
+ if (duration <= 0) {
+ return 0;
+ }
+
if (lf_critical_section_exit(env)) {
lf_print_error_and_exit("Failed to exit critical section.");
}
- instant_t now;
- do {
- _lf_clock_gettime(&now);
- } while ((now < wakeup) && !async_event);
+
+ int res = k_sem_take(&sleeping_sem, K_NSEC(duration));
+
if (lf_critical_section_enter(env)) {
lf_print_error_and_exit("Failed to exit critical section.");
}
- if (async_event) {
+ if (res < 0 || async_event == true) {
async_event = false;
return -1;
} else {
@@ -95,11 +105,13 @@ int _lf_interruptable_sleep_until_locked(environment_t* env, instant_t wakeup) {
}
/**
- * Asynchronous events are notified by setting a flag which breaks the sleeping
- * thread out of the busy-wait.
+ * Asynchronous events are notified by signalling a semaphore, which wakes up
+ * the runtime if it is sleeping, and by setting a flag to indicate what has
+ * happened.
*/
int _lf_single_threaded_notify_of_event() {
async_event = true;
+ k_sem_give(&sleeping_sem);
return 0;
}
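As a quick sanity check of the tick conversion, here is a standalone sketch for the 100 Hz case mentioned in the comment; `MSEC()` is re-defined locally as a placeholder so the snippet compiles on its own (in the runtime it comes from the LF time headers):

```c
#include <stdio.h>
#include <stdint.h>

// Placeholder for the LF time macro, for this sketch only.
#define MSEC(x) ((int64_t)(x) * 1000000LL)

// The 100 Hz branch of TICKS_TO_NSEC above: one tick is 10 ms.
#define TICKS_TO_NSEC(ticks) MSEC(10 * (ticks))

int main(void) {
  int64_t uptime_ticks = 250; // e.g. what k_uptime_ticks() might return
  printf("%lld ns\n", (long long)TICKS_TO_NSEC(uptime_ticks)); // 2500000000 ns
  return 0;
}
```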
diff --git a/low_level_platform/impl/src/lf_zephyr_support.c b/low_level_platform/impl/src/lf_zephyr_support.c
index ff5c37e59..f3470edbb 100644
--- a/low_level_platform/impl/src/lf_zephyr_support.c
+++ b/low_level_platform/impl/src/lf_zephyr_support.c
@@ -33,6 +33,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "platform/lf_zephyr_support.h"
#include "platform/lf_zephyr_board_support.h"
+#include "platform/lf_platform_util.h"
#include "low_level_platform.h"
#include "tag.h"
@@ -154,6 +155,56 @@ void initialize_lf_thread_id() {
int lf_thread_id() { return *((int*)k_thread_custom_data_get()); }
+lf_thread_t lf_thread_self() { return k_current_get(); }
+
+int lf_thread_set_cpu(lf_thread_t thread, int cpu_number) { return k_thread_cpu_pin(thread, cpu_number); }
+
+/**
+ * Real-time scheduling API
+ */
+int lf_thread_set_priority(lf_thread_t thread, int priority) {
+ int final_priority;
+ if (priority > LF_SCHED_MAX_PRIORITY || priority < LF_SCHED_MIN_PRIORITY) {
+ return -1;
+ }
+
+ final_priority = map_priorities(priority, CONFIG_NUM_PREEMPT_PRIORITIES - 1, 0);
+ if (final_priority < 0) {
+ return -1;
+ }
+
+ k_thread_priority_set(thread, final_priority);
+ return 0;
+}
+
+int lf_thread_set_scheduling_policy(lf_thread_t thread, lf_scheduling_policy_t* policy) {
+ // Update the policy
+ switch (policy->policy) {
+ case LF_SCHED_TIMESLICE: {
+ // This sets timeslicing for all threads on all priorities. I.e. it is not
+ // set on a per-thread basis.
+ k_sched_time_slice_set(policy->time_slice / MSEC(1), 0);
+ if (lf_thread_set_priority(thread, policy->priority) != 0) {
+ return -1;
+ }
+ break;
+ }
+ case LF_SCHED_PRIORITY: {
+ if (lf_thread_set_priority(thread, policy->priority) != 0) {
+ return -1;
+ }
+ break;
+ }
+ case LF_SCHED_FAIR:
+ default:
+ return -1;
+ break;
+ }
+
+ return 0;
+}
+
int lf_mutex_init(lf_mutex_t* mutex) { return k_mutex_init(mutex); }
int lf_mutex_lock(lf_mutex_t* mutex) {
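A hypothetical caller-side sketch for the Zephyr branch above, requesting round-robin scheduling for a worker. The field names follow the switch statement; the 10 ms slice is an illustrative value, and (as noted above) the slice applies globally rather than per thread:

```c
#include "low_level_platform.h"

// Hypothetical helper: ask for time-sliced (round-robin) scheduling.
// MSEC() is assumed to be available from the LF time headers.
static int enable_timeslicing(lf_thread_t worker) {
  lf_scheduling_policy_t policy;
  policy.policy = LF_SCHED_TIMESLICE;
  policy.priority = LF_SCHED_MIN_PRIORITY; // mapped onto Zephyr's inverted numeric range
  policy.time_slice = MSEC(10);            // divided by MSEC(1) above, i.e. 10 ms for k_sched_time_slice_set()
  return lf_thread_set_scheduling_policy(worker, &policy);
}
```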
diff --git a/low_level_platform/impl/src/platform_internal.c b/low_level_platform/impl/src/platform_internal.c
deleted file mode 100644
index fc14c9f22..000000000
--- a/low_level_platform/impl/src/platform_internal.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#include "low_level_platform.h"
-
-#ifndef PLATFORM_ZEPHYR // on Zephyr, this is handled separately
-#ifndef LF_SINGLE_THREADED
-static int _lf_worker_thread_count = 0;
-
-static thread_local int lf_thread_id_var = -1;
-
-int lf_thread_id() { return lf_thread_id_var; }
-
-void initialize_lf_thread_id() { lf_thread_id_var = lf_atomic_fetch_add32(&_lf_worker_thread_count, 1); }
-#endif
-#endif
diff --git a/platform/impl/CMakeLists.txt b/platform/impl/CMakeLists.txt
index df24aac27..cef66b5ef 100644
--- a/platform/impl/CMakeLists.txt
+++ b/platform/impl/CMakeLists.txt
@@ -1,13 +1,11 @@
set(LF_PLATFORM_FILES ${CMAKE_CURRENT_LIST_DIR}/platform.c)
-if(PLATFORM_ZEPHYR)
- message("--- Building Zephyr library")
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Zephyr")
zephyr_library_named(lf-platform-impl)
zephyr_library_sources(${LF_PLATFORM_FILES})
zephyr_library_link_libraries(kernel)
else()
-message("--- Building non-Zephyr library")
add_library(lf-platform-impl STATIC)
target_sources(lf-platform-impl PUBLIC ${LF_PLATFORM_FILES})
endif()
diff --git a/platform/impl/platform.c b/platform/impl/platform.c
index ddd182404..361f36992 100644
--- a/platform/impl/platform.c
+++ b/platform/impl/platform.c
@@ -20,7 +20,8 @@ lf_platform_mutex_ptr_t lf_platform_mutex_new() {
if (mutex)
lf_mutex_init(mutex);
return mutex;
-};
+}
+
void lf_platform_mutex_free(lf_platform_mutex_ptr_t mutex) { free((void*)mutex); }
int lf_platform_mutex_lock(lf_platform_mutex_ptr_t mutex) { return lf_mutex_lock((lf_mutex_t*)mutex); }
int lf_platform_mutex_unlock(lf_platform_mutex_ptr_t mutex) { return lf_mutex_unlock((lf_mutex_t*)mutex); }
diff --git a/tag/api/tag.h b/tag/api/tag.h
index 2ad4cc73c..c903aaf53 100644
--- a/tag/api/tag.h
+++ b/tag/api/tag.h
@@ -47,6 +47,9 @@
#define ZERO_TAG \
(tag_t) { .time = 0LL, .microstep = 0u }
+// Returns true if timeout has elapsed.
+#define CHECK_TIMEOUT(start, duration) (lf_time_physical() > ((start) + (duration)))
+
// Convenience for converting times
#define BILLION ((instant_t)1000000000LL)
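A small hypothetical polling loop showing how the new CHECK_TIMEOUT macro is meant to be used; the flag being polled is illustrative, and the sketch assumes tag.h and the usual LF time API (`lf_time_physical()`) are available:

```c
#include <stdbool.h>
#include "tag.h"

// Hypothetical example: spin until a flag is set or `duration` has elapsed.
static bool wait_for_flag(volatile bool* flag, interval_t duration) {
  instant_t start = lf_time_physical();
  while (!*flag) {
    if (CHECK_TIMEOUT(start, duration)) {
      return false; // timed out before the flag was set
    }
  }
  return true;
}
```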
diff --git a/test/Tests.cmake b/test/Tests.cmake
index 4130b7c09..2ec3a90ba 100644
--- a/test/Tests.cmake
+++ b/test/Tests.cmake
@@ -62,7 +62,7 @@ if (NOT DEFINED LF_SINGLE_THREADED)
${RTI_DIR}/rti_remote.c
${CoreLibPath}/tracepoint.c
${LF_PLATFORM_FILE}
- ${LF_ROOT}/low_level_platform/impl/src/platform_internal.c
+ ${LF_ROOT}/low_level_platform/impl/src/lf_platform_util.c
${LF_ROOT}/low_level_platform/impl/src/lf_atomic_gcc_clang.c
${LF_ROOT}/low_level_platform/impl/src/lf_unix_clock_support.c
${CoreLibPath}/utils/util.c
diff --git a/test/general/utils/hashmap_test.c b/test/general/utils/hashmap_test.c
index 2134071e6..28e8deb27 100644
--- a/test/general/utils/hashmap_test.c
+++ b/test/general/utils/hashmap_test.c
@@ -15,9 +15,9 @@ static hashmap_object2int_entry_t mock[CAPACITY];
static size_t mock_size = 0;
void test_put(hashmap_object2int_t* h) {
- void* key = NULL;
+ int* key = NULL;
while (!key)
- key = NULL + (rand() % CAPACITY); // Generate a dummy pointer.
+ key += (rand() % CAPACITY); // Generate a dummy pointer.
int value = rand();
hashmap_object2int_entry_t entry = (hashmap_object2int_entry_t){.key = key, .value = value};
hashmap_object2int_put(h, entry.key, entry.value);
@@ -54,7 +54,6 @@ void test_get(hashmap_object2int_t* h) {
* which each of two actions are performed, expressed as percents.
*/
void run_test(hashmap_object2int_t* h, int* distribution) {
- int result = 1;
int r = rand();
int choice = (r < 0 ? -r : r) % 100;
if ((choice = choice - distribution[0]) < 0) {
diff --git a/test/src_gen_stub.c b/test/src_gen_stub.c
index d67d238ea..1e4630a6e 100644
--- a/test/src_gen_stub.c
+++ b/test/src_gen_stub.c
@@ -19,4 +19,4 @@ void logical_tag_complete(tag_t tag_to_send) {}
int _lf_get_environments(environment_t** envs) {
*envs = &_env;
return 1;
-}
\ No newline at end of file
+}
diff --git a/trace/impl/CMakeLists.txt b/trace/impl/CMakeLists.txt
index 6aeeb6870..f4a6b8b55 100644
--- a/trace/impl/CMakeLists.txt
+++ b/trace/impl/CMakeLists.txt
@@ -1,9 +1,13 @@
+set(LF_ROOT ${CMAKE_CURRENT_LIST_DIR}/../..)
+include(${LF_ROOT}/core/lf_utils.cmake)
+
add_library(lf-trace-impl STATIC)
add_library(lf::trace-impl ALIAS lf-trace-impl)
target_link_libraries(lf-trace-impl PRIVATE lf::trace-api)
target_link_libraries(lf-trace-impl PRIVATE lf::platform-api)
target_link_libraries(lf-trace-impl PRIVATE lf::logging-api)
target_link_libraries(lf-trace-impl PRIVATE lf::version-api)
+lf_enable_compiler_warnings(lf-trace-impl)
target_sources(lf-trace-impl PUBLIC ${CMAKE_CURRENT_LIST_DIR}/src/trace_impl.c)
diff --git a/trace/impl/include/trace_impl.h b/trace/impl/include/trace_impl.h
index 3e1bd6fe6..779a7be4a 100644
--- a/trace/impl/include/trace_impl.h
+++ b/trace/impl/include/trace_impl.h
@@ -6,6 +6,9 @@
/** Size of the table of trace objects. */
#define TRACE_OBJECT_TABLE_SIZE 1024
+/** Max length of the trace file name. */
+#define TRACE_MAX_FILENAME_LENGTH 128
+
// TYPE DEFINITIONS **********************************************************
/**
@@ -20,10 +23,10 @@ typedef struct trace_t {
* which will create a significant pause in the calling thread.
*/
trace_record_nodeps_t** _lf_trace_buffer;
- int* _lf_trace_buffer_size;
+ size_t* _lf_trace_buffer_size;
/** The number of trace buffers allocated when tracing starts. */
- int _lf_number_of_trace_buffers;
+ size_t _lf_number_of_trace_buffers;
/** Marker that tracing is stopping or has stopped. */
int _lf_trace_stop;
@@ -32,11 +35,11 @@ typedef struct trace_t {
FILE* _lf_trace_file;
/** The file name where the traces are written*/
- char* filename;
+ char filename[TRACE_MAX_FILENAME_LENGTH];
/** Table of pointers to a description of the object. */
object_description_t _lf_trace_object_descriptions[TRACE_OBJECT_TABLE_SIZE];
- int _lf_trace_object_descriptions_size;
+ size_t _lf_trace_object_descriptions_size;
/** Indicator that the trace header information has been written to the file. */
bool _lf_trace_header_written;
diff --git a/trace/impl/src/trace_impl.c b/trace/impl/src/trace_impl.c
index d43a36e87..7f79c49a5 100644
--- a/trace/impl/src/trace_impl.c
+++ b/trace/impl/src/trace_impl.c
@@ -45,7 +45,7 @@ static int write_trace_header(trace_t* trace) {
_LF_TRACE_FAILURE(trace);
// Next we write the table.
- for (int i = 0; i < trace->_lf_trace_object_descriptions_size; i++) {
+ for (size_t i = 0; i < trace->_lf_trace_object_descriptions_size; i++) {
// Write the pointer to the self struct.
items_written = fwrite(&trace->_lf_trace_object_descriptions[i].pointer, sizeof(void*), 1, trace->_lf_trace_file);
if (items_written != 1)
@@ -63,7 +63,7 @@ static int write_trace_header(trace_t* trace) {
_LF_TRACE_FAILURE(trace);
// Write the description.
- int description_size = strlen(trace->_lf_trace_object_descriptions[i].description);
+ size_t description_size = strlen(trace->_lf_trace_object_descriptions[i].description);
items_written = fwrite(trace->_lf_trace_object_descriptions[i].description, sizeof(char),
description_size + 1, // Include null terminator.
trace->_lf_trace_file);
@@ -137,11 +137,11 @@ static void start_trace(trace_t* trace, int max_num_local_threads) {
trace->_lf_trace_buffer =
(trace_record_nodeps_t**)malloc(sizeof(trace_record_nodeps_t*) * (trace->_lf_number_of_trace_buffers + 1));
trace->_lf_trace_buffer++; // the buffer at index -1 is a fallback for user threads.
- for (int i = -1; i < trace->_lf_number_of_trace_buffers; i++) {
+ for (int i = -1; i < (int)trace->_lf_number_of_trace_buffers; i++) {
trace->_lf_trace_buffer[i] = (trace_record_nodeps_t*)malloc(sizeof(trace_record_nodeps_t) * TRACE_BUFFER_CAPACITY);
}
// Array of counters that track the size of each trace record (per thread).
- trace->_lf_trace_buffer_size = (int*)calloc(sizeof(int), trace->_lf_number_of_trace_buffers + 1);
+ trace->_lf_trace_buffer_size = (size_t*)calloc(sizeof(size_t), trace->_lf_number_of_trace_buffers + 1);
trace->_lf_trace_buffer_size++;
trace->_lf_trace_stop = 0;
@@ -150,15 +150,8 @@ static void start_trace(trace_t* trace, int max_num_local_threads) {
static void trace_new(char* filename) {
- // Determine length of the filename
- size_t len = strlen(filename) + 1;
-
- // Allocate memory for the filename on the trace struct
- trace.filename = (char*)malloc(len * sizeof(char));
- LF_ASSERT(trace.filename, "Out of memory");
-
// Copy it to the struct
- strncpy(trace.filename, filename, len);
+ strncpy(trace.filename, filename, TRACE_MAX_FILENAME_LENGTH);
// FIXME: location of trace file should be customizable.
trace._lf_trace_file = fopen(trace.filename, "w");
if (trace._lf_trace_file == NULL) {
@@ -171,16 +164,14 @@ static void trace_new(char* filename) {
}
}
-static void trace_free(trace_t* trace) { free(trace->filename); }
-
static void stop_trace_locked(trace_t* trace) {
if (trace->_lf_trace_stop) {
// Trace was already stopped. Nothing to do.
return;
}
- for (int i = -1; i < trace->_lf_number_of_trace_buffers; i++) {
+ for (int i = -1; i < (int)trace->_lf_number_of_trace_buffers; i++) {
// Flush the buffer if it has data.
- LF_PRINT_DEBUG("Trace buffer %d has %d records.", i, trace->_lf_trace_buffer_size[i]);
+ LF_PRINT_DEBUG("Trace buffer %d has %zu records.", i, trace->_lf_trace_buffer_size[i]);
if (trace->_lf_trace_buffer_size && trace->_lf_trace_buffer_size[i] > 0) {
flush_trace_locked(trace, i);
}
@@ -231,6 +222,7 @@ void lf_tracing_register_trace_event(object_description_t description) {
}
void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr) {
+ (void)worker;
// Worker argument determines which buffer to write to.
int tid = lf_thread_id();
if (tid < 0) {
@@ -239,8 +231,8 @@ void lf_tracing_tracepoint(int worker, trace_record_nodeps_t* tr) {
// Therefore we should fall back to using a mutex.
lf_platform_mutex_lock(trace_mutex);
}
- if (tid > trace._lf_number_of_trace_buffers) {
- lf_print_error_and_exit("the thread id (%d) exceeds the number of trace buffers (%d)", tid,
+ if (tid > (int)trace._lf_number_of_trace_buffers) {
+ lf_print_error_and_exit("the thread id (%d) exceeds the number of trace buffers (%zu)", tid,
trace._lf_number_of_trace_buffers);
}
@@ -280,6 +272,5 @@ void lf_tracing_global_init(char* file_name_prefix, int fedid, int max_num_local
void lf_tracing_set_start_time(int64_t time) { start_time = time; }
void lf_tracing_global_shutdown() {
stop_trace(&trace);
- trace_free(&trace);
lf_platform_mutex_free(trace_mutex);
}
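For readers puzzled by the `i = -1` loops above: the buffer arrays are allocated with one extra slot and the base pointer is advanced by one, so index -1 is a valid fallback slot for user threads that have no worker id. A standalone illustration of that pointer-offset trick (not runtime code):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  size_t n = 4; // pretend there are 4 worker trace buffers
  size_t* sizes = (size_t*)calloc(n + 1, sizeof(size_t));
  sizes++;           // after this, sizes[-1] refers to the extra slot
  sizes[-1] = 7;     // fallback counter used by threads without an id
  for (size_t i = 0; i < n; i++) {
    sizes[i] = i;
  }
  printf("fallback=%zu first=%zu\n", sizes[-1], sizes[0]); // fallback=7 first=0
  free(sizes - 1);   // undo the offset before freeing
  return 0;
}
```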
diff --git a/util/tracing/trace_util.c b/util/tracing/trace_util.c
index ed32c5baa..ffe0c6b8f 100644
--- a/util/tracing/trace_util.c
+++ b/util/tracing/trace_util.c
@@ -62,6 +62,65 @@ typedef struct open_file_t {
} open_file_t;
open_file_t* _open_files = NULL;
+const char* trace_event_names[] = {
+ "Reaction starts",
+ "Reaction ends",
+ "Reaction deadline missed",
+ "Schedule called",
+ "User-defined event",
+ "User-defined valued event",
+ "Worker wait starts",
+ "Worker wait ends",
+ "Scheduler advancing time starts",
+ "Scheduler advancing time ends",
+ "Federated marker",
+ // Sending messages
+ "Sending ACK",
+ "Sending FAILED",
+ "Sending TIMESTAMP",
+ "Sending NET",
+ "Sending LTC",
+ "Sending STOP_REQ",
+ "Sending STOP_REQ_REP",
+ "Sending STOP_GRN",
+ "Sending FED_ID",
+ "Sending PTAG",
+ "Sending TAG",
+ "Sending REJECT",
+ "Sending RESIGN",
+ "Sending PORT_ABS",
+ "Sending CLOSE_RQ",
+ "Sending TAGGED_MSG",
+ "Sending P2P_TAGGED_MSG",
+ "Sending MSG",
+ "Sending P2P_MSG",
+ "Sending ADR_AD",
+ "Sending ADR_QR",
+ // Receiving messages
+ "Receiving ACK",
+ "Receiving FAILED",
+ "Receiving TIMESTAMP",
+ "Receiving NET",
+ "Receiving LTC",
+ "Receiving STOP_REQ",
+ "Receiving STOP_REQ_REP",
+ "Receiving STOP_GRN",
+ "Receiving FED_ID",
+ "Receiving PTAG",
+ "Receiving TAG",
+ "Receiving REJECT",
+ "Receiving RESIGN",
+ "Receiving PORT_ABS",
+ "Receiving CLOSE_RQ",
+ "Receiving TAGGED_MSG",
+ "Receiving P2P_TAGGED_MSG",
+ "Receiving MSG",
+ "Receiving P2P_MSG",
+ "Receiving ADR_AD",
+ "Receiving ADR_QR",
+ "Receiving UNIDENTIFIED",
+};
+
/**
* Function to be invoked upon exiting.
*/
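A hypothetical lookup helper for the name table above; only the lower-bound check is added here, and the upper bound is left to the caller because the table length constant is not part of this diff:

```c
#include "trace_util.h"

// Hypothetical helper: map a non-negative event type to its printable name.
// Callers must ensure the value is within the range of defined event types.
const char* event_type_name(int event_type) {
  if (event_type < 0) {
    return "Unknown event";
  }
  return trace_event_names[event_type];
}
```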
diff --git a/util/tracing/trace_util.h b/util/tracing/trace_util.h
index 67d3c705b..089e57ee1 100644
--- a/util/tracing/trace_util.h
+++ b/util/tracing/trace_util.h
@@ -33,6 +33,11 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "reactor.h"
#include "trace.h"
+/**
+ * String description of event types.
+ */
+extern const char* trace_event_names[];
+
/** Macro to use when access to trace file fails. */
#define _LF_TRACE_FAILURE(trace_file) \
do { \