perf(veristand-zeromq-bridge): change to multithreading (#20511)
hongbo-miao authored Nov 16, 2024
1 parent 03227c5 commit 2393f05
Showing 2 changed files with 87 additions and 46 deletions.
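
The main change replaces the single read-serialize-send loop with a bounded producer/consumer pipeline: several producer tasks read channel values and push serialized Protobuf payloads into a BlockingCollection, while one consumer task drains the queue and publishes frames on the ZeroMQ socket (NetMQ sockets are not thread-safe, so sending stays on a single thread). Below is a minimal, self-contained sketch of that shape; the payload generator and the Console publisher are placeholders, not the VeriStand/NetMQ calls from the commit, and the constants are illustrative only.

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Threading.Tasks;

class ProducerConsumerSketch
{
    private const int NumProducers = 4;
    private const int QueueCapacity = 10_000;
    private const int TotalMessages = 1_000;

    static async Task Main()
    {
        // Bounded queue: producers block in Add() once QueueCapacity items are pending.
        using var queue = new BlockingCollection<byte[]>(QueueCapacity);

        // Producers: generate payloads and hand them to the queue.
        var producers = new List<Task>();
        for (int i = 0; i < NumProducers; i++)
        {
            producers.Add(Task.Run(() =>
            {
                while (!queue.IsAddingCompleted)
                {
                    byte[] payload = BitConverter.GetBytes(DateTime.UtcNow.Ticks); // placeholder sample
                    try
                    {
                        queue.Add(payload);
                    }
                    catch (InvalidOperationException)
                    {
                        break; // CompleteAdding was called while this producer was blocked
                    }
                }
            }));
        }

        // Single consumer: owns the output side, like the task that calls publisher.SendFrame.
        var consumer = Task.Run(() =>
        {
            int sent = 0;
            foreach (byte[] payload in queue.GetConsumingEnumerable())
            {
                Console.WriteLine($"sent {payload.Length} bytes"); // stand-in for SendFrame(payload)
                if (++sent >= TotalMessages && !queue.IsAddingCompleted)
                {
                    queue.CompleteAdding(); // stop producers and end the enumeration
                }
            }
        });

        await Task.WhenAll(producers);
        await consumer;
    }
}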
@@ -2,87 +2,103 @@
using NetMQ.Sockets;
using NationalInstruments.VeriStand.ClientAPI;
using System;
using System.Diagnostics;
using Google.Protobuf;
using System.Linq;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using System.Linq;
using Google.Protobuf;
using VeriStandZeroMQBridge;

public class Program
{
private const string GATEWAY_IP = "localhost";
private const string SYSTEM_DEFINITION_PATH = @"C:\Users\Public\Documents\National Instruments\NI VeriStand 2024\Examples\Stimulus Profile\Engine Demo\Engine Demo.nivssdf";
private const int TOTAL_MESSAGES = 100000;
private const int TOTAL_MESSAGES = 200000;
private const int INTERVAL_MS = 5000;
private const int CONNECTION_TIMEOUT_MS = 60000;
private const string ZMQ_ADDRESS = "tcp://*:5555";
private const int NUM_PRODUCERS = 4; // Number of producer threads
private const int QUEUE_CAPACITY = 10000;

private static BlockingCollection<byte[]> messageQueue;
private static volatile bool isRunning = true;

public static async Task Main(string[] args)
{
messageQueue = new BlockingCollection<byte[]>(QUEUE_CAPACITY);

using (var publisher = new PublisherSocket())
{
publisher.Bind(ZMQ_ADDRESS);
Console.WriteLine($"[ZMQ] Publisher bound to {ZMQ_ADDRESS}");
await Console.Out.WriteLineAsync($"[ZMQ] Publisher bound to {ZMQ_ADDRESS}");

try
{
var workspace = new Factory().GetIWorkspace2(GATEWAY_IP);
string[] aliases, channels;
workspace.GetAliasList(out aliases, out channels);
Console.WriteLine($"[Config] Aliases: {string.Join(", ", aliases)} | Channels: {string.Join(", ", channels)}");
await Console.Out.WriteLineAsync($"[Config] Aliases Count: {aliases.Length} | Aliases: {string.Join(", ", aliases)}");
await Console.Out.WriteLineAsync($"[Config] Channels Count: {channels.Length} | Aliases: {string.Join(", ", channels)}");

double[] values = new double[aliases.Length];
workspace.ConnectToSystem(SYSTEM_DEFINITION_PATH, true, CONNECTION_TIMEOUT_MS);
Console.WriteLine("[Status] Data collection started");
await Console.Out.WriteLineAsync("[Status] Data collection started");

int messageCount = 0;
int messagesLastInterval = 0;
var totalStopwatch = Stopwatch.StartNew();
var intervalStopwatch = Stopwatch.StartNew();
var messageCount = 0;
var messagesLastInterval = 0;

var tasks = new List<Task>();
// Start producer tasks
var producerTasks = new List<Task>();
for (int i = 0; i < NUM_PRODUCERS; i++)
{
producerTasks.Add(Task.Run(() => ProducerTask(workspace, aliases)));
}

while (messageCount < TOTAL_MESSAGES)
// Consumer task
var consumerTask = Task.Run(() =>
{
workspace.GetMultipleChannelValues(aliases, out values);
var signals = new Signals
{
Timestamp = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
Signals_ = { aliases.Zip(values, (alias, value) =>
new Signal {
Alias = alias,
Value = value
}) }
};

// Serialize and send asynchronously
byte[] data = signals.ToByteArray();
var sendTask = Task.Run(() => publisher.SendFrame(data));
tasks.Add(sendTask);

// Clean up completed tasks
tasks.RemoveAll(t => t.IsCompleted);

messageCount++;
messagesLastInterval++;

if (intervalStopwatch.ElapsedMilliseconds >= INTERVAL_MS)
while (!messageQueue.IsCompleted)
{
double messagesPerSecond = messagesLastInterval / (intervalStopwatch.ElapsedMilliseconds / 1000.0);
await Console.Out.WriteLineAsync($"[Update] Speed: {messagesPerSecond:F2} msg/s | Total: {messageCount:N0} | Value Count: {values.Length}");
try
{
var data = messageQueue.Take();
publisher.SendFrame(data);

Interlocked.Increment(ref messageCount);
Interlocked.Increment(ref messagesLastInterval);

messagesLastInterval = 0;
intervalStopwatch.Restart();
if (messageCount >= TOTAL_MESSAGES)
{
isRunning = false;
break;
}
}
catch (InvalidOperationException)
{
break;
}
}
});

// Monitoring task
while (isRunning)
{
await Task.Delay(INTERVAL_MS);
var currentMessagesLastInterval = Interlocked.Exchange(ref messagesLastInterval, 0);
double messagesPerSecond = currentMessagesLastInterval / (INTERVAL_MS / 1000.0);
await Console.Out.WriteLineAsync($"[Update] Speed: {messagesPerSecond:F2} msg/s | Total: {messageCount:N0} | Queue Size: {messageQueue.Count}");
}

// Wait for all remaining tasks to complete
await Task.WhenAll(tasks);
// Cleanup
messageQueue.CompleteAdding();
await Task.WhenAll(producerTasks);
await consumerTask;

double totalTime = totalStopwatch.Elapsed.TotalSeconds;
double averageMessagesPerSecond = TOTAL_MESSAGES / totalTime;
await Console.Out.WriteLineAsync($"[Complete] Runtime: {totalTime:F2}s | Avg Speed: {averageMessagesPerSecond:F2} msg/s | Total Messages: {TOTAL_MESSAGES:N0}");
double averageMessagesPerSecond = messageCount / totalTime;
await Console.Out.WriteLineAsync($"[Complete] Runtime: {totalTime:F2}s | Avg Speed: {averageMessagesPerSecond:F2} msg/s | Total Messages: {messageCount:N0}");
}
catch (Exception ex)
{
@@ -91,4 +107,29 @@ public static async Task Main(string[] args)
}
}
}

private static void ProducerTask(IWorkspace2 workspace, string[] aliases)
{
double[] values = new double[aliases.Length];

while (isRunning)
{
workspace.GetMultipleChannelValues(aliases, out values);
var signals = new Signals
{
Timestamp = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds(),
Signals_ = { aliases.Zip(values, (alias, value) =>
new Signal {
Alias = alias,
Value = value
}) }
};

byte[] data = signals.ToByteArray();
if (!messageQueue.TryAdd(data))
{
Thread.Sleep(1); // Queue is full: drop this sample and back off briefly
}
}
}
}
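
The new throughput stats rely on lock-free counters: the consumer bumps messageCount and messagesLastInterval with Interlocked.Increment, and the monitoring loop reads and resets the interval counter with Interlocked.Exchange so no increments are lost between the read and the reset. A small self-contained sketch of that read-and-reset pattern follows; the tight worker loop is a stand-in for the consumer, and the one-second interval is illustrative.

using System;
using System.Threading;
using System.Threading.Tasks;

class ThroughputCounterSketch
{
    private static int totalCount;
    private static int intervalCount;

    static async Task Main()
    {
        using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(3));

        // Stand-in worker: increments the shared counters the same way the
        // consumer does after each successful send.
        var worker = Task.Run(() =>
        {
            while (!cts.IsCancellationRequested)
            {
                Interlocked.Increment(ref totalCount);
                Interlocked.Increment(ref intervalCount);
            }
        });

        // Monitor: atomically swap the interval counter back to zero, so
        // increments that land between the read and the reset are not lost.
        while (!cts.IsCancellationRequested)
        {
            await Task.Delay(1000);
            int perInterval = Interlocked.Exchange(ref intervalCount, 0);
            Console.WriteLine($"[Update] {perInterval} msg/s | Total: {Volatile.Read(ref totalCount):N0}");
        }

        await worker;
    }
}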
@@ -126,8 +126,8 @@ async fn main() -> Result<(), Box<dyn Error>> {
}
}));
handles.retain(|handle| !handle.is_finished());
while handles.len() >= 2000 {
futures::future::join_all(handles.drain(..1000)).await;
while handles.len() >= 16 {
futures::future::join_all(handles.drain(..8)).await;
}
},
_ = tokio::time::sleep(INACTIVITY_TIMEOUT) => {
