
Commit

Use a progress bar as TUI
berkayurun authored and aewag committed Apr 27, 2023
1 parent 3b06d01 commit eb94dad
Showing 9 changed files with 35 additions and 25 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
@@ -18,7 +18,7 @@ jobs:
uses: actions/checkout@v2

- name: install packages
-run: sudo apt update; sudo apt upgrade -y; sudo apt install -y build-essential ninja-build libglib2.0-dev libfdt-dev libpixman-1-dev zlib1g-dev python3-tables python3-pandas python3-prctl python3-json5 python3-protobuf libprotobuf-c-dev protobuf-compiler protobuf-c-compiler; pip install protobuf==4.21.12
+run: sudo apt update; sudo apt upgrade -y; sudo apt install -y build-essential ninja-build libglib2.0-dev libfdt-dev libpixman-1-dev zlib1g-dev python3-tables python3-pandas python3-prctl python3-json5 python3-protobuf libprotobuf-c-dev protobuf-compiler protobuf-c-compiler python3-tqdm; pip install protobuf==4.21.12

- name: Checkout submodules
run: git submodule update --init
2 changes: 1 addition & 1 deletion .github/workflows/codeql-analysis.yml
@@ -50,7 +50,7 @@ jobs:

- name: Install python Packages
if: ${{matrix.language == 'python'}}
-run: sudo apt update; sudo apt upgrade -y; sudo apt install -y python3-tables python3-pandas python3-prctl libcap-dev python3-protobuf; pip install protobuf==4.21.12
+run: sudo apt update; sudo apt upgrade -y; sudo apt install -y python3-tables python3-pandas python3-prctl libcap-dev python3-protobuf python3-tqdm; pip install protobuf==4.21.12

- name: Build QEMU
if: ${{ matrix.language == 'cpp'}}
2 changes: 1 addition & 1 deletion build.sh
@@ -37,7 +37,7 @@ install_python3_distro() {
if [ "${ID:-linux}" = "debian" ] || [ "${ID_LIKE#*debian*}" != "${ID_LIKE}" ]
then
echo "Looks like Debian!"
-sudo apt-get install python3-tables python3-pandas python3-prctl python3-protobuf
+sudo apt-get install python3-tables python3-pandas python3-prctl python3-protobuf python3-tqdm

echo "Rebuild protobuf files to support the installed package versions"
cd protobuf
9 changes: 6 additions & 3 deletions calculate_trigger.py
@@ -17,6 +17,7 @@
import logging

import pandas
+from tqdm import tqdm

from faultclass import build_filters

@@ -103,7 +104,7 @@ def search_for_fault_location(
goldenrun_tb_exec,
goldenrun_tb_info,
):
logger.info(f"Search trigger to fault INSN at 0x{fault_address:08x}")
logger.debug(f"Search trigger to fault INSN at 0x{fault_address:08x}")
[idx, ins] = find_fault(
fault_address, goldenrun_tb_exec, goldenrun_tb_info, trigger_occurrences
)
@@ -204,7 +205,7 @@ def search_for_fault_location(
if ins >= sub_tb_data["tb_start"]:
trigger_hitcounter += tb_hitcounters[sub_tb]

-logger.info(
+logger.debug(
"Found trigger for faulting instruction address {} at {} with "
"hitcounter {}".format(fault_address, ins, trigger_hitcounter)
)
@@ -213,12 +214,14 @@

def calculate_trigger_addresses(fault_list, goldenrun_tb_exec, goldenrun_tb_info):
""""""
logger.info("Calculating trigger addresses")

"check every fault list"
cachelist = []
lists = build_filters(goldenrun_tb_info)
for list in lists:
list = list.reverse()
-for faults in fault_list:
+for faults in tqdm(fault_list):
for fault in faults["faultlist"]:
if fault.trigger.address >= 0 or fault.trigger.hitcounter == 0:
continue
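The loop change above is tqdm's simplest usage: wrapping an iterable so a terminal progress bar advances once per iteration, with the loop body left untouched. A minimal, self-contained sketch of that pattern follows; the fault_list stand-in data is hypothetical and not taken from the repository.

from tqdm import tqdm

# Hypothetical stand-in for the fault list iterated in calculate_trigger_addresses:
# each entry mimics a {"faultlist": [...]} dictionary.
fault_list = [{"faultlist": []} for _ in range(1000)]

for faults in tqdm(fault_list):  # tqdm renders and advances the progress bar
    for fault in faults["faultlist"]:
        pass  # per-fault trigger search would happen here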
25 changes: 14 additions & 11 deletions controller.py
@@ -27,6 +27,7 @@

import pandas as pd
import prctl
+from tqdm import tqdm

try:
import json5 as json
@@ -250,7 +251,7 @@ def get_system_ram():
sp = str(tmp).split("kB")
t = sp[0]
mem = int(t.split(":")[1], 0)
clogger.info("system ram is {}kB".format(mem))
clogger.debug("system ram is {}kB".format(mem))
return mem


@@ -342,10 +343,12 @@ def controller(
continue
goldenrun_data[keyword] = pd.DataFrame(goldenrun_data[keyword])

clogger.info("Simulating faults")
pbar = tqdm(total=len(faultlist))
itter = 0
while 1:
if len(p_list) == 0 and itter == len(faultlist):
clogger.info("Done inserting qemu jobs")
clogger.debug("Done inserting qemu jobs")
break

if (
@@ -376,8 +379,7 @@
)
p.start()
p_list.append({"process": p, "start_time": time.time()})

clogger.info(f"Started worker {faults['index']}. Running: {len(p_list)}.")
clogger.debug(f"Started worker {faults['index']}. Running: {len(p_list)}.")
clogger.debug(f"Fault address: {faults['faultlist'][0].address}")
clogger.debug(
f"Fault trigger address: {faults['faultlist'][0].trigger.address}"
@@ -415,22 +417,24 @@
"Find finished processes"
p["process"].join(timeout=0)
if p["process"].is_alive() is False:
+# Update the progress bar
+pbar.update(1)
"Recalculate moving average"
p_time_list.append(current_time - p["start_time"])
len_p_time_list = len(p_time_list)
if len_p_time_list > num_workers + 2:
p_time_list.pop(0)
p_time_mean = sum(p_time_list) / len_p_time_list
clogger.info("Current running Average {}".format(p_time_mean))
clogger.debug("Current running Average {}".format(p_time_mean))
"Remove process from list"
p_list.pop(i)
break

clogger.info("{} experiments remaining in queue".format(queue_output.qsize()))

clogger.debug("{} experiments remaining in queue".format(queue_output.qsize()))
pbar.close()
p_logger.join()

clogger.info("Done with qemu and logger")
clogger.debug("Done with qemu and logger")

t1 = time.time()
m, s = divmod(t1 - t0, 60)
@@ -439,14 +443,13 @@

tperindex = (t1 - t0) / len(faultlist)
tperworker = tperindex / num_workers
-clogger.info(
+clogger.debug(
"Took average of {}s per fault, python worker rough runtime is {}s".format(
tperindex, tperworker
)
)

clogger.info("controller exit")

clogger.debug("controller exit")
return config_qemu


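controller.py uses tqdm's manual mode instead of wrapping an iterable: the bar is created with a fixed total (tqdm(total=len(faultlist))), advanced with pbar.update(1) whenever a worker process is detected as finished, and closed with pbar.close() once all jobs are done, while the per-worker status messages are demoted from info to debug. A minimal sketch of that pattern, with illustrative names (jobs, run_job) that are not part of the repository:

import time
from tqdm import tqdm

def run_job(job):
    time.sleep(0.01)  # placeholder for waiting on a finished QEMU worker

jobs = list(range(50))        # hypothetical stand-in for faultlist
pbar = tqdm(total=len(jobs))  # fixed total, one tick per experiment
for job in jobs:
    run_job(job)
    pbar.update(1)            # advance the bar when a job completes
pbar.close()                  # restore the terminal once the queue is drained

This mirrors the update points added in the diff: pbar.update(1) where a finished process is removed from p_list, and pbar.close() right before the logger process is joined.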
7 changes: 4 additions & 3 deletions faultclass.py
@@ -166,7 +166,7 @@ def run_qemu(
f = open(f"log_{index}.txt", "wt", encoding="utf-8")
f.write(tmp.decode("utf-8"))
qlogger.debug(tmp.decode("utf-8"))
qlogger.info(f"Ended qemu for exp {index}! Took {time.time() - t0}")
qlogger.debug(f"Ended qemu for exp {index}! Took {time.time() - t0}")
except KeyboardInterrupt:
ps.kill()
logger.warning(f"Terminate QEMU {index}")
@@ -557,7 +557,7 @@ def readout_data(
tbfaultedlist = readout_tb_faulted(data_protobuf)
output["tbfaulted"] = tbfaultedlist

logger.info(f"Data received now on post processing for Experiment {index}")
logger.debug(f"Data received now on post processing for Experiment {index}")

max_ram_usage = gather_process_ram_usage(queue_ram_usage, max_ram_usage)

@@ -778,7 +778,8 @@ def python_worker(
)
p_qemu.join()
delete_fifos()
-logger.info(

+logger.debug(
"Python worker for experiment {} done. Took {}s, mem usage {}KiB".format(
index, time.time() - t0, mem
)
8 changes: 5 additions & 3 deletions goldenrun.py
@@ -19,6 +19,7 @@
from multiprocessing import Queue

import pandas as pd
+from tqdm import tqdm

from calculate_trigger import calculate_trigger_addresses
from faultclass import Fault
@@ -113,7 +114,7 @@ def run_goldenrun(
if "end" in config_qemu:
for tb in experiment["data"]["tbinfo"]:
config_qemu["max_instruction_count"] += tb["num_exec"] * tb["ins_count"]
-logger.info(
+logger.debug(
"Max instruction count is {}".format(
config_qemu["max_instruction_count"]
)
@@ -141,11 +142,12 @@ def checktriggers_in_tb(faultconfig, data):
valid_triggers = []
invalid_triggers = []
for faultdescription in faultconfig:
-logger.info(
+logger.debug(
"Check Fault {}/{} for valid trigger".format(
faultdescription["index"] + 1, len(faultconfig)
)
)

for fault in faultdescription["faultlist"]:
if fault.trigger.address in valid_triggers:
continue
@@ -362,7 +364,7 @@ def process_wildcard_faults(faultconfig, tbexec, tbinfo):
index_base = faultconfig[-1]["index"] + 1

wildcard_faults = []
-for faultentry in faultconfig:
+for faultentry in tqdm(faultconfig):
expanded_faults = []

for fault in faultentry["faultlist"]:
4 changes: 2 additions & 2 deletions hdf5logger.py
@@ -330,7 +330,7 @@ def hdf5collector(
# readout queue and get next output from qemu. Will block
exp = queue_output.get()
t1 = time.time()
-logger.info(
+logger.debug(
"got exp {}, {} still need to be performed. Took {}s. Elements in queu: {}".format(
exp["index"], num_exp, t1 - t0, queue_output.qsize()
)
@@ -391,4 +391,4 @@ def hdf5collector(
del exp

f.close()
logger.info("Data Logging done")
logger.debug("Data Logging done")
1 change: 1 addition & 0 deletions requirements.txt
@@ -3,3 +3,4 @@ python-prctl==1.8.1
tables==3.7.0
json5==0.9.10
protobuf==4.21.12
+tqdm==4.65.0
