diff --git a/controller.py b/controller.py
index 6bfe89f..48628c7 100755
--- a/controller.py
+++ b/controller.py
@@ -483,6 +483,60 @@ def read_backup(hdf5_file):
     return [backup_expanded_faults, backup_config, backup_goldenrun]
 
 
+def read_simulated_faults(hdf5_file):
+    with tables.open_file(hdf5_file, "r") as f_in:
+        # Process simulated faults
+        simulated_faults_hash = {}
+        exp_n = 0
+
+        for exp in tqdm(
+            f_in.root.fault,
+            total=f_in.root.fault._v_nchildren,
+            desc="Reading simulated faults",
+        ):
+            simulated_exp = {
+                "index": exp_n,
+                "faultlist": [
+                    Fault(
+                        fault["fault_address"],
+                        [],
+                        fault["fault_type"],
+                        fault["fault_model"],
+                        fault["fault_lifespan"],
+                        fault["fault_mask"],
+                        fault["trigger_address"],
+                        fault["trigger_hitcounter"],
+                        fault["fault_num_bytes"],
+                        fault["fault_wildcard"],
+                    )
+                    for fault in exp.faults.iterrows()
+                ],
+            }
+
+            config_hash = hashlib.sha256()
+            for fault in simulated_exp["faultlist"]:
+                config_hash.update(str(fault).encode())
+            simulated_faults_hash[config_hash.digest()] = simulated_exp
+
+            exp_n = exp_n + 1
+
+    return simulated_faults_hash
+
+
+def get_not_simulated_faults(faultlist, simulated_faults):
+    missing_faultlist = []
+
+    for faultconfig in faultlist:
+        config_hash = hashlib.sha256()
+        for fault in faultconfig["faultlist"]:
+            config_hash.update(str(fault).encode())
+
+        if not config_hash.digest() in simulated_faults:
+            missing_faultlist.append(faultconfig)
+
+    return missing_faultlist
+
+
 def controller(
     args,
     hdf5mode,
@@ -491,6 +545,7 @@ def controller(
     num_workers,
     queuedepth,
     compressionlevel,
+    missing_only,
     goldenrun_only,
     goldenrun=True,
     logger=hdf5collector,
@@ -588,6 +643,20 @@ def controller(
         log_config = True
         log_goldenrun = True
 
+    if missing_only:
+        simulated_faults = read_simulated_faults(hdf5_file)
+        faultlist = get_not_simulated_faults(faultlist, simulated_faults)
+
+        log_config = False
+        log_goldenrun = False
+
+        overwrite_faults = False
+
+        if faultlist:
+            clogger.info(f"{len(faultlist)} faults are missing and will be simulated")
+        else:
+            clogger.info("All faults are already simulated")
+
     p_logger = Process(
         target=logger,
         args=(
@@ -741,7 +810,7 @@ def controller(
     if faultlist:
         tperindex = (t1 - t0) / len(faultlist)
     else:
-        tperindex = (t1 - t0)
+        tperindex = t1 - t0
 
     tperworker = tperindex / num_workers
     clogger.debug(
@@ -838,6 +907,13 @@ def get_argument_parser():
         action="store_true",
         required=False,
     )
+    parser.add_argument(
+        "--missing-only",
+        "-m",
+        help="Only run missing experiments",
+        action="store_true",
+        required=False,
+    )
 
     return parser
 
@@ -880,6 +956,13 @@ def process_arguments(args):
     else:
         parguments["goldenrun_only"] = False
 
+    if args.missing_only and hdf5file.is_file():
+        parguments["missing_only"] = True
+        parguments["hdf5mode"] = "a"
+        parguments["goldenrun"] = False
+    else:
+        parguments["missing_only"] = False
+
     qemu_conf = json.load(args.qemu)
     args.qemu.close()
     print(qemu_conf)
@@ -987,7 +1070,8 @@ def init_logging():
         parguments["num_workers"],  # num_workers
         parguments["queuedepth"],  # queuedepth
         parguments["compressionlevel"],  # compressionlevel
-        parguments["goldenrun_only"],
+        parguments["missing_only"],  # missing_only flag
+        parguments["goldenrun_only"],  # goldenrun_only flag
         parguments["goldenrun"],  # goldenrun
         hdf5collector,  # logger
         None,  # qemu_pre
diff --git a/faultclass.py b/faultclass.py
index 695e62c..48fe9ac 100644
--- a/faultclass.py
+++ b/faultclass.py
@@ -123,6 +123,19 @@ def __init__(
         self.num_bytes = num_bytes
         self.wildcard = wildcard
 
+    def __str__(self):
+        return (
+            f"{self.trigger.address}"
+            f"{self.trigger.hitcounter}"
+            f"{self.address}"
+            f"{self.type}"
+            f"{self.model}"
+            f"{self.lifespan}"
+            f"{self.mask}"
+            f"{self.num_bytes}"
+            f"{self.wildcard}"
+        )
+
 
 def write_fault_list_to_pipe(fault_list, fifo):
     fault_pack = fault_pb2.FaultPack()