Allow to remove prefixes for source files
vmordan committed Oct 20, 2023
1 parent 0900cef commit abea256
Showing 6 changed files with 54 additions and 26 deletions.
12 changes: 7 additions & 5 deletions scripts/components/exporter.py
@@ -278,7 +278,7 @@ def export(self, report_launches: str, report_resources: str, report_components:
         proofs = {}

         # Process several error traces in parallel.
-        source_files = set()
+        source_files = dict()
         with open(report_launches, encoding='utf8', errors='ignore') as file_obj, \
                 open(report_resources, encoding='utf8', errors='ignore') as res_obj:
             resources_data = res_obj.readlines()[1:]
@@ -408,7 +408,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
                     with zipfile.ZipFile(witness, 'r') as arc_arch:
                         src = json.loads(arc_arch.read(ERROR_TRACE_SOURCES).
                                          decode('utf8', errors='ignore'))
-                        source_files.update(src)
+                        for src_file, src_file_res in src:
+                            source_files[src_file] = src_file_res
                 except Exception as exception:
                     self.logger.warning(f"Cannot process sources: {exception}\n",
                                         exc_info=True)
@@ -439,7 +440,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
                     with zipfile.ZipFile(witness, 'r') as arc_arch:
                         src = json.loads(arc_arch.read(ERROR_TRACE_SOURCES).
                                          decode('utf8', errors='ignore'))
-                        source_files.update(src)
+                        for src_file, src_file_res in src:
+                            source_files[src_file] = src_file_res
                 except Exception as exception:
                     self.logger.warning(f"Cannot process sources: "
                                         f"{exception}\n", exc_info=True)
@@ -577,8 +579,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
         reports.append(root_element)
         with zipfile.ZipFile(DEFAULT_SOURCES_ARCH, mode='w',
                              compression=zipfile.ZIP_DEFLATED) as arch_obj:
-            for src_file in source_files:
-                arch_obj.write(src_file)
+            for src_file, src_file_res in source_files.items():
+                arch_obj.write(src_file, arcname=src_file_res)
         final_zip.write(DEFAULT_SOURCES_ARCH)
         os.remove(DEFAULT_SOURCES_ARCH)

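Note on the change above: source_files is now a mapping from the on-disk path of each source file to its archive-relative name, applied through the arcname argument of zipfile.ZipFile.write(). A self-contained sketch of that mechanism, using throwaway paths that are not part of this commit:

    import os
    import tempfile
    import zipfile

    # Stand-in for a source file inside some build tree.
    base = tempfile.mkdtemp()
    src_file = os.path.join(base, "drivers", "usb", "usb.c")
    os.makedirs(os.path.dirname(src_file))
    with open(src_file, "w", encoding="utf8") as f:
        f.write("/* stub */\n")

    # Strip the build-tree prefix so the archive holds a relative path.
    arc_name = os.path.relpath(src_file, base)
    with zipfile.ZipFile("sources.zip", "w", zipfile.ZIP_DEFLATED) as arch_obj:
        arch_obj.write(src_file, arcname=arc_name)
    print(zipfile.ZipFile("sources.zip").namelist())  # ['drivers/usb/usb.c']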
28 changes: 18 additions & 10 deletions scripts/components/mea.py
@@ -58,19 +58,19 @@

 class MEA(Component):
     """
-    Multiple Error Analysis (MEA) is aimed at processing several error traces, which violates
+    Multiple Error Analysis (MEA) is aimed at processing several error traces, which violate
     the same property. Error traces are called equivalent if they correspond to the same error.
     Error trace equivalence for two traces et1 and et2 is determined in the following way:
     et1 = et2 <=> comparison(conversion(parser(et1)), conversion(parser(et2))),
-    where parser function parses the given file with error trace and returns its internal
+    where the parser function parses the given file with an error trace and returns its internal
     representation, the conversion function transforms this internal representation (for example,
     by removing some elements) and the comparison function compares internal representations.
     Definitions:
     - parsed error trace: result of parser(et), where et is a file name with an error trace (xml);
     - converted error trace: result of conversion(pet), where pet is a parsed error trace.
     """
     def __init__(self, general_config: dict, error_traces: list, install_dir: str, rule: str = "",
-                 result_dir: str = "", is_standalone=False):
+                 result_dir: str = "", is_standalone=False, remove_prefixes=None):
         super().__init__(COMPONENT_MEA, general_config)
         self.install_dir = install_dir
         if result_dir:
@@ -97,6 +97,7 @@ def __init__(self, general_config: dict, error_traces: list, install_dir: str, r
         self.unzip = self.__get_option_for_rule(TAG_UNZIP, True)
         self.dry_run = self.__get_option_for_rule(TAG_DRY_RUN, False)
         self.source_dir = self.__get_option_for_rule(TAG_SOURCE_DIR, None)
+        self.remove_prefixes = remove_prefixes

         # Cache of filtered converted error traces.
         self.__cache = {}
@@ -335,9 +336,9 @@ def __print_trace_archive(self, error_trace_file_name: str, witness_type=WITNESS
     def __process_parsed_trace(parsed_error_trace: dict):
         # Normalize source paths.
         src_files = []
-        for src_file in parsed_error_trace['files']:
-            src_file = os.path.normpath(src_file)
-            src_files.append(src_file)
+        for src_file, src_file_res in parsed_error_trace['files']:
+            tmp_res = (os.path.normpath(src_file), src_file_res)
+            src_files.append(tmp_res)
         parsed_error_trace['files'] = src_files

     def __parse_trace(self, error_trace_file: str, supported_types: set) -> dict:
@@ -346,7 +347,7 @@ def __parse_trace(self, error_trace_file: str, supported_types: set) -> dict:
             "Witness processor", logging.WARNING if self.debug else logging.ERROR
         )
         try:
-            json_error_trace = import_error_trace(logger, error_trace_file, self.source_dir)
+            json_error_trace = import_error_trace(logger, error_trace_file, self.source_dir, self.remove_prefixes)
             if self.dry_run:
                 warnings = json_error_trace.get('warnings', [])
                 if warnings:
@@ -373,13 +374,20 @@ def __print_parsed_error_trace(self, parsed_error_trace: dict, converted_error_t
         json_trace_name, source_files, converted_traces_files = \
             self.__get_aux_file_names(error_trace_file)

-        with open(json_trace_name, 'w', encoding='utf8') as file_obj:
-            json.dump(parsed_error_trace, file_obj, ensure_ascii=False, sort_keys=True, indent="\t")
-
         with open(source_files, 'w', encoding='utf8') as file_obj:
             json.dump(parsed_error_trace['files'], file_obj, ensure_ascii=False, sort_keys=True,
                       indent="\t")

+        files = []
+        for file, file_res in parsed_error_trace['files']:
+            if self.is_standalone:
+                files.append(file)
+            else:
+                files.append(file_res)
+        parsed_error_trace['files'] = files
+        with open(json_trace_name, 'w', encoding='utf8') as file_obj:
+            json.dump(parsed_error_trace, file_obj, ensure_ascii=False, sort_keys=True, indent="\t")
+
         converted_traces = {}
         if parsed_error_trace.get('type') == WITNESS_VIOLATION:
             for conversion_function in EXPORTING_CONVERTED_FUNCTIONS:
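After this change, parsed_error_trace['files'] holds (original path, prefix-stripped path) pairs, and __print_parsed_error_trace collapses them back to plain names before dumping the trace: on-disk paths for standalone runs, archive-relative names otherwise. A small sketch of that selection step, with assumed data (nothing here comes from a real trace):

    # Assumed layout of the new 'files' entries.
    parsed_error_trace = {
        'files': [
            ('/work/tasks/linux/drivers/usb/usb.c', 'drivers/usb/usb.c'),
            ('/work/jobs/job/vtg/env.c', 'env.c'),
        ]
    }
    is_standalone = False  # standalone runs keep on-disk paths

    files = []
    for file, file_res in parsed_error_trace['files']:
        # Exported reports need names matching the sources archive;
        # a standalone run needs real paths for local viewing.
        files.append(file if is_standalone else file_res)
    parsed_error_trace['files'] = files
    print(parsed_error_trace['files'])  # ['drivers/usb/usb.c', 'env.c']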
11 changes: 8 additions & 3 deletions scripts/klever_bridge/launcher.py
@@ -67,15 +67,20 @@ def __process_single_klever_result(self, result: VerificationResults, output_dir
         self.config[COMPONENT_MEA][TAG_SOURCE_DIR] = os.path.join(output_dir, os.path.pardir)
         launch_directory = self._copy_result_files(files, self.process_dir)

-        result.work_dir = launch_directory
-        result.parse_output_dir(launch_directory, self.install_dir, self.result_dir_et, columns)
         jobs_dir = os.path.join(self.output_dir, os.path.pardir, JOBS_DIR, self.job_id, KLEVER_WORK_DIR)
         source_dir = os.path.join(self.tasks_dir, self.kernel_dir.lstrip(os.sep))
+        result.work_dir = launch_directory
+        remove_src_prefixes = [
+            os.path.realpath(source_dir),
+            os.path.realpath(os.path.join(jobs_dir, "job", "root", "specifications")),
+            os.path.realpath(os.path.join(jobs_dir, "job", "vtg"))
+        ]
+        result.parse_output_dir(launch_directory, self.install_dir, self.result_dir_et, columns, remove_src_prefixes)
         self._process_coverage(result, launch_directory, [source_dir, self.tasks_dir,
                                os.path.join(output_dir, os.path.pardir)],
                                work_dir=jobs_dir)
         if result.initial_traces > 1:
-            result.filter_traces(launch_directory, self.install_dir, self.result_dir_et)
+            result.filter_traces(launch_directory, self.install_dir, self.result_dir_et, remove_src_prefixes)
         queue.put(result)
         sys.exit(0)

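The three prefixes are resolved with os.path.realpath so that symlinks and ".." segments are collapsed before they are matched against paths recorded in witnesses. A hypothetical illustration (directory names are placeholders, not taken from a real job):

    import os

    tasks_dir = "/work/tasks"
    kernel_dir = "/home/user/linux-5.10"
    jobs_dir = "/work/results/../jobs/job-42/klever"

    source_dir = os.path.join(tasks_dir, kernel_dir.lstrip(os.sep))
    remove_src_prefixes = [
        os.path.realpath(source_dir),
        os.path.realpath(os.path.join(jobs_dir, "job", "root", "specifications")),
        os.path.realpath(os.path.join(jobs_dir, "job", "vtg")),
    ]
    # e.g. ['/work/tasks/home/user/linux-5.10',
    #       '/work/jobs/job-42/klever/job/root/specifications',
    #       '/work/jobs/job-42/klever/job/vtg']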
4 changes: 2 additions & 2 deletions scripts/mea/et/__init__.py
@@ -26,7 +26,7 @@
 from mea.et.tmpvars import generic_simplifications


-def import_error_trace(logger, witness, source_dir=None):
+def import_error_trace(logger, witness, source_dir=None, remove_prefixes=None):
     """
     Main function for importing a witness into the CV internal format
     """
@@ -44,7 +44,7 @@ def import_error_trace(logger, witness, source_dir=None, remove_prefixes=None):
     # Do final checks
     internal_witness.final_checks(witness_parser.entry_point)

-    return internal_witness.serialize()
+    return internal_witness.serialize(remove_prefixes)


 # This is intended for testing purposes, when one has a witness and would like to debug its
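For callers, the extension is backward-compatible: remove_prefixes defaults to None, in which case serialization behaves as before. A hedged usage sketch (the logger name, witness file and prefix are example values only):

    import logging
    from mea.et import import_error_trace

    logger = logging.getLogger("Witness processor")

    # Old behaviour: source paths are kept as recorded in the witness.
    trace = import_error_trace(logger, "witness.1.graphml")

    # New behaviour: the given prefixes are stripped from source paths
    # in the serialized trace.
    trace = import_error_trace(logger, "witness.1.graphml",
                               source_dir="/work/tasks/linux-5.10",
                               remove_prefixes=["/work/tasks/linux-5.10"])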
16 changes: 14 additions & 2 deletions scripts/mea/et/internal_witness.py
@@ -104,14 +104,26 @@ def prune(self):
         sink_edges = set([self._edges.index(e) for e in self._edges if e.get('sink')])
         self._edges = [e for index, e in enumerate(self._edges) if index not in sink_edges]

-    def serialize(self):
+    def serialize(self, remove_prefixes=None):
         capitalize_attr_names(self._attrs)

+        files = []
+        remove_prefixes_res = []
+        if remove_prefixes:
+            for prefix in remove_prefixes:
+                remove_prefixes_res.append(prefix.lstrip(os.sep))
+        for file in self._files:
+            res_file = file
+            for prefix in remove_prefixes_res:
+                res_file = re.sub(prefix, '', res_file)
+            res_file = res_file.lstrip(os.sep)
+            files.append((file, res_file))
+
         data = {
             'attrs': self._attrs,
             'edges': self._edges,
             'entry node': 0,
-            'files': self._files,
+            'files': files,
             'funcs': self._funcs,
             'actions': self._actions,
             'callback actions': self._callback_actions,
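serialize() now emits (original path, stripped path) pairs. Note that the prefix is passed to re.sub as a regular-expression pattern; the equivalent standalone sketch below additionally applies re.escape, which would be needed if a prefix contained metacharacters such as '+' (an assumption on my part, not part of the commit):

    import os
    import re

    def strip_prefixes(files, remove_prefixes):
        # Mirror serialize(): drop the leading separator from each prefix,
        # remove it from every path, and keep the original alongside.
        prefixes = [p.lstrip(os.sep) for p in (remove_prefixes or [])]
        result = []
        for file in files:
            res_file = file
            for prefix in prefixes:
                res_file = re.sub(re.escape(prefix), '', res_file)
            result.append((file, res_file.lstrip(os.sep)))
        return result

    print(strip_prefixes(['/work/tasks/linux/usb.c'], ['/work/tasks/linux']))
    # [('/work/tasks/linux/usb.c', 'usb.c')]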
9 changes: 5 additions & 4 deletions scripts/models/verification_result.py
@@ -238,7 +238,7 @@ def __parse_xml_node(self, columns):
                 self.resources[title] = int(float(value))

     def parse_output_dir(self, launch_dir: str, install_dir: str, result_dir: str,
-                         parsed_columns=None):
+                         parsed_columns=None, ignore_src_prefixes=None):
         """
         Get verification results from launch directory.
         """
@@ -287,7 +287,8 @@ def parse_output_dir(self, launch_dir: str, install_dir: str, result_dir: str,
             # Trace should be checked if it is correct or not.
             start_time_cpu = time.process_time()
             start_wall_time = time.time()
-            mea = MEA(self.config, error_traces, install_dir, self.rule, result_dir)
+            mea = MEA(self.config, error_traces, install_dir, self.rule, result_dir,
+                      remove_prefixes=ignore_src_prefixes)
             is_exported, witness_type = mea.process_traces_without_filtering()
             if is_exported:
                 # Trace is fine, just recheck final verdict.
@@ -317,14 +318,14 @@ def parse_output_dir(self, launch_dir: str, install_dir: str, result_dir: str,
             else:
                 os.remove(file)

-    def filter_traces(self, launch_dir: str, install_dir: str, result_dir: str):
+    def filter_traces(self, launch_dir: str, install_dir: str, result_dir: str, remove_src_prefixes=None):
         """
         Perform Multiple Error Analysis to filter found error traces (only for several traces).
         """
         start_time_cpu = time.process_time()
         start_wall_time = time.time()
         traces = glob.glob(f"{launch_dir}/witness*")
-        mea = MEA(self.config, traces, install_dir, self.rule, result_dir)
+        mea = MEA(self.config, traces, install_dir, self.rule, result_dir, remove_prefixes=remove_src_prefixes)
         self.filtered_traces = len(mea.filter())
         if self.filtered_traces:
             self.verdict = VERDICT_UNSAFE
