diff --git a/src/clusterfuzz/_internal/cron/grouper.py b/src/clusterfuzz/_internal/cron/grouper.py
index f5e61c4562..e2a49117ae 100644
--- a/src/clusterfuzz/_internal/cron/grouper.py
+++ b/src/clusterfuzz/_internal/cron/grouper.py
@@ -426,6 +426,10 @@ def group_testcases():
     try:
       issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
           testcase)
+      if issue_tracker:
+        logs.info(
+            f'Running grouping with issue tracker {issue_tracker.project}, '
+            f'for testcase {testcase_id}')
     except ValueError:
       logs.error('Couldn\'t get issue tracker for issue.')
       del testcase_map[testcase_id]
diff --git a/src/clusterfuzz/_internal/cron/triage.py b/src/clusterfuzz/_internal/cron/triage.py
index 41017748c0..1ed9778b60 100644
--- a/src/clusterfuzz/_internal/cron/triage.py
+++ b/src/clusterfuzz/_internal/cron/triage.py
@@ -353,16 +353,22 @@ def main():
       testcase = data_handler.get_testcase_by_id(testcase_id)
     except errors.InvalidTestcaseError:
       # Already deleted.
+      logs.info(
+          f'Skipping testcase {testcase_id}, since it was already deleted.')
       continue

     critical_tasks_completed = data_handler.critical_tasks_completed(testcase)

     # Skip if testcase's job is removed.
     if testcase.job_type not in all_jobs:
+      logs.info(f'Skipping testcase {testcase_id}, since its job was removed '
+                f'({testcase.job_type})')
       continue

     # Skip if testcase's job is in exclusions list.
     if testcase.job_type in excluded_jobs:
+      logs.info(f'Skipping testcase {testcase_id}, since its job is in the'
+                f' exclusion list ({testcase.job_type})')
       continue

     # Emmit the metric for testcases that should be triaged.
@@ -370,20 +376,27 @@ def main():

     # Skip if we are running progression task at this time.
     if testcase.get_metadata('progression_pending'):
+      logs.info(f'Skipping testcase {testcase_id}, progression pending')
       continue

     # If the testcase has a bug filed already, no triage is needed.
     if _is_bug_filed(testcase):
+      logs.info(
+          f'Skipping testcase {testcase_id}, since a bug was already filed.')
       continue

     # Check if the crash is important, i.e. it is either a reproducible crash
     # or an unreproducible crash happening frequently.
     if not _is_crash_important(testcase):
+      logs.info(
+          f'Skipping testcase {testcase_id}, since the crash is not important.')
       continue

     # Require that all tasks like minimizaton, regression testing, etc have
     # finished.
     if not critical_tasks_completed:
+      logs.info(
+          f'Skipping testcase {testcase_id}, critical tasks still pending.')
       continue

     # For testcases that are not part of a group, wait an additional time to
@@ -398,22 +411,32 @@ def main():
     # metadata works well.
     if not testcase.group_id and not dates.time_has_expired(
         testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
+      logs.info(f'Skipping testcase {testcase_id}, pending grouping.')
       continue

     if not testcase.get_metadata('ran_grouper'):
       # Testcase should be considered by the grouper first before filing.
+      logs.info(f'Skipping testcase {testcase_id}, pending grouping.')
       continue

     # If this project does not have an associated issue tracker, we cannot
     # file this crash anywhere.
-    issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase)
+    try:
+      issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
+          testcase)
+    except ValueError:
+      issue_tracker = None
     if not issue_tracker:
+      logs.info(f'No issue tracker detected for testcase {testcase_id}, '
+                'publishing message.')
       issue_filer.notify_issue_update(testcase, 'new')
       continue

     # If there are similar issues to this test case already filed or recently
     # closed, skip filing a duplicate bug.
     if _check_and_update_similar_bug(testcase, issue_tracker):
+      logs.info(f'Skipping testcase {testcase_id}, since a similar bug'
+                ' was already filed.')
       continue

     # Clean up old triage messages that would be not applicable now.
@@ -421,6 +444,7 @@ def main():

     # File the bug first and then create filed bug metadata.
     if not _file_issue(testcase, issue_tracker, throttler):
+      logs.info(f'Issue filing failed for testcase id {testcase_id}')
       continue

     _create_filed_bug_metadata(testcase)
diff --git a/src/clusterfuzz/_internal/issue_management/issue_tracker_utils.py b/src/clusterfuzz/_internal/issue_management/issue_tracker_utils.py
index 07b9fc62d2..a305bae680 100644
--- a/src/clusterfuzz/_internal/issue_management/issue_tracker_utils.py
+++ b/src/clusterfuzz/_internal/issue_management/issue_tracker_utils.py
@@ -21,6 +21,7 @@
 from clusterfuzz._internal.issue_management import issue_tracker_policy
 from clusterfuzz._internal.issue_management import jira
 from clusterfuzz._internal.issue_management import monorail
+from clusterfuzz._internal.metrics import logs
 from clusterfuzz._internal.system import environment

 _ISSUE_TRACKER_CACHE_CAPACITY = 8
@@ -43,7 +44,10 @@ def _get_issue_tracker_project_name(testcase=None):
   """Return issue tracker project name given a testcase or default."""
   from clusterfuzz._internal.datastore import data_handler
   job_type = testcase.job_type if testcase else None
-  return data_handler.get_issue_tracker_name(job_type)
+  issue_tracker_name = data_handler.get_issue_tracker_name(job_type)
+  logs.info(f'For testcase {testcase.key if testcase else None}, '
+            f'using issue tracker {issue_tracker_name}')
+  return issue_tracker_name


 def request_or_task_cache(func):
@@ -66,8 +70,12 @@ def get_issue_tracker(project_name=None):
   issue_project_config = issue_tracker_config.get(project_name)
   if not issue_project_config:
     raise ValueError('Issue tracker for {} does not exist'.format(project_name))
+  logs.info(f'Issue tracker = {project_name}, issue tracker config = '
+            f'{issue_project_config}')

-  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_project_config['type'])
+  issue_tracker_type = issue_project_config['type']
+  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_tracker_type)
+  logs.info(f'Using the issue tracker constructor for {issue_tracker_type}')
   if not constructor:
     raise ValueError('Invalid issue tracker type: ' +
                      issue_project_config['type'])