From 191d14bcf7a56b37eb4dbce5da19092ee5c3e2d2 Mon Sep 17 00:00:00 2001 From: Jonathan Metzman Date: Thu, 26 Dec 2024 08:23:09 -0500 Subject: [PATCH] It's not used anymore now that everything is run using batch. Also, bump fuzz tasks scheduled so we can postprocess on tworkers. Bug: b/381528482 --- src/clusterfuzz/_internal/cron/manage_vms.py | 471 +------ .../_internal/cron/schedule_fuzz.py | 2 +- .../handlers/cron/manage_vms_test.py | 1165 +---------------- 3 files changed, 5 insertions(+), 1633 deletions(-) diff --git a/src/clusterfuzz/_internal/cron/manage_vms.py b/src/clusterfuzz/_internal/cron/manage_vms.py index 0653299e1c..64d939dc9a 100644 --- a/src/clusterfuzz/_internal/cron/manage_vms.py +++ b/src/clusterfuzz/_internal/cron/manage_vms.py @@ -13,34 +13,20 @@ # limitations under the License. """Cron to managed VMs.""" -from collections import namedtuple from concurrent.futures import ThreadPoolExecutor import copy -import itertools import json import logging from typing import Any from typing import Dict from typing import Optional -from google.cloud import ndb - -from clusterfuzz._internal.base import utils from clusterfuzz._internal.config import local_config from clusterfuzz._internal.cron.helpers import bot_manager -from clusterfuzz._internal.datastore import data_types -from clusterfuzz._internal.datastore import ndb_utils from clusterfuzz._internal.google_cloud_utils import compute_engine_projects -PROJECT_MIN_CPUS = 1 - -# This is the maximum number of instances supported in a single instance group. -PROJECT_MAX_CPUS = 1000 - NUM_THREADS = 8 -WorkerInstance = namedtuple('WorkerInstance', ['name', 'project']) - class ManageVmsError(Exception): """Base exception class.""" @@ -51,21 +37,6 @@ def _get_project_ids(): return list(local_config.Config(local_config.GCE_CLUSTERS_PATH).get().keys()) -def _instance_name_from_url(instance_url): - """Extract instance name from url.""" - return instance_url.split('/')[-1] - - -def get_resource_name(prefix, project_name): - """Get a name that can be used for GCE resources.""" - # https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers - max_name_length = 58 - - project_name = project_name.lower().replace('_', '-') - name = prefix + '-' + project_name - return name[:max_name_length] - - def get_template_body(gce_project, template_name, task_tag=None, @@ -321,449 +292,9 @@ def update_cluster(self, str(e)) -class OssFuzzClustersManager(ClustersManager): - """Manager for clusters in OSS-Fuzz.""" - - def __init__(self, project_id): - super().__init__(project_id) - self.worker_to_assignment = {} - for assignment in self.gce_project.host_worker_assignments: - self.worker_to_assignment[assignment.worker] = assignment - - self.all_host_names = set() - - def update_clusters(self): - """Update all clusters in a project.""" - self.start_thread_pool() - - all_projects = list(data_types.OssFuzzProject.query().order( - data_types.OssFuzzProject.name)) - - self.cleanup_old_projects([project.name for project in all_projects]) - - projects = [project for project in all_projects if not project.high_end] - high_end_projects = [ - project for project in all_projects if project.high_end - ] - - project_infos = [ - self.get_or_create_project_info(project.name) for project in projects - ] - - high_end_project_infos = [ - self.get_or_create_project_info(project.name) - for project in high_end_projects - ] - - for project, project_info in itertools.chain( - list(zip(projects, project_infos)), - list(zip(high_end_projects, 
high_end_project_infos))): - self.cleanup_clusters(project, project_info) - - for cluster in self.gce_project.clusters: - self.update_project_cpus(projects, project_infos, high_end_projects, - high_end_project_infos, cluster) - - self.cleanup_old_assignments(self.all_host_names) - self.finish_updates() - - def get_or_create_project_info(self, project_name): - """Get OSS-Fuzz CPU info by project name (or create a new one if it doesn't - exist).""" - key = ndb.Key(data_types.OssFuzzProjectInfo, project_name) - project_info = key.get() - if not project_info: - project_info = data_types.OssFuzzProjectInfo( - name=project_name, id=project_name) - project_info.put() - - return project_info - - def get_or_create_host_worker_assignment(self, host_name, instance_num): - """Get OSS-Fuzz host worker assignment (or create a new one if it doesn't - exist).""" - key_id = '%s-%d' % (host_name, instance_num) - key = ndb.Key(data_types.HostWorkerAssignment, key_id) - assignment = key.get() - if not assignment: - assignment = data_types.HostWorkerAssignment( - host_name=host_name, instance_num=instance_num, id=key_id) - assignment.put() - - return assignment - - def cleanup_old_assignments(self, host_names): - """Remove old OSS-Fuzz host worker assignment entries.""" - to_delete = [] - for assignment in data_types.HostWorkerAssignment.query(): - if assignment.host_name not in host_names: - to_delete.append(assignment.key) - - ndb_utils.delete_multi(to_delete) - - def distribute_cpus(self, projects, total_cpus): - """Distribute OSS-Fuzz CPUs for each project by weight. - - |projects| should be sorted - alphabetically by name to ensure determinism for the same set of CPUs. - """ - available_cpus = total_cpus - total_weight = sum(project.cpu_weight for project in projects) - - cpu_count = [] - - for project in projects: - if total_weight: - share = project.cpu_weight / total_weight - else: - share = 0.0 - - share_cpus = int(total_cpus * share) - share_cpus = max(PROJECT_MIN_CPUS, share_cpus) - share_cpus = min(PROJECT_MAX_CPUS, share_cpus) - - if share_cpus <= available_cpus: - cpu_count.append(share_cpus) - available_cpus -= share_cpus - else: - cpu_count.append(0) - - # indexes into |project| sorted by highest weight first. - indexes_by_weight = sorted( - list(range(len(projects))), - key=lambda k: projects[k].cpu_weight, - reverse=True) - - # Distribute the remainder from rounding errors (and capping) up to the cap, - # preferring projects with a higher weight first. - while available_cpus: - cpus_allocated = 0 - - for i in range(len(cpu_count)): - project_index = indexes_by_weight[i] - - if cpu_count[project_index] < PROJECT_MAX_CPUS: - cpu_count[project_index] += 1 - cpus_allocated += 1 - - if cpus_allocated >= available_cpus: - break - - if not cpus_allocated: - # Hit the cap for each project. Realistically, this shouldn't ever - # happen. - break - - available_cpus -= cpus_allocated - - if available_cpus: - logging.warning('%d CPUs are not being used.', available_cpus) - - return cpu_count - - def do_assign_hosts_to_workers(self, host_names, worker_instances, - workers_per_host): - """Assign OSS-Fuzz host instances to workers.""" - # Sort host and worker instance names to make assignment deterministic for - # the same initial set of host and workers. - host_names.sort() - worker_instances.sort(key=lambda w: w.name) - - # Algorithm: - # For each host instance, - # - If there is already an assignment, and a worker with the same name - # still exists, do nothing. 
- # - Otherwise, assign it to the first unassigned worker (in alphabetical - # order). - # This should ensure that a worker is reassigned only if it was - # reimaged/new. - current_worker_names = {worker.name for worker in worker_instances} - previous_assigned_workers = set() - - new_assignments = [] - - for host_name in host_names: - for i in range(0, workers_per_host): - assignment = self.get_or_create_host_worker_assignment(host_name, i) - if (assignment.worker_name and - assignment.worker_name in current_worker_names): - # Existing assignment is still valid. Don't do anything for these. - logging.info('Keeping old assignment of %s(%d) -> %s.', host_name, i, - assignment.worker_name) - previous_assigned_workers.add(assignment.worker_name) - continue - - # This host instance was either unassigned or the worker it was - # connected to no longer exists, so we need to assign it to a new - # worker. - new_assignments.append(assignment) - - new_workers = [ - worker for worker in worker_instances - if worker.name not in previous_assigned_workers - ] - - assert len(new_assignments) == len(new_workers) - for assignment, worker in zip(new_assignments, new_workers): - assignment.worker_name = worker.name - assignment.project_name = worker.project - logging.info('New assignment: %s(%d) - >%s.', assignment.host_name, - assignment.instance_num, assignment.worker_name) - - return new_assignments - - def delete_gce_resources(self, project_info, cluster_info): - """Delete instance templates and instance groups.""" - manager = bot_manager.BotManager(self.gce_project.project_id, - cluster_info.gce_zone) - - resource_name = get_resource_name(cluster_info.cluster, project_info.name) - - try: - manager.instance_group(resource_name).delete() - except bot_manager.NotFoundError: - logging.info('Instance group %s already deleted.', resource_name) - - try: - manager.instance_template(resource_name).delete() - except bot_manager.NotFoundError: - logging.info('Instance template %s already deleted.', resource_name) - - def cleanup_old_projects(self, existing_project_names): - """Cleanup old projects.""" - to_delete = [] - - for project_info in list(data_types.OssFuzzProjectInfo.query()): - if project_info.name in existing_project_names: - continue - - logging.info('Deleting %s', project_info.name) - - for cluster_info in project_info.clusters: - self.delete_gce_resources(project_info, cluster_info) - - to_delete.append(project_info.key) - - ndb_utils.delete_multi(to_delete) - - def cleanup_clusters(self, project, project_info): - """Remove nonexistant clusters.""" - existing_cluster_names = [ - cluster.name for cluster in self.gce_project.clusters - ] - - # Delete clusters that no longer exist, or the if the high end flag changed - # for a project. 
- to_delete = [ - cluster_info for cluster_info in project_info.clusters if - (cluster_info.cluster not in existing_cluster_names or project.high_end - != self.gce_project.get_cluster(cluster_info.cluster).high_end) - ] - if not to_delete: - return - - for cluster_info in to_delete: - logging.info('Deleting old cluster %s for %s.', cluster_info.cluster, - project_info.name) - self.delete_gce_resources(project_info, cluster_info) - - project_info.clusters = [ - cluster_info for cluster_info in project_info.clusters - if cluster_info.cluster in existing_cluster_names - ] - project_info.put() - - def update_project_cluster(self, - project, - project_info, - cluster, - cpu_count, - disk_size_gb=None): - """Update cluster allocation for a project.""" - service_account = None - tls_cert = None - - if cluster.worker: - # If this cluster is for untrusted workers, use the project service - # account. - service_account = project.service_account - tls_cert = ndb.Key(data_types.WorkerTlsCert, project.name).get() - if not tls_cert: - logging.warning('TLS certs not set up yet for %s.', project.name) - return - - cluster_info = project_info.get_cluster_info(cluster.name) - if not cluster_info: - project_info.clusters.append( - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster=cluster.name, - gce_zone=cluster.gce_zone, - cpu_count=cpu_count)) - cluster_info = project_info.clusters[-1] - - # Get a name that can be used for the instance template and instance group. - resource_name = get_resource_name(cluster.name, project_info.name) - - def do_update(): - """Update the cluster and cpu count info.""" - self.update_cluster( - cluster, - resource_name, - cpu_count, - task_tag=project_info.name, - disk_size_gb=disk_size_gb, - service_account=service_account, - tls_cert=tls_cert) - - cluster_info.cpu_count = cpu_count - - self.pending_updates.append(self.thread_pool.submit(do_update)) - - def update_project_cpus(self, projects, project_infos, high_end_projects, - high_end_project_infos, cluster): - """Update CPU allocations for each project.""" - # Calculate CPUs in each cluster. - if not cluster.distribute: - self.pending_updates.append( - self.thread_pool.submit(self.update_cluster, cluster, cluster.name, - cluster.instance_count)) - return - - if cluster.high_end: - current_projects = high_end_projects - current_project_infos = high_end_project_infos - else: - current_projects = projects - current_project_infos = project_infos - - cpu_counts = self.distribute_cpus(current_projects, cluster.instance_count) - - # Resize projects starting with ones that reduce number of CPUs. This is - # so that we always have quota when we're resizing a project cluster. - # pylint: disable=cell-var-from-loop - def _cpu_diff_key(index): - cluster_info = current_project_infos[index].get_cluster_info(cluster.name) - if cluster_info and cluster_info.cpu_count is not None: - old_cpu_count = cluster_info.cpu_count - else: - old_cpu_count = 0 - - return cpu_counts[index] - old_cpu_count - - resize_order = sorted(list(range(len(cpu_counts))), key=_cpu_diff_key) - for i in resize_order: - project = current_projects[i] - project_info = current_project_infos[i] - self.update_project_cluster( - project, - project_info, - cluster, - cpu_counts[i], - disk_size_gb=project.disk_size_gb) - - self.wait_updates() - ndb_utils.put_multi(project_infos) - ndb_utils.put_multi(high_end_project_infos) - - # If the workers are done, we're ready to assign them. - # Note: This assumes that hosts are always specified before workers. 
- if cluster.name in self.worker_to_assignment: - self.assign_hosts_to_workers(self.worker_to_assignment[cluster.name]) - - def get_all_workers_in_cluster(self, manager, cluster_name): - """Get all workers in a cluster.""" - workers = [] - project_infos = list(data_types.OssFuzzProjectInfo.query().order( - data_types.OssFuzzProjectInfo.name)) - - for project_info in project_infos: - cluster_info = next((cluster for cluster in project_info.clusters - if cluster.cluster == cluster_name), None) - if not cluster_info or cluster_info.cpu_count == 0: - continue - - worker_group_name = get_resource_name(cluster_info.cluster, - project_info.name) - worker_instance_group = manager.instance_group(worker_group_name) - if not worker_instance_group.exists(): - logging.error('Worker instance group %s does not exist.', - worker_group_name) - continue - - instances = list(worker_instance_group.list_managed_instances()) - if len(instances) != cluster_info.cpu_count: - logging.error( - 'Number of instances in instance group %s does not match.' - 'Expected %d, got %d.', worker_group_name, cluster_info.cpu_count, - len(instances)) - raise ManageVmsError('Inconsistent instance count in group.') - - for instance in instances: - workers.append( - WorkerInstance( - name=_instance_name_from_url(instance['instance']), - project=project_info.name)) - - return workers - - def assign_hosts_to_workers(self, assignment): - """Assign host instances to workers.""" - host_cluster = self.gce_project.get_cluster(assignment.host) - worker_cluster = self.gce_project.get_cluster(assignment.worker) - - if host_cluster.gce_zone != worker_cluster.gce_zone: - logging.error('Mismatching zones for %s and %s.', assignment.host, - assignment.worker) - return - - if (host_cluster.instance_count * assignment.workers_per_host != - worker_cluster.instance_count): - logging.error('Invalid host/worker cluster size for %s and %s.', - assignment.host, assignment.worker) - return - - if host_cluster.high_end != worker_cluster.high_end: - logging.error('Mismatching high end setting for %s and %s', - assignment.host, assignment.worker) - return - - manager = bot_manager.BotManager(self.gce_project.project_id, - host_cluster.gce_zone) - host_instance_group = manager.instance_group(host_cluster.name) - - if not host_instance_group.exists(): - logging.error('Host instance group %s does not exist.', host_cluster.name) - return - - host_names = [ - _instance_name_from_url(instance['instance']) - for instance in host_instance_group.list_managed_instances() - ] - self.all_host_names.update(host_names) - worker_instances = self.get_all_workers_in_cluster(manager, - worker_cluster.name) - - if len(worker_instances) != worker_cluster.instance_count: - logging.error( - 'Actual number of worker instances for %s did not match. 
' - 'Expected %d, got %d.', worker_cluster.name, - worker_cluster.instance_count, len(worker_instances)) - return - - new_assignments = self.do_assign_hosts_to_workers( - host_names, worker_instances, assignment.workers_per_host) - ndb_utils.put_multi(new_assignments) - - def main(): """CPU distributor for OSS-Fuzz projects.""" - if utils.is_oss_fuzz(): - manager_class = OssFuzzClustersManager - else: - manager_class = ClustersManager - for project_id in _get_project_ids(): - manager = manager_class(project_id) - manager.update_clusters() + ClustersManager(project_id).update_clusters() logging.info('Mange VMs succeeded.') return True diff --git a/src/clusterfuzz/_internal/cron/schedule_fuzz.py b/src/clusterfuzz/_internal/cron/schedule_fuzz.py index 4b39412cb8..3b01cd1cec 100644 --- a/src/clusterfuzz/_internal/cron/schedule_fuzz.py +++ b/src/clusterfuzz/_internal/cron/schedule_fuzz.py @@ -253,7 +253,7 @@ def get_available_cpus(project: str, regions: List[str]) -> int: # Add up all queued and scheduled. region_counts = [sum(tup) for tup in region_counts] logs.info(f'Region counts: {region_counts}') - if region_counts[0] > 1000: + if region_counts[0] > 5000: # Check queued tasks. logs.info('Too many jobs queued, not scheduling more fuzzing.') return 0 diff --git a/src/clusterfuzz/_internal/tests/appengine/handlers/cron/manage_vms_test.py b/src/clusterfuzz/_internal/tests/appengine/handlers/cron/manage_vms_test.py index 34226c3d5f..aa5f84444d 100644 --- a/src/clusterfuzz/_internal/tests/appengine/handlers/cron/manage_vms_test.py +++ b/src/clusterfuzz/_internal/tests/appengine/handlers/cron/manage_vms_test.py @@ -13,1165 +13,6 @@ # limitations under the License. """manage_vms tests.""" -import copy -import functools -import unittest -from unittest import mock - -from google.cloud import ndb - -from clusterfuzz._internal.cron import manage_vms -from clusterfuzz._internal.cron.helpers import bot_manager -from clusterfuzz._internal.datastore import data_types -from clusterfuzz._internal.google_cloud_utils import compute_engine_projects -from clusterfuzz._internal.tests.test_libs import helpers as test_helpers -from clusterfuzz._internal.tests.test_libs import test_utils - -AUTO_HEALING_POLICY = compute_engine_projects.AutoHealingPolicy( - health_check='global/healthChecks/example-check', - initial_delay_sec=300, -) - -AUTO_HEALING_POLICY_DICT = { - 'healthCheck': 'global/healthChecks/example-check', - 'initialDelaySec': 300 -} - -INSTANCE_GROUPS = { - 'oss-fuzz-linux-zone2-pre-proj2': { - 'targetSize': 1, - 'autoHealingPolicies': [AUTO_HEALING_POLICY], - }, - 'oss-fuzz-linux-zone2-pre-proj3': { - 'targetSize': 499, - }, - 'oss-fuzz-linux-zone2-pre-proj4': { - 'targetSize': 99, - }, - 'oss-fuzz-linux-zone2-pre-proj5': { - 'targetSize': 99, - } -} - -INSTANCE_TEMPLATES = { - 'oss-fuzz-linux-zone2-pre-proj2': { - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': '30', - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - }, - 'oss-fuzz-linux-zone2-pre-proj3': { - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 
'initializeParams': { - 'diskSizeGb': '30', - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - }, - 'oss-fuzz-linux-zone2-pre-proj4': { - 'description': '{"version": 0}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': '30', - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - }, - 'oss-fuzz-linux-zone2-pre-proj5': { - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': '30', - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - } -} - -INSTANCES = { - 'oss-fuzz-linux-zone3-host': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-host-abcd', - }, { - 'instance': 'https://blah/oss-fuzz-linux-zone3-host-efgh', - }], - 'oss-fuzz-linux-zone3-worker-proj1': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj1-%04d' % i - } for i in range(1, 2)], - 'oss-fuzz-linux-zone3-worker-proj2': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj2-%04d' % i - } for i in range(1, 5)], - 'oss-fuzz-linux-zone3-worker-proj3': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj3-%04d' % i - } for i in range(1, 10)], - 'oss-fuzz-linux-zone3-worker-proj4': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj4-%04d' % i - } for i in range(1, 2)], - 'oss-fuzz-linux-zone3-worker-proj5': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-worker-proj5-%04d' % i - } for i in range(1, 2)], - 'oss-fuzz-linux-zone3-host-high-end': [{ - 'instance': 'https://blah/oss-fuzz-linux-zone3-host-high-end-1' - }], - 'oss-fuzz-linux-zone3-worker-high-end-proj6': [{ - 'instance': ('https://blah/' - 'oss-fuzz-linux-zone3-worker-high-end-proj6-%04d' % i) - } for i in range(1, 3)], -} - -OSS_FUZZ_CLUSTERS = compute_engine_projects.Project( - project_id='clusterfuzz-external', - clusters=[ - compute_engine_projects.Cluster( - name='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - instance_count=997, - instance_template='external-pre-zone2', - distribute=True, - auto_healing_policy=AUTO_HEALING_POLICY, - worker=False, - high_end=False), - compute_engine_projects.Cluster( - name='oss-fuzz-linux-zone3-host', - gce_zone='us-central1-d', - instance_count=2, - instance_template='host-zone3', - distribute=False, - worker=False, - high_end=False), - compute_engine_projects.Cluster( - name='oss-fuzz-linux-zone3-worker', - gce_zone='us-central1-d', - instance_count=16, - instance_template='worker-zone3', - distribute=True, - 
worker=True, - high_end=False), - compute_engine_projects.Cluster( - name='oss-fuzz-linux-zone3-host-high-end', - gce_zone='us-central1-d', - instance_count=1, - instance_template='host-high-end-zone3', - distribute=False, - worker=False, - high_end=True), - compute_engine_projects.Cluster( - name='oss-fuzz-linux-zone3-worker-high-end', - gce_zone='us-central1-d', - instance_count=2, - instance_template='worker-zone3', - distribute=True, - worker=True, - high_end=True), - ], - instance_templates=[ - { - 'name': 'external-pre-zone2', - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': 30, - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - }, - { - 'name': 'host-zone3', - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': 30, - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/appengine.apis', - 'https://www.googleapis.com/auth/prodxmon', - 'https://www.googleapis.com/auth/bigquery', - ] - }], - } - }, - { - 'name': 'worker-zone3', - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': 30, - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/prodxmon', - ] - }], - } - }, - { - 'name': 'host-high-end-zone3', - 'description': '{"version": 1}', - 'properties': { - 'metadata': { - 'items': [], - }, - 'disks': [{ - 'initializeParams': { - 'diskSizeGb': 100, - }, - }], - 'serviceAccounts': [{ - 'email': - 'email', - 'scopes': [ - 'https://www.googleapis.com/auth/' - 'devstorage.full_control', - 'https://www.googleapis.com/auth/logging.write', - 'https://www.googleapis.com/auth/userinfo.email', - 'https://www.googleapis.com/auth/prodxmon', - ] - }], - } - }, - ], - host_worker_assignments=[ - compute_engine_projects.HostWorkerAssignment( - host='oss-fuzz-linux-zone3-host', - worker='oss-fuzz-linux-zone3-worker', - workers_per_host=8), - compute_engine_projects.HostWorkerAssignment( - host='oss-fuzz-linux-zone3-host-high-end', - worker='oss-fuzz-linux-zone3-worker-high-end', - workers_per_host=2), - ]) - - -def mock_resource(spec): - """Mock resource.""" - resource = mock.Mock(spec=spec) - resource.created = False - resource.body = None - - def create(*args, **kwargs): # pylint: disable=unused-argument - if resource.created: - raise bot_manager.AlreadyExistsError - - resource.created = True - - def get(): - if resource.created: - return resource.body - - raise bot_manager.NotFoundError - - def exists(): - return resource.created - - def delete(): - if not resource.created: - raise bot_manager.NotFoundError - - resource.created = False - - 
resource.create.side_effect = create - resource.get.side_effect = get - resource.exists.side_effect = exists - resource.delete.side_effect = delete - - return resource - - -class MockBotManager: - """Mock BotManager.""" - - def __init__(self, project_id, zone, instance_groups, instance_templates): - self.project_id = project_id - self.zone = zone - self.instance_groups = instance_groups - self.instance_templates = instance_templates - - def _get_resource(self, name, cache, values, spec): - """Get resource.""" - if name in cache: - return cache[name] - - resource = mock_resource(spec=spec) - if name in values: - resource.created = True - resource.body = values[name] - - cache[name] = resource - return resource - - def instance_group(self, name): - """Get an InstanceGroup resource with the given name.""" - resource = self._get_resource(name, self.instance_groups, INSTANCE_GROUPS, - bot_manager.InstanceGroup) - - if name in INSTANCES: - resource.list_managed_instances.return_value = INSTANCES[name] - - return resource - - def instance_template(self, name): - """Get an InstanceTemplate resource with the given name.""" - return self._get_resource(name, self.instance_templates, INSTANCE_TEMPLATES, - bot_manager.InstanceTemplate) - - -def expected_instance_template(gce_project_name, - name, - project_name, - disk_size_gb=None, - service_account=None, - tls_cert=False): - """Get the expected instance template for a project.""" - gce_project = compute_engine_projects.load_project(gce_project_name) - expected = copy.deepcopy(gce_project.get_instance_template(name)) - expected['properties']['metadata']['items'].append({ - 'key': 'task-tag', - 'value': project_name, - }) - - if disk_size_gb: - disk = expected['properties']['disks'][0] - disk['initializeParams']['diskSizeGb'] = disk_size_gb - - if service_account: - expected['properties']['serviceAccounts'][0]['email'] = service_account - - if tls_cert: - expected['properties']['metadata']['items'].extend([{ - 'key': 'tls-cert', - 'value': project_name + '_cert', - }, { - 'key': 'tls-key', - 'value': project_name + '_key', - }]) - - return expected - - -def expected_host_instance_template(gce_project_name, name): - """Get the expected instance template for a project.""" - gce_project = compute_engine_projects.load_project(gce_project_name) - return copy.deepcopy(gce_project.get_instance_template(name)) - - -@test_utils.with_cloud_emulators('datastore') -class CronTest(unittest.TestCase): - """Test manage_vms cron.""" - - def setUp(self): - test_helpers.patch_environ(self) - test_helpers.patch(self, [ - 'clusterfuzz._internal.base.utils.is_oss_fuzz', - 'clusterfuzz._internal.cron.helpers.bot_manager.BotManager', - 'clusterfuzz._internal.system.environment.is_running_on_app_engine', - 'clusterfuzz._internal.google_cloud_utils.compute_engine_projects.load_project', - ]) - - self.mock.is_oss_fuzz.return_value = True - self.mock.is_running_on_app_engine.return_value = True - self.mock.load_project.return_value = OSS_FUZZ_CLUSTERS - - data_types.OssFuzzProject( - id='proj1', - name='proj1', - cpu_weight=1.0, - service_account='proj1@serviceaccount.com').put() - - data_types.OssFuzzProject( - id='proj2', - name='proj2', - cpu_weight=2.0, - service_account='proj2@serviceaccount.com').put() - - data_types.OssFuzzProject( - id='proj3', - name='proj3', - cpu_weight=5.0, - service_account='proj3@serviceaccount.com').put() - - data_types.OssFuzzProject( - id='proj4', - name='proj4', - cpu_weight=1.0, - service_account='proj4@serviceaccount.com').put() - - 
data_types.OssFuzzProject( - id='proj5', - name='proj5', - cpu_weight=1.0, - service_account='proj5@serviceaccount.com', - disk_size_gb=10).put() - - data_types.OssFuzzProject( - id='proj6', - name='proj6', - cpu_weight=1.0, - service_account='proj6@serviceaccount.com', - high_end=True).put() - - for j in range(1, 7): - project_name = 'proj%d' % j - data_types.WorkerTlsCert( - id=project_name, - project_name=project_name, - cert_contents=project_name.encode() + b'_cert', - key_contents=project_name.encode() + b'_key').put() - - data_types.OssFuzzProjectInfo(id='old_proj', name='old_proj').put() - - data_types.OssFuzzProjectInfo( - id='proj2', - name='proj2', - clusters=[ - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - cpu_count=1, - ), - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='old-cluster', - gce_zone='us-east2-a', - cpu_count=1, - ), - ]).put() - - data_types.OssFuzzProjectInfo( - id='proj3', - name='proj3', - clusters=[ - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - cpu_count=499, - ) - ]).put() - - data_types.OssFuzzProjectInfo( - id='proj4', - name='proj4', - clusters=[ - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - cpu_count=99, - ) - ]).put() - - data_types.OssFuzzProjectInfo( - id='proj5', - name='proj5', - clusters=[ - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - cpu_count=99, - ) - ]).put() - - data_types.OssFuzzProjectInfo( - id='old_proj', - name='old_proj', - clusters=[ - data_types.OssFuzzProjectInfo.ClusterInfo( - cluster='oss-fuzz-linux-zone2-pre', - gce_zone='us-east2-a', - cpu_count=5, - ) - ]).put() - - data_types.HostWorkerAssignment( - id='old-host-0', - host_name='old-host', - worker_name='worker', - instance_num=0).put() - - instance_groups = {} - instance_templates = {} - self.mock.BotManager.side_effect = functools.partial( - MockBotManager, - instance_groups=instance_groups, - instance_templates=instance_templates) - - def test_update_cpus(self): - """Tests CPU distribution cron.""" - self.maxDiff = None - manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external') - manager.update_clusters() - - proj1 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj1').get() - self.assertIsNotNone(proj1) - self.assertDictEqual({ - 'name': - 'proj1', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone2-pre', - 'cpu_count': 100, - 'gce_zone': 'us-east2-a', - }, { - 'cluster': 'oss-fuzz-linux-zone3-worker', - 'cpu_count': 1, - 'gce_zone': 'us-central1-d', - }], - }, proj1.to_dict()) - - proj2 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj2').get() - self.assertIsNotNone(proj2) - self.assertDictEqual({ - 'name': - 'proj2', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone2-pre', - 'cpu_count': 200, - 'gce_zone': 'us-east2-a', - }, { - 'cluster': 'oss-fuzz-linux-zone3-worker', - 'cpu_count': 4, - 'gce_zone': 'us-central1-d', - }], - }, proj2.to_dict()) - - proj3 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj3').get() - self.assertIsNotNone(proj3) - self.assertDictEqual({ - 'name': - 'proj3', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone2-pre', - 'cpu_count': 499, - 'gce_zone': 'us-east2-a', - }, { - 'cluster': 'oss-fuzz-linux-zone3-worker', - 'cpu_count': 9, - 'gce_zone': 'us-central1-d', - }], - }, proj3.to_dict()) - - proj4 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj4').get() - self.assertIsNotNone(proj4) - 
self.assertDictEqual({ - 'name': - 'proj4', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone2-pre', - 'cpu_count': 99, - 'gce_zone': 'us-east2-a', - }, { - 'cluster': 'oss-fuzz-linux-zone3-worker', - 'cpu_count': 1, - 'gce_zone': 'us-central1-d', - }], - }, proj4.to_dict()) - - proj5 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj5').get() - self.assertIsNotNone(proj5) - self.assertDictEqual({ - 'name': - 'proj5', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone2-pre', - 'cpu_count': 99, - 'gce_zone': 'us-east2-a', - }, { - 'cluster': 'oss-fuzz-linux-zone3-worker', - 'cpu_count': 1, - 'gce_zone': 'us-central1-d', - }], - }, proj5.to_dict()) - - proj6 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj6').get() - self.assertIsNotNone(proj6) - self.assertDictEqual({ - 'name': - 'proj6', - 'clusters': [{ - 'cluster': 'oss-fuzz-linux-zone3-worker-high-end', - 'cpu_count': 2, - 'gce_zone': 'us-central1-d', - }], - }, proj6.to_dict()) - - old_proj = ndb.Key(data_types.OssFuzzProjectInfo, 'old_proj').get() - self.assertIsNone(old_proj) - - mock_bot_manager = self.mock.BotManager('clusterfuzz-external', - 'us-east2-a') - - # proj1: new project. - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with( - expected_instance_template('clusterfuzz-external', - 'external-pre-zone2', 'proj1')) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with( - 'oss-fuzz-linux-zone2-pre-proj1', - 'oss-fuzz-linux-zone2-pre-proj1', - size=100, - auto_healing_policy=AUTO_HEALING_POLICY_DICT, - wait_for_instances=False) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj1').resize.assert_not_called() - - # proj2: already exists. needs a resize. old cluster should be deleted. - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called() - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj2').resize.assert_called_with( - 200, wait_for_instances=False) - mock_bot_manager.instance_template( - 'old-cluster-proj2').delete.assert_called() - mock_bot_manager.instance_group('old-cluster-proj2').delete.assert_called() - - # proj3: already exists. no changes needed. - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called() - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj3').resize.assert_not_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called() - - # proj4: needs a template update (version change). 
- mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called() - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with( - expected_instance_template('clusterfuzz-external', - 'external-pre-zone2', 'proj4')) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with( - 'oss-fuzz-linux-zone2-pre-proj4', - 'oss-fuzz-linux-zone2-pre-proj4', - size=99, - auto_healing_policy=AUTO_HEALING_POLICY_DICT, - wait_for_instances=False) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj4').resize.assert_not_called() - - # proj5: needs a template update (disk size change). - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called() - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with( - expected_instance_template( - 'clusterfuzz-external', - 'external-pre-zone2', - 'proj5', - disk_size_gb=10)) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with( - 'oss-fuzz-linux-zone2-pre-proj5', - 'oss-fuzz-linux-zone2-pre-proj5', - size=99, - auto_healing_policy=AUTO_HEALING_POLICY_DICT, - wait_for_instances=False) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-proj5').resize.assert_not_called() - - # proj6: high end project. - for j in range(1, 6): - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-worker-high-end-proj' + - str(j)).create.assert_not_called() - - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-worker-high-end-proj6').create.assert_called() - - # old_proj: deleted. - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-old-proj').create.assert_not_called() - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called() - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called() - - # host instances: created. - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone3-host').create.assert_called_with( - expected_host_instance_template('clusterfuzz-external', - 'host-zone3')) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-host').create.assert_called_with( - 'oss-fuzz-linux-zone3-host', - 'oss-fuzz-linux-zone3-host', - size=2, - auto_healing_policy=None, - wait_for_instances=False) - - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-host-high-end').create.assert_called_with( - 'oss-fuzz-linux-zone3-host-high-end', - 'oss-fuzz-linux-zone3-host-high-end', - size=1, - auto_healing_policy=None, - wait_for_instances=False) - - # Worker instances: created. 
- mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with( - expected_instance_template( - 'clusterfuzz-external', - 'worker-zone3', - 'proj1', - service_account='proj1@serviceaccount.com', - tls_cert=True)) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with( - 'oss-fuzz-linux-zone3-worker-proj1', - 'oss-fuzz-linux-zone3-worker-proj1', - size=1, - auto_healing_policy=None, - wait_for_instances=False) - - mock_bot_manager.instance_template( - 'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with( - expected_instance_template( - 'clusterfuzz-external', - 'worker-zone3', - 'proj2', - service_account='proj2@serviceaccount.com', - tls_cert=True)) - mock_bot_manager.instance_group( - 'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with( - 'oss-fuzz-linux-zone3-worker-proj2', - 'oss-fuzz-linux-zone3-worker-proj2', - size=4, - auto_healing_policy=None, - wait_for_instances=False) - - self.assertCountEqual([{ - 'instance_num': 0, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj1-0001', - 'project_name': 'proj1', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 1, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj2-0001', - 'project_name': 'proj2', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 2, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj2-0002', - 'project_name': 'proj2', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 3, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj2-0003', - 'project_name': 'proj2', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 4, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj2-0004', - 'project_name': 'proj2', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 5, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0001', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 6, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0002', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 7, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0003', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-abcd' - }, { - 'instance_num': 0, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0004', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 1, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0005', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 2, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0006', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 3, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0007', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 4, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0008', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 5, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj3-0009', - 'project_name': 'proj3', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 6, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj4-0001', - 'project_name': 'proj4', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 7, - 'worker_name': 'oss-fuzz-linux-zone3-worker-proj5-0001', - 'project_name': 
'proj5', - 'host_name': 'oss-fuzz-linux-zone3-host-efgh' - }, { - 'instance_num': 0, - 'worker_name': 'oss-fuzz-linux-zone3-worker-high-end-proj6-0001', - 'project_name': 'proj6', - 'host_name': 'oss-fuzz-linux-zone3-host-high-end-1' - }, { - 'instance_num': 1, - 'worker_name': 'oss-fuzz-linux-zone3-worker-high-end-proj6-0002', - 'project_name': 'proj6', - 'host_name': 'oss-fuzz-linux-zone3-host-high-end-1' - }], [ - assignment.to_dict() - for assignment in data_types.HostWorkerAssignment.query() - ]) - - -class OssFuzzDistributeCpusTest(unittest.TestCase): - """Tests OSS-Fuzz CPU distribution.""" - - def setUp(self): - test_helpers.patch(self, [ - 'clusterfuzz._internal.google_cloud_utils.compute_engine_projects.load_project', - ]) - self.mock.load_project.return_value = OSS_FUZZ_CLUSTERS - - def test_equal(self): - """Tests for each project receiving equal share.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=1.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 30) - self.assertListEqual([10, 10, 10], result) - - def test_equal_uneven(self): - """Tests for each project receiving equal share with an uneven division.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=1.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 31) - self.assertListEqual([11, 10, 10], result) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 32) - self.assertListEqual([11, 11, 10], result) - - def test_weight_preference(self): - """Tests that remainders are given to projects with higher weights - - first. 
- """ - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=1.01), - data_types.OssFuzzProject(name='proj3', cpu_weight=1.1), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 4) - self.assertListEqual([1, 1, 2], result) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 5) - self.assertListEqual([1, 2, 2], result) - - def test_not_enough(self): - """Tests allocation with not enough CPUs.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=1.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 1) - self.assertListEqual([1, 0, 0], result) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 2) - self.assertListEqual([1, 1, 0], result) - - def test_minimum(self): - """Tests that projects are given a minimum share.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=0.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=0.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=0.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 3) - self.assertListEqual([1, 1, 1], result) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 10) - self.assertListEqual([4, 3, 3], result) - - def test_maximum(self): - """Tests that projects are capped at the maximum share.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=1.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=1.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 10000) - self.assertListEqual([1000, 1000, 1000], result) - - def test_primes(self): - """Test a bunch of different distributions.""" - projects = [ - data_types.OssFuzzProject(name='proj1', cpu_weight=2.0), - data_types.OssFuzzProject(name='proj2', cpu_weight=3.0), - data_types.OssFuzzProject(name='proj3', cpu_weight=5.0), - data_types.OssFuzzProject(name='proj4', cpu_weight=7.0), - data_types.OssFuzzProject(name='proj5', cpu_weight=11.0), - ] - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 101) - self.assertListEqual([7, 10, 18, 26, 40], result) - self.assertEqual(101, sum(result)) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 887) - self.assertListEqual([63, 95, 158, 222, 349], result) - self.assertEqual(887, sum(result)) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 2741) - self.assertListEqual([214, 313, 509, 705, 1000], result) - self.assertEqual(2741, sum(result)) - - result = manage_vms.OssFuzzClustersManager( - 'clusterfuzz-external').distribute_cpus(projects, 3571) - self.assertListEqual([356, 483, 738, 994, 1000], result) - self.assertEqual(3571, sum(result)) - - -@test_utils.with_cloud_emulators('datastore') -class AssignHostWorkerTest(unittest.TestCase): - """Tests host -> worker assignment.""" - - def setUp(self): - test_helpers.patch(self, [ - 'clusterfuzz._internal.google_cloud_utils.compute_engine_projects.load_project', - ]) - 
self.mock.load_project.return_value = OSS_FUZZ_CLUSTERS - - def test_assign_keep_existing(self): - """Test that assignment keeps existing assignments.""" - host_names = ['host'] - worker_instances = [ - manage_vms.WorkerInstance(name='worker-proj-0', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-1', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-2', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-3', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-4', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-5', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-6', project='proj'), - manage_vms.WorkerInstance(name='worker-proj-7', project='proj'), - ] - - data_types.HostWorkerAssignment( - host_name='host', - instance_num=2, - worker_name='worker-proj-6', - project_name='proj', - id='host-2').put() - - data_types.HostWorkerAssignment( - host_name='host', - instance_num=3, - worker_name='worker-proj-1', - project_name='proj', - id='host-3').put() - - data_types.HostWorkerAssignment( - host_name='host', - instance_num=0, - worker_name='worker-nonexistent-1', - project_name='nonexistent', - id='host-0').put() - - manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external') - new_assignments = manager.do_assign_hosts_to_workers( - host_names, worker_instances, 8) - self.assertListEqual([ - { - 'host_name': 'host', - 'instance_num': 0, - 'project_name': 'proj', - 'worker_name': 'worker-proj-0' - }, - { - 'host_name': 'host', - 'instance_num': 1, - 'project_name': 'proj', - 'worker_name': 'worker-proj-2' - }, - { - 'host_name': 'host', - 'instance_num': 4, - 'project_name': 'proj', - 'worker_name': 'worker-proj-3' - }, - { - 'host_name': 'host', - 'instance_num': 5, - 'project_name': 'proj', - 'worker_name': 'worker-proj-4' - }, - { - 'host_name': 'host', - 'instance_num': 6, - 'project_name': 'proj', - 'worker_name': 'worker-proj-5' - }, - { - 'host_name': 'host', - 'instance_num': 7, - 'project_name': 'proj', - 'worker_name': 'worker-proj-7' - }, - ], [assignment.to_dict() for assignment in new_assignments]) +# TODO(unassigned): Write some tests for this module. +# All of the old tests were for the oss-fuzz manager that was deleted during the +# batch migration.
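
For context on the functional change outside the deleted code: the `schedule_fuzz.get_available_cpus` hunk above raises the queued-task back-pressure limit from 1000 to 5000, so fuzz tasks keep getting scheduled with a deeper queue and postprocess work can still land on tworkers. The sketch below is illustrative only and is not part of the patch; the 5000 threshold and the "return 0 CPUs when the queue is too deep" behavior come from the hunk, while the names `QUEUED_TASK_LIMIT`, `available_cpus_after_queue_check`, and `computed_cpus` are assumptions made for the example.

```python
# Illustrative sketch of the queue back-pressure check relaxed by this patch.
# Only the 5000 threshold and the early "no more fuzzing" return mirror the
# schedule_fuzz.get_available_cpus() hunk; all names here are assumed.
from typing import List

QUEUED_TASK_LIMIT = 5000  # Raised from 1000 by this patch.


def available_cpus_after_queue_check(region_counts: List[int],
                                     computed_cpus: int) -> int:
  """Returns 0 when too many tasks are already queued, else the computed CPUs.

  region_counts[0] is the queued-task count for the first region, matching
  how the patched code indexes its summed region counts.
  """
  if region_counts[0] > QUEUED_TASK_LIMIT:
    # Too many jobs queued; don't schedule more fuzzing.
    return 0
  return computed_cpus
```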