diff --git a/.envrc b/.envrc index cdf547d21b488e..0d0d2e6d0ec45c 100644 --- a/.envrc +++ b/.envrc @@ -27,11 +27,8 @@ source "${SENTRY_ROOT}/scripts/lib.sh" # consequently, using "exit" anywhere will skip this notice from showing. # so need to use set -e, and return 1. trap notice ERR -# This is used to group issues on Sentry.io. -# If an issue does not call info() or die() it will be grouped under this -error_message="Unknown issue" -# This has to be the same value as what sentry-cli accepts -log_level="info" + +complete_success="yup" help_message() { cat <&2 - log_level="warning" + complete_success="nope" } die() { echo -e "${red}${bold}FATAL: ${*}${reset}" >&2 - # When reporting to Sentry, this will allow grouping the errors differently - # NOTE: The first line of the output is used to group issues - error_message=("${@}") - log_level="error" return 1 } @@ -139,19 +121,6 @@ else export SENTRY_DEVSERVICES_DSN=https://23670f54c6254bfd9b7de106637808e9@o1.ingest.sentry.io/1492057 fi -# We can remove these lines in few months -if [ "$SHELL" == "/bin/zsh" ]; then - zshrc_path="${HOME}/.zshrc" - header="# Apple M1 environment variables" - if grep -qF "${header}" "${zshrc_path}"; then - echo -e "\n${red}Please delete from ${zshrc_path}, the following three lines:${reset}" - echo -e "${header} -export CPATH=/opt/homebrew/Cellar/librdkafka/1.8.2/include -export LDFLAGS=-L/opt/homebrew/Cellar/gettext/0.21/lib" - echo -e "\nWe have moved exporting of these variables to the right place." - return 1 - fi -fi ### System ### @@ -233,7 +202,7 @@ if [ ${#commands_to_run[@]} -ne 0 ]; then show_commands_info fi -if [ "${log_level}" != "info" ]; then +if [ "${complete_success}" != "yup" ]; then help_message warn "\nPartial success. The virtualenv is active, however, you're not fully up-to-date (see messages above)." else diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 93feef65f82f2e..9304b08c3b972c 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -13,7 +13,7 @@ ## Snuba /src/sentry/eventstream/ @getsentry/owners-snuba -/src/sentry/consumers/ @getsentry/owners-snuba +/src/sentry/consumers/ @getsentry/ops @getsentry/owners-snuba /src/sentry/post_process_forwarder/ @getsentry/owners-snuba /src/sentry/utils/snuba.py @getsentry/owners-snuba @getsentry/performance /src/sentry/utils/snql.py @getsentry/owners-snuba diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt index f9750478f9bfd6..a745ab7c0e5373 100644 --- a/migrations_lockfile.txt +++ b/migrations_lockfile.txt @@ -9,5 +9,5 @@ feedback: 0004_index_together hybridcloud: 0015_apitokenreplica_hashed_token_index nodestore: 0002_nodestore_no_dictfield replays: 0004_index_together -sentry: 0687_alert_rule_project_backfill_migration +sentry: 0688_add_project_flag_high_priority_alerts social_auth: 0002_default_auto_field diff --git a/scripts/do.sh b/scripts/do.sh index 9a2f20c744b0fb..693f87e2d2579e 100755 --- a/scripts/do.sh +++ b/scripts/do.sh @@ -9,10 +9,6 @@ HERE="$( # shellcheck disable=SC1090 source "${HERE}/lib.sh" -# This block is to enable reporting issues to Sentry.io -# SENTRY_DSN already defined in .envrc -configure-sentry-cli - # This guarantees that we're within a venv. 
A caller that is not within # a venv can avoid enabling this by setting SENTRY_NO_VENV_CHECK [ -z "${SENTRY_NO_VENV_CHECK+x}" ] && eval "${HERE}/ensure-venv.sh" diff --git a/scripts/lib.sh b/scripts/lib.sh index 05db02e2a3dd35..2cc8be59c87a19 100755 --- a/scripts/lib.sh +++ b/scripts/lib.sh @@ -34,18 +34,6 @@ require() { command -v "$1" >/dev/null 2>&1 } -configure-sentry-cli() { - if [ -z "${SENTRY_DEVENV_NO_REPORT+x}" ]; then - if ! require sentry-cli; then - if [ -f "${venv_name}/bin/pip" ]; then - pip-install sentry-cli - else - curl -sL https://sentry.io/get-cli/ | SENTRY_CLI_VERSION=2.14.4 bash - fi - fi - fi -} - query-valid-python-version() { python_version=$(python3 -V 2>&1 | awk '{print $2}') if [[ -n "${SENTRY_PYTHON_VERSION:-}" ]]; then diff --git a/src/sentry/api/endpoints/internal/integration_proxy.py b/src/sentry/api/endpoints/internal/integration_proxy.py index 3e93de1e706e18..65f23fc100dc23 100644 --- a/src/sentry/api/endpoints/internal/integration_proxy.py +++ b/src/sentry/api/endpoints/internal/integration_proxy.py @@ -2,14 +2,20 @@ import logging from collections import defaultdict +from collections.abc import Mapping +from typing import Any from urllib.parse import urljoin from django.http import HttpRequest, HttpResponse, HttpResponseBadRequest from requests import Request, Response +from rest_framework.request import Request as DRFRequest +from rest_framework.response import Response as DRFResponse +from sentry_sdk import Scope from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import Endpoint, control_silo_endpoint +from sentry.auth.exceptions import IdentityNotValid from sentry.constants import ObjectStatus from sentry.models.integrations.organization_integration import OrganizationIntegration from sentry.silo.base import SiloMode @@ -221,3 +227,15 @@ def http_method_not_allowed(self, request): ) logger.info("proxy_success", extra=self.log_extra) return response + + def handle_exception( # type: ignore[override] + self, + request: DRFRequest, + exc: Exception, + handler_context: Mapping[str, Any] | None = None, + scope: Scope | None = None, + ) -> DRFResponse: + if isinstance(exc, IdentityNotValid): + return self.respond(status=400) + + return super().handle_exception(request, exc, handler_context, scope) diff --git a/src/sentry/api/endpoints/organization_events.py b/src/sentry/api/endpoints/organization_events.py index e2ad0564fb035e..c0a551f7239df7 100644 --- a/src/sentry/api/endpoints/organization_events.py +++ b/src/sentry/api/endpoints/organization_events.py @@ -46,6 +46,7 @@ Referrer.API_PERFORMANCE_STATUS_BREAKDOWN.value, Referrer.API_PERFORMANCE_VITAL_DETAIL.value, Referrer.API_PERFORMANCE_DURATIONPERCENTILECHART.value, + Referrer.API_PERFORMANCE_TRANSACTIONS_STATISTICAL_DETECTOR_ROOT_CAUSE_ANALYSIS.value, Referrer.API_PROFILING_LANDING_TABLE.value, Referrer.API_PROFILING_LANDING_FUNCTIONS_CARD.value, Referrer.API_PROFILING_PROFILE_SUMMARY_TOTALS.value, diff --git a/src/sentry/api/endpoints/organization_events_spans_performance.py b/src/sentry/api/endpoints/organization_events_spans_performance.py index 04aa406cd71644..2dcfb454eb5ccd 100644 --- a/src/sentry/api/endpoints/organization_events_spans_performance.py +++ b/src/sentry/api/endpoints/organization_events_spans_performance.py @@ -399,6 +399,7 @@ class ExampleSpan: start_timestamp: float finish_timestamp: float exclusive_time: float + trace_id: str def serialize(self) -> Any: return { @@ -406,6 +407,7 @@ def serialize(self) -> Any: 
"startTimestamp": self.start_timestamp, "finishTimestamp": self.finish_timestamp, "exclusiveTime": self.exclusive_time, + "trace": self.trace_id, } @@ -803,6 +805,7 @@ def get_example_transaction( start_timestamp=span["start_timestamp"], finish_timestamp=span["timestamp"], exclusive_time=span["exclusive_time"], + trace_id=trace_context["trace_id"], ) for span in matching_spans ] diff --git a/src/sentry/api/endpoints/project_events.py b/src/sentry/api/endpoints/project_events.py index 6ced518ae34c36..578da869f417d2 100644 --- a/src/sentry/api/endpoints/project_events.py +++ b/src/sentry/api/endpoints/project_events.py @@ -11,6 +11,7 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectEndpoint from sentry.api.serializers import EventSerializer, SimpleEventSerializer, serialize +from sentry.snuba.events import Columns from sentry.types.ratelimit import RateLimit, RateLimitCategory @@ -42,6 +43,9 @@ def get(self, request: Request, project) -> Response: include the full event body, including the stacktrace. Set to 1 to enable. + :qparam bool sample: return events in pseudo-random order. This is deterministic, + same query will return the same events in the same order. + :pparam string organization_slug: the slug of the organization the groups belong to. :pparam string project_slug: the slug of the project the groups @@ -61,6 +65,7 @@ def get(self, request: Request, project) -> Response: event_filter.start = timezone.now() - timedelta(days=7) full = request.GET.get("full", False) + sample = request.GET.get("sample", False) data_fn = partial( eventstore.backend.get_events, @@ -69,6 +74,11 @@ def get(self, request: Request, project) -> Response: tenant_ids={"organization_id": project.organization_id}, ) + if sample: + # not a true random ordering, but event_id is UUID, that's random enough + # for our purposes and doesn't have heavy performance impact + data_fn = partial(data_fn, orderby=[Columns.EVENT_ID.value.alias]) + serializer = EventSerializer() if full else SimpleEventSerializer() return self.paginate( request=request, diff --git a/src/sentry/buffer/redis.py b/src/sentry/buffer/redis.py index 175aa8e29de057..5d9e3858ba1be4 100644 --- a/src/sentry/buffer/redis.py +++ b/src/sentry/buffer/redis.py @@ -3,6 +3,7 @@ import logging import pickle import threading +from collections.abc import Callable from datetime import date, datetime, timezone from enum import Enum from time import time @@ -52,6 +53,33 @@ def _validate_json_roundtrip(value: dict[str, Any], model: type[models.Model]) - logger.exception("buffer.invalid_value", extra={"value": value, "model": model}) +class BufferHookEvent(Enum): + FLUSH = "flush" + + +class BufferHookRegistry: + def __init__(self, *args: Any, **kwargs: Any) -> None: + self._registry: dict[BufferHookEvent, Callable[..., Any]] = {} + + def add_handler(self, key: BufferHookEvent) -> Callable[..., Any]: + def decorator(func: Callable[..., Any]) -> Callable[..., Any]: + self._registry[key] = func + return func + + return decorator + + def callback(self, buffer_hook_event: BufferHookEvent, data: RedisBuffer) -> bool: + try: + callback = self._registry[buffer_hook_event] + except KeyError: + return False + + return callback(data) + + +redis_buffer_registry = BufferHookRegistry() + + class RedisOperation(Enum): SET_ADD = "sadd" SET_GET = "smembers" diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py index efded1cda81c31..6c67c2e3d4ebf4 100644 --- a/src/sentry/conf/server.py +++ b/src/sentry/conf/server.py @@ 
-1527,6 +1527,8 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "organizations:ddm-metrics-api-unit-normalization": False, # Enables import of metric dashboards "organizations:ddm-dashboard-import": False, + # Enables category "metrics" in stats_v2 endpoint + "organizations:metrics-stats": False, # Enable the default alert at project creation to be the high priority alert "organizations:default-high-priority-alerts": False, # Enables automatically deriving of code mappings diff --git a/src/sentry/event_manager.py b/src/sentry/event_manager.py index 7c26ef6e6cea62..ddc3f74cba12f0 100644 --- a/src/sentry/event_manager.py +++ b/src/sentry/event_manager.py @@ -50,23 +50,29 @@ GroupingConfig, get_grouping_config_dict_for_project, ) -from sentry.grouping.ingest import ( - add_group_id_to_grouphashes, - check_for_category_mismatch, - check_for_group_creation_load_shed, - extract_hashes, +from sentry.grouping.ingest.config import ( + is_in_transition, + project_uses_optimized_grouping, + update_grouping_config_if_needed, +) +from sentry.grouping.ingest.hashing import ( find_existing_grouphash, find_existing_grouphash_new, get_hash_values, - is_in_transition, maybe_run_background_grouping, maybe_run_secondary_grouping, - project_uses_optimized_grouping, + run_primary_grouping, +) +from sentry.grouping.ingest.metrics import ( record_calculation_metric_with_result, record_hash_calculation_metrics, record_new_group_metrics, - run_primary_grouping, - update_grouping_config_if_needed, +) +from sentry.grouping.ingest.utils import ( + add_group_id_to_grouphashes, + check_for_category_mismatch, + check_for_group_creation_load_shed, + extract_hashes, ) from sentry.grouping.result import CalculatedHashes from sentry.ingest.inbound_filters import FilterStatKeys @@ -1457,8 +1463,8 @@ def _save_aggregate( metrics.timer("event_manager.create_group_transaction") as metric_tags, transaction.atomic(router.db_for_write(GroupHash)), ): - span.set_tag("create_group_transaction.outcome", "no_group") - metric_tags["create_group_transaction.outcome"] = "no_group" + span.set_tag("outcome", "wait_for_lock") + metric_tags["outcome"] = "wait_for_lock" all_grouphash_ids = [h.id for h in flat_grouphashes] if root_hierarchical_grouphash is not None: @@ -1496,8 +1502,8 @@ def _save_aggregate( is_new = True is_regression = False - span.set_tag("create_group_transaction.outcome", "new_group") - metric_tags["create_group_transaction.outcome"] = "new_group" + span.set_tag("outcome", "new_group") + metric_tags["outcome"] = "new_group" record_calculation_metric_with_result( project=project, has_secondary_hashes=has_secondary_hashes, @@ -1782,8 +1788,8 @@ def create_group_with_grouphashes( metrics.timer("event_manager.create_group_transaction") as metrics_timer_tags, transaction.atomic(router.db_for_write(GroupHash)), ): - span.set_tag("create_group_transaction.outcome", "no_group") - metrics_timer_tags["create_group_transaction.outcome"] = "no_group" + span.set_tag("outcome", "wait_for_lock") + metrics_timer_tags["outcome"] = "wait_for_lock" # If we're in this branch, we checked our grouphashes and didn't find one with a group # attached. We thus want to create a new group, but we need to guard against another @@ -1810,8 +1816,8 @@ def create_group_with_grouphashes( # If we still haven't found a matching grouphash, we're now safe to go ahead and create # the group. 
if existing_grouphash is None: - span.set_tag("create_group_transaction.outcome", "new_group") - metrics_timer_tags["create_group_transaction.outcome"] = "new_group" + span.set_tag("outcome", "new_group") + metrics_timer_tags["outcome"] = "new_group" record_new_group_metrics(event) group = _create_group(project, event, **group_processing_kwargs) diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py index 82161037a2eb5a..8bd99028fa5c88 100644 --- a/src/sentry/features/temporary.py +++ b/src/sentry/features/temporary.py @@ -60,6 +60,7 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:ddm-experimental", OrganizationFeature, FeatureHandlerStrategy.REMOTE) manager.add("organizations:ddm-metrics-api-unit-normalization", OrganizationFeature, FeatureHandlerStrategy.REMOTE) manager.add("organizations:ddm-sidebar-item-hidden", OrganizationFeature, FeatureHandlerStrategy.REMOTE) + manager.add("organizations:metrics-stats", OrganizationFeature, FeatureHandlerStrategy.REMOTE) manager.add("organizations:ddm-ui", OrganizationFeature, FeatureHandlerStrategy.INTERNAL) manager.add("organizations:default-high-priority-alerts", OrganizationFeature, FeatureHandlerStrategy.INTERNAL) manager.add("organizations:deprecate-fid-from-performance-score", OrganizationFeature, FeatureHandlerStrategy.INTERNAL) diff --git a/src/sentry/grouping/ingest/__init__.py b/src/sentry/grouping/ingest/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/src/sentry/grouping/ingest/config.py b/src/sentry/grouping/ingest/config.py new file mode 100644 index 00000000000000..e0af3b15f1f8f0 --- /dev/null +++ b/src/sentry/grouping/ingest/config.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import logging +import time +from collections.abc import MutableMapping +from typing import TYPE_CHECKING, Any + +from django.conf import settings +from django.core.cache import cache + +from sentry import features +from sentry.locks import locks +from sentry.models.project import Project +from sentry.projectoptions.defaults import BETA_GROUPING_CONFIG, DEFAULT_GROUPING_CONFIG + +if TYPE_CHECKING: + pass + +logger = logging.getLogger("sentry.events.grouping") + +Job = MutableMapping[str, Any] + + +def update_grouping_config_if_needed(project: Project) -> None: + if _project_should_update_grouping(project): + _auto_update_grouping(project) + + +def _project_should_update_grouping(project: Project) -> bool: + should_update_org = ( + project.organization_id % 1000 < float(settings.SENTRY_GROUPING_AUTO_UPDATE_ENABLED) * 1000 + ) + return bool(project.get_option("sentry:grouping_auto_update")) and should_update_org + + +def _config_update_happened_recently(project: Project, tolerance: int) -> bool: + """ + Determine whether an auto-update happened within the last `tolerance` seconds. + + We can use this test to compensate for the delay between config getting updated and Relay + picking up the change.
+ """ + project_transition_expiry = project.get_option("sentry:secondary_grouping_expiry") or 0 + last_config_update = project_transition_expiry - settings.SENTRY_GROUPING_UPDATE_MIGRATION_PHASE + now = int(time.time()) + time_since_update = now - last_config_update + + return time_since_update < 60 + + +def _auto_update_grouping(project: Project) -> None: + current_config = project.get_option("sentry:grouping_config") + new_config = DEFAULT_GROUPING_CONFIG + + if current_config == new_config or current_config == BETA_GROUPING_CONFIG: + return + + # Because the way the auto grouping upgrading happening is racy, we want to + # try to write the audit log entry and project option change just once. + # For this a cache key is used. That's not perfect, but should reduce the + # risk significantly. + cache_key = f"grouping-config-update:{project.id}:{current_config}" + lock_key = f"grouping-update-lock:{project.id}" + if cache.get(cache_key) is not None: + return + + with locks.get(lock_key, duration=60, name="grouping-update-lock").acquire(): + if cache.get(cache_key) is None: + cache.set(cache_key, "1", 60 * 5) + else: + return + + from sentry import audit_log + from sentry.utils.audit import create_system_audit_entry + + # This is when we will stop calculating both old hashes (which we do in an effort to + # preserve group continuity). + expiry = int(time.time()) + settings.SENTRY_GROUPING_UPDATE_MIGRATION_PHASE + + changes = { + "sentry:secondary_grouping_config": current_config, + "sentry:secondary_grouping_expiry": expiry, + "sentry:grouping_config": new_config, + } + for key, value in changes.items(): + project.update_option(key, value) + + create_system_audit_entry( + organization=project.organization, + target_object=project.id, + event=audit_log.get_event_id("PROJECT_EDIT"), + data={**changes, **project.get_audit_log_data()}, + ) + + +def is_in_transition(project: Project) -> bool: + secondary_grouping_config = project.get_option("sentry:secondary_grouping_config") + secondary_grouping_expiry = project.get_option("sentry:secondary_grouping_expiry") + + return bool(secondary_grouping_config) and (secondary_grouping_expiry or 0) >= time.time() + + +def project_uses_optimized_grouping(project: Project) -> bool: + primary_grouping_config = project.get_option("sentry:grouping_config") + secondary_grouping_config = project.get_option("sentry:secondary_grouping_config") + has_mobile_config = "mobile:2021-02-12" in [primary_grouping_config, secondary_grouping_config] + + return not has_mobile_config and features.has( + "organizations:grouping-suppress-unnecessary-secondary-hash", + project.organization, + ) diff --git a/src/sentry/grouping/ingest.py b/src/sentry/grouping/ingest/hashing.py similarity index 63% rename from src/sentry/grouping/ingest.py rename to src/sentry/grouping/ingest/hashing.py index 8c41b9941980ca..8f95098b6083de 100644 --- a/src/sentry/grouping/ingest.py +++ b/src/sentry/grouping/ingest/hashing.py @@ -2,15 +2,11 @@ import copy import logging -import time from collections.abc import MutableMapping, Sequence from typing import TYPE_CHECKING, Any import sentry_sdk -from django.conf import settings -from django.core.cache import cache -from sentry import features from sentry.exceptions import HashDiscarded from sentry.features.rollout import in_random_rollout from sentry.grouping.api import ( @@ -27,14 +23,12 @@ get_grouping_config_dict_for_project, load_grouping_config, ) +from sentry.grouping.ingest.config import _config_update_happened_recently, is_in_transition +from 
sentry.grouping.ingest.metrics import record_hash_calculation_metrics +from sentry.grouping.ingest.utils import extract_hashes from sentry.grouping.result import CalculatedHashes -from sentry.issues.grouptype import GroupCategory -from sentry.killswitches import killswitch_matches_context -from sentry.locks import locks -from sentry.models.group import Group from sentry.models.grouphash import GroupHash from sentry.models.project import Project -from sentry.projectoptions.defaults import BETA_GROUPING_CONFIG, DEFAULT_GROUPING_CONFIG from sentry.reprocessing2 import is_reprocessed_event from sentry.utils import metrics from sentry.utils.metrics import MutableTags @@ -48,78 +42,6 @@ Job = MutableMapping[str, Any] -def update_grouping_config_if_needed(project: Project) -> None: - if _project_should_update_grouping(project): - _auto_update_grouping(project) - - -def _project_should_update_grouping(project: Project) -> bool: - should_update_org = ( - project.organization_id % 1000 < float(settings.SENTRY_GROUPING_AUTO_UPDATE_ENABLED) * 1000 - ) - return bool(project.get_option("sentry:grouping_auto_update")) and should_update_org - - -def _config_update_happened_recently(project: Project, tolerance: int) -> bool: - """ - Determine whether an auto-upate happened within the last `tolerance` seconds. - - We can use this test to compensate for the delay between config getting updated and Relay - picking up the change. - """ - project_transition_expiry = project.get_option("sentry:secondary_grouping_expiry") or 0 - last_config_update = project_transition_expiry - settings.SENTRY_GROUPING_UPDATE_MIGRATION_PHASE - now = int(time.time()) - time_since_update = now - last_config_update - - return time_since_update < 60 - - -def _auto_update_grouping(project: Project) -> None: - current_config = project.get_option("sentry:grouping_config") - new_config = DEFAULT_GROUPING_CONFIG - - if current_config == new_config or current_config == BETA_GROUPING_CONFIG: - return - - # Because the way the auto grouping upgrading happening is racy, we want to - # try to write the audit log entry and project option change just once. - # For this a cache key is used. That's not perfect, but should reduce the - # risk significantly. - cache_key = f"grouping-config-update:{project.id}:{current_config}" - lock_key = f"grouping-update-lock:{project.id}" - if cache.get(cache_key) is not None: - return - - with locks.get(lock_key, duration=60, name="grouping-update-lock").acquire(): - if cache.get(cache_key) is None: - cache.set(cache_key, "1", 60 * 5) - else: - return - - from sentry import audit_log - from sentry.utils.audit import create_system_audit_entry - - # This is when we will stop calculating both old hashes (which we do in an effort to - # preserve group continuity). 
- expiry = int(time.time()) + settings.SENTRY_GROUPING_UPDATE_MIGRATION_PHASE - - changes = { - "sentry:secondary_grouping_config": current_config, - "sentry:secondary_grouping_expiry": expiry, - "sentry:grouping_config": new_config, - } - for key, value in changes.items(): - project.update_option(key, value) - - create_system_audit_entry( - organization=project.organization, - target_object=project.id, - event=audit_log.get_event_id("PROJECT_EDIT"), - data={**changes, **project.get_audit_log_data()}, - ) - - def _calculate_event_grouping( project: Project, event: Event, grouping_config: GroupingConfig ) -> CalculatedHashes: @@ -201,13 +123,6 @@ def _calculate_background_grouping( return _calculate_event_grouping(project, event, config) -def is_in_transition(project: Project) -> bool: - secondary_grouping_config = project.get_option("sentry:secondary_grouping_config") - secondary_grouping_expiry = project.get_option("sentry:secondary_grouping_expiry") - - return bool(secondary_grouping_config) and (secondary_grouping_expiry or 0) >= time.time() - - def maybe_run_secondary_grouping( project: Project, job: Job, metric_tags: MutableTags ) -> tuple[GroupingConfig, CalculatedHashes]: @@ -461,160 +376,3 @@ def get_hash_values( job["finest_tree_label"] = all_hashes.finest_tree_label return (primary_hashes, secondary_hashes, all_hashes) - - -def record_hash_calculation_metrics( - project: Project, - primary_config: GroupingConfig, - primary_hashes: CalculatedHashes, - secondary_config: GroupingConfig, - secondary_hashes: CalculatedHashes, -): - has_secondary_hashes = len(extract_hashes(secondary_hashes)) > 0 - - if has_secondary_hashes: - tags = { - "primary_config": primary_config["id"], - "secondary_config": secondary_config["id"], - } - - # If the configs are the same, *of course* the values are going to match, so no point in - # recording a metric - # - # TODO: If we fix the issue outlined in https://github.com/getsentry/sentry/pull/65116, we - # can ditch both this check and the logging below - if tags["primary_config"] != tags["secondary_config"]: - current_values = primary_hashes.hashes - secondary_values = secondary_hashes.hashes - hashes_match = current_values == secondary_values - - if hashes_match: - tags["result"] = "no change" - else: - shared_hashes = set(current_values) & set(secondary_values) - if len(shared_hashes) > 0: - tags["result"] = "partial change" - else: - tags["result"] = "full change" - - metrics.incr("grouping.hash_comparison", tags=tags) - - else: - if not _config_update_happened_recently(project, 30): - logger.info( - "Equal primary and secondary configs", - extra={ - "project": project.id, - "primary_config": primary_config["id"], - }, - ) - - -# TODO: Once the legacy `_save_aggregate` goes away, this logic can be pulled into -# `record_hash_calculation_metrics`. 
Right now it's split up because we don't know the value for -# `result` at the time the legacy `_save_aggregate` (indirectly) calls `record_hash_calculation_metrics` -def record_calculation_metric_with_result( - project: Project, - has_secondary_hashes: bool, - result: str, -) -> None: - - # Track the total number of grouping calculations done overall, so we can divide by the - # count to get an average number of calculations per event - tags = { - "in_transition": str(is_in_transition(project)), - "using_transition_optimization": str( - features.has( - "organizations:grouping-suppress-unnecessary-secondary-hash", - project.organization, - ) - ), - "result": result, - } - metrics.incr("grouping.event_hashes_calculated", tags=tags) - metrics.incr("grouping.total_calculations", amount=2 if has_secondary_hashes else 1, tags=tags) - - -def record_new_group_metrics(event: Event): - metrics.incr( - "group.created", - skip_internal=True, - tags={ - "platform": event.platform or "unknown", - "sdk": normalized_sdk_tag_from_event(event), - }, - ) - - # This only applies to events with stacktraces - frame_mix = event.get_event_metadata().get("in_app_frame_mix") - if frame_mix: - metrics.incr( - "grouping.in_app_frame_mix", - sample_rate=1.0, - tags={ - "platform": event.platform or "unknown", - "sdk": normalized_sdk_tag_from_event(event), - "frame_mix": frame_mix, - }, - ) - - -def check_for_group_creation_load_shed(project: Project, event: Event): - """ - Raise a `HashDiscarded` error if the load-shed killswitch is enabled - """ - if killswitch_matches_context( - "store.load-shed-group-creation-projects", - { - "project_id": project.id, - "platform": event.platform, - }, - ): - raise HashDiscarded("Load shedding group creation", reason="load_shed") - - -def add_group_id_to_grouphashes( - group: Group, - grouphashes: list[GroupHash], -) -> None: - """ - Link the given group to any grouphash which doesn't yet have a group assigned. 
- """ - - new_grouphash_ids = [gh.id for gh in grouphashes if gh.group_id is None] - - GroupHash.objects.filter(id__in=new_grouphash_ids).exclude( - state=GroupHash.State.LOCKED_IN_MIGRATION - ).update(group=group) - - -def check_for_category_mismatch(group: Group) -> bool: - """ - Make sure an error event hasn't hashed to a value assigned to a non-error-type group - """ - if group.issue_category != GroupCategory.ERROR: - logger.info( - "event_manager.category_mismatch", - extra={ - "issue_category": group.issue_category, - "event_type": "error", - }, - ) - return True - - return False - - -def extract_hashes(calculated_hashes: CalculatedHashes | None) -> list[str]: - return [] if not calculated_hashes else list(calculated_hashes.hashes) - - -def project_uses_optimized_grouping(project: Project) -> bool: - primary_grouping_config = project.get_option("sentry:grouping_config") - secondary_grouping_config = project.get_option("sentry:secondary_grouping_config") - has_mobile_config = "mobile:2021-02-12" in [primary_grouping_config, secondary_grouping_config] - - return not has_mobile_config and features.has( - "organizations:grouping-suppress-unnecessary-secondary-hash", - project.organization, - ) diff --git a/src/sentry/grouping/ingest/metrics.py b/src/sentry/grouping/ingest/metrics.py new file mode 100644 index 00000000000000..30868421318b29 --- /dev/null +++ b/src/sentry/grouping/ingest/metrics.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +import logging +from collections.abc import MutableMapping +from typing import TYPE_CHECKING, Any + +from sentry import features +from sentry.grouping.api import GroupingConfig +from sentry.grouping.ingest.config import _config_update_happened_recently, is_in_transition +from sentry.grouping.ingest.utils import extract_hashes +from sentry.grouping.result import CalculatedHashes +from sentry.models.project import Project +from sentry.utils import metrics +from sentry.utils.tag_normalization import normalized_sdk_tag_from_event + +if TYPE_CHECKING: + from sentry.eventstore.models import Event + +logger = logging.getLogger("sentry.events.grouping") + +Job = MutableMapping[str, Any] + + +def record_hash_calculation_metrics( + project: Project, + primary_config: GroupingConfig, + primary_hashes: CalculatedHashes, + secondary_config: GroupingConfig, + secondary_hashes: CalculatedHashes, +): + has_secondary_hashes = len(extract_hashes(secondary_hashes)) > 0 + + if has_secondary_hashes: + tags = { + "primary_config": primary_config["id"], + "secondary_config": secondary_config["id"], + } + + # If the configs are the same, *of course* the values are going to match, so no point in + # recording a metric + # + # TODO: If we fix the issue outlined in https://github.com/getsentry/sentry/pull/65116, we + # can ditch both this check and the logging below + if tags["primary_config"] != tags["secondary_config"]: + current_values = primary_hashes.hashes + secondary_values = secondary_hashes.hashes + hashes_match = current_values == secondary_values + + if hashes_match: + tags["result"] = "no change" + else: + shared_hashes = set(current_values) & set(secondary_values) + if len(shared_hashes) > 0: + tags["result"] = "partial change" + else: + tags["result"] = "full change" + + metrics.incr("grouping.hash_comparison", tags=tags) + + else: + if not _config_update_happened_recently(project, 30): + logger.info( + "Equal primary and secondary configs", + extra={ + "project": project.id, + "primary_config": primary_config["id"], + }, + ) + + +# TODO: Once the 
legacy `_save_aggregate` goes away, this logic can be pulled into +# `record_hash_calculation_metrics`. Right now it's split up because we don't know the value for +# `result` at the time the legacy `_save_aggregate` (indirectly) calls `record_hash_calculation_metrics` +def record_calculation_metric_with_result( + project: Project, + has_secondary_hashes: bool, + result: str, +) -> None: + + # Track the total number of grouping calculations done overall, so we can divide by the + # count to get an average number of calculations per event + tags = { + "in_transition": str(is_in_transition(project)), + "using_transition_optimization": str( + features.has( + "organizations:grouping-suppress-unnecessary-secondary-hash", + project.organization, + ) + ), + "result": result, + } + metrics.incr("grouping.event_hashes_calculated", tags=tags) + metrics.incr("grouping.total_calculations", amount=2 if has_secondary_hashes else 1, tags=tags) + + +def record_new_group_metrics(event: Event): + metrics.incr( + "group.created", + skip_internal=True, + tags={ + "platform": event.platform or "unknown", + "sdk": normalized_sdk_tag_from_event(event), + }, + ) + + # This only applies to events with stacktraces + frame_mix = event.get_event_metadata().get("in_app_frame_mix") + if frame_mix: + metrics.incr( + "grouping.in_app_frame_mix", + sample_rate=1.0, + tags={ + "platform": event.platform or "unknown", + "sdk": normalized_sdk_tag_from_event(event), + "frame_mix": frame_mix, + }, + ) diff --git a/src/sentry/grouping/ingest/utils.py b/src/sentry/grouping/ingest/utils.py new file mode 100644 index 00000000000000..b3d876332a909c --- /dev/null +++ b/src/sentry/grouping/ingest/utils.py @@ -0,0 +1,70 @@ +from __future__ import annotations + +import logging +from collections.abc import MutableMapping +from typing import TYPE_CHECKING, Any + +from sentry.exceptions import HashDiscarded +from sentry.grouping.result import CalculatedHashes +from sentry.issues.grouptype import GroupCategory +from sentry.killswitches import killswitch_matches_context +from sentry.models.group import Group +from sentry.models.grouphash import GroupHash +from sentry.models.project import Project + +if TYPE_CHECKING: + from sentry.eventstore.models import Event + +logger = logging.getLogger("sentry.events.grouping") + +Job = MutableMapping[str, Any] + + +def extract_hashes(calculated_hashes: CalculatedHashes | None) -> list[str]: + return [] if not calculated_hashes else list(calculated_hashes.hashes) + + +def add_group_id_to_grouphashes( + group: Group, + grouphashes: list[GroupHash], +) -> None: + """ + Link the given group to any grouphash which doesn't yet have a group assigned. 
+ """ + + new_grouphash_ids = [gh.id for gh in grouphashes if gh.group_id is None] + + GroupHash.objects.filter(id__in=new_grouphash_ids).exclude( + state=GroupHash.State.LOCKED_IN_MIGRATION + ).update(group=group) + + +def check_for_group_creation_load_shed(project: Project, event: Event): + """ + Raise a `HashDiscarded` error if the load-shed killswitch is enabled + """ + if killswitch_matches_context( + "store.load-shed-group-creation-projects", + { + "project_id": project.id, + "platform": event.platform, + }, + ): + raise HashDiscarded("Load shedding group creation", reason="load_shed") + + +def check_for_category_mismatch(group: Group) -> bool: + """ + Make sure an error event hasn't hashed to a value assigned to a non-error-type group + """ + if group.issue_category != GroupCategory.ERROR: + logger.info( + "event_manager.category_mismatch", + extra={ + "issue_category": group.issue_category, + "event_type": "error", + }, + ) + return True + + return False diff --git a/src/sentry/integrations/slack/service.py b/src/sentry/integrations/slack/service.py new file mode 100644 index 00000000000000..bc61d085d6d4b6 --- /dev/null +++ b/src/sentry/integrations/slack/service.py @@ -0,0 +1,216 @@ +from __future__ import annotations + +from logging import Logger, getLogger + +from sentry.integrations.repository import get_default_issue_alert_repository +from sentry.integrations.repository.issue_alert import ( + IssueAlertNotificationMessage, + IssueAlertNotificationMessageRepository, +) +from sentry.integrations.slack import BlockSlackMessageBuilder, SlackClient +from sentry.integrations.utils.common import get_active_integration_for_organization +from sentry.models.activity import Activity +from sentry.models.rule import Rule +from sentry.notifications.notifications.activity import EMAIL_CLASSES_BY_TYPE +from sentry.types.integrations import ExternalProviderEnum + +_default_logger = getLogger(__name__) + + +class RuleDataError(Exception): + pass + + +class SlackService: + """ + Slack service is the main entry point for all business logic related to Slack. + We will consolidate the Slack logic in here to create an easier interface to interact with, and not worry about + figuring out which specific class or object you need, how to create them, in which order, and what to call. + + This service will have plentiful logging, error catching and handling, and be mindful of performance impacts. + There will also be monitoring and alerting in place to give more visibility to the main business logic methods. + """ + + def __init__( + self, + notification_message_repository: IssueAlertNotificationMessageRepository, + message_block_builder: BlockSlackMessageBuilder, + logger: Logger, + ) -> None: + self._notification_message_repository = notification_message_repository + self._slack_block_builder = message_block_builder + self._logger = logger + + @classmethod + def default(cls) -> SlackService: + return SlackService( + notification_message_repository=get_default_issue_alert_repository(), + message_block_builder=BlockSlackMessageBuilder(), + logger=_default_logger, + ) + + def notify_all_threads_for_activity(self, activity: Activity) -> None: + """ + For an activity related to an issue group, send notifications in a Slack thread to all parent notifications for + that specific group and project. + + If the group is not associated with an activity, return early as there's nothing to do. + If the user is not associated with an activity, return early as we only care about user activities. 
+ """ + if activity.group is None: + self._logger.info( + "no group associated on the activity, nothing to do", + extra={ + "activity_id": activity.id, + }, + ) + return None + + if activity.user_id is None: + self._logger.info( + "machine/system updates are ignored at this time, nothing to do", + extra={ + "activity_id": activity.id, + }, + ) + return None + + # The same message is sent to all the threads, so this needs to only happen once + notification_to_send = self._get_notification_message_to_send(activity=activity) + if not notification_to_send: + self._logger.info( + "notification to send is invalid", + extra={ + "activity_id": activity.id, + }, + ) + return None + + integration = get_active_integration_for_organization( + organization_id=activity.group.organization.id, + provider=ExternalProviderEnum.SLACK, + ) + if integration is None: + self._logger.info( + "no integration found for activity", + extra={ + "activity_id": activity.id, + "organization_id": activity.project.organization_id, + "project_id": activity.project.id, + }, + ) + return None + + slack_client = SlackClient(integration_id=integration.id) + + # Get all parent notifications, which will have the message identifier to use to reply in a thread + parent_notifications = ( + self._notification_message_repository.get_all_parent_notification_messages_by_filters( + group_ids=[activity.group.id], + project_ids=[activity.project.id], + ) + ) + for parent_notification in parent_notifications: + try: + self._handle_parent_notification( + parent_notification=parent_notification, + notification_to_send=notification_to_send, + client=slack_client, + ) + except Exception as err: + self._logger.info( + "failed to send notification", + exc_info=err, + extra={ + "activity_id": activity.id, + "parent_notification_id": parent_notification.id, + "notification_to_send": notification_to_send, + "integration_id": integration.id, + }, + ) + + def _handle_parent_notification( + self, + parent_notification: IssueAlertNotificationMessage, + notification_to_send: str, + client: SlackClient, + ) -> None: + # For each parent notification, we need to get the channel that the notification is replied to + # Get the channel by using the action uuid + if not parent_notification.rule_fire_history: + raise RuleDataError( + f"parent notification {parent_notification.id} does not have a rule_fire_history" + ) + + if not parent_notification.rule_action_uuid: + raise RuleDataError( + f"parent notification {parent_notification.id} does not have a rule_action_uuid" + ) + + rule: Rule = parent_notification.rule_fire_history.rule + rule_action = rule.get_rule_action_details_by_uuid(parent_notification.rule_action_uuid) + if not rule_action: + raise RuleDataError( + f"failed to find rule action {parent_notification.rule_action_uuid} for rule {rule.id}" + ) + + channel_id: str | None = rule_action.get("channel_id", None) + if not channel_id: + raise RuleDataError( + f"failed to get channel_id for rule {rule.id} and rule action {parent_notification.rule_action_uuid}" + ) + + block = self._slack_block_builder.get_markdown_block(text=notification_to_send) + payload = {"channel": channel_id, "thread_ts": parent_notification.message_identifier} + slack_payload = self._slack_block_builder._build_blocks( + block, fallback_text=notification_to_send + ) + payload.update(slack_payload) + try: + client.post("/chat.postMessage", data=payload, timeout=5) + except Exception as err: + self._logger.info( + "failed to post message to slack", + extra={ + "error": str(err), + 
"payload": payload, + "channel": channel_id, + "thread_ts": parent_notification.message_identifier, + "rule_action_uuid": parent_notification.rule_action_uuid, + }, + ) + raise + + def _get_notification_message_to_send(self, activity: Activity) -> str | None: + """ + Get the notification message that we need to send in a slack thread based on the activity type. + + Apparently the get_context is a very computation heavy call, so make sure to only call this once. + """ + notification_cls = EMAIL_CLASSES_BY_TYPE.get(activity.type, None) + if not notification_cls: + self._logger.info( + "activity type is not currently supported", + extra={ + "activity_id": activity.id, + "activity_type": activity.type, + }, + ) + return None + + notification_obj = notification_cls(activity=activity) + context = notification_obj.get_context() + text_description = context.get("text_description", None) + if not text_description: + self._logger.info( + "context did not contain text_description", + extra={ + "activity_id": activity.id, + "notification_type": activity.type, + "notification_cls": notification_cls.__name__, + "context": context, + }, + ) + return None + + return text_description diff --git a/src/sentry/integrations/utils/common.py b/src/sentry/integrations/utils/common.py new file mode 100644 index 00000000000000..fd23196e293600 --- /dev/null +++ b/src/sentry/integrations/utils/common.py @@ -0,0 +1,28 @@ +import logging + +from sentry.models.organization import OrganizationStatus +from sentry.services.hybrid_cloud.integration import RpcIntegration, integration_service +from sentry.types.integrations import ExternalProviderEnum + +_default_logger = logging.getLogger(__name__) + + +def get_active_integration_for_organization( + organization_id: int, provider: ExternalProviderEnum +) -> RpcIntegration | None: + try: + return integration_service.get_integration( + organization_id=organization_id, + status=OrganizationStatus.ACTIVE, + provider=provider.value, + ) + except Exception as err: + _default_logger.info( + "error getting active integration for organization from service", + exc_info=err, + extra={ + "organization_id": organization_id, + "provider": provider.value, + }, + ) + return None diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py index 3fd028a41ad620..35285b823c058d 100644 --- a/src/sentry/interfaces/contexts.py +++ b/src/sentry/interfaces/contexts.py @@ -1,7 +1,7 @@ from __future__ import annotations import string -from typing import ClassVar, TypeVar +from typing import Any, ClassVar, TypeVar from django.utils.encoding import force_str @@ -94,8 +94,8 @@ def __init__(self, alias, data): if value is not None: ctx_data[force_str(key)] = value # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues - if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15: - ctx_data[force_str(key)] = str_value + if isinstance(value, (int, float, list, dict)): + ctx_data[force_str(key)] = self.change_type(value) self.data = ctx_data def to_json(self): @@ -134,6 +134,16 @@ def iter_tags(self): else: yield (f"{self.alias}.{field}", value) + def change_type(self, value: int | float | list | dict) -> Any: + if isinstance(value, (float, int)) and len(str_value := force_str(value)) > 15: + return str_value + if isinstance(value, list): + return [self.change_type(el) for el in value] + elif isinstance(value, dict): + return {key: self.change_type(el) for key, el in value.items()} + else: + return value + # TODO(dcramer): 
contexts need to document/describe expected (optional) fields @contexttype diff --git a/src/sentry/issues/grouptype.py b/src/sentry/issues/grouptype.py index 9ab9b7a6829d94..862c9ac67aeefa 100644 --- a/src/sentry/issues/grouptype.py +++ b/src/sentry/issues/grouptype.py @@ -31,7 +31,11 @@ class GroupCategory(Enum): FEEDBACK = 6 -GROUP_CATEGORIES_CUSTOM_EMAIL = (GroupCategory.ERROR, GroupCategory.PERFORMANCE) +GROUP_CATEGORIES_CUSTOM_EMAIL = ( + GroupCategory.ERROR, + GroupCategory.PERFORMANCE, + GroupCategory.FEEDBACK, +) # GroupCategories which have customized email templates. If not included here, will fall back to a generic template. DEFAULT_IGNORE_LIMIT: int = 3 diff --git a/src/sentry/migrations/0687_alert_rule_project_backfill_migration.py b/src/sentry/migrations/0687_alert_rule_project_backfill_migration.py index 50e081099f3ec6..d083ca0ae3203e 100644 --- a/src/sentry/migrations/0687_alert_rule_project_backfill_migration.py +++ b/src/sentry/migrations/0687_alert_rule_project_backfill_migration.py @@ -25,7 +25,28 @@ def _backfill_alert_rule_projects(apps, schema_editor): ) continue - alert_rule = snuba_query.alertrule_set.get() + alert_rule_set = list(snuba_query.alertrule_set.all()) + if not len(alert_rule_set): + logger.warning( + "QuerySubscription + SnubaQuery found with no alert_rule", + extra={ + "query_subscription_id": subscription.id, + "snuba_query_id": snuba_query.id, + }, + ) + continue + elif len(alert_rule_set) > 1: + logger.warning( + "QuerySubscription + SnubaQuery found with multiple alert_rules", + extra={ + "query_subscription_id": subscription.id, + "snuba_query_id": snuba_query.id, + "alert_rule_ids": [alert_rule.id for alert_rule in alert_rule_set], + }, + ) + + # Default to the first alert rule + alert_rule = alert_rule_set[0] existing_alert_rule_projects = list(AlertRuleProjects.objects.filter(alert_rule=alert_rule)) should_create_new = True diff --git a/src/sentry/migrations/0688_add_project_flag_high_priority_alerts.py b/src/sentry/migrations/0688_add_project_flag_high_priority_alerts.py new file mode 100644 index 00000000000000..0221fb26c2955a --- /dev/null +++ b/src/sentry/migrations/0688_add_project_flag_high_priority_alerts.py @@ -0,0 +1,55 @@ +# Generated by Django 5.0.3 on 2024-04-03 23:44 + +from django.db import migrations + +import bitfield.models +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. For + # the most part, this should only be used for operations where it's safe to run the migration + # after your code has deployed. So this should not be used for most operations that alter the + # schema of a table. + # Here are some things that make sense to mark as dangerous: + # - Large data migrations. Typically we want these to be run manually by ops so that they can + # be monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # have ops run this and not block the deploy. Note that while adding an index is a schema + # change, it's completely safe to run the operation after the code has deployed. 
+ is_dangerous = False + + dependencies = [ + ("sentry", "0687_alert_rule_project_backfill_migration"), + ] + + operations = [ + migrations.AlterField( + model_name="project", + name="flags", + field=bitfield.models.BitField( + [ + "has_releases", + "has_issue_alerts_targeting", + "has_transactions", + "has_alert_filters", + "has_sessions", + "has_profiles", + "has_replays", + "has_feedbacks", + "has_new_feedbacks", + "spike_protection_error_currently_active", + "spike_protection_transaction_currently_active", + "spike_protection_attachment_currently_active", + "has_minified_stack_trace", + "has_cron_monitors", + "has_cron_checkins", + "has_sourcemaps", + "has_custom_metrics", + "has_high_priority_alerts", + ], + default=10, + null=True, + ), + ), + ] diff --git a/src/sentry/models/group.py b/src/sentry/models/group.py index d2b75777a397ac..c35ef7c3aa7b08 100644 --- a/src/sentry/models/group.py +++ b/src/sentry/models/group.py @@ -447,25 +447,6 @@ def update_group_status( group.priority = priority updated_priority[group.id] = priority - logger.info( - "group.update_group_status.priority_updated", - extra={ - "group_id": group.id, - "from_substatus": from_substatus, - "priority": priority, - }, - ) - else: - logger.info( - "group.update_group_status.priority_not_updated", - extra={ - "group_id": group.id, - "from_substatus": from_substatus, - "new_priority": priority, - "current_priority": group.priority, - }, - ) - modified_groups_list.append(group) Group.objects.bulk_update(modified_groups_list, ["status", "substatus", "priority"]) @@ -481,14 +462,6 @@ def update_group_status( if group.id in updated_priority: new_priority = updated_priority[group.id] - logger.info( - "group.update_group_status.priority_updated_activity", - extra={ - "group_id": group.id, - "priority": updated_priority[group.id], - "group_priority": group.priority, - }, - ) Activity.objects.create_group_activity( group=group, type=ActivityType.SET_PRIORITY, diff --git a/src/sentry/models/options/option.py b/src/sentry/models/options/option.py index 0105db58d0b347..3520436f7d786a 100644 --- a/src/sentry/models/options/option.py +++ b/src/sentry/models/options/option.py @@ -49,8 +49,16 @@ class Meta: @classmethod def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q: - # These ping options change too frequently to be useful in exports. - return q & ~models.Q(key__in={"sentry:last_worker_ping", "sentry:last_worker_version"}) + # These ping options change too frequently, or necessarily with each install, to be useful + # in exports. More broadly, we don't really care about comparing them for accuracy. 
+ return q & ~models.Q( + key__in={ + "sentry:sentry:install-id", # Only used on self-hosted + "sentry:last_worker_ping", # Changes very frequently + "sentry:last_worker_version", # Changes very frequently + "sentry:latest_version", # Auto-generated periodically, which defeats comparison + } + ) @region_silo_only_model diff --git a/src/sentry/models/project.py b/src/sentry/models/project.py index e24115e3a0f986..16a1a808522931 100644 --- a/src/sentry/models/project.py +++ b/src/sentry/models/project.py @@ -110,6 +110,7 @@ "minidump", "native", "native-qt", + "nintendo", "node", "node-awslambda", "node-azurefunctions", @@ -294,6 +295,9 @@ class flags(TypedClassBitField): # This Project has custom metrics has_custom_metrics: bool + # This Project has enough issue volume to use high priority alerts + has_high_priority_alerts: bool + bitfield_default = 10 bitfield_null = True diff --git a/src/sentry/relay/config/metric_extraction.py b/src/sentry/relay/config/metric_extraction.py index e9674ca59df38a..090a80920203f4 100644 --- a/src/sentry/relay/config/metric_extraction.py +++ b/src/sentry/relay/config/metric_extraction.py @@ -14,6 +14,7 @@ from sentry import features, options from sentry.api.endpoints.project_transaction_threshold import DEFAULT_THRESHOLD from sentry.api.utils import get_date_range_from_params +from sentry.features.rollout import in_random_rollout from sentry.incidents.models.alert_rule import AlertRule, AlertRuleStatus from sentry.models.dashboard_widget import ( ON_DEMAND_ENABLED_KEY, @@ -203,6 +204,20 @@ def _get_alert_metric_specs( return specs +def _bulk_cache_query_key(project: Project) -> str: + return f"on-demand.bulk-query-cache.{project.organization.id}" + + +def _get_bulk_cached_query(project: Project) -> dict[str, Any]: + query_bulk_cache_key = _bulk_cache_query_key(project) + return cache.get(query_bulk_cache_key, None) + + +def _set_bulk_cached_query(project: Project, query_cache: dict[str, Any]) -> None: + query_bulk_cache_key = _bulk_cache_query_key(project) + cache.set(query_bulk_cache_key, query_cache, timeout=5400) + + @metrics.wraps("on_demand_metrics._get_widget_metric_specs") def _get_widget_metric_specs( project: Project, enabled_features: set[str], prefilling: bool @@ -230,6 +245,10 @@ def _get_widget_metric_specs( "on_demand_metrics.widgets_to_process", amount=len(widget_queries), sample_rate=1.0 ) + organization_bulk_query_cache = _get_bulk_cached_query(project) + save_organization_bulk_cache = not bool(organization_bulk_query_cache) + organization_bulk_query_cache = {} + ignored_widget_ids: dict[int, bool] = {} specs_for_widget: dict[int, list[HashedMetricSpec]] = defaultdict(list) widget_query_for_spec_hash: dict[str, DashboardWidgetQuery] = {} @@ -239,7 +258,9 @@ def _get_widget_metric_specs( with metrics.timer("on_demand_metrics.widget_spec_convert"): for widget_query in widget_queries: - widget_specs = convert_widget_query_to_metric(project, widget_query, prefilling) + widget_specs = convert_widget_query_to_metric( + project, widget_query, prefilling, organization_bulk_query_cache + ) if not widget_specs: # Skip checking any widget queries that don't have specs, @@ -285,6 +306,9 @@ def _get_widget_metric_specs( _update_state_with_spec_limit(trimmed_specs, widget_query_for_spec_hash) metrics.incr("on_demand_metrics.widget_query_specs", amount=len(specs)) + if in_random_rollout("on_demand_metrics.cache_should_use_on_demand"): + if save_organization_bulk_cache: + _set_bulk_cached_query(project, organization_bulk_query_cache) return specs @@ -410,7 
+434,10 @@ def _convert_snuba_query_to_metrics( def convert_widget_query_to_metric( - project: Project, widget_query: DashboardWidgetQuery, prefilling: bool + project: Project, + widget_query: DashboardWidgetQuery, + prefilling: bool, + organization_bulk_query_cache: dict[str, Any] | None = None, ) -> list[HashedMetricSpec]: """ Converts a passed metrics widget query to one or more MetricSpecs. @@ -426,7 +453,7 @@ def convert_widget_query_to_metric( for aggregate in aggregates: metrics_specs += _generate_metric_specs( - aggregate, widget_query, project, prefilling, groupbys + aggregate, widget_query, project, prefilling, groupbys, organization_bulk_query_cache ) return metrics_specs @@ -438,6 +465,7 @@ def _generate_metric_specs( project: Project, prefilling: bool, groupbys: Sequence[str] | None = None, + organization_bulk_query_cache: dict[str, Any] | None = None, ) -> list[HashedMetricSpec]: metrics_specs = [] metrics.incr("on_demand_metrics.before_widget_spec_generation") @@ -452,6 +480,7 @@ def _generate_metric_specs( prefilling, groupbys=groupbys, spec_type=MetricSpecType.DYNAMIC_QUERY, + organization_bulk_query_cache=organization_bulk_query_cache, ): for spec in results: _log_on_demand_metric_spec( @@ -708,6 +737,7 @@ def _convert_aggregate_and_query_to_metrics( prefilling: bool, spec_type: MetricSpecType = MetricSpecType.SIMPLE_QUERY, groupbys: Sequence[str] | None = None, + organization_bulk_query_cache: dict[str, Any] | None = None, ) -> Sequence[HashedMetricSpec] | None: """ Converts an aggregate and a query to a metric spec with its hash value. @@ -719,7 +749,9 @@ def _convert_aggregate_and_query_to_metrics( # We can avoid injection of the environment in the query, since it's supported by standard, thus it won't change # the supported state of a query, since if it's standard, and we added environment it will still be standard # and if it's on demand, it will always be on demand irrespectively of what we add. 
- if not should_use_on_demand_metrics(dataset, aggregate, query, groupbys, prefilling): + if not should_use_on_demand_metrics( + dataset, aggregate, query, groupbys, prefilling, organization_bulk_query_cache + ): return None metric_specs_and_hashes = [] diff --git a/src/sentry/runner/commands/cleanup.py b/src/sentry/runner/commands/cleanup.py index e3ff158190b22f..f9d3246d9b2ab8 100644 --- a/src/sentry/runner/commands/cleanup.py +++ b/src/sentry/runner/commands/cleanup.py @@ -14,6 +14,7 @@ from django.utils import timezone from sentry.runner.decorators import log_options +from sentry.silo.base import SiloMode def get_project(value: str) -> int | None: @@ -49,8 +50,22 @@ def multiprocess_worker(task_queue: _WorkQueue) -> None: logger = logging.getLogger("sentry.cleanup") - configured = False - skip_models = [] + from sentry.runner import configure + + configure() + + from sentry import deletions, models, similarity + + skip_models = [ + # Handled by other parts of cleanup + models.EventAttachment, + models.UserReport, + models.Group, + models.GroupEmailThread, + models.GroupRuleStatus, + # Handled by TTL + similarity, + ] while True: j = task_queue.get() @@ -58,26 +73,6 @@ def multiprocess_worker(task_queue: _WorkQueue) -> None: task_queue.task_done() return - # On first task, configure Sentry environment - if not configured: - from sentry.runner import configure - - configure() - configured = True - - from sentry import deletions, models, similarity - - skip_models = [ - # Handled by other parts of cleanup - models.EventAttachment, - models.UserReport, - models.Group, - models.GroupEmailThread, - models.GroupRuleStatus, - # Handled by TTL - similarity, - ] - model, chunk = j model = import_string(model) @@ -281,23 +276,24 @@ def is_filtered(model: type[Model]) -> bool: item.delete_file() project_id = None - if project: - click.echo("Bulk NodeStore deletion not available for project selection", err=True) - project_id = get_project(project) - # These models span across projects, so let's skip them - DELETES.remove((models.ArtifactBundle, "date_added", "date_added")) - if project_id is None: - click.echo("Error: Project not found", err=True) - raise click.Abort() - else: - if not silent: - click.echo("Removing old NodeStore values") + if SiloMode.get_current_mode() != SiloMode.CONTROL: + if project: + click.echo("Bulk NodeStore deletion not available for project selection", err=True) + project_id = get_project(project) + # These models span across projects, so let's skip them + DELETES.remove((models.ArtifactBundle, "date_added", "date_added")) + if project_id is None: + click.echo("Error: Project not found", err=True) + raise click.Abort() + else: + if not silent: + click.echo("Removing old NodeStore values") - cutoff = timezone.now() - timedelta(days=days) - try: - nodestore.backend.cleanup(cutoff) - except NotImplementedError: - click.echo("NodeStore backend does not support cleanup operation", err=True) + cutoff = timezone.now() - timedelta(days=days) + try: + nodestore.backend.cleanup(cutoff) + except NotImplementedError: + click.echo("NodeStore backend does not support cleanup operation", err=True) for model_tp, dtfield, order_by in BULK_QUERY_DELETES: chunk_size = 10000 @@ -339,19 +335,21 @@ def is_filtered(model: type[Model]) -> bool: task_queue.join() - project_deletion_query = models.Project.objects.filter(status=ObjectStatus.ACTIVE) - if project: - project_deletion_query = models.Project.objects.filter(id=project_id) - + project_deletion_query = None to_delete_by_project = [] - for 
model_tp_tup in DELETES_BY_PROJECT: - if is_filtered(model_tp_tup[0]): - if not silent: - click.echo(">> Skipping %s" % model_tp_tup[0].__name__) - else: - to_delete_by_project.append(model_tp_tup) + if SiloMode.get_current_mode() != SiloMode.CONTROL: + project_deletion_query = models.Project.objects.filter(status=ObjectStatus.ACTIVE) + if project: + project_deletion_query = models.Project.objects.filter(id=project_id) + + for model_tp_tup in DELETES_BY_PROJECT: + if is_filtered(model_tp_tup[0]): + if not silent: + click.echo(">> Skipping %s" % model_tp_tup[0].__name__) + else: + to_delete_by_project.append(model_tp_tup) - if to_delete_by_project: + if project_deletion_query and to_delete_by_project: for project_id_for_deletion in RangeQuerySetWrapper( project_deletion_query.values_list("id", flat=True), result_value_getter=lambda item: item, diff --git a/src/sentry/search/events/builder/discover.py b/src/sentry/search/events/builder/discover.py index 06856a3385feae..0b26ff3726d85e 100644 --- a/src/sentry/search/events/builder/discover.py +++ b/src/sentry/search/events/builder/discover.py @@ -90,6 +90,7 @@ class BaseQueryBuilder: organization_column: str = "organization.id" function_alias_prefix: str | None = None spans_metrics_builder = False + entity: Entity | None = None def get_middle(self): """Get the middle for comparison functions""" @@ -194,6 +195,7 @@ def __init__( turbo: bool = False, sample_rate: float | None = None, array_join: str | None = None, + entity: Entity | None = None, ): if config is None: self.builder_config = QueryBuilderConfig() @@ -280,6 +282,7 @@ def __init__( equations=equations, orderby=orderby, ) + self.entity = entity def are_columns_resolved(self) -> bool: return self.columns and isinstance(self.columns[0], Function) @@ -1109,6 +1112,8 @@ def column(self, name: str) -> Column: :param name: The unresolved sentry name. """ resolved_column = self.resolve_column_name(name) + if self.entity: + return Column(resolved_column, entity=self.entity) return Column(resolved_column) # Query filter helper methods diff --git a/src/sentry/search/snuba/executors.py b/src/sentry/search/snuba/executors.py index cb0ce146537bf2..5c052cb60bba34 100644 --- a/src/sentry/search/snuba/executors.py +++ b/src/sentry/search/snuba/executors.py @@ -53,6 +53,7 @@ from sentry.models.organization import Organization from sentry.models.project import Project from sentry.models.team import Team +from sentry.search.events.builder.discover import UnresolvedQuery from sentry.search.events.filter import convert_search_filter_to_snuba_query, format_search_filter from sentry.services.hybrid_cloud.user.model import RpcUser from sentry.snuba.dataset import Dataset @@ -1116,124 +1117,143 @@ class InvalidQueryForExecutor(Exception): pass -def get_basic_group_snuba_lookup(search_filter: SearchFilter, attr_entity: Entity) -> Condition: - """ - Returns the basic lookup for a search filter. - """ - return Condition( - Column(f"group_{search_filter.key.name}", attr_entity), - Op.IN, - search_filter.value.raw_value, - ) - +class GroupAttributesPostgresSnubaQueryExecutor(PostgresSnubaQueryExecutor): + def get_basic_group_snuba_condition(self, search_filter: SearchFilter) -> Condition: + """ + Returns the basic lookup for a search filter. 
+ """ + return Condition( + Column(f"group_{search_filter.key.name}", self.entities["attrs"]), + Op.IN, + search_filter.value.raw_value, + ) -def get_assigned(search_filter: SearchFilter, attr_entity: Entity) -> Condition: - """ - Returns the assigned lookup for a search filter. - """ - user_ids = [user.id for user in search_filter.value.raw_value if isinstance(user, RpcUser)] - team_ids = [team.id for team in search_filter.value.raw_value if isinstance(team, Team)] - - conditions = [] - if user_ids: - assigned_to_user = Condition(Column("assignee_user_id", attr_entity), Op.IN, user_ids) - conditions.append(assigned_to_user) - - if team_ids: - assigned_to_team = Condition(Column("assignee_team_id", attr_entity), Op.IN, team_ids) - conditions.append(assigned_to_team) - # asking for unassigned issues - if None in search_filter.value.raw_value: - # neither assigned to team or user - assigned_to_none_user = Condition(Column("assignee_user_id", attr_entity), Op.IS_NULL, None) - assigned_to_none_team = Condition(Column("assignee_team_id", attr_entity), Op.IS_NULL, None) - conditions.append( - BooleanCondition( - op=BooleanOp.AND, conditions=[assigned_to_none_user, assigned_to_none_team] - ) + def get_basic_event_snuba_condition(self, search_filter: SearchFilter) -> Condition: + """ + Returns the basic lookup for a search filter. + """ + query_builder = UnresolvedQuery( + dataset=Dataset.Events, + entity=self.entities["event"], + params={}, ) + return query_builder.default_filter_converter(search_filter) - if len(conditions) == 1: - return conditions[0] + def get_assigned(self, search_filter: SearchFilter) -> Condition: + """ + Returns the assigned lookup for a search filter. + """ + attr_entity = self.entities["attrs"] + user_ids = [user.id for user in search_filter.value.raw_value if isinstance(user, RpcUser)] + team_ids = [team.id for team in search_filter.value.raw_value if isinstance(team, Team)] - return BooleanCondition(op=BooleanOp.OR, conditions=conditions) + conditions = [] + if user_ids: + assigned_to_user = Condition(Column("assignee_user_id", attr_entity), Op.IN, user_ids) + conditions.append(assigned_to_user) + + if team_ids: + assigned_to_team = Condition(Column("assignee_team_id", attr_entity), Op.IN, team_ids) + conditions.append(assigned_to_team) + # asking for unassigned issues + if None in search_filter.value.raw_value: + # neither assigned to team or user + assigned_to_none_user = Condition( + Column("assignee_user_id", attr_entity), Op.IS_NULL, None + ) + assigned_to_none_team = Condition( + Column("assignee_team_id", attr_entity), Op.IS_NULL, None + ) + conditions.append( + BooleanCondition( + op=BooleanOp.AND, conditions=[assigned_to_none_user, assigned_to_none_team] + ) + ) + if len(conditions) == 1: + return conditions[0] -def get_suggested(search_filter: SearchFilter, attr_entity: Entity) -> Condition: - """ - Returns the suggested lookup for a search filter. 
- """ - users = filter(lambda x: isinstance(x, RpcUser), search_filter.value.raw_value) - user_ids = [user.id for user in users] - teams = filter(lambda x: isinstance(x, Team), search_filter.value.raw_value) - team_ids = [team.id for team in teams] - - conditions = [] - if user_ids: - suspect_commit_user = Condition( - Column("owner_suspect_commit_user_id", attr_entity), Op.IN, user_ids - ) - ownership_rule_user = Condition( - Column("owner_ownership_rule_user_id", attr_entity), Op.IN, user_ids - ) - codeowner_user = Condition(Column("owner_codeowners_user_id", attr_entity), Op.IN, user_ids) - conditions = conditions + [suspect_commit_user, ownership_rule_user, codeowner_user] + return BooleanCondition(op=BooleanOp.OR, conditions=conditions) - if team_ids: - ownership_rule_team = Condition( - Column("owner_ownership_rule_team_id", attr_entity), Op.IN, team_ids - ) - codowner_team = Condition(Column("owner_codeowners_team_id", attr_entity), Op.IN, team_ids) - conditions = conditions + [ownership_rule_team, codowner_team] + def get_suggested(self, search_filter: SearchFilter) -> Condition: + """ + Returns the suggested lookup for a search filter. + """ + attr_entity = self.entities["attrs"] + users = filter(lambda x: isinstance(x, RpcUser), search_filter.value.raw_value) + user_ids = [user.id for user in users] + teams = filter(lambda x: isinstance(x, Team), search_filter.value.raw_value) + team_ids = [team.id for team in teams] - if None in search_filter.value.raw_value: - # neither assigned to team or user - suspect_commit_user = Condition( - Column("owner_suspect_commit_user_id", attr_entity), Op.IS_NULL, None - ) - ownership_rule_user = Condition( - Column("owner_ownership_rule_user_id", attr_entity), Op.IS_NULL, None - ) - ownership_rule_team = Condition( - Column("owner_ownership_rule_team_id", attr_entity), Op.IS_NULL, None - ) - codeowner_user = Condition( - Column("owner_codeowners_user_id", attr_entity), Op.IS_NULL, None - ) - codowner_team = Condition(Column("owner_codeowners_team_id", attr_entity), Op.IS_NULL, None) - conditions.append( - BooleanCondition( - op=BooleanOp.AND, - conditions=[ - suspect_commit_user, - ownership_rule_user, - ownership_rule_team, - codeowner_user, - codowner_team, - ], + conditions = [] + if user_ids: + suspect_commit_user = Condition( + Column("owner_suspect_commit_user_id", attr_entity), Op.IN, user_ids ) - ) + ownership_rule_user = Condition( + Column("owner_ownership_rule_user_id", attr_entity), Op.IN, user_ids + ) + codeowner_user = Condition( + Column("owner_codeowners_user_id", attr_entity), Op.IN, user_ids + ) + conditions = conditions + [suspect_commit_user, ownership_rule_user, codeowner_user] - if len(conditions) == 1: - return conditions[0] + if team_ids: + ownership_rule_team = Condition( + Column("owner_ownership_rule_team_id", attr_entity), Op.IN, team_ids + ) + codowner_team = Condition( + Column("owner_codeowners_team_id", attr_entity), Op.IN, team_ids + ) + conditions = conditions + [ownership_rule_team, codowner_team] - return BooleanCondition( - op=BooleanOp.OR, - conditions=conditions, - ) + if None in search_filter.value.raw_value: + # neither assigned to team or user + suspect_commit_user = Condition( + Column("owner_suspect_commit_user_id", attr_entity), Op.IS_NULL, None + ) + ownership_rule_user = Condition( + Column("owner_ownership_rule_user_id", attr_entity), Op.IS_NULL, None + ) + ownership_rule_team = Condition( + Column("owner_ownership_rule_team_id", attr_entity), Op.IS_NULL, None + ) + codeowner_user = Condition( + 
Column("owner_codeowners_user_id", attr_entity), Op.IS_NULL, None + ) + codowner_team = Condition( + Column("owner_codeowners_team_id", attr_entity), Op.IS_NULL, None + ) + conditions.append( + BooleanCondition( + op=BooleanOp.AND, + conditions=[ + suspect_commit_user, + ownership_rule_user, + ownership_rule_team, + codeowner_user, + codowner_team, + ], + ) + ) + if len(conditions) == 1: + return conditions[0] -def get_assigned_or_suggested(search_filter: SearchFilter, attr_entity: Entity) -> Condition: - return BooleanCondition( - op=BooleanOp.OR, - conditions=[ - get_assigned(search_filter, attr_entity), - get_suggested(search_filter, attr_entity), - ], - ) + return BooleanCondition( + op=BooleanOp.OR, + conditions=conditions, + ) + def get_assigned_or_suggested(self, search_filter: SearchFilter) -> Condition: + return BooleanCondition( + op=BooleanOp.OR, + conditions=[ + self.get_assigned(search_filter), + self.get_suggested(search_filter), + ], + ) -class GroupAttributesPostgresSnubaQueryExecutor(PostgresSnubaQueryExecutor): ISSUE_FIELD_NAME = "group_id" entities = { @@ -1241,9 +1261,9 @@ class GroupAttributesPostgresSnubaQueryExecutor(PostgresSnubaQueryExecutor): "attrs": Entity("group_attributes", alias="g"), } - supported_conditions_lookup = { - "status": get_basic_group_snuba_lookup, - "substatus": get_basic_group_snuba_lookup, + group_conditions_lookup = { + "status": get_basic_group_snuba_condition, + "substatus": get_basic_group_snuba_condition, "assigned_or_suggested": get_assigned_or_suggested, "assigned_to": get_assigned, } @@ -1303,16 +1323,6 @@ def calculate_start_end( end = max([retention_date, end]) return start, end, retention_date - def validate_search_filters(self, search_filters: Sequence[SearchFilter] | None) -> bool: - """ - Validates whether a set of search filters can be handled by this search backend. - """ - for search_filter in search_filters or (): - supported_condition = self.supported_conditions_lookup.get(search_filter.key.name) - if not supported_condition: - return False - return True - def query( self, projects: Sequence[Project], @@ -1333,9 +1343,6 @@ def query( aggregate_kwargs: TrendsSortWeights | None = None, use_group_snuba_dataset: bool = False, ) -> CursorResult[Group]: - if not self.validate_search_filters(search_filters): - raise InvalidQueryForExecutor("Search filters invalid for this query executor") - start, end, retention_date = self.calculate_start_end( retention_window_start, search_filters, date_from, date_to ) @@ -1363,9 +1370,12 @@ def query( Condition(Column("timestamp", event_entity), Op.LT, end), ] for search_filter in search_filters or (): - where_conditions.append( - self.supported_conditions_lookup[search_filter.key.name](search_filter, attr_entity) - ) + # use the stored function if it exists in our mapping, otherwise use the basic lookup + fn = self.group_conditions_lookup.get(search_filter.key.name) + if fn: + where_conditions.append(fn(self, search_filter)) + else: + where_conditions.append(self.get_basic_event_snuba_condition(search_filter)) if environments: # TODO: Should this be handled via filter_keys, once we have a snql compatible version? 
diff --git a/src/sentry/sentry_metrics/querying/data/api.py b/src/sentry/sentry_metrics/querying/data/api.py index 5523440e35032b..4bce0fb3749ae4 100644 --- a/src/sentry/sentry_metrics/querying/data/api.py +++ b/src/sentry/sentry_metrics/querying/data/api.py @@ -39,11 +39,6 @@ def run_metrics_queries_plan( A MetricsQueriesPlanResult object which encapsulates the results of the plan and allows a QueryTransformer to be run on the data. """ - # For now, if the query plan is empty, we return an empty dictionary. In the future, we might want to default - # to a better data type. - if metrics_queries_plan.is_empty(): - return MetricsQueriesPlanResult([]) - # We build the basic query that contains the metadata which will be shared across all queries. base_query = MetricsQuery( start=start, diff --git a/src/sentry/sentry_metrics/querying/data/execution.py b/src/sentry/sentry_metrics/querying/data/execution.py index 46b03f829c8a9e..e2508f06d77e3e 100644 --- a/src/sentry/sentry_metrics/querying/data/execution.py +++ b/src/sentry/sentry_metrics/querying/data/execution.py @@ -812,13 +812,11 @@ def schedule(self, intermediate_query: IntermediateQuery, query_type: QueryType) if query_type == QueryType.TOTALS_AND_SERIES: series_query = replace(totals_query, type=ScheduledQueryType.SERIES) - final_query = replace(totals_query, next=series_query) - # We initialize the query by performing type-aware mutations that prepare the query to be executed correctly # (e.g., adding `totals` to a totals query...). - self._scheduled_queries.append( - final_query.initialize( - self._organization, self._projects, self._blocked_metrics_for_projects - ) + final_query = replace(totals_query, next=series_query).initialize( + self._organization, self._projects, self._blocked_metrics_for_projects ) + + self._scheduled_queries.append(final_query) self._query_results.append(None) diff --git a/src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py b/src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py index 34947c325dcd26..1b1944f572c2ae 100644 --- a/src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py +++ b/src/sentry/sentry_metrics/querying/data/transformation/metrics_api.py @@ -237,9 +237,16 @@ def transform(self, query_results: list[QueryResult]) -> Mapping[str, Any]: Returns: A mapping containing the data transformed in the correct format. """ - # If we have not run any queries, we won't return anything back. + base_result: dict[str, Any] = { + "data": [], + "meta": [], + "start": None, + "end": None, + "intervals": [], + } + if not query_results: - return {} + return base_result # We first build intermediate results that we can work efficiently with. 
queries_groups, queries_meta = self._build_intermediate_results(query_results) @@ -277,13 +284,10 @@ def transform(self, query_results: list[QueryResult]) -> Mapping[str, Any]: for query_meta in queries_meta: transformed_queries_meta.append([meta.meta for meta in query_meta]) - base_result = { - "data": transformed_queries_groups, - "meta": transformed_queries_meta, - "start": start, - "end": end, - } - + base_result["data"] = transformed_queries_groups + base_result["meta"] = transformed_queries_meta + base_result["start"] = start + base_result["end"] = end if intervals is not None: base_result["intervals"] = intervals diff --git a/src/sentry/snuba/metrics/extraction.py b/src/sentry/snuba/metrics/extraction.py index bed325e6883812..3c14460999f4bf 100644 --- a/src/sentry/snuba/metrics/extraction.py +++ b/src/sentry/snuba/metrics/extraction.py @@ -47,7 +47,6 @@ from sentry.snuba.metrics.naming_layer.mri import ParsedMRI, parse_mri from sentry.snuba.metrics.utils import MetricOperationType from sentry.utils import metrics -from sentry.utils.cache import cache from sentry.utils.hashlib import md5_text from sentry.utils.snuba import is_measurement, is_span_op_breakdown, resolve_column @@ -671,15 +670,18 @@ def should_use_on_demand_metrics( query: str | None, groupbys: Sequence[str] | None = None, prefilling: bool = False, + organization_bulk_query_cache: dict[str, Any] | None = None, ) -> bool: if in_random_rollout("on_demand_metrics.cache_should_use_on_demand"): + if organization_bulk_query_cache is None: + organization_bulk_query_cache = {} dataset_str = dataset.value if isinstance(dataset, Enum) else str(dataset or "") groupbys_str = ",".join(sorted(groupbys)) if groupbys else "" - cache_key = md5_text( + local_cache_key = md5_text( f"{dataset_str}-{aggregate}-{query or ''}-{groupbys_str}-prefilling={prefilling}" ).hexdigest() - cached_result = cache.get(cache_key) + cached_result = organization_bulk_query_cache.get(local_cache_key, None) if cached_result: metrics.incr("on_demand_metrics.should_use_on_demand_metrics.cache_hit") return cached_result @@ -687,7 +689,7 @@ def should_use_on_demand_metrics( logger.info( "should_use_on_demand_metrics.cache_miss", extra={ - "cache_key": cache_key, + "cache_key": local_cache_key, }, ) result = _should_use_on_demand_metrics( @@ -698,7 +700,7 @@ def should_use_on_demand_metrics( prefilling=prefilling, ) metrics.incr("on_demand_metrics.should_use_on_demand_metrics.cache_miss") - cache.set(cache_key, result, timeout=5400) + organization_bulk_query_cache[local_cache_key] = result return result return _should_use_on_demand_metrics( diff --git a/src/sentry/snuba/referrer.py b/src/sentry/snuba/referrer.py index 0f33eb34e9a503..8f4520de150d51 100644 --- a/src/sentry/snuba/referrer.py +++ b/src/sentry/snuba/referrer.py @@ -344,6 +344,9 @@ class Referrer(Enum): "api.performance.transaction-summary.vitals-chart" ) API_PERFORMANCE_TRANSACTION_SUMMARY = "api.performance.transaction-summary" + API_PERFORMANCE_TRANSACTIONS_STATISTICAL_DETECTOR_ROOT_CAUSE_ANALYSIS = ( + "api.performance.transactions.statistical-detector-root-cause-analysis" + ) API_PERFORMANCE_TRANSACTIONS_STATISTICAL_DETECTOR_STATS = ( "api.performance.transactions.statistical-detector-stats" ) diff --git a/src/sentry/tasks/activity.py b/src/sentry/tasks/activity.py index 407e66b9456be1..5e5f74987c7f0f 100644 --- a/src/sentry/tasks/activity.py +++ b/src/sentry/tasks/activity.py @@ -33,6 +33,8 @@ def get_activity_notifiers(project): silo_mode=SiloMode.REGION, ) def 
send_activity_notifications(activity_id): + from sentry import features + from sentry.integrations.slack.service import SlackService from sentry.models.activity import Activity from sentry.models.organization import Organization @@ -46,3 +48,7 @@ def send_activity_notifications(activity_id): for notifier in get_activity_notifiers(activity.project): notifier.notify_about_activity(activity) + + if features.has("organizations:slack-thread-issue-alert", organization): + slack_service = SlackService.default() + slack_service.notify_all_threads_for_activity(activity=activity) diff --git a/src/sentry/templates/sentry/emails/feedback.html b/src/sentry/templates/sentry/emails/feedback.html new file mode 100644 index 00000000000000..e94079f8c9ba44 --- /dev/null +++ b/src/sentry/templates/sentry/emails/feedback.html @@ -0,0 +1,243 @@ +{% extends "sentry/emails/base.html" %} + +{% load timezone from tz %} +{% load sentry_avatars %} +{% load sentry_helpers %} +{% load sentry_features %} +{% load sentry_assets %} +{% load i18n static %} + +{% block head %} + {{ block.super }} + +{% endblock %} + +{% block preheader %} + {{ group_header }} from {{ project_label }}. +{% endblock %} + +{% block header %} +
+ {{ block.super }} +
+ {% if not has_alert_integration %} + + + Set up in Slack + + {% endif %} + View on Sentry +
+
+{% endblock %} + +{% block content %} + +
+
+

+ {% if has_issue_states and group_header %} + {{ group_header }} + {% else %} + New Feedback from {{ project_label }} + {% if environment %} in {{ environment }}{% endif %} + {% endif %} +

+ {% if notification_reason %} +
+ {{ notification_reason }} +
+ {% endif %} + + {% if enhanced_privacy %} +
+
ID: {{ event.event_id }}
+ {% if timezone %} +
{{ event.datetime|timezone:timezone|date:"N j, Y, g:i:s a e"}}
+ {% else %} +
{{ event.datetime|date:"N j, Y, g:i:s a e"}}
+ {% endif %} +
+ +
Details about this feedback are not shown in this notification since enhanced privacy + controls are enabled. For more details, view this feedback on Sentry.
+ {% else %} + + + + + + + +
User Feedback
+
+ {% with type=event.get_event_type metadata=group.get_event_metadata transaction=event.transaction %} +
+

+ {% if issue_title %} + {{ issue_title|truncatechars:40 }} + {% else %} + {{ event.title|truncatechars:40 }} + {% endif %} + {% if culprit %} + {{ culprit }} + {% elif transaction %} + {{ transaction }} + {% endif %} +
+
+ {% if metadata.name and metadata.contact_email %} +

{{ metadata.name }} {{ metadata.contact_email }}

+ {% elif metadata.name %} +

{{ metadata.name }}

+ {% elif metadata.contact_email %} +

{{ metadata.contact_email }}

+ {% endif %} +
+
+
ID: {{ event.event_id }}
+ {% if timezone %} +
{{ event.datetime|timezone:timezone|date:"N j, Y, g:i:s a e"}}
+ {% else %} +
{{ event.datetime|date:"N j, Y, g:i:s a e"}}
+ {% endif %} +
+ {% if subtitle %} +

Message

+

{{ subtitle }}

+ {% endif %} + +
+ {% endwith %} +
+
+ + + {% if has_issue_states %} +
+ + + + + + + + + + {% if environment %} + + + + + {% endif %} + + + + + +
project{{ project_label }}
environment{{ environment }}
level{{ group.get_level_display }}
+
+ {% endif %} + + + {% block table %}{% endblock %} + + {% if issue_replays_url %} +
+

Session Replay

+ + + + + + + + + + +
Session Replay: + + Session Replay + See a replay of this feedback + +
+
+ {% endif %} + + + {% block stacktrace %}{% endblock %} + + {% if tags %} +

Tags

+ +
    + {% for tag_key, tag_value in tags %} +
  • + {{ tag_key|as_tag_alias }} + = + + {% with query=tag_key|as_tag_alias|add:":\""|add:tag_value|add:"\""|urlencode %} + {{ tag_value|truncatechars:50 }} {% if tag_value|is_url %}{% endif %} + {% endwith %} + +
  • + {% endfor %} +
+ {% endif %} + {% endif %} + +

+ {% if snooze_alert %} + Mute this alert + {% endif %} + This email was triggered by + {% for rule in rules %} + {{ rule.label }}{% if not forloop.last %}, {% endif %} + {% endfor %} +

+ + {% if not has_integrations %} +
+ + + + + + +
+

+ {{ "Get this alert wherever you work" }} +

+ {% endif %} + + {# support for gmail actions #} +
+ +
+ + +
+
+ + +
+
+
+
+{% endblock %} diff --git a/src/sentry/templates/sentry/feedback.txt b/src/sentry/templates/sentry/feedback.txt new file mode 100644 index 00000000000000..ed6b3635e28e2c --- /dev/null +++ b/src/sentry/templates/sentry/feedback.txt @@ -0,0 +1,41 @@ +{% spaceless %} +{% autoescape off %} +{% if enhanced_privacy %} +Details about this feedback are not shown in this notification since enhanced +privacy controls are enabled. For more details about this feedback, view this +issue on Sentry. +Details +------- + +{{ link }} +{% else %} +Details +------- + +{{ link }} + + +{% if generic_issue_data %} +User Feedback +---------- +{% for label, html, _ in generic_issue_data %} + {{ label }} {{ html }} +{% endfor %}{% endif %} + +Tags +---- +{% for tag_key, tag_value in tags %} +* {{ tag_key }} = {{ tag_value }}{% endfor %} + +{% if interfaces %}{% for label, _, text in interfaces %} +{{ label }} +----------- + +{{ text }} + +{% endfor %} +{% endif %}{% endif %} + +Unsubscribe: {{ unsubscribe_link }} +{% endautoescape %} +{% endspaceless %} diff --git a/src/sentry/testutils/helpers/notifications.py b/src/sentry/testutils/helpers/notifications.py index 8acdb3634377f2..0223d8c12f477c 100644 --- a/src/sentry/testutils/helpers/notifications.py +++ b/src/sentry/testutils/helpers/notifications.py @@ -6,6 +6,7 @@ from typing import Any from sentry.issues.grouptype import ( + FeedbackGroup, PerformanceNPlusOneAPICallsGroupType, PerformanceNPlusOneGroupType, PerformanceRenderBlockingAssetSpanGroupType, @@ -119,6 +120,33 @@ def get_title_link(self, *args): "info", "/api/123/", ) +TEST_FEEDBACK_ISSUE_OCCURENCE = IssueOccurrence( + id=uuid.uuid4().hex, + project_id=1, + event_id=uuid.uuid4().hex, + fingerprint=["c" * 32], + issue_title="User Feedback", + subtitle="Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed vel aliquam velit, nec condimentum mi. Maecenas accumsan, nunc ac venenatis hendrerit, mi libero facilisis nunc, fringilla molestie dui est vulputate diam. Duis ac justo euismod, sagittis est at, bibendum purus. Praesent nec tortor vel ante accumsan lobortis. Morbi mollis augue nec dolor feugiat congue. Nullam eget blandit nisi. Sed in arcu odio. Aenean malesuada tortor quis felis dapibus congue.d", + culprit="api/123", + resource_id="1234", + evidence_data={ + "contact_email": "test@test.com", + "message": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed vel aliquam velit, nec condimentum mi. Maecenas accumsan, nunc ac venenatis hendrerit, mi libero facilisis nunc, fringilla molestie dui est vulputate diam. Duis ac justo euismod, sagittis est at, bibendum purus. Praesent nec tortor vel ante accumsan lobortis. Morbi mollis augue nec dolor feugiat congue. Nullam eget blandit nisi. Sed in arcu odio. Aenean malesuada tortor quis felis dapibus congue.", + "name": "Test Name", + }, + evidence_display=[ + IssueEvidence("contact_email", "test@test.com", False), + IssueEvidence( + "message", + "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed vel aliquam velit, nec condimentum mi. Maecenas accumsan, nunc ac venenatis hendrerit, mi libero facilisis nunc, fringilla molestie dui est vulputate diam. Duis ac justo euismod, sagittis est at, bibendum purus. Praesent nec tortor vel ante accumsan lobortis. Morbi mollis augue nec dolor feugiat congue. Nullam eget blandit nisi. Sed in arcu odio. 
Aenean malesuada tortor quis felis dapibus congue.", + True, + ), + IssueEvidence("name", "Test Name", False), + ], + type=FeedbackGroup, + detection_time=datetime.now(UTC), + level="info", +) TEST_PERF_ISSUE_OCCURRENCE = IssueOccurrence( uuid.uuid4().hex, 1, diff --git a/src/sentry/utils/openai_sdk_integration.py b/src/sentry/utils/openai_sdk_integration.py deleted file mode 100644 index 9e86ff2c794324..00000000000000 --- a/src/sentry/utils/openai_sdk_integration.py +++ /dev/null @@ -1,75 +0,0 @@ -import hashlib - -from sentry_sdk.hub import Hub -from sentry_sdk.integrations import DidNotEnable, Integration - -try: - from openai import resources -except ImportError: - raise DidNotEnable("OpenAI is not installed") - - -class OpenAiIntegration(Integration): - identifier = "openai" - - def __init__(self, capture_prompts=False): - self.capture_prompts = capture_prompts - - @staticmethod - def setup_once(): - patch_openai() - - -def patch_openai(): - old_open_ai_resources_chat_completions_create = resources.chat.completions.Completions.create - - def monkeypatched_openai_completions_create(*args, **kwargs): - hub = Hub.current - - integration = hub.get_integration(OpenAiIntegration) - if integration is None: - return old_open_ai_resources_chat_completions_create(*args, **kwargs) - - # for now, simply make the description (which is how grouping is defined) - # simply the first message in the chat. This is not ideal, but it's a start. - with hub.start_span( - op="ai.llm.completion", - description=_get_description(kwargs.get("messages"), integration.capture_prompts), - ) as span: - if integration.capture_prompts: - span.set_data( - "chat_completion_input", - {"messages": kwargs.get("messages")}, - ) - return_value = old_open_ai_resources_chat_completions_create(*args, **kwargs) - if getattr(return_value, "usage", None): - completion_output = { - "created": return_value.created, - "id": return_value.id, - } - if integration.capture_prompts: - completion_output["choices"] = return_value.model_dump(exclude_unset=True).get( - "choices" - ) - - span.set_data( - "chat_completion_output", - completion_output, - ) - span.set_data("llm_prompt_tkens", return_value.usage.prompt_tokens) - span.set_data("llm_completion_tkens", return_value.usage.completion_tokens) - span.set_data("llm_total_tkens", return_value.usage.total_tokens) - span.set_data("language_model", return_value.model) - return return_value - - resources.chat.completions.Completions.create = monkeypatched_openai_completions_create # type: ignore[method-assign] - - -def _get_description(messages, capture_prompts): - if messages is None or len(messages) == 0: - return "" - if capture_prompts: - # TODO: should we handle truncation here? 
- return messages[0]["content"] - else: - return hashlib.md5(messages[0]["content"].encode("utf-8")).hexdigest() diff --git a/src/sentry/utils/sdk.py b/src/sentry/utils/sdk.py index 5ef3118bdd6d27..58600f2db25248 100644 --- a/src/sentry/utils/sdk.py +++ b/src/sentry/utils/sdk.py @@ -24,7 +24,6 @@ from sentry.features.rollout import in_random_rollout from sentry.utils import metrics from sentry.utils.db import DjangoAtomicIntegration -from sentry.utils.openai_sdk_integration import OpenAiIntegration from sentry.utils.rust import RustInfoIntegration # Can't import models in utils because utils should be the bottom of the food chain @@ -470,7 +469,6 @@ def flush( RustInfoIntegration(), RedisIntegration(), ThreadingIntegration(propagate_hub=True), - OpenAiIntegration(capture_prompts=True), ], spotlight=settings.IS_DEV and not settings.NO_SPOTLIGHT, **sdk_options, diff --git a/src/sentry/web/debug_urls.py b/src/sentry/web/debug_urls.py index 98a33652817a3e..a7afb094c7c2c8 100644 --- a/src/sentry/web/debug_urls.py +++ b/src/sentry/web/debug_urls.py @@ -17,6 +17,7 @@ ) from sentry.web.frontend.debug.debug_cron_muted_monitor_email import DebugCronMutedMonitorEmailView from sentry.web.frontend.debug.debug_error_embed import DebugErrorPageEmbedView +from sentry.web.frontend.debug.debug_feedback_issue import DebugFeedbackIssueEmailView from sentry.web.frontend.debug.debug_generic_issue import DebugGenericIssueEmailView from sentry.web.frontend.debug.debug_incident_activity_email import DebugIncidentActivityEmailView from sentry.web.frontend.debug.debug_incident_trigger_email import DebugIncidentTriggerEmailView @@ -83,6 +84,7 @@ urlpatterns = [ re_path(r"^debug/mail/error-alert/$", sentry.web.frontend.debug.mail.alert), + re_path(r"^debug/mail/feedback-alert/$", DebugFeedbackIssueEmailView.as_view()), re_path( r"^debug/mail/performance-alert/(?P[^\/]+)?/$", DebugPerformanceIssueEmailView.as_view(), diff --git a/src/sentry/web/frontend/debug/debug_feedback_issue.py b/src/sentry/web/frontend/debug/debug_feedback_issue.py new file mode 100644 index 00000000000000..96ee161e120e24 --- /dev/null +++ b/src/sentry/web/frontend/debug/debug_feedback_issue.py @@ -0,0 +1,44 @@ +from django.conf import settings +from django.utils.safestring import mark_safe +from django.views.generic import View + +from sentry.models.organization import Organization +from sentry.models.project import Project +from sentry.models.rule import Rule +from sentry.notifications.utils import get_generic_data, get_group_settings_link, get_rules +from sentry.utils import json + +from .mail import COMMIT_EXAMPLE, MailPreview, make_feedback_issue + + +class DebugFeedbackIssueEmailView(View): + def get(self, request): + org = Organization(id=1, slug="example", name="Example") + project = Project(id=1, slug="example", name="Example", organization=org) + + event = make_feedback_issue(project) + group = event.group + + rule = Rule(id=1, label="An example rule") + + generic_issue_data_html = get_generic_data(event) + section_header = "Issue Data" if generic_issue_data_html else "" + return MailPreview( + html_template="sentry/emails/feedback.html", + text_template="sentry/emails/feedback.txt", + context={ + "rule": rule, + "rules": get_rules([rule], org, project), + "group": group, + "event": event, + "timezone": settings.SENTRY_DEFAULT_TIME_ZONE, + "link": get_group_settings_link(group, None, get_rules([rule], org, project), 1337), + "generic_issue_data": [(section_header, mark_safe(generic_issue_data_html), None)], + "tags": event.tags, + 
"project_label": project.slug, + "commits": json.loads(COMMIT_EXAMPLE), + "issue_title": event.occurrence.issue_title, + "subtitle": event.occurrence.subtitle, + "culprit": event.occurrence.culprit, + }, + ).render(request) diff --git a/src/sentry/web/frontend/debug/mail.py b/src/sentry/web/frontend/debug/mail.py index 652ce5884c0f3d..049a253b26ad92 100644 --- a/src/sentry/web/frontend/debug/mail.py +++ b/src/sentry/web/frontend/debug/mail.py @@ -57,6 +57,7 @@ from sentry.testutils.helpers.datetime import before_now # NOQA:S007 from sentry.testutils.helpers.notifications import ( # NOQA:S007 SAMPLE_TO_OCCURRENCE_MAP, + TEST_FEEDBACK_ISSUE_OCCURENCE, TEST_ISSUE_OCCURRENCE, ) from sentry.types.group import GroupSubStatus @@ -105,6 +106,8 @@ } ]""" +REPLAY_ID = "9188182919744ea987d8e4e58f4a6dec" + def get_random(request) -> Random: seed = request.GET.get("seed", str(time.time())) @@ -250,6 +253,28 @@ def make_generic_event(project: Project): return generic_group.get_latest_event() +def make_feedback_issue(project): + event_id = uuid.uuid4().hex + occurrence_data = TEST_FEEDBACK_ISSUE_OCCURENCE.to_dict() + occurrence_data["event_id"] = event_id + occurrence_data["fingerprint"] = [ + md5(part.encode("utf-8")).hexdigest() for part in occurrence_data["fingerprint"] + ] + occurrence, group_info = process_event_and_issue_occurrence( + occurrence_data, + { + "event_id": event_id, + "project_id": project.id, + "timestamp": before_now(minutes=1).isoformat(), + "tags": [("logger", "javascript"), ("environment", "prod"), ("replayId", REPLAY_ID)], + }, + ) + if not group_info: + raise ValueError("No group found") + feedback_issue = group_info.group + return feedback_issue.get_latest_event() + + def get_shared_context(rule, org, project: Project, group, event): rules = get_rules([rule], org, project) snooze_alert = len(rules) > 0 @@ -423,9 +448,6 @@ def get(self, request: AuthenticatedHttpRequest) -> HttpResponse: ) -replay_id = "9188182919744ea987d8e4e58f4a6dec" - - @login_required def alert(request): random = get_random(request) @@ -463,7 +485,7 @@ def alert(request): "culprit": random.choice(["sentry.tasks.culprit.culprit", None]), "subtitle": random.choice(["subtitles are cool", None]), "issue_type": group.issue_type.description, - "replay_id": replay_id, + "replay_id": REPLAY_ID, "issue_replays_url": get_issue_replay_link(group, "?referrer=alert_email"), }, ).render(request) diff --git a/static/app/components/events/aiSuggestedSolution/suggestion.tsx b/static/app/components/events/aiSuggestedSolution/suggestion.tsx index 8826833d4aa4a2..419d52916a86c6 100644 --- a/static/app/components/events/aiSuggestedSolution/suggestion.tsx +++ b/static/app/components/events/aiSuggestedSolution/suggestion.tsx @@ -17,7 +17,7 @@ import type {Event, Project} from 'sentry/types'; import {trackAnalytics} from 'sentry/utils/analytics'; import {getAnalyticsDataForEvent} from 'sentry/utils/events'; import {isActiveSuperuser} from 'sentry/utils/isActiveSuperuser'; -import marked from 'sentry/utils/marked'; +import {limitedMarked} from 'sentry/utils/marked'; import {useApiQuery} from 'sentry/utils/queryClient'; import {useIsSentryEmployee} from 'sentry/utils/useIsSentryEmployee'; import useOrganization from 'sentry/utils/useOrganization'; @@ -182,7 +182,7 @@ export function Suggestion({onHideSuggestion, projectSlug, event}: Props) { ) : ( void}) { - const hasSteps = data.steps && data.steps.length > 0; - - const isDone = data.status !== 'PROCESSING'; - return ( - {t('Autofix')} + + {t('Autofix')} + + + - {hasSteps && 
!isDone ? : null} - {hasSteps && isDone ? : null} ); } @@ -26,7 +26,6 @@ export function AutofixCard({data, onRetry}: {data: AutofixData; onRetry: () => const Title = styled('div')` font-size: ${p => p.theme.fontSizeExtraLarge}; font-weight: bold; - margin-bottom: ${space(2)}; `; const AutofixPanel = styled(Panel)` @@ -35,3 +34,9 @@ const AutofixPanel = styled(Panel)` background: ${p => p.theme.backgroundSecondary}; padding: ${space(2)} ${space(3)} ${space(3)} ${space(3)}; `; + +const AutofixHeader = styled('div')` + display: grid; + grid-template-columns: 1fr auto; + margin-bottom: ${space(2)}; +`; diff --git a/static/app/components/events/autofix/autofixDiff.spec.tsx b/static/app/components/events/autofix/autofixDiff.spec.tsx index f67d39366c74d6..080525059d5d76 100644 --- a/static/app/components/events/autofix/autofixDiff.spec.tsx +++ b/static/app/components/events/autofix/autofixDiff.spec.tsx @@ -1,13 +1,13 @@ -import {AutofixResultFixture} from 'sentry-fixture/autofixResult'; +import {AutofixDiffFilePatch} from 'sentry-fixture/autofixDiffFilePatch'; -import {render, screen, within} from 'sentry-test/reactTestingLibrary'; +import {render, screen, userEvent, within} from 'sentry-test/reactTestingLibrary'; import {textWithMarkupMatcher} from 'sentry-test/utils'; import {AutofixDiff} from 'sentry/components/events/autofix/autofixDiff'; describe('AutofixDiff', function () { const defaultProps = { - fix: AutofixResultFixture(), + diff: [AutofixDiffFilePatch()], }; it('displays a modified file diff correctly', function () { @@ -18,6 +18,10 @@ describe('AutofixDiff', function () { screen.getByText('src/sentry/processing/backpressure/memory.py') ).toBeInTheDocument(); + // Lines changed + expect(screen.getByText('+1')).toBeInTheDocument(); + expect(screen.getByText('-1')).toBeInTheDocument(); + // Hunk section header expect( screen.getByText( @@ -50,4 +54,18 @@ describe('AutofixDiff', function () { // 6 context lines expect(screen.getAllByTestId('line-context')).toHaveLength(6); }); + + it('can collapse a file diff', async function () { + render(); + + expect(screen.getAllByTestId('line-context')).toHaveLength(6); + + // Clicking toggle hides file context + await userEvent.click(screen.getByRole('button', {name: 'Toggle file diff'})); + expect(screen.queryByTestId('line-context')).not.toBeInTheDocument(); + + // Clicking again shows file context + await userEvent.click(screen.getByRole('button', {name: 'Toggle file diff'})); + expect(screen.getAllByTestId('line-context')).toHaveLength(6); + }); }); diff --git a/static/app/components/events/autofix/autofixDiff.tsx b/static/app/components/events/autofix/autofixDiff.tsx index 634eafaca37202..2f1dd00ac68326 100644 --- a/static/app/components/events/autofix/autofixDiff.tsx +++ b/static/app/components/events/autofix/autofixDiff.tsx @@ -1,16 +1,20 @@ -import {Fragment, useMemo} from 'react'; +import {Fragment, useMemo, useState} from 'react'; import styled from '@emotion/styled'; import {type Change, diffWords} from 'diff'; +import {Button} from 'sentry/components/button'; import { - type AutofixResult, type DiffLine, DiffLineType, + type FilePatch, } from 'sentry/components/events/autofix/types'; +import InteractionStateLayer from 'sentry/components/interactionStateLayer'; +import {IconChevron} from 'sentry/icons'; +import {t} from 'sentry/locale'; import {space} from 'sentry/styles/space'; type AutofixDiffProps = { - fix: AutofixResult; + diff: FilePatch[]; }; interface DiffLineWithChanges extends DiffLine { @@ -112,28 +116,48 @@ function 
DiffHunkContent({lines, header}: {header: string; lines: DiffLine[]}) { ); } -export function AutofixDiff({fix}: AutofixDiffProps) { - if (!fix.diff) { +function FileDiff({file}: {file: FilePatch}) { + const [isExpanded, setIsExpanded] = useState(true); + + return ( + + setIsExpanded(value => !value)}> + + + +{file.added} + -{file.removed} + + {file.path} + - } - href={autofixData.fix.pr_url} - external - > - {t('View Pull Request')} - - + } + href={autofixData.fix.pr_url} + external + > + {t('View Pull Request')} + ); @@ -108,7 +102,7 @@ export function AutofixResult({autofixData, onRetry}: Props) { const ResultPanel = styled(Panel)` padding: ${space(2)}; - margin: 0; + margin: ${space(2)} 0 0 0; `; const PreviewContent = styled('div')` diff --git a/static/app/components/events/autofix/index.spec.tsx b/static/app/components/events/autofix/index.spec.tsx index c73f2410a48dc0..ab8631445ce34f 100644 --- a/static/app/components/events/autofix/index.spec.tsx +++ b/static/app/components/events/autofix/index.spec.tsx @@ -12,7 +12,7 @@ import type {EventMetadataWithAutofix} from 'sentry/components/events/autofix/ty const group = GroupFixture(); const event = EventFixture(); -describe('AiAutofix', () => { +describe('Autofix', () => { beforeEach(() => { MockApiClient.addMockResponse({ url: `/issues/${group.id}/ai-autofix/`, @@ -63,17 +63,62 @@ describe('AiAutofix', () => { /> ); - // Should show latest log preview in header - expect(await screen.findByText('Second log message')).toBeInTheDocument(); - // Others should not be visible - expect(screen.queryByText('First log message')).not.toBeInTheDocument(); + // Logs should be visible + expect(screen.getByText('First log message')).toBeInTheDocument(); + expect(screen.getByText('Second log message')).toBeInTheDocument(); - // Opening step shows all logs + // Toggling step hides old logs await userEvent.click(screen.getByRole('button', {name: 'Toggle step details'})); - expect(screen.getByText('First log message')).toBeInTheDocument(); + expect(screen.queryByText('First log message')).not.toBeInTheDocument(); + // Should show latest log preview in header expect(screen.getByText('Second log message')).toBeInTheDocument(); }); + it('can reset and try again while running', async () => { + const autofixData = AutofixDataFixture({ + steps: [ + AutofixStepFixture({ + id: '1', + progress: [ + AutofixProgressItemFixture({message: 'First log message'}), + AutofixProgressItemFixture({message: 'Second log message'}), + ], + }), + ], + }); + + MockApiClient.addMockResponse({ + url: `/issues/${group.id}/ai-autofix/`, + body: autofixData, + }); + + const triggerAutofixMock = MockApiClient.addMockResponse({ + url: `/issues/${group.id}/ai-autofix/`, + method: 'POST', + }); + + render( + + ); + + await userEvent.click(screen.getByRole('button', {name: 'Start Over'})); + + expect(screen.getByText('Try Autofix')).toBeInTheDocument(); + + // Clicking the fix button should show the initial state "Starting Autofix..." 
and call the api + await userEvent.click(screen.getByRole('button', {name: 'Gimme Fix'})); + expect(await screen.findByText('Starting Autofix...')).toBeInTheDocument(); + expect(triggerAutofixMock).toHaveBeenCalledTimes(1); + }); + it('renders the FixResult component when autofixData is present', () => { render( { 'https://github.com/pulls/1234' ); }); - - it('can toggle logs for completed fix', async () => { - render( - - ); - - await userEvent.click(screen.getByRole('button', {name: 'Toggle log details'})); - - expect(screen.getByText('I am processing')).toBeInTheDocument(); - expect(screen.getByText('oh yes I am')).toBeInTheDocument(); - }); }); diff --git a/static/app/components/events/eventTagsAndScreenshot/screenshot/screenshotPagination.tsx b/static/app/components/events/eventTagsAndScreenshot/screenshot/screenshotPagination.tsx index 9a178a9c9ea92a..d5c1d2f43f40b5 100644 --- a/static/app/components/events/eventTagsAndScreenshot/screenshot/screenshotPagination.tsx +++ b/static/app/components/events/eventTagsAndScreenshot/screenshot/screenshotPagination.tsx @@ -12,10 +12,12 @@ type Props = { onNext: ReactEventHandler; onPrevious: ReactEventHandler; previousDisabled: boolean; + className?: string; headerText?: React.ReactNode; }; function ScreenshotPagination({ + className, previousDisabled, nextDisabled, headerText, @@ -23,7 +25,7 @@ function ScreenshotPagination({ onNext, }: Props) { return ( - +