diff --git a/.eslintrc.js b/.eslintrc.js index 65f96764527b2..18f6abda6d7e8 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -89,6 +89,14 @@ module.exports = { message: "Use 'useLocation', 'useParams', 'useNavigate', 'useRoutes' from sentry/utils instead.", }, + { + name: 'qs', + message: 'Please use query-string instead of qs', + }, + { + name: 'moment', + message: 'Please import moment-timezone instead of moment', + }, ], }, ], diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4683234338a78..586fdd67e6c8d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -241,7 +241,7 @@ yarn.lock @getsentry/owners-js-de /tests/snuba/search/test_backend.py @getsentry/visibility -/src/sentry/search/events/ @getsentry/visibility @getsentry/issues +/src/sentry/search/events/ @getsentry/visibility /src/sentry/utils/performance_issues/ @getsentry/performance @@ -323,10 +323,10 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge ## End of Profiling -## Configurations -/src/sentry/remote_config/ @getsentry/replay-backend -/tests/sentry/remote_config/ @getsentry/replay-backend -## End of Configurations +## Flags +/src/sentry/flags/ @getsentry/replay-backend +/tests/sentry/flags/ @getsentry/replay-backend +## End of Flags ## Replays @@ -519,6 +519,8 @@ tests/sentry/api/endpoints/test_organization_dashboard_widget_details.py @ge /src/sentry/grouping/ @getsentry/issues /src/sentry/mediators/ @getsentry/issues /src/sentry/ratelimits/ @getsentry/issues +/src/sentry/search/events/builder/issue_platform.py @getsentry/issues +/src/sentry/search/events/builder/errors.py @getsentry/issues /src/sentry/search/snuba/ @getsentry/issues /src/sentry/seer/similarity/ @getsentry/issues /src/sentry/tasks/auto_ongoing_issues.py @getsentry/issues diff --git a/.github/actions/test-setup-sentry-devservices/action.yml b/.github/actions/test-setup-sentry-devservices/action.yml new file mode 100644 index 0000000000000..0995881e85bba --- /dev/null +++ b/.github/actions/test-setup-sentry-devservices/action.yml @@ -0,0 +1,119 @@ +# NOTE: Do not rely on `make` commands here as this action is used across different repos +# where the Makefile will not be available +name: 'Sentry Setup' +description: 'Sets up a Sentry test environment' +inputs: + workdir: + description: 'Directory where the sentry source is located' + required: false + default: '.' + +outputs: + yarn-cache-dir: + description: 'Path to yarn cache' + value: ${{ steps.config.outputs.yarn-cache-dir }} + matrix-instance-number: + description: 'The matrix instance number (starting at 1)' + value: ${{ steps.config.outputs.matrix-instance-number }} + matrix-instance-total: + description: 'Reexport of MATRIX_INSTANCE_TOTAL.' + value: ${{ steps.config.outputs.matrix-instance-total }} + +runs: + using: 'composite' + steps: + - name: Setup default environment variables + # the default for "bash" is: + # bash --noprofile --norc -eo pipefail {0} + shell: bash --noprofile --norc -eo pipefail -ux {0} + env: + MATRIX_INSTANCE: ${{ matrix.instance }} + # XXX: We should be using something like len(strategy.matrix.instance) (not possible atm) + # If you have other things like python-version: [foo, bar, baz] then the sharding logic + # isn't right because job-total will be 3x larger and you'd never run 2/3 of the tests. 
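+        # Worked example (hypothetical numbers): instance: [0, 1, 2, 3, 4] crossed with
+        # python-version: [a, b, c] would give job-total = 15, so TOTAL_TEST_GROUPS would
+        # be 15 while only groups 0-4 ever run -- i.e. 2/3 of the test groups are skipped.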
+ # MATRIX_INSTANCE_TOTAL: ${{ strategy.job-total }} + run: | + echo "PIP_DISABLE_PIP_VERSION_CHECK=on" >> $GITHUB_ENV + echo "PIP_INDEX_URL=https://pypi.devinfra.sentry.io/simple" >> $GITHUB_ENV + echo "SENTRY_SKIP_BACKEND_VALIDATION=1" >> $GITHUB_ENV + + ### node configuration ### + echo "NODE_ENV=development" >> $GITHUB_ENV + + ### pytest configuration ### + echo "PY_COLORS=1" >> "$GITHUB_ENV" + echo "PYTEST_ADDOPTS=--reruns=5 --durations=10 --fail-slow=60s" >> $GITHUB_ENV + echo "COVERAGE_CORE=sysmon" >> "$GITHUB_ENV" + + ### pytest-sentry configuration ### + if [ "$GITHUB_REPOSITORY" = "getsentry/sentry" ]; then + echo "PYTEST_SENTRY_DSN=https://6fd5cfea2d4d46b182ad214ac7810508@sentry.io/2423079" >> $GITHUB_ENV + echo "PYTEST_SENTRY_TRACES_SAMPLE_RATE=0" >> $GITHUB_ENV + + # This records failures on master to sentry in order to detect flakey tests, as it's + # expected that people have failing tests on their PRs + if [ "$GITHUB_REF" = "refs/heads/master" ]; then + echo "PYTEST_SENTRY_ALWAYS_REPORT=1" >> $GITHUB_ENV + fi + fi + + # Configure a different release version, otherwise it defaults to the + # commit sha which will conflict with our actual prod releases. This is a + # confusing experience because it looks like these are "empty" releases + # because no commits are attached and associates the release with our + # javascript + sentry projects. + echo "SENTRY_RELEASE=ci@$GITHUB_SHA" >> $GITHUB_ENV + + # this handles pytest test sharding + if [ "$MATRIX_INSTANCE" ]; then + if ! [ "${MATRIX_INSTANCE_TOTAL:-}" ]; then + echo "MATRIX_INSTANCE_TOTAL is required." + exit 1 + fi + echo "TEST_GROUP=$MATRIX_INSTANCE" >> $GITHUB_ENV + echo "TOTAL_TEST_GROUPS=$MATRIX_INSTANCE_TOTAL" >> $GITHUB_ENV + fi + + - uses: getsentry/action-setup-venv@a133e6fd5fa6abd3f590a1c106abda344f5df69f # v2.1.0 + with: + python-version: ${{ inputs.python-version }} + cache-dependency-path: ${{ inputs.workdir }}/requirements-dev-frozen.txt + install-cmd: cd ${{ inputs.workdir }} && python3 -m tools.hack_pip && pip install -r requirements-dev-frozen.txt + + - name: Set up outputs + id: config + env: + MATRIX_INSTANCE: ${{ matrix.instance }} + shell: bash --noprofile --norc -eo pipefail -ux {0} + run: | + echo "yarn-cache-dir=$(yarn cache dir)" >> "$GITHUB_OUTPUT" + echo "matrix-instance-number=$(($MATRIX_INSTANCE+1))" >> "$GITHUB_OUTPUT" + echo "matrix-instance-total=$((${MATRIX_INSTANCE_TOTAL:-}))" >> "$GITHUB_OUTPUT" + + - name: Install python dependencies + shell: bash --noprofile --norc -eo pipefail -ux {0} + env: + # This is necessary when other repositories (e.g. relay) want to take advantage of this workflow + # without needing to fork it. The path needed is the one where setup.py is located + WORKDIR: ${{ inputs.workdir }} + run: | + cd "$WORKDIR" + # We need to install editable otherwise things like check migration will fail. + python3 -m tools.fast_editable --path . + + - name: Start devservices + shell: bash --noprofile --norc -eo pipefail -ux {0} + env: + WORKDIR: ${{ inputs.workdir }} + ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1' + run: | + sentry init + + # have tests listen on the docker gateway ip so loopback can occur + echo "DJANGO_LIVE_TEST_SERVER_ADDRESS=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')" >> "$GITHUB_ENV" + + docker ps -a + + # This is necessary when other repositories (e.g. relay) want to take advantage of this workflow + # without needing to fork it. 
The path needed is the one where tools are located
+        cd "$WORKDIR"
diff --git a/.github/workflows/test_docker_compose_acceptance.yml b/.github/workflows/test_docker_compose_acceptance.yml
new file mode 100644
index 0000000000000..df15c17b9e273
--- /dev/null
+++ b/.github/workflows/test_docker_compose_acceptance.yml
@@ -0,0 +1,144 @@
+# Also note that this name *MUST* match the filename because GHA
+# only provides the workflow name (https://docs.github.com/en/free-pro-team@latest/actions/reference/environment-variables#default-environment-variables)
+# and GH APIs only support querying by workflow *FILENAME* (https://developer.github.com/v3/actions/workflows/#get-a-workflow)
+name: test-docker-compose-acceptance
+on:
+  schedule:
+    - cron: '30,0 * * * *'
+
+# Cancel in-progress workflows on pull_requests.
+# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+# hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359
+env:
+  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3
+  NODE_OPTIONS: '--max-old-space-size=4096'
+
+jobs:
+  docker-compose-acceptance:
+    name: docker-compose-acceptance
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    permissions:
+      contents: read
+      id-token: write
+    strategy:
+      # This avoids having to re-run every job because a single one failed, reducing resource
+      # usage and the risk that one of many runs turns red again (read: intermittent tests)
+      fail-fast: false
+      matrix:
+        # XXX: When updating this, make sure you also update MATRIX_INSTANCE_TOTAL.
+        instance: [0, 1, 2, 3, 4]
+        pg-version: ['14']
+    env:
+      # XXX: MATRIX_INSTANCE_TOTAL must be hardcoded to the length of strategy.matrix.instance.
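+      # Sketch of the assumed roundrobin selection (hypothetical, not the plugin's actual
+      # source): collected test i runs on this shard iff i % TOTAL_TEST_GROUPS == TEST_GROUP,
+      # so shard 2 of 5 would pick up tests 2, 7, 12, and so on.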
+      MATRIX_INSTANCE_TOTAL: 5
+      TEST_GROUP_STRATEGY: roundrobin
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        name: Checkout sentry
+
+      - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
+        id: setup-node
+        with:
+          node-version-file: '.volta.json'
+
+      - name: Step configurations
+        id: config
+        run: |
+          echo "webpack-path=.webpack_cache" >> "$GITHUB_OUTPUT"
+          echo "WEBPACK_CACHE_PATH=.webpack_cache" >> "$GITHUB_ENV"
+
+      - name: webpack cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0
+        with:
+          path: ${{ steps.config.outputs.webpack-path }}
+          key: ${{ runner.os }}-v2-webpack-cache-${{ hashFiles('webpack.config.ts') }}
+
+      - name: node_modules cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0
+        id: nodemodulescache
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node-modules-${{ hashFiles('yarn.lock', 'api-docs/yarn.lock', '.volta.json') }}
+
+      - name: Install JavaScript Dependencies
+        if: steps.nodemodulescache.outputs.cache-hit != 'true'
+        run: yarn install --frozen-lockfile
+
+      - name: webpack
+        env:
+          # this is fine to not have for forks; it shouldn't fail
+          SENTRY_WEBPACK_WEBHOOK_SECRET: ${{ secrets.SENTRY_WEBPACK_WEBHOOK_SECRET }}
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+          # should be set to either `true` or `false`
+          CODECOV_ENABLE_BA: true
+          GH_COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
+        run: |
+          yarn build-acceptance
+
+      - name: Build chartcuterie configuration module
+        run: |
+          make build-chartcuterie-config
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: copy chartcuterie config to devservices chartcuterie directory
+        run: |
+          ls config/chartcuterie
+          cp -r config/chartcuterie devservices
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse chartcuterie
+
+      - name: Run acceptance tests (#${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }})
+        run: make run-acceptance
+
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.acceptance.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+          matrix_instance_number: ${{ steps.setup.outputs.matrix-instance-number }}
+
+      # This job runs when FE or BE changes happen; however, we only upload coverage data for
+      # BE changes since it conflicts with codecov's carry forward functionality.
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        uses: ./.github/actions/artifacts
+        if: ${{ always() }}
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-acceptance-required-checks:
+    # this is a required check so we need this job to always run and report a status.
+    if: always()
+    name: Docker Compose Acceptance
+    needs: [docker-compose-acceptance]
+    runs-on: ubuntu-22.04
+    timeout-minutes: 3
+    steps:
+      - name: Check for failures
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: |
+          echo "One of the dependent jobs has failed. You may need to re-run it." && exit 1
diff --git a/.github/workflows/test_docker_compose_backend.yml b/.github/workflows/test_docker_compose_backend.yml
new file mode 100644
index 0000000000000..25491b3566ab7
--- /dev/null
+++ b/.github/workflows/test_docker_compose_backend.yml
@@ -0,0 +1,295 @@
+name: test-docker-compose-backend
+
+on:
+  schedule:
+    - cron: '30,0 * * * *'
+
+# Cancel in-progress workflows on pull_requests.
+# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+# hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359
+env:
+  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3
+
+jobs:
+  docker-compose-api-docs:
+    name: api docs test
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
+        id: setup-node
+        with:
+          node-version-file: '.volta.json'
+
+      - name: Setup sentry python env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse
+
+      - name: Run API docs tests
+        # install ts-node for ts build scripts to execute properly without potentially installing
+        # conflicting deps when running scripts locally
+        # see: https://github.com/getsentry/sentry/pull/32328/files
+        run: |
+          yarn add ts-node && make test-api-docs
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-backend-test:
+    name: backend test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 60
+    permissions:
+      contents: read
+      id-token: write
+    strategy:
+      # This avoids having to re-run every job because a single one failed, reducing resource
+      # usage and the risk that one of many runs turns red again (read: intermittent tests)
+      fail-fast: false
+      matrix:
+        # XXX: When updating this, make sure you also update MATRIX_INSTANCE_TOTAL.
+        instance: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        pg-version: ['14']
+
+    env:
+      # XXX: `MATRIX_INSTANCE_TOTAL` must be hardcoded to the length of `strategy.matrix.instance`.
+      # If this increases, make sure to also increase `flags.backend.after_n_builds` in `codecov.yml`.
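+      # (after_n_builds is how many coverage uploads codecov waits for before reporting
+      # a combined status, so it is expected to track the shard count here.)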
+      MATRIX_INSTANCE_TOTAL: 11
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          echo "BIGTABLE_EMULATOR_HOST=127.0.0.1:8086" >> $GITHUB_ENV
+          docker compose -f devservices/docker-compose-testing.yml up -d
+
+      - name: Run backend test (${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }})
+        run: |
+          make test-python-ci
+
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+          matrix_instance_number: ${{ steps.setup.outputs.matrix-instance-number }}
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-backend-migration-tests:
+    name: backend migration tests
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        pg-version: ['14']
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse
+
+      - name: Run tests
+        run: |
+          PYTEST_ADDOPTS="$PYTEST_ADDOPTS -m migrations --migrations --reruns 0" make test-python-ci
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-cli:
+    name: cli test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 10
+    strategy:
+      matrix:
+        pg-version: ['14']
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Run test
+        run: |
+          make test-cli
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-migration:
+    name: check migration
+    runs-on: ubuntu-22.04
+    strategy:
+      matrix:
+        pg-version: ['14']
+
+    steps:
+      - name: Checkout sentry
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Migration & lockfile checks
+        env:
+          SENTRY_LOG_LEVEL: ERROR
+          PGPASSWORD: postgres
+        run: |
+          ./.github/workflows/scripts/migration-check.sh
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  docker-compose-monolith-dbs:
+    name: monolith-dbs test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 20
+    permissions:
+      contents: read
+      id-token: write
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Run test
+        run: |
+          make test-monolith-dbs
+
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.monolith-dbs.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+      - name: Inspect failure
+        if: failure()
+        run: |
+          docker compose -f devservices/docker-compose-testing.yml ps
+          docker compose -f devservices/docker-compose-testing.yml logs --tail 1000
+
+  # This check runs once all dependent jobs have passed
+  # It symbolizes that all required Backend checks have successfully passed (or skipped)
+  # This step is the only required backend check
+  docker-compose-backend-required-check:
+    needs:
+      [
+        docker-compose-api-docs,
+        docker-compose-backend-test,
+        docker-compose-backend-migration-tests,
+        docker-compose-cli,
+        docker-compose-migration,
+        docker-compose-monolith-dbs,
+      ]
+    name: Docker Compose Backend
+    # This is necessary since a failed/skipped dependent job would cause this job to be skipped
+    if: always()
+    runs-on: ubuntu-22.04
+    steps:
+      # If any jobs we depend on fail, we will fail since this is a required check
+      # NOTE: A timeout is considered a failure
+      - name: Check for failures
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: |
+          echo "One of the dependent jobs has failed. You may need to re-run it."
&& exit 1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6f3f7701ec3ae..6ab5dca06423e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,7 +65,7 @@ repos: additional_dependencies: [packaging==21.3] - id: requirements-overrides name: use pinned archives (see comment in file) - stages: [commit] + stages: [pre-commit] language: pygrep entry: | (?x) @@ -157,14 +157,14 @@ repos: - id: python-check-blanket-type-ignore - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.24.1 + rev: 0.29.3 hooks: - id: check-github-actions - id: check-github-workflows args: [--verbose] - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v5.0.0 hooks: - id: check-case-conflict - id: check-executables-have-shebangs @@ -182,7 +182,7 @@ repos: args: [--pytest-test-first] - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.9.0.6 + rev: v0.10.0.1 hooks: - id: shellcheck types: [file] diff --git a/CHANGES b/CHANGES index 9dd28fdf199e2..1fae1885ccc85 100644 --- a/CHANGES +++ b/CHANGES @@ -1,3 +1,35 @@ +24.10.0 +------- + +### Various fixes & improvements + +- feat(releases): Split release commit from shared component (#78538) by @scttcper +- chore(alerts): Add info log when sending subscription update data to Seer (#79049) by @ceorourke +- feat(anomaly detection): add preview chart to new alert form (#78238) by @natemoo-re +- feat(issue-stream): Use stacked primary secondary counts designs (#79070) by @MichaelSun48 +- feat(alerts): Add new feature flag to enable EAP alerts (#78985) by @edwardgou-sentry +- feat(discover): Update EAP dataset and entity key for discover builders (#78967) by @edwardgou-sentry +- fix: add info to post process TypeErrors for debugging (#79099) by @mjq +- ref(ingest): annotate transaction consumer with spans (#79101) by @mjq +- ref(rr6): Replace many useRouter's with useNavigate's (#78804) by @evanpurkhiser +- chore(feedback): Analytics for error rendering feedback item (#78978) by @c298lee +- feat(explore): Linking to spans in traceview from all tables (#78984) by @Abdkhan14 +- fix(eap): Count takes arg (#79066) by @Zylphrex +- feat(quick-start): Add analytics code to the backend to track quick start completion (#79089) by @priscilawebdev +- feat(quick-start): Add new feature flag for the new updates (#79094) by @priscilawebdev +- fix(dashboard): OnDemand widget creation also includes transaction type (#79059) by @narsaynorath +- fix(dashboard): OnDemand extraction for Transaction widgets (#79055) by @narsaynorath +- feat(quick-start): Add analytics code to the frontend to track quick start completion (#79092) by @priscilawebdev +- ref(quick-start): Update 'project to set up' logic to default to the first project (#78460) by @priscilawebdev +- ref(onboarding): Add pnpm to express js install step (#79093) by @priscilawebdev +- feat(dynamic-sampling): add feature flag (#79084) by @constantinius +- fix(loader): Catch errors in `sentryOnLoad` separately (#78993) by @mydea +- ref(feedback): 401 for unauth'd POSTs to projectUserReports (#79069) by @aliu39 +- fix(issue-stream): reduce font size of events and user counts (#79028) by @MichaelSun48 +- fix(issue-stream): Fix bug where replay divider was shown despite no replays (#79068) by @MichaelSun48 + +_Plus 1020 more_ + 24.9.0 ------ diff --git a/api-docs/openapi.json b/api-docs/openapi.json index 0fe59a3fda185..da16c3a2bf31d 100644 --- a/api-docs/openapi.json +++ b/api-docs/openapi.json @@ -132,31 +132,19 @@ 
"/api/0/projects/{organization_id_or_slug}/{project_id_or_slug}/events/{event_id}/": { "$ref": "paths/events/project-event-details.json" }, - "/api/0/projects/{organization_id_or_slug}/{project_id_or_slug}/events/": { - "$ref": "paths/events/project-events.json" - }, "/api/0/projects/{organization_id_or_slug}/{project_id_or_slug}/issues/": { "$ref": "paths/events/project-issues.json" }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/tags/{key}/values/": { + "/api/0/issues/{issue_id}/tags/{key}/values/": { "$ref": "paths/events/tag-values.json" }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/tags/{key}/": { + "/api/0/issues/{issue_id}/tags/{key}/": { "$ref": "paths/events/tag-details.json" }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/hashes/": { + "/api/0/issues/{issue_id}/hashes/": { "$ref": "paths/events/issue-hashes.json" }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/events/oldest/": { - "$ref": "paths/events/oldest-event.json" - }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/events/latest/": { - "$ref": "paths/events/latest-event.json" - }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/events/": { - "$ref": "paths/events/issue-events.json" - }, - "/api/0/organizations/{organization_id_or_slug}/issues/{issue_id}/": { + "/api/0/issues/{issue_id}/": { "$ref": "paths/events/issue-details.json" }, "/api/0/organizations/{organization_id_or_slug}/releases/": { diff --git a/api-docs/paths/events/issue-details.json b/api-docs/paths/events/issue-details.json index 77d2be8803084..ac00d75f3c5fc 100644 --- a/api-docs/paths/events/issue-details.json +++ b/api-docs/paths/events/issue-details.json @@ -212,7 +212,7 @@ "properties": { "status": { "type": "string", - "description": "The new status for the issues. Valid values are `\"resolved\"`, `\"reprocessing\"`, `\"unresolved\"`, and `\"ignored\"`." + "description": "The new status for the issues. Valid values are `\"resolved\"`, `\"resolvedInNextRelease\"`, `\"unresolved\"`, and `\"ignored\"`." }, "statusDetails": { "type": "object", diff --git a/api-docs/paths/events/issue-events.json b/api-docs/paths/events/issue-events.json deleted file mode 100644 index 3e498fe26c7e9..0000000000000 --- a/api-docs/paths/events/issue-events.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "get": { - "tags": ["Events"], - "description": "This endpoint lists an issue's events.", - "operationId": "List an Issue's Events", - "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issues belongs to.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "issue_id", - "in": "path", - "description": "The ID of the issue to retrieve.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "full", - "in": "query", - "description": "If this is set to true then the event payload will include the full event body, including the stacktrace. 
\nSet to true to enable.", - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Success", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "../../components/schemas/event.json#/Event" - } - }, - "example": [ - { - "eventID": "9fac2ceed9344f2bbfdd1fdacb0ed9b1", - "tags": [ - { - "key": "browser", - "value": "Chrome 60.0" - }, - { - "key": "device", - "value": "Other" - }, - { - "key": "environment", - "value": "production" - }, - { - "value": "fatal", - "key": "level" - }, - { - "key": "os", - "value": "Mac OS X 10.12.6" - }, - { - "value": "CPython 2.7.16", - "key": "runtime" - }, - { - "key": "release", - "value": "17642328ead24b51867165985996d04b29310337" - }, - { - "key": "server_name", - "value": "web1.example.com" - } - ], - "dateCreated": "2020-09-11T17:46:36Z", - "user": null, - "message": "", - "title": "This is an example Python exception", - "id": "dfb1a2d057194e76a4186cc8a5271553", - "platform": "python", - "event.type": "error", - "groupID": "1889724436" - } - ] - } - } - }, - "403": { - "description": "Forbidden" - } - }, - "security": [ - { - "auth_token": ["event:read"] - } - ] - } -} diff --git a/api-docs/paths/events/issue-hashes.json b/api-docs/paths/events/issue-hashes.json index 6b3ba88548b6d..77acd0241f8cb 100644 --- a/api-docs/paths/events/issue-hashes.json +++ b/api-docs/paths/events/issue-hashes.json @@ -4,15 +4,6 @@ "description": "This endpoint lists an issue's hashes, which are the generated checksums used to aggregate individual events.", "operationId": "List an Issue's Hashes", "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issue belong to.", - "required": true, - "schema": { - "type": "string" - } - }, { "name": "issue_id", "in": "path", diff --git a/api-docs/paths/events/latest-event.json b/api-docs/paths/events/latest-event.json deleted file mode 100644 index 42f6adba6b139..0000000000000 --- a/api-docs/paths/events/latest-event.json +++ /dev/null @@ -1,547 +0,0 @@ -{ - "get": { - "tags": ["Events"], - "description": "Retrieves the details of the latest event for an issue.", - "operationId": "Retrieve the Latest Event for an Issue", - "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issue belong to.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "issue_id", - "in": "path", - "description": "The ID of the issue.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Success", - "content": { - "application/json": { - "schema": { - "$ref": "../../components/schemas/event.json#/EventDetailed" - }, - "example": { - "eventID": "9999aaaaca8b46d797c23c6077c6ff01", - "dist": null, - "userReport": null, - "previousEventID": null, - "message": "", - "title": "This is an example Python exception", - "id": "9999aaafcc8b46d797c23c6077c6ff01", - "size": 107762, - "errors": [ - { - "data": { - "column": 8, - "source": "https://s1.sentry-cdn.com/_static/bloopbloop/sentry/dist/app.js.map", - "row": 15 - }, - "message": "Invalid location in sourcemap", - "type": "js_invalid_sourcemap_location" - } - ], - "platform": "javascript", - "nextEventID": "99f9e199e9a74a14bfef6196ad741619", - "type": "error", - "metadata": { - "type": "ForbiddenError", - "value": "GET /organizations/hellboy-meowmeow/users/ 403" - }, - "tags": [ - { - "value": "Chrome 
83.0.4103", - "key": "browser", - "_meta": null - }, - { - "value": "Chrome", - "key": "browser.name", - "_meta": null - }, - { - "value": "prod", - "key": "environment", - "_meta": null - }, - { - "value": "yes", - "key": "handled", - "_meta": null - }, - { - "value": "error", - "key": "level", - "_meta": null - }, - { - "value": "generic", - "key": "mechanism", - "_meta": null - } - ], - "dateCreated": "2020-06-17T22:26:56.098086Z", - "dateReceived": "2020-06-17T22:26:56.428721Z", - "user": { - "username": null, - "name": "Hell Boy", - "ip_address": "192.168.1.1", - "email": "hell@boy.cat", - "data": { - "isStaff": false - }, - "id": "550747" - }, - "entries": [ - { - "type": "exception", - "data": { - "values": [ - { - "stacktrace": { - "frames": [ - { - "function": "ignoreOnError", - "errors": null, - "colNo": 23, - "vars": null, - "package": null, - "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", - "inApp": false, - "lineNo": 71, - "module": "usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers", - "filename": "/usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", - "platform": null, - "instructionAddr": null, - "context": [ - [66, " }"], - [ - 67, - " // Attempt to invoke user-land function" - ], - [ - 68, - " // NOTE: If you are a Sentry user, and you are seeing this stack frame, it" - ], - [ - 69, - " // means the sentry.javascript SDK caught an error invoking your application code. This" - ], - [ - 70, - " // is expected behavior and NOT indicative of a bug with sentry.javascript." - ], - [ - 71, - " return fn.apply(this, wrappedArguments);" - ], - [ - 72, - " // tslint:enable:no-unsafe-any" - ], - [73, " }"], - [74, " catch (ex) {"], - [75, " ignoreNextOnError();"], - [76, " withScope(function (scope) {"] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - }, - { - "function": "apply", - "errors": null, - "colNo": 24, - "vars": null, - "package": null, - "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", - "inApp": false, - "lineNo": 74, - "module": "usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods", - "filename": "/usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", - "platform": null, - "instructionAddr": null, - "context": [ - [69, " */"], - [ - 70, - " triggerAsync: function triggerAsync() {" - ], - [71, " var args = arguments,"], - [72, " me = this;"], - [73, " _.nextTick(function () {"], - [74, " me.trigger.apply(me, args);"], - [75, " });"], - [76, " },"], - [77, ""], - [78, " /**"], - [ - 79, - " * Wraps the trigger mechanism with a deferral function." 
- ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - } - ], - "framesOmitted": null, - "registers": null, - "hasSystemFrames": true - }, - "module": null, - "rawStacktrace": { - "frames": [ - { - "function": "a", - "errors": null, - "colNo": 88800, - "vars": null, - "package": null, - "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "inApp": false, - "lineNo": 81, - "module": null, - "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "platform": null, - "instructionAddr": null, - "context": [ - [76, "/*!"], - [77, " Copyright (c) 2018 Jed Watson."], - [ - 78, - " Licensed under the MIT License (MIT), see" - ], - [ - 79, - " http://jedwatson.github.io/react-select" - ], - [80, "*/"], - [ - 81, - "{snip} e,t)}));return e.handleEvent?e.handleEvent.apply(this,s):e.apply(this,s)}catch(e){throw c(),Object(o.m)((function(n){n.addEventProcessor((fu {snip}" - ], - [82, "/*!"], - [83, " * JavaScript Cookie v2.2.1"], - [ - 84, - " * https://github.com/js-cookie/js-cookie" - ], - [85, " *"], - [ - 86, - " * Copyright 2006, 2015 Klaus Hartl & Fagner Brack" - ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - }, - { - "function": null, - "errors": null, - "colNo": 149484, - "vars": null, - "package": null, - "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "inApp": false, - "lineNo": 119, - "module": null, - "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "platform": null, - "instructionAddr": null, - "context": [ - [114, "/* @license"], - [115, "Papa Parse"], - [116, "v5.2.0"], - [117, "https://github.com/mholt/PapaParse"], - [118, "License: MIT"], - [ - 119, - "{snip} (){var e=arguments,t=this;r.nextTick((function(){t.trigger.apply(t,e)}))},deferWith:function(e){var t=this.trigger,n=this,r=function(){t.app {snip}" - ], - [120, "/**!"], - [ - 121, - " * @fileOverview Kickass library to create and place poppers near their reference elements." 
- ], - [122, " * @version 1.16.1"], - [123, " * @license"], - [ - 124, - " * Copyright (c) 2016 Federico Zivolo and contributors" - ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - } - ], - "framesOmitted": null, - "registers": null, - "hasSystemFrames": true - }, - "mechanism": { - "type": "generic", - "handled": true - }, - "threadId": null, - "value": "GET /organizations/hellboy-meowmeow/users/ 403", - "type": "ForbiddenError" - } - ], - "excOmitted": null, - "hasSystemFrames": true - } - }, - { - "type": "breadcrumbs", - "data": { - "values": [ - { - "category": "tracing", - "level": "debug", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.266586Z", - "data": null, - "message": "[Tracing] pushActivity: idleTransactionStarted#1", - "type": "debug" - }, - { - "category": "xhr", - "level": "info", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.619446Z", - "data": { - "url": "/api/0/internal/health/", - "status_code": 200, - "method": "GET" - }, - "message": null, - "type": "http" - }, - { - "category": "sentry.transaction", - "level": "info", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.945016Z", - "data": null, - "message": "7787a027f3fb46c985aaa2287b3f4d09", - "type": "default" - } - ] - } - }, - { - "type": "request", - "data": { - "fragment": null, - "cookies": [], - "inferredContentType": null, - "env": null, - "headers": [ - [ - "User-Agent", - "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36" - ] - ], - "url": "https://sentry.io/organizations/hellboy-meowmeow/issues/", - "query": [["project", "5236886"]], - "data": null, - "method": null - } - } - ], - "packages": {}, - "sdk": { - "version": "5.17.0", - "name": "sentry.javascript.browser" - }, - "_meta": { - "user": null, - "context": null, - "entries": {}, - "contexts": null, - "message": null, - "packages": null, - "tags": {}, - "sdk": null - }, - "contexts": { - "ForbiddenError": { - "status": 403, - "statusText": "Forbidden", - "responseJSON": { - "detail": "You do not have permission to perform this action." - }, - "type": "default" - }, - "browser": { - "version": "83.0.4103", - "type": "browser", - "name": "Chrome" - }, - "os": { - "version": "10", - "type": "os", - "name": "Windows" - }, - "trace": { - "span_id": "83db1ad17e67dfe7", - "type": "trace", - "trace_id": "da6caabcd90e45fdb81f6655824a5f88", - "op": "navigation" - }, - "organization": { - "type": "default", - "id": "323938", - "slug": "hellboy-meowmeow" - } - }, - "fingerprints": ["fbe908cc63d63ea9763fd84cb6bad177"], - "context": { - "resp": { - "status": 403, - "responseJSON": { - "detail": "You do not have permission to perform this action." - }, - "name": "ForbiddenError", - "statusText": "Forbidden", - "message": "GET /organizations/hellboy-meowmeow/users/ 403", - "stack": "Error\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480441\n at u (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51006)\n at Generator._invoke (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:50794)\n at Generator.A.forEach.e. 
[as next] (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51429)\n at n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68684)\n at s (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68895)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68954\n at new Promise ()\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68835\n at v (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480924)\n at m (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480152)\n at t.fetchMemberList (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:902983)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:900527)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:15597)\n at Pc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:101023)\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Rc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:97371)\n at Oc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:87690)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45820\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Zo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45765)\n at Jo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45700)\n at gc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:84256)\n at Object.enqueueSetState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:50481)\n at t.M.setState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:173:1439)\n at t.onUpdate (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:543076)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at p.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at p.onInitializeUrlState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:541711)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at Function.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at 
https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149484\n at a (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:81:88800)" - } - }, - "release": { - "dateReleased": "2020-06-17T19:21:02.186004Z", - "newGroups": 4, - "commitCount": 11, - "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", - "data": {}, - "lastDeploy": { - "name": "b65bc521378269d3eaefdc964f8ef56621414943 to prod", - "url": null, - "environment": "prod", - "dateStarted": null, - "dateFinished": "2020-06-17T19:20:55.641748Z", - "id": "6883490" - }, - "deployCount": 1, - "dateCreated": "2020-06-17T18:45:31.042157Z", - "lastEvent": "2020-07-08T21:21:21Z", - "version": "b65bc521378269d3eaefdc964f8ef56621414943", - "firstEvent": "2020-06-17T22:25:14Z", - "lastCommit": { - "repository": { - "status": "active", - "integrationId": "2933", - "externalSlug": "getsentry/getsentry", - "name": "getsentry/getsentry", - "provider": { - "id": "integrations:github", - "name": "GitHub" - }, - "url": "https://github.com/getsentry/getsentry", - "id": "2", - "dateCreated": "2016-10-10T21:36:45.373994Z" - }, - "releases": [ - { - "dateReleased": "2020-06-23T13:26:18.427090Z", - "url": "https://freight.getsentry.net/deploys/getsentry/staging/2077/", - "dateCreated": "2020-06-23T13:22:50.420265Z", - "version": "f3783e5fe710758724f14267439fd46cc2bf5918", - "shortVersion": "f3783e5fe710758724f14267439fd46cc2bf5918", - "ref": "perf/source-maps-test" - }, - { - "dateReleased": "2020-06-17T19:21:02.186004Z", - "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", - "dateCreated": "2020-06-17T18:45:31.042157Z", - "version": "b65bc521378269d3eaefdc964f8ef56621414943", - "shortVersion": "b65bc521378269d3eaefdc964f8ef56621414943", - "ref": "master" - } - ], - "dateCreated": "2020-06-17T18:43:37Z", - "message": "feat(billing): Get a lot of money", - "id": "b65bc521378269d3eaefdc964f8ef56621414943" - }, - "shortVersion": "b65bc521378269d3eaefdc964f8ef56621414943", - "authors": [ - { - "username": "a37a1b4520ce46cea147ae2885a4e7e7", - "lastLogin": "2020-09-14T22:34:55.550640Z", - "isSuperuser": false, - "isManaged": false, - "experiments": {}, - "lastActive": "2020-09-15T22:13:20.503880Z", - "isStaff": false, - "id": "655784", - "isActive": true, - "has2fa": false, - "name": "hell.boy@sentry.io", - "avatarUrl": "https://secure.gravatar.com/avatar/eaa22e25b3a984659420831a77e4874e?s=32&d=mm", - "dateJoined": "2020-04-20T16:21:25.365772Z", - "emails": [ - { - "is_verified": false, - "id": "784574", - "email": "hellboy@gmail.com" - }, - { - "is_verified": true, - "id": "749185", - "email": "hell.boy@sentry.io" - } - ], - "avatar": { - "avatarUuid": null, - "avatarType": "letter_avatar" - }, - "hasPasswordAuth": false, - "email": "hell.boy@sentry.io" - } - ], - "owner": null, - "ref": "master", - "projects": [ - { - "name": "Sentry CSP", - "slug": "sentry-csp" - }, - { - "name": "Backend", - "slug": "sentry" - }, - { - "name": "Frontend", - "slug": "javascript" - } - ] - }, - "groupID": "1341191803" - } - } - } - }, - "403": { - "description": "Forbidden" - } - }, - "security": [ - { - "auth_token": ["event:read"] - } - ] - } -} diff --git a/api-docs/paths/events/oldest-event.json b/api-docs/paths/events/oldest-event.json deleted file mode 100644 index bcc625688907e..0000000000000 --- a/api-docs/paths/events/oldest-event.json +++ /dev/null @@ -1,547 +0,0 @@ -{ - "get": { - "tags": ["Events"], - "description": "Retrieves the details of the 
oldest event for an issue.", - "operationId": "Retrieve the Oldest Event for an Issue", - "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issue belong to.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "issue_id", - "in": "path", - "description": "The ID of the issue.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Success", - "content": { - "application/json": { - "schema": { - "$ref": "../../components/schemas/event.json#/EventDetailed" - }, - "example": { - "eventID": "9999aaaaca8b46d797c23c6077c6ff01", - "dist": null, - "userReport": null, - "previousEventID": null, - "message": "", - "title": "This is an example Python exception", - "id": "9999aaafcc8b46d797c23c6077c6ff01", - "size": 107762, - "errors": [ - { - "data": { - "column": 8, - "source": "https://s1.sentry-cdn.com/_static/bloopbloop/sentry/dist/app.js.map", - "row": 15 - }, - "message": "Invalid location in sourcemap", - "type": "js_invalid_sourcemap_location" - } - ], - "platform": "javascript", - "nextEventID": "99f9e199e9a74a14bfef6196ad741619", - "type": "error", - "metadata": { - "type": "ForbiddenError", - "value": "GET /organizations/hellboy-meowmeow/users/ 403" - }, - "tags": [ - { - "value": "Chrome 83.0.4103", - "key": "browser", - "_meta": null - }, - { - "value": "Chrome", - "key": "browser.name", - "_meta": null - }, - { - "value": "prod", - "key": "environment", - "_meta": null - }, - { - "value": "yes", - "key": "handled", - "_meta": null - }, - { - "value": "error", - "key": "level", - "_meta": null - }, - { - "value": "generic", - "key": "mechanism", - "_meta": null - } - ], - "dateCreated": "2020-06-17T22:26:56.098086Z", - "dateReceived": "2020-06-17T22:26:56.428721Z", - "user": { - "username": null, - "name": "Hell Boy", - "ip_address": "192.168.1.1", - "email": "hell@boy.cat", - "data": { - "isStaff": false - }, - "id": "550747" - }, - "entries": [ - { - "type": "exception", - "data": { - "values": [ - { - "stacktrace": { - "frames": [ - { - "function": "ignoreOnError", - "errors": null, - "colNo": 23, - "vars": null, - "package": null, - "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", - "inApp": false, - "lineNo": 71, - "module": "usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers", - "filename": "/usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", - "platform": null, - "instructionAddr": null, - "context": [ - [66, " }"], - [ - 67, - " // Attempt to invoke user-land function" - ], - [ - 68, - " // NOTE: If you are a Sentry user, and you are seeing this stack frame, it" - ], - [ - 69, - " // means the sentry.javascript SDK caught an error invoking your application code. This" - ], - [ - 70, - " // is expected behavior and NOT indicative of a bug with sentry.javascript." 
- ], - [ - 71, - " return fn.apply(this, wrappedArguments);" - ], - [ - 72, - " // tslint:enable:no-unsafe-any" - ], - [73, " }"], - [74, " catch (ex) {"], - [75, " ignoreNextOnError();"], - [76, " withScope(function (scope) {"] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - }, - { - "function": "apply", - "errors": null, - "colNo": 24, - "vars": null, - "package": null, - "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", - "inApp": false, - "lineNo": 74, - "module": "usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods", - "filename": "/usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", - "platform": null, - "instructionAddr": null, - "context": [ - [69, " */"], - [ - 70, - " triggerAsync: function triggerAsync() {" - ], - [71, " var args = arguments,"], - [72, " me = this;"], - [73, " _.nextTick(function () {"], - [74, " me.trigger.apply(me, args);"], - [75, " });"], - [76, " },"], - [77, ""], - [78, " /**"], - [ - 79, - " * Wraps the trigger mechanism with a deferral function." - ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - } - ], - "framesOmitted": null, - "registers": null, - "hasSystemFrames": true - }, - "module": null, - "rawStacktrace": { - "frames": [ - { - "function": "a", - "errors": null, - "colNo": 88800, - "vars": null, - "package": null, - "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "inApp": false, - "lineNo": 81, - "module": null, - "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "platform": null, - "instructionAddr": null, - "context": [ - [76, "/*!"], - [77, " Copyright (c) 2018 Jed Watson."], - [ - 78, - " Licensed under the MIT License (MIT), see" - ], - [ - 79, - " http://jedwatson.github.io/react-select" - ], - [80, "*/"], - [ - 81, - "{snip} e,t)}));return e.handleEvent?e.handleEvent.apply(this,s):e.apply(this,s)}catch(e){throw c(),Object(o.m)((function(n){n.addEventProcessor((fu {snip}" - ], - [82, "/*!"], - [83, " * JavaScript Cookie v2.2.1"], - [ - 84, - " * https://github.com/js-cookie/js-cookie" - ], - [85, " *"], - [ - 86, - " * Copyright 2006, 2015 Klaus Hartl & Fagner Brack" - ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - }, - { - "function": null, - "errors": null, - "colNo": 149484, - "vars": null, - "package": null, - "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "inApp": false, - "lineNo": 119, - "module": null, - "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", - "platform": null, - "instructionAddr": null, - "context": [ - [114, "/* @license"], - [115, "Papa Parse"], - [116, "v5.2.0"], - [117, "https://github.com/mholt/PapaParse"], - [118, "License: MIT"], - [ - 119, - "{snip} (){var e=arguments,t=this;r.nextTick((function(){t.trigger.apply(t,e)}))},deferWith:function(e){var t=this.trigger,n=this,r=function(){t.app {snip}" - ], - [120, "/**!"], - [ - 121, - " * @fileOverview Kickass library to create and place poppers near their reference elements." 
- ], - [122, " * @version 1.16.1"], - [123, " * @license"], - [ - 124, - " * Copyright (c) 2016 Federico Zivolo and contributors" - ] - ], - "symbolAddr": null, - "trust": null, - "symbol": null - } - ], - "framesOmitted": null, - "registers": null, - "hasSystemFrames": true - }, - "mechanism": { - "type": "generic", - "handled": true - }, - "threadId": null, - "value": "GET /organizations/hellboy-meowmeow/users/ 403", - "type": "ForbiddenError" - } - ], - "excOmitted": null, - "hasSystemFrames": true - } - }, - { - "type": "breadcrumbs", - "data": { - "values": [ - { - "category": "tracing", - "level": "debug", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.266586Z", - "data": null, - "message": "[Tracing] pushActivity: idleTransactionStarted#1", - "type": "debug" - }, - { - "category": "xhr", - "level": "info", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.619446Z", - "data": { - "url": "/api/0/internal/health/", - "status_code": 200, - "method": "GET" - }, - "message": null, - "type": "http" - }, - { - "category": "sentry.transaction", - "level": "info", - "event_id": null, - "timestamp": "2020-06-17T22:26:55.945016Z", - "data": null, - "message": "7787a027f3fb46c985aaa2287b3f4d09", - "type": "default" - } - ] - } - }, - { - "type": "request", - "data": { - "fragment": null, - "cookies": [], - "inferredContentType": null, - "env": null, - "headers": [ - [ - "User-Agent", - "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36" - ] - ], - "url": "https://sentry.io/organizations/hellboy-meowmeow/issues/", - "query": [["project", "5236886"]], - "data": null, - "method": null - } - } - ], - "packages": {}, - "sdk": { - "version": "5.17.0", - "name": "sentry.javascript.browser" - }, - "_meta": { - "user": null, - "context": null, - "entries": {}, - "contexts": null, - "message": null, - "packages": null, - "tags": {}, - "sdk": null - }, - "contexts": { - "ForbiddenError": { - "status": 403, - "statusText": "Forbidden", - "responseJSON": { - "detail": "You do not have permission to perform this action." - }, - "type": "default" - }, - "browser": { - "version": "83.0.4103", - "type": "browser", - "name": "Chrome" - }, - "os": { - "version": "10", - "type": "os", - "name": "Windows" - }, - "trace": { - "span_id": "83db1ad17e67dfe7", - "type": "trace", - "trace_id": "da6caabcd90e45fdb81f6655824a5f88", - "op": "navigation" - }, - "organization": { - "type": "default", - "id": "323938", - "slug": "hellboy-meowmeow" - } - }, - "fingerprints": ["fbe908cc63d63ea9763fd84cb6bad177"], - "context": { - "resp": { - "status": 403, - "responseJSON": { - "detail": "You do not have permission to perform this action." - }, - "name": "ForbiddenError", - "statusText": "Forbidden", - "message": "GET /organizations/hellboy-meowmeow/users/ 403", - "stack": "Error\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480441\n at u (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51006)\n at Generator._invoke (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:50794)\n at Generator.A.forEach.e. 
[as next] (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51429)\n at n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68684)\n at s (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68895)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68954\n at new Promise ()\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68835\n at v (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480924)\n at m (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480152)\n at t.fetchMemberList (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:902983)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:900527)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:15597)\n at Pc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:101023)\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Rc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:97371)\n at Oc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:87690)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45820\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Zo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45765)\n at Jo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45700)\n at gc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:84256)\n at Object.enqueueSetState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:50481)\n at t.M.setState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:173:1439)\n at t.onUpdate (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:543076)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at p.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at p.onInitializeUrlState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:541711)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at Function.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at 
https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149484\n at a (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:81:88800)" - } - }, - "release": { - "dateReleased": "2020-06-17T19:21:02.186004Z", - "newGroups": 4, - "commitCount": 11, - "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", - "data": {}, - "lastDeploy": { - "name": "b65bc521378269d3eaefdc964f8ef56621414943 to prod", - "url": null, - "environment": "prod", - "dateStarted": null, - "dateFinished": "2020-06-17T19:20:55.641748Z", - "id": "6883490" - }, - "deployCount": 1, - "dateCreated": "2020-06-17T18:45:31.042157Z", - "lastEvent": "2020-07-08T21:21:21Z", - "version": "b65bc521378269d3eaefdc964f8ef56621414943", - "firstEvent": "2020-06-17T22:25:14Z", - "lastCommit": { - "repository": { - "status": "active", - "integrationId": "2933", - "externalSlug": "getsentry/getsentry", - "name": "getsentry/getsentry", - "provider": { - "id": "integrations:github", - "name": "GitHub" - }, - "url": "https://github.com/getsentry/getsentry", - "id": "2", - "dateCreated": "2016-10-10T21:36:45.373994Z" - }, - "releases": [ - { - "dateReleased": "2020-06-23T13:26:18.427090Z", - "url": "https://freight.getsentry.net/deploys/getsentry/staging/2077/", - "dateCreated": "2020-06-23T13:22:50.420265Z", - "version": "f3783e5fe710758724f14267439fd46cc2bf5918", - "shortVersion": "f3783e5fe710758724f14267439fd46cc2bf5918", - "ref": "perf/source-maps-test" - }, - { - "dateReleased": "2020-06-17T19:21:02.186004Z", - "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", - "dateCreated": "2020-06-17T18:45:31.042157Z", - "version": "b65bc521378269d3eaefdc964f8ef56621414943", - "shortVersion": "b65bc521378269d3eaefdc964f8ef56621414943", - "ref": "master" - } - ], - "dateCreated": "2020-06-17T18:43:37Z", - "message": "feat(billing): Get a lot of money", - "id": "b65bc521378269d3eaefdc964f8ef56621414943" - }, - "shortVersion": "b65bc521378269d3eaefdc964f8ef56621414943", - "authors": [ - { - "username": "a37a1b4520ce46cea147ae2885a4e7e7", - "lastLogin": "2020-09-14T22:34:55.550640Z", - "isSuperuser": false, - "isManaged": false, - "experiments": {}, - "lastActive": "2020-09-15T22:13:20.503880Z", - "isStaff": false, - "id": "655784", - "isActive": true, - "has2fa": false, - "name": "hell.boy@sentry.io", - "avatarUrl": "https://secure.gravatar.com/avatar/eaa22e25b3a984659420831a77e4874e?s=32&d=mm", - "dateJoined": "2020-04-20T16:21:25.365772Z", - "emails": [ - { - "is_verified": false, - "id": "784574", - "email": "hellboy@gmail.com" - }, - { - "is_verified": true, - "id": "749185", - "email": "hell.boy@sentry.io" - } - ], - "avatar": { - "avatarUuid": null, - "avatarType": "letter_avatar" - }, - "hasPasswordAuth": false, - "email": "hell.boy@sentry.io" - } - ], - "owner": null, - "ref": "master", - "projects": [ - { - "name": "Sentry CSP", - "slug": "sentry-csp" - }, - { - "name": "Backend", - "slug": "sentry" - }, - { - "name": "Frontend", - "slug": "javascript" - } - ] - }, - "groupID": "1341191803" - } - } - } - }, - "403": { - "description": "Forbidden" - } - }, - "security": [ - { - "auth_token": ["event:read"] - } - ] - } -} diff --git a/api-docs/paths/events/project-events.json b/api-docs/paths/events/project-events.json deleted file mode 100644 index af3376c16af70..0000000000000 --- a/api-docs/paths/events/project-events.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "get": { - "tags": ["Events"], - "description": "Return a list of error 
events bound to a project.", - "operationId": "List a Project's Error Events", - "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the events belong to.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "project_id_or_slug", - "in": "path", - "description": "The ID or slug of the project the events belong to.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "full", - "in": "query", - "description": "If this is set to true then the event payload will include the full event body, including the stacktrace. \nSet to true to enable.", - "schema": { - "type": "boolean" - } - }, - { - "$ref": "../../components/parameters/pagination-cursor.json#/PaginationCursor" - } - ], - "responses": { - "200": { - "description": "Success", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "../../components/schemas/event.json#/Event" - } - }, - "example": [ - { - "eventID": "9fac2ceed9344f2bbfdd1fdacb0ed9b1", - "tags": [ - { - "key": "browser", - "value": "Chrome 60.0" - }, - { - "key": "device", - "value": "Other" - }, - { - "key": "environment", - "value": "production" - }, - { - "value": "fatal", - "key": "level" - }, - { - "key": "os", - "value": "Mac OS X 10.12.6" - }, - { - "value": "CPython 2.7.16", - "key": "runtime" - }, - { - "key": "release", - "value": "17642328ead24b51867165985996d04b29310337" - }, - { - "key": "server_name", - "value": "web1.example.com" - } - ], - "dateCreated": "2020-09-11T17:46:36Z", - "user": null, - "message": "", - "title": "This is an example Python exception", - "id": "dfb1a2d057194e76a4186cc8a5271553", - "platform": "python", - "event.type": "error", - "groupID": "1889724436" - } - ] - } - } - }, - "403": { - "description": "Forbidden" - } - }, - "security": [ - { - "auth_token": ["project:read"] - } - ] - } -} diff --git a/api-docs/paths/events/project-issues.json b/api-docs/paths/events/project-issues.json index 25baa3d6a81fc..1fab060ca1d6e 100644 --- a/api-docs/paths/events/project-issues.json +++ b/api-docs/paths/events/project-issues.json @@ -1,7 +1,7 @@ { "get": { "tags": ["Events"], - "description": "Return a list of issues (groups) bound to a project. All parameters are supplied as query string parameters. \n\n A default query of ``is:unresolved`` is applied. To return results with other statuses send an new query value (i.e. ``?query=`` for all results).\n\nThe ``statsPeriod`` parameter can be used to select the timeline stats which should be present. Possible values are: ``\"\"`` (disable),``\"24h\"`` (default), ``\"14d\"``", + "description": "Return a list of issues (groups) bound to a project. All parameters are supplied as query string parameters. \n\n A default query of ``is:unresolved`` is applied. To return results with other statuses send a new query value (i.e. ``?query=`` for all results).\n\nThe ``statsPeriod`` parameter can be used to select the timeline stats which should be present.
Possible values are: ``\"\"`` (disable),``\"24h\"`` (default), ``\"14d\"``\n\nUser feedback items from the [User Feedback Widget](https://docs.sentry.io/product/user-feedback/#user-feedback-widget) are built off the issue platform, so to return a list of user feedback items for a specific project, filter for `issue.category:feedback`.", "operationId": "List a Project's Issues", "parameters": [ { diff --git a/api-docs/paths/events/tag-details.json b/api-docs/paths/events/tag-details.json index bd286ac3fc57c..d2b1cc93ba44e 100644 --- a/api-docs/paths/events/tag-details.json +++ b/api-docs/paths/events/tag-details.json @@ -4,15 +4,6 @@ "description": "Returns details for given tag key related to an issue.", "operationId": "Retrieve Tag Details", "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issue belongs to.", - "required": true, - "schema": { - "type": "string" - } - }, { "name": "issue_id", "in": "path", diff --git a/api-docs/paths/events/tag-values.json b/api-docs/paths/events/tag-values.json index f25210f905342..323b3d33bc8f8 100644 --- a/api-docs/paths/events/tag-values.json +++ b/api-docs/paths/events/tag-values.json @@ -4,15 +4,6 @@ "description": "Returns details for given tag key related to an issue. \n\nWhen [paginated](/api/pagination) can return at most 1000 values.", "operationId": "List a Tag's Values Related to an Issue", "parameters": [ - { - "name": "organization_id_or_slug", - "in": "path", - "description": "The ID or slug of the organization the issue belongs to.", - "required": true, - "schema": { - "type": "string" - } - }, { "name": "issue_id", "in": "path", diff --git a/api-docs/paths/projects/user-feedback.json b/api-docs/paths/projects/user-feedback.json index 4f52258237a19..deeaa6bf8a23c 100644 --- a/api-docs/paths/projects/user-feedback.json +++ b/api-docs/paths/projects/user-feedback.json @@ -1,7 +1,7 @@ { "get": { "tags": ["Projects"], - "description": "Return a list of user feedback items within this project.\n\n*This list does not include submissions from the [User Feedback Widget](https://docs.sentry.io/product/user-feedback/#user-feedback-widget). This is because it is based on an older format called User Reports - read more [here](https://develop.sentry.dev/application/feedback-architecture/#user-reports).*", + "description": "Return a list of user feedback items within this project.\n\n*This list does not include submissions from the [User Feedback Widget](https://docs.sentry.io/product/user-feedback/#user-feedback-widget). This is because it is based on an older format called User Reports - read more [here](https://develop.sentry.dev/application/feedback-architecture/#user-reports). To return a list of user feedback items from the widget, please use the [issue API](https://docs.sentry.io/api/events/list-a-projects-issues/) with the filter `issue.category:feedback`.*", "operationId": "List a Project's User Feedback", "parameters": [ { diff --git a/biome.json b/biome.json index a28089f817376..90eb877cb2f90 100644 --- a/biome.json +++ b/biome.json @@ -39,9 +39,7 @@ "noRestrictedImports": { "level": "warn", "options": { - "paths": { - "react-router": "Do not import from react-router.
While we transition to 6 there are shims to import from" - } + "paths": {} } } }, diff --git a/devenv/config.ini b/devenv/config.ini index 4eb73da0f3d85..fe11192399824 100644 --- a/devenv/config.ini +++ b/devenv/config.ini @@ -1,3 +1,6 @@ +[devenv] +minimum_version = 1.13.0 + [venv.sentry] python = 3.12.6 path = .venv @@ -51,6 +54,18 @@ linux_arm64_sha256 = 6ecba675e90d154f22e20200fa5684f20ad1495b73c0462f1bd7da4e9d0 # used for autoupdate version = v0.6.6 +[lima] +darwin_x86_64 = https://github.com/lima-vm/lima/releases/download/v0.19.1/lima-0.19.1-Darwin-x86_64.tar.gz +darwin_x86_64_sha256 = ac8827479f66ef1b288b31f164b22f6433faa14c44ce5bbebe09e6e913582479 +darwin_arm64 = https://github.com/lima-vm/lima/releases/download/v0.19.1/lima-0.19.1-Darwin-arm64.tar.gz +darwin_arm64_sha256 = 0dfcf3a39782baf1c2ea43cf026f8df0321c671d914c105fbb78de507aa8bda4 +linux_x86_64 = https://github.com/lima-vm/lima/releases/download/v0.19.1/lima-0.19.1-Linux-x86_64.tar.gz +linux_x86_64_sha256 = 7d18b1716aae14bf98d6ea93a703e8877b0c3142f7ba2e87401d47d5d0fe3ff1 +linux_arm64 = https://github.com/lima-vm/lima/releases/download/v0.19.1/lima-0.19.1-Linux-aarch64.tar.gz +linux_arm64_sha256 = c55e57ddbefd9988d0f3676bb873bcc6e0f7b3c3d47a1f07599ee151c5198d96 +# used for autoupdate +version = 0.19.1 + # kept here only for compatibility with older `devenv` [python] version = 3.12.6 diff --git a/devenv/sync.py b/devenv/sync.py index 8b3c18dd663c5..b822d2c864e06 100644 --- a/devenv/sync.py +++ b/devenv/sync.py @@ -1,5 +1,6 @@ from __future__ import annotations +import importlib.metadata import os import shlex import subprocess @@ -70,7 +71,33 @@ def run_procs( return all_good + +# Temporary, see https://github.com/getsentry/sentry/pull/78881 +def check_minimum_version(minimum_version: str): + version = importlib.metadata.version("sentry-devenv") + + parsed_version = tuple(map(int, version.split("."))) + parsed_minimum_version = tuple(map(int, minimum_version.split("."))) + + if parsed_version < parsed_minimum_version: + raise SystemExit( + f""" +Hi! To reduce potential breakage we've defined a minimum +devenv version ({minimum_version}) to run sync. + +Please run the following to update your global devenv to the minimum: + +{constants.root}/venv/bin/pip install -U 'sentry-devenv=={minimum_version}' + +Then, use it to run sync this one time. + +{constants.root}/bin/devenv sync +""" + ) + + def main(context: dict[str, str]) -> int: + check_minimum_version("1.13.0") + repo = context["repo"] reporoot = context["reporoot"] repo_config = config.get_config(f"{reporoot}/devenv/config.ini") @@ -82,20 +109,15 @@ def main(context: dict[str, str]) -> int: # repo-local devenv needs to update itself first with a successful sync # so it'll take 2 syncs to get onto devenv-managed node, it is what it is - try: - from devenv.lib import node + from devenv.lib import node - node.install( - repo_config["node"]["version"], - repo_config["node"][constants.SYSTEM_MACHINE], - repo_config["node"][f"{constants.SYSTEM_MACHINE}_sha256"], - reporoot, - ) - node.install_yarn(repo_config["node"]["yarn_version"], reporoot) - except ImportError: - from devenv.lib import volta - - volta.install(reporoot) + node.install( + repo_config["node"]["version"], + repo_config["node"][constants.SYSTEM_MACHINE], + repo_config["node"][f"{constants.SYSTEM_MACHINE}_sha256"], + reporoot, + ) + node.install_yarn(repo_config["node"]["yarn_version"], reporoot) # no more imports from devenv past this point!
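Why check_minimum_version() in the devenv/sync.py hunk above parses versions into integer tuples rather than comparing strings: lexicographic comparison mis-orders multi-digit components. A minimal, self-contained sketch; parse_version is a hypothetical name, sync.py inlines the same tuple(map(int, ...)) expression.

def parse_version(version: str) -> tuple[int, ...]:
    # hypothetical helper mirroring sync.py's tuple(map(int, version.split(".")))
    return tuple(map(int, version.split(".")))

# Lexicographic string comparison mis-orders multi-digit components...
assert "1.9.0" > "1.13.0"  # wrong order: '9' sorts after '1' character-wise
# ...while integer tuples compare numerically, as intended.
assert parse_version("1.9.0") < parse_version("1.13.0")

# Caveat: a suffix like "1.13.0rc1" would raise ValueError here, which is
# acceptable for the plain X.Y.Z versions devenv pins.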
if the venv is recreated # then we won't have access to devenv libs until it gets reinstalled @@ -108,27 +130,18 @@ def main(context: dict[str, str]) -> int: venv.ensure(venv_dir, python_version, url, sha256) if constants.DARWIN: - try: - colima.install( - repo_config["colima"]["version"], - repo_config["colima"][constants.SYSTEM_MACHINE], - repo_config["colima"][f"{constants.SYSTEM_MACHINE}_sha256"], - reporoot, - ) - except TypeError: - # this is needed for devenv <=1.4.0,>1.2.3 to finish syncing and therefore update itself - colima.install( - repo_config["colima"]["version"], - repo_config["colima"][constants.SYSTEM_MACHINE], - repo_config["colima"][f"{constants.SYSTEM_MACHINE}_sha256"], - ) - - # TODO: move limactl version into per-repo config - try: - limactl.install(reporoot) - except TypeError: - # this is needed for devenv <=1.4.0,>1.2.3 to finish syncing and therefore update itself - limactl.install() + colima.install( + repo_config["colima"]["version"], + repo_config["colima"][constants.SYSTEM_MACHINE], + repo_config["colima"][f"{constants.SYSTEM_MACHINE}_sha256"], + reporoot, + ) + limactl.install( + repo_config["lima"]["version"], + repo_config["lima"][constants.SYSTEM_MACHINE], + repo_config["lima"][f"{constants.SYSTEM_MACHINE}_sha256"], + reporoot, + ) if not run_procs( repo, diff --git a/devservices/clickhouse/config.xml b/devservices/clickhouse/config.xml new file mode 100644 index 0000000000000..327d60661b29d --- /dev/null +++ b/devservices/clickhouse/config.xml @@ -0,0 +1,6 @@ + + 0.3 + + 1 + + diff --git a/devservices/docker-compose-testing.yml b/devservices/docker-compose-testing.yml new file mode 100644 index 0000000000000..aa0ddafe656bb --- /dev/null +++ b/devservices/docker-compose-testing.yml @@ -0,0 +1,282 @@ +x-restart-policy: &restart_policy + restart: unless-stopped +x-depends_on-healthy: &depends_on-healthy + condition: service_healthy +x-depends_on-default: &depends_on-default + condition: service_started +x-healthcheck-defaults: &healthcheck_defaults + interval: 30s + timeout: 1m30s + retries: 10 + start_period: 10s +services: + redis: + <<: *restart_policy + container_name: sentry_redis + image: ghcr.io/getsentry/image-mirror-library-redis:5.0-alpine + healthcheck: + <<: *healthcheck_defaults + test: redis-cli ping + command: + [ + 'redis-server', + '--appendonly', + 'yes', + '--save', + '60', + '20', + '--auto-aof-rewrite-percentage', + '100', + '--auto-aof-rewrite-min-size', + '64mb', + ] + volumes: + - 'sentry-redis:/data' + ports: + - '6379:6379' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + postgres: + <<: *restart_policy + container_name: sentry_postgres + # Using the same postgres version as Sentry dev for consistency purposes + image: 'ghcr.io/getsentry/image-mirror-library-postgres:14-alpine' + healthcheck: + <<: *healthcheck_defaults + # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided + test: ['CMD-SHELL', 'pg_isready -U ${POSTGRES_USER:-postgres}'] + 'command': + [ + 'postgres', + '-c', + 'wal_level=logical', + '-c', + 'max_replication_slots=1', + '-c', + 'max_wal_senders=1', + ] + environment: + POSTGRES_HOST_AUTH_METHOD: 'trust' + POSTGRES_DB: 'sentry' + volumes: + - 'sentry-postgres:/var/lib/postgresql/data' + ports: + - '5432:5432' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + kafka: + <<: *restart_policy + image: 'ghcr.io/getsentry/image-mirror-confluentinc-cp-kafka:7.5.0' + container_name: sentry_kafka + environment: + # 
https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@127.0.0.1:29093' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_NODE_ID: '1' + CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' + KAFKA_LISTENERS: 'PLAINTEXT://0.0.0.0:29092,INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://127.0.0.1:29092,INTERNAL://kafka:9093,EXTERNAL://127.0.0.1:9092' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT' + KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1' + KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: '1' + KAFKA_LOG_RETENTION_HOURS: '24' + KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust + KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too + volumes: + - 'sentry-kafka:/var/lib/kafka/data' + - 'sentry-kafka-log:/var/lib/kafka/log' + healthcheck: + <<: *healthcheck_defaults + test: ['CMD-SHELL', 'nc -z localhost 9092'] + interval: 10s + timeout: 10s + retries: 30 + ports: + - '9092:9092' + - '9093:9093' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + clickhouse: + <<: *restart_policy + container_name: sentry_clickhouse + image: 'ghcr.io/getsentry/image-mirror-altinity-clickhouse-server:23.3.19.33.altinitystable' + ulimits: + nofile: + soft: 262144 + hard: 262144 + volumes: + - 'sentry-clickhouse:/var/lib/clickhouse' + - 'sentry-clickhouse-log:/var/log/clickhouse-server' + - type: bind + read_only: true + source: ./clickhouse/config.xml + target: /etc/clickhouse-server/config.d/sentry.xml + healthcheck: + test: [ + 'CMD-SHELL', + # Manually override any http_proxy envvar that might be set, because + # this wget does not support no_proxy. 
See: + # https://github.com/getsentry/self-hosted/issues/1537 + "http_proxy='' wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", + ] + interval: 10s + timeout: 10s + retries: 30 + ports: + - '8123:8123' + - '9000:9000' + - '9009:9009' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + symbolicator: + <<: *restart_policy + container_name: sentry_symbolicator + image: 'us-central1-docker.pkg.dev/sentryio/symbolicator/image:nightly' + volumes: + - 'sentry-symbolicator:/data' + - type: bind + read_only: true + source: ./symbolicator + target: /etc/symbolicator + command: run -c /etc/symbolicator/config.yml + ports: + - '3021:3021' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + vroom: + <<: *restart_policy + container_name: sentry_vroom + image: 'us-central1-docker.pkg.dev/sentryio/vroom/vroom:latest' + environment: + SENTRY_KAFKA_BROKERS_PROFILING: 'sentry_kafka:9092' + SENTRY_KAFKA_BROKERS_OCCURRENCES: 'sentry_kafka:9092' + SENTRY_BUCKET_PROFILES: file://localhost//var/lib/sentry-profiles + SENTRY_SNUBA_HOST: 'http://snuba-api:1218' + volumes: + - sentry-vroom:/var/lib/sentry-profiles + depends_on: + kafka: + <<: *depends_on-healthy + ports: + - '8085:8085' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + snuba: + <<: *restart_policy + container_name: sentry_snuba + image: ghcr.io/getsentry/snuba:latest + ports: + - '1218:1218' + - '1219:1219' + networks: + - sentry + command: ['devserver'] + environment: + PYTHONUNBUFFERED: '1' + SNUBA_SETTINGS: docker + DEBUG: '1' + CLICKHOUSE_HOST: 'clickhouse' + CLICKHOUSE_PORT: '9000' + CLICKHOUSE_HTTP_PORT: '8123' + DEFAULT_BROKERS: 'kafka:9093' + REDIS_HOST: 'redis' + REDIS_PORT: '6379' + REDIS_DB: '1' + ENABLE_SENTRY_METRICS_DEV: '${ENABLE_SENTRY_METRICS_DEV:-}' + ENABLE_PROFILES_CONSUMER: '${ENABLE_PROFILES_CONSUMER:-}' + ENABLE_SPANS_CONSUMER: '${ENABLE_SPANS_CONSUMER:-}' + ENABLE_ISSUE_OCCURRENCE_CONSUMER: '${ENABLE_ISSUE_OCCURRENCE_CONSUMER:-}' + ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1' + ENABLE_GROUP_ATTRIBUTES_CONSUMER: '${ENABLE_GROUP_ATTRIBUTES_CONSUMER:-}' + platform: linux/amd64 + depends_on: + - kafka + - redis + - clickhouse + extra_hosts: + host.docker.internal: host-gateway + bigtable: + <<: *restart_policy + container_name: sentry_bigtable + image: 'us.gcr.io/sentryio/cbtemulator:23c02d92c7a1747068eb1fc57dddbad23907d614' + ports: + - '8086:8086' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + redis-cluster: + <<: *restart_policy + container_name: sentry_redis-cluster + image: ghcr.io/getsentry/docker-redis-cluster:7.0.10 + ports: + - '7000:7000' + - '7001:7001' + - '7002:7002' + - '7003:7003' + - '7004:7004' + - '7005:7005' + networks: + - sentry + volumes: + - sentry-redis-cluster:/redis-data + environment: + - IP=0.0.0.0 + chartcuterie: + <<: *restart_policy + container_name: sentry_chartcuterie + image: 'us-central1-docker.pkg.dev/sentryio/chartcuterie/image:latest' + environment: + CHARTCUTERIE_CONFIG: /etc/chartcuterie/config.js + CHARTCUTERIE_CONFIG_POLLING: true + volumes: + - ./chartcuterie:/etc/chartcuterie + ports: + - '7901:9090' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + healthcheck: + <<: *healthcheck_defaults + # Verify chartcuterie is live by hitting its healthcheck endpoint + test: + [ + 'CMD-SHELL', + 'docker exec sentry_chartcuterie python3 -c "import urllib.request;
urllib.request.urlopen(\"http://127.0.0.1:9090/api/chartcuterie/healthcheck/live\", timeout=5)"', + ] + +volumes: + # These store application data that should persist across restarts. + sentry-data: + sentry-postgres: + sentry-redis: + sentry-redis-cluster: + sentry-kafka: + sentry-clickhouse: + sentry-symbolicator: + # This volume stores profiles and should be persisted. + # Not being external will still persist data across restarts. + # It won't persist if someone does a docker compose down -v. + sentry-vroom: + sentry-kafka-log: + sentry-clickhouse-log: + +networks: + sentry: + name: sentry + external: true diff --git a/devservices/symbolicator/config.yml b/devservices/symbolicator/config.yml new file mode 100644 index 0000000000000..290d752a6dd04 --- /dev/null +++ b/devservices/symbolicator/config.yml @@ -0,0 +1,11 @@ +bind: '0.0.0.0:3021' +logging: + level: 'debug' + format: 'pretty' + enable_backtraces: true + +# explicitly disable caches as it's not something we want in tests. in +# development it may be less ideal. perhaps we should do the same thing as we +# do with relay one day (one container per test/session), although that will be +# slow +cache_dir: null diff --git a/fixtures/apidocs_test_case.py b/fixtures/apidocs_test_case.py index 80aba059aaf56..11098075dfb6a 100644 --- a/fixtures/apidocs_test_case.py +++ b/fixtures/apidocs_test_case.py @@ -8,7 +8,7 @@ from openapi_core.validation.response.validators import V30ResponseDataValidator from sentry.testutils.cases import APITestCase -from sentry.testutils.helpers.datetime import before_now, iso_format +from sentry.testutils.helpers.datetime import before_now from sentry.testutils.skips import requires_snuba @@ -41,7 +41,7 @@ def create_event(self, name, **kwargs): "event_id": (name * 32)[:32], "fingerprint": ["1"], "sdk": {"version": "5.17.0", "name": "sentry.javascript.browser"}, - "timestamp": iso_format(before_now(seconds=1)), + "timestamp": before_now(seconds=1).isoformat(), "user": {"id": self.user.id, "email": self.user.email}, "release": name, } diff --git a/fixtures/backup/model_dependencies/detailed.json b/fixtures/backup/model_dependencies/detailed.json index abe8107c5dfee..61f451c6b04dd 100644 --- a/fixtures/backup/model_dependencies/detailed.json +++ b/fixtures/backup/model_dependencies/detailed.json @@ -31,6 +31,24 @@ ] ] }, + "flags.flagauditlogmodel": { + "dangling": false, + "foreign_keys": { + "organization_id": { + "kind": "HybridCloudForeignKey", + "model": "sentry.organization", + "nullable": false + } + }, + "model": "flags.flagauditlogmodel", + "relocation_dependencies": [], + "relocation_scope": "Excluded", + "silos": [ + "Region" + ], + "table_name": "flags_audit_log", + "uniques": [] + }, "hybridcloud.apikeyreplica": { "dangling": false, "foreign_keys": { @@ -1389,6 +1407,28 @@ ] ] }, + "sentry.dashboardpermissions": { + "dangling": false, + "foreign_keys": { + "dashboard": { + "kind": "DefaultOneToOneField", + "model": "sentry.dashboard", + "nullable": false + } + }, + "model": "sentry.dashboardpermissions", + "relocation_dependencies": [], + "relocation_scope": "Organization", + "silos": [ + "Region" + ], + "table_name": "sentry_dashboardpermissions", + "uniques": [ + [ + "dashboard" + ] + ] + }, "sentry.dashboardproject": { "dangling": false, "foreign_keys": { @@ -2334,6 +2374,11 @@ "kind": "DefaultOneToOneField", "model": "sentry.grouphash", "nullable": false + }, + "seer_matched_grouphash": { + "kind": "FlexibleForeignKey", + "model": "sentry.grouphash", + "nullable": true } }, "model": 
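The fixtures/apidocs_test_case.py hunk above replaces the iso_format() helper with a direct stdlib datetime.isoformat() call. A small sketch of the new spelling; the before_now stand-in below is an assumption made only so the snippet runs on its own (the real helper lives in sentry.testutils.helpers.datetime and is assumed to return a timezone-aware UTC datetime):

from datetime import datetime, timedelta, timezone

def before_now(**kwargs) -> datetime:
    # stand-in for sentry.testutils.helpers.datetime.before_now
    return datetime.now(timezone.utc) - timedelta(**kwargs)

# New spelling used by the fixture: no wrapper, just the datetime method.
timestamp = before_now(seconds=1).isoformat()
print(timestamp)  # e.g. "2024-10-01T12:00:00.123456+00:00"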
"sentry.grouphashmetadata", @@ -6166,6 +6211,11 @@ "uptime.projectuptimesubscription": { "dangling": false, "foreign_keys": { + "environment": { + "kind": "FlexibleForeignKey", + "model": "sentry.environment", + "nullable": true + }, "owner_team": { "kind": "FlexibleForeignKey", "model": "sentry.team", @@ -6218,6 +6268,77 @@ ] ] }, + "workflow_engine.action": { + "dangling": false, + "foreign_keys": {}, + "model": "workflow_engine.action", + "relocation_dependencies": [], + "relocation_scope": "Excluded", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_action", + "uniques": [] + }, + "workflow_engine.datacondition": { + "dangling": false, + "foreign_keys": { + "condition_group": { + "kind": "DefaultForeignKey", + "model": "workflow_engine.dataconditiongroup", + "nullable": false + } + }, + "model": "workflow_engine.datacondition", + "relocation_dependencies": [], + "relocation_scope": "Organization", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_datacondition", + "uniques": [] + }, + "workflow_engine.dataconditiongroup": { + "dangling": false, + "foreign_keys": { + "organization": { + "kind": "DefaultForeignKey", + "model": "sentry.organization", + "nullable": false + } + }, + "model": "workflow_engine.dataconditiongroup", + "relocation_dependencies": [], + "relocation_scope": "Organization", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_dataconditiongroup", + "uniques": [] + }, + "workflow_engine.dataconditiongroupaction": { + "dangling": false, + "foreign_keys": { + "action": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.action", + "nullable": false + }, + "condition_group": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.dataconditiongroup", + "nullable": false + } + }, + "model": "workflow_engine.dataconditiongroupaction", + "relocation_dependencies": [], + "relocation_scope": "Excluded", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_dataconditiongroupaction", + "uniques": [] + }, "workflow_engine.datasource": { "dangling": false, "foreign_keys": { @@ -6281,6 +6402,11 @@ "kind": "HybridCloudForeignKey", "model": "sentry.user", "nullable": true + }, + "workflow_condition_group": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.dataconditiongroup", + "nullable": true } }, "model": "workflow_engine.detector", @@ -6294,9 +6420,53 @@ [ "name", "organization" + ], + [ + "workflow_condition_group" ] ] }, + "workflow_engine.detectorstate": { + "dangling": false, + "foreign_keys": { + "detector": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.detector", + "nullable": false + } + }, + "model": "workflow_engine.detectorstate", + "relocation_dependencies": [], + "relocation_scope": "Organization", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_detectorstate", + "uniques": [] + }, + "workflow_engine.detectorworkflow": { + "dangling": false, + "foreign_keys": { + "detector": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.detector", + "nullable": false + }, + "workflow": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.workflow", + "nullable": false + } + }, + "model": "workflow_engine.detectorworkflow", + "relocation_dependencies": [], + "relocation_scope": "Organization", + "silos": [ + "Region" + ], + "table_name": "workflow_engine_detectorworkflow", + "uniques": [] + }, "workflow_engine.workflow": { "dangling": false, "foreign_keys": { @@ -6304,6 +6474,11 @@ "kind": "FlexibleForeignKey", "model": "sentry.organization", "nullable": false 
+ }, + "when_condition_group": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.dataconditiongroup", + "nullable": true } }, "model": "workflow_engine.workflow", @@ -6320,22 +6495,31 @@ ] ] }, - "workflow_engine.workflowaction": { + "workflow_engine.workflowdataconditiongroup": { "dangling": false, "foreign_keys": { + "condition_group": { + "kind": "FlexibleForeignKey", + "model": "workflow_engine.dataconditiongroup", + "nullable": false + }, "workflow": { "kind": "FlexibleForeignKey", "model": "workflow_engine.workflow", "nullable": false } }, - "model": "workflow_engine.workflowaction", + "model": "workflow_engine.workflowdataconditiongroup", "relocation_dependencies": [], "relocation_scope": "Organization", "silos": [ "Region" ], - "table_name": "workflow_engine_workflowaction", - "uniques": [] + "table_name": "workflow_engine_workflowdataconditiongroup", + "uniques": [ + [ + "condition_group" + ] + ] } } \ No newline at end of file diff --git a/fixtures/backup/model_dependencies/flat.json b/fixtures/backup/model_dependencies/flat.json index c64d01b2bc452..7b85fa11b8bbd 100644 --- a/fixtures/backup/model_dependencies/flat.json +++ b/fixtures/backup/model_dependencies/flat.json @@ -4,6 +4,9 @@ "sentry.organization", "sentry.project" ], + "flags.flagauditlogmodel": [ + "sentry.organization" + ], "hybridcloud.apikeyreplica": [ "sentry.apikey", "sentry.organization" @@ -190,6 +193,9 @@ "sentry.organization", "sentry.user" ], + "sentry.dashboardpermissions": [ + "sentry.dashboard" + ], "sentry.dashboardproject": [ "sentry.dashboard", "sentry.project" @@ -850,12 +856,24 @@ "sentry.user" ], "uptime.projectuptimesubscription": [ + "sentry.environment", "sentry.project", "sentry.team", "sentry.user", "uptime.uptimesubscription" ], "uptime.uptimesubscription": [], + "workflow_engine.action": [], + "workflow_engine.datacondition": [ + "workflow_engine.dataconditiongroup" + ], + "workflow_engine.dataconditiongroup": [ + "sentry.organization" + ], + "workflow_engine.dataconditiongroupaction": [ + "workflow_engine.action", + "workflow_engine.dataconditiongroup" + ], "workflow_engine.datasource": [ "sentry.organization" ], @@ -866,12 +884,22 @@ "workflow_engine.detector": [ "sentry.organization", "sentry.team", - "sentry.user" + "sentry.user", + "workflow_engine.dataconditiongroup" + ], + "workflow_engine.detectorstate": [ + "workflow_engine.detector" + ], + "workflow_engine.detectorworkflow": [ + "workflow_engine.detector", + "workflow_engine.workflow" ], "workflow_engine.workflow": [ - "sentry.organization" + "sentry.organization", + "workflow_engine.dataconditiongroup" ], - "workflow_engine.workflowaction": [ + "workflow_engine.workflowdataconditiongroup": [ + "workflow_engine.dataconditiongroup", "workflow_engine.workflow" ] } \ No newline at end of file diff --git a/fixtures/backup/model_dependencies/sorted.json b/fixtures/backup/model_dependencies/sorted.json index 84116e0c856f5..a7d2fee2ab05d 100644 --- a/fixtures/backup/model_dependencies/sorted.json +++ b/fixtures/backup/model_dependencies/sorted.json @@ -48,11 +48,17 @@ "sentry.userroleuser", "social_auth.usersocialauth", "uptime.uptimesubscription", + "workflow_engine.action", + "workflow_engine.dataconditiongroup", + "workflow_engine.dataconditiongroupaction", "workflow_engine.datasource", "workflow_engine.detector", + "workflow_engine.detectorstate", "workflow_engine.workflow", - "workflow_engine.workflowaction", + "workflow_engine.workflowdataconditiongroup", + "workflow_engine.detectorworkflow", 
"workflow_engine.datasourcedetector", + "workflow_engine.datacondition", "sentry.savedsearch", "sentry.relocation", "sentry.recentsearch", @@ -102,6 +108,7 @@ "hybridcloud.organizationslugreservationreplica", "hybridcloud.externalactorreplica", "hybridcloud.apikeyreplica", + "flags.flagauditlogmodel", "feedback.feedback", "uptime.projectuptimesubscription", "sentry.useroption", @@ -151,6 +158,7 @@ "sentry.debugidartifactbundle", "sentry.dashboardwidget", "sentry.dashboardproject", + "sentry.dashboardpermissions", "sentry.customdynamicsamplingruleproject", "sentry.commitfilechange", "sentry.broadcastseen", diff --git a/fixtures/backup/model_dependencies/truncate.json b/fixtures/backup/model_dependencies/truncate.json index c236755389b59..714940ccaebb7 100644 --- a/fixtures/backup/model_dependencies/truncate.json +++ b/fixtures/backup/model_dependencies/truncate.json @@ -48,11 +48,17 @@ "sentry_userrole_users", "social_auth_usersocialauth", "uptime_uptimesubscription", + "workflow_engine_action", + "workflow_engine_dataconditiongroup", + "workflow_engine_dataconditiongroupaction", "workflow_engine_datasource", "workflow_engine_detector", + "workflow_engine_detectorstate", "workflow_engine_workflow", - "workflow_engine_workflowaction", + "workflow_engine_workflowdataconditiongroup", + "workflow_engine_detectorworkflow", "workflow_engine_datasourcedetector", + "workflow_engine_datacondition", "sentry_savedsearch", "sentry_relocation", "sentry_recentsearch", @@ -102,6 +108,7 @@ "hybridcloud_organizationslugreservationreplica", "hybridcloud_externalactorreplica", "hybridcloud_apikeyreplica", + "flags_audit_log", "feedback_feedback", "uptime_projectuptimesubscription", "sentry_useroption", @@ -151,6 +158,7 @@ "sentry_debugidartifactbundle", "sentry_dashboardwidget", "sentry_dashboardproject", + "sentry_dashboardpermissions", "sentry_customdynamicsamplingruleproject", "sentry_commitfilechange", "sentry_broadcastseen", diff --git a/src/sentry/api/endpoints/integrations/sentry_apps/internal_app_token/__init__.py b/fixtures/safe_migrations_apps/run_sql_app/__init__.py similarity index 100% rename from src/sentry/api/endpoints/integrations/sentry_apps/internal_app_token/__init__.py rename to fixtures/safe_migrations_apps/run_sql_app/__init__.py diff --git a/fixtures/safe_migrations_apps/run_sql_app/migrations/0001_initial.py b/fixtures/safe_migrations_apps/run_sql_app/migrations/0001_initial.py new file mode 100644 index 0000000000000..1f566fa78ac91 --- /dev/null +++ b/fixtures/safe_migrations_apps/run_sql_app/migrations/0001_initial.py @@ -0,0 +1,30 @@ +# Generated by Django 3.1 on 2019-09-22 21:47 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="TestTable", + fields=[ + ( + "id", + models.AutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("field", models.IntegerField(null=True)), + ], + ), + ] diff --git a/fixtures/safe_migrations_apps/run_sql_app/migrations/0002_run_sql.py b/fixtures/safe_migrations_apps/run_sql_app/migrations/0002_run_sql.py new file mode 100644 index 0000000000000..c8c76653c28cd --- /dev/null +++ b/fixtures/safe_migrations_apps/run_sql_app/migrations/0002_run_sql.py @@ -0,0 +1,23 @@ +from django.db import migrations + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + + dependencies 
= [ + ("run_sql_app", "0001_initial"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + database_operations=[ + migrations.RunSQL( + """ALTER TABLE "run_sql_app_testtable" DROP COLUMN "field";""", + reverse_sql="""ALTER TABLE "run_sql_app_testtable" ADD COLUMN "field" int NULL;""", + hints={"tables": ["run_sql_app_testtable"]}, + ) + ], + state_operations=[migrations.RemoveField("testtable", "field")], + ) + ] diff --git a/fixtures/safe_migrations_apps/run_sql_app/migrations/0003_add_col.py b/fixtures/safe_migrations_apps/run_sql_app/migrations/0003_add_col.py new file mode 100644 index 0000000000000..59d7c9343c3a3 --- /dev/null +++ b/fixtures/safe_migrations_apps/run_sql_app/migrations/0003_add_col.py @@ -0,0 +1,14 @@ +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + + dependencies = [ + ("run_sql_app", "0002_run_sql"), + ] + + operations = [ + migrations.AddField("testtable", "field", models.IntegerField(null=True)), + ] diff --git a/src/sentry/api/endpoints/integrations/sentry_apps/stats/__init__.py b/fixtures/safe_migrations_apps/run_sql_app/migrations/__init__.py similarity index 100% rename from src/sentry/api/endpoints/integrations/sentry_apps/stats/__init__.py rename to fixtures/safe_migrations_apps/run_sql_app/migrations/__init__.py diff --git a/fixtures/safe_migrations_apps/run_sql_app/models.py b/fixtures/safe_migrations_apps/run_sql_app/models.py new file mode 100644 index 0000000000000..fdd098a365453 --- /dev/null +++ b/fixtures/safe_migrations_apps/run_sql_app/models.py @@ -0,0 +1,5 @@ +from django.db import models + + +class TestTable(models.Model): + field = models.IntegerField(default=0) diff --git a/jest.config.ts b/jest.config.ts index 2298e04af2ed4..1bec1edcf29f1 100644 --- a/jest.config.ts +++ b/jest.config.ts @@ -15,15 +15,8 @@ const { GITHUB_PR_REF, GITHUB_RUN_ID, GITHUB_RUN_ATTEMPT, - USING_YARN_TEST, } = process.env; -if (USING_YARN_TEST === undefined) { - // eslint-disable-next-line no-console - console.error('Do not run `jest` directly, use `yarn test` instead!'); - process.exit(); -} - const IS_MASTER_BRANCH = GITHUB_PR_REF === 'refs/heads/master'; const BALANCE_RESULTS_PATH = path.resolve( diff --git a/migrations_lockfile.txt b/migrations_lockfile.txt index 05d67e8cc4579..27f992dc08ccf 100644 --- a/migrations_lockfile.txt +++ b/migrations_lockfile.txt @@ -10,7 +10,7 @@ hybridcloud: 0016_add_control_cacheversion nodestore: 0002_nodestore_no_dictfield remote_subscriptions: 0003_drop_remote_subscription replays: 0004_index_together -sentry: 0765_add_org_to_api_auth +sentry: 0777_add_related_name_to_dashboard_permissions social_auth: 0002_default_auto_field -uptime: 0013_uptime_subscription_new_unique -workflow_engine: 0005_data_source_detector +uptime: 0017_unique_on_timeout +workflow_engine: 0009_detector_type diff --git a/package.json b/package.json index e8cb6ce69cfed..681187faf4dd0 100644 --- a/package.json +++ b/package.json @@ -54,17 +54,16 @@ "@react-types/shared": "^3.24.1", "@rsdoctor/webpack-plugin": "0.4.4", "@sentry-internal/global-search": "^1.0.0", - "@sentry-internal/react-inspector": "6.0.1-4", "@sentry-internal/rrweb": "2.26.0", "@sentry-internal/rrweb-player": "2.26.0", "@sentry-internal/rrweb-snapshot": "2.26.0", - "@sentry/core": "^8.28.0", - "@sentry/node": "^8.28.0", - "@sentry/react": "^8.28.0", + "@sentry/core": "^8.35.0-beta.0", + "@sentry/node": "^8.35.0-beta.0", + "@sentry/react": "^8.35.0-beta.0", "@sentry/release-parser": 
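The run_sql_app fixture above (0002_run_sql.py) exercises Django's SeparateDatabaseAndState pattern: the column is dropped with raw SQL while state_operations record the equivalent model-state change, so the follow-up migration 0003_add_col can re-add the field against a consistent in-memory state. A generic sketch of the shape, with hypothetical table and model names:

from django.db import migrations

operations = [
    migrations.SeparateDatabaseAndState(
        # What actually executes against the database:
        database_operations=[
            migrations.RunSQL(
                'ALTER TABLE "example_table" DROP COLUMN "field";',
                reverse_sql='ALTER TABLE "example_table" ADD COLUMN "field" int NULL;',
                # hints tell database routers and safety checks which
                # table the otherwise-opaque SQL touches
                hints={"tables": ["example_table"]},
            )
        ],
        # What Django records as having happened to the model state:
        state_operations=[migrations.RemoveField("exampletable", "field")],
    )
]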
"^1.3.1", "@sentry/status-page-list": "^0.3.0", - "@sentry/types": "^8.28.0", - "@sentry/utils": "^8.28.0", + "@sentry/types": "^8.35.0-beta.0", + "@sentry/utils": "^8.35.0-beta.0", "@sentry/webpack-plugin": "^2.22.4", "@spotlightjs/spotlight": "^2.0.0-alpha.1", "@tanstack/react-query": "^5.56.2", @@ -73,6 +72,7 @@ "@types/color": "^3.0.3", "@types/diff": "5.2.1", "@types/dompurify": "^3.0.5", + "@types/history": "^3.2.5", "@types/invariant": "^2.2.35", "@types/jest": "29.5.12", "@types/js-beautify": "^1.14.3", @@ -88,7 +88,6 @@ "@types/react-grid-layout": "^1.3.2", "@types/react-lazyload": "3.2.3", "@types/react-mentions": "4.1.13", - "@types/react-router": "^3.0.28", "@types/react-select": "4.0.18", "@types/react-sparklines": "^1.7.2", "@types/react-virtualized": "^9.21.22", @@ -142,7 +141,7 @@ "papaparse": "^5.3.2", "pegjs": "^0.10.0", "pegjs-loader": "^0.5.8", - "platformicons": "^6.0.1", + "platformicons": "^7.0.1", "po-catalog-loader": "2.1.0", "prettier": "3.3.2", "prismjs": "^1.29.0", @@ -157,8 +156,7 @@ "react-lazyload": "^3.2.1", "react-mentions": "4.4.10", "react-popper": "^2.3.0", - "react-router": "3.2.6", - "react-router-dom": "^6.23.0", + "react-router-dom": "^6.26.2", "react-select": "4.3.1", "react-sparklines": "1.7.0", "react-virtualized": "^9.22.5", @@ -169,8 +167,8 @@ "style-loader": "^3.3.4", "terser-webpack-plugin": "^5.3.10", "ts-node": "^10.9.2", - "tslib": "^2.6.3", - "typescript": "^5.5.2", + "tslib": "^2.7.0", + "typescript": "^5.6.3", "u2f-api": "1.0.10", "url-loader": "^4.1.1", "webpack": "5.94.0", @@ -183,7 +181,7 @@ "@codecov/webpack-plugin": "^1.2.0", "@pmmmwh/react-refresh-webpack-plugin": "0.5.15", "@sentry/jest-environment": "6.0.0", - "@sentry/profiling-node": "^8.28.0", + "@sentry/profiling-node": "^8.35.0-beta.0", "@styled/typescript-styled-plugin": "^1.0.1", "@testing-library/dom": "10.1.0", "@testing-library/jest-dom": "6.4.5", @@ -193,8 +191,8 @@ "babel-gettext-extractor": "^4.1.3", "babel-jest": "29.7.0", "benchmark": "^2.1.4", - "eslint": "8.57.0", - "eslint-config-sentry-app": "2.8.0", + "eslint": "8.57.1", + "eslint-config-sentry-app": "2.9.0", "html-webpack-plugin": "^5.6.0", "jest": "29.7.0", "jest-canvas-mock": "^2.5.2", @@ -247,6 +245,7 @@ "build-js-loader": "ts-node scripts/build-js-loader.ts", "validate-api-examples": "yarn --cwd api-docs openapi-examples-validator ../tests/apidocs/openapi-derefed.json --no-additional-properties", "mkcert-localhost": "mkcert -key-file config/localhost-key.pem -cert-file config/localhost.pem localhost 127.0.0.1 dev.getsentry.net *.dev.getsentry.net && mkcert -install", + "https-proxy": "caddy run --config - <<< '{\"apps\":{\"http\":{\"servers\":{\"srv0\":{\"listen\":[\":8003\"],\"routes\":[{\"handle\":[{\"handler\":\"reverse_proxy\",\"upstreams\":[{\"dial\":\"localhost:8000\"}]}]}],\"tls_connection_policies\":[{\"certificate_selection\":{\"any_tag\":[\"cert0\"]}}]}}},\"tls\":{\"certificates\":{\"load_files\":[{\"certificate\":\"./config/localhost.pem\",\"key\":\"./config/localhost-key.pem\",\"tags\":[\"cert0\"]}]}}}}'", "extract-ios-device-names": "ts-node scripts/extract-ios-device-names.ts" }, "browserslist": { diff --git a/pyproject.toml b/pyproject.toml index 078d2f3629a71..67dfbf9ece345 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,6 @@ module = [ "sentry.api.bases.organizationmember", "sentry.api.bases.project", "sentry.api.bases.project_request_change", - "sentry.api.bases.sentryapps", "sentry.api.bases.team", "sentry.api.endpoints.accept_organization_invite", 
"sentry.api.endpoints.auth_config", @@ -136,8 +135,6 @@ module = [ "sentry.api.endpoints.group_integration_details", "sentry.api.endpoints.group_integrations", "sentry.api.endpoints.index", - "sentry.api.endpoints.integrations.sentry_apps.internal_app_token.index", - "sentry.api.endpoints.integrations.sentry_apps.stats.details", "sentry.api.endpoints.internal.mail", "sentry.api.endpoints.organization_details", "sentry.api.endpoints.organization_events", @@ -194,14 +191,11 @@ module = [ "sentry.api.serializers.models.project", "sentry.api.serializers.models.role", "sentry.api.serializers.models.rule", - "sentry.api.serializers.models.sentry_app", "sentry.api.serializers.models.team", "sentry.api.serializers.rest_framework.mentions", "sentry.api.serializers.rest_framework.notification_action", "sentry.api.serializers.rest_framework.rule", - "sentry.api.serializers.rest_framework.sentry_app_request", "sentry.api.serializers.snuba", - "sentry.api.validators.email", "sentry.auth.helper", "sentry.auth.provider", "sentry.auth.system", @@ -210,8 +204,6 @@ module = [ "sentry.db.router", "sentry.discover.endpoints.discover_key_transactions", "sentry.eventstore.models", - "sentry.features.handler", - "sentry.features.manager", "sentry.grouping.strategies.legacy", "sentry.identity.bitbucket.provider", "sentry.identity.github_enterprise.provider", @@ -254,7 +246,6 @@ module = [ "sentry.integrations.jira_server.client", "sentry.integrations.jira_server.integration", "sentry.integrations.metric_alerts", - "sentry.integrations.mixins.notifications", "sentry.integrations.msteams.actions.form", "sentry.integrations.msteams.client", "sentry.integrations.msteams.integration", @@ -299,7 +290,6 @@ module = [ "sentry.notifications.notifications.activity.base", "sentry.notifications.notifications.activity.release", "sentry.notifications.notifications.integration_nudge", - "sentry.ownership.grammar", "sentry.pipeline.base", "sentry.pipeline.views.base", "sentry.pipeline.views.nested", @@ -335,7 +325,6 @@ module = [ "sentry.sentry_apps.installations", "sentry.sentry_metrics.indexer.postgres.postgres_v2", "sentry.shared_integrations.client.proxy", - "sentry.similarity.features", "sentry.snuba.errors", "sentry.snuba.issue_platform", "sentry.snuba.metrics.datasource", @@ -348,16 +337,9 @@ module = [ "sentry.tagstore.types", "sentry.tasks.auth", "sentry.tasks.base", - "sentry.tasks.process_buffer", - "sentry.tasks.sentry_apps", - "sentry.templatetags.sentry_assets", - "sentry.templatetags.sentry_helpers", - "sentry.templatetags.sentry_plugins", "sentry.testutils.cases", "sentry.testutils.fixtures", - "sentry.testutils.helpers.features", "sentry.testutils.helpers.notifications", - "sentry.testutils.helpers.slack", "sentry.utils.auth", "sentry.utils.committers", "sentry.utils.services", @@ -367,14 +349,6 @@ module = [ "sentry.web.frontend.auth_logout", "sentry.web.frontend.auth_organization_login", "sentry.web.frontend.base", - "sentry.web.frontend.debug.debug_codeowners_auto_sync_failure_email", - "sentry.web.frontend.debug.debug_incident_activity_email", - "sentry.web.frontend.debug.debug_incident_trigger_email", - "sentry.web.frontend.debug.debug_mfa_added_email", - "sentry.web.frontend.debug.debug_mfa_removed_email", - "sentry.web.frontend.debug.debug_organization_integration_request", - "sentry.web.frontend.debug.debug_organization_invite_request", - "sentry.web.frontend.debug.debug_organization_join_request", "sentry.web.frontend.disabled_member_view", "sentry.web.frontend.group_plugin_action", 
"sentry.web.frontend.idp_email_verification", @@ -394,7 +368,6 @@ module = [ "sentry_plugins.jira.plugin", "tests.sentry.api.bases.test_organization", "tests.sentry.api.bases.test_project", - "tests.sentry.api.bases.test_sentryapps", "tests.sentry.api.bases.test_team", "tests.sentry.api.endpoints.notifications.test_notification_actions_details", "tests.sentry.api.endpoints.notifications.test_notification_actions_index", @@ -429,7 +402,7 @@ disable_error_code = [ # begin: stronger typing [[tool.mypy.overrides]] module = [ - "sentry.api.endpoints.issues.*", + "sentry.api.endpoints.project_backfill_similar_issues_embeddings_records", "sentry.api.helpers.deprecation", "sentry.api.helpers.source_map_helper", "sentry.auth.services.*", @@ -449,8 +422,7 @@ module = [ "sentry.eventtypes.error", "sentry.grouping.component", "sentry.grouping.fingerprinting", - "sentry.grouping.ingest.metrics", - "sentry.grouping.ingest.utils", + "sentry.grouping.ingest.*", "sentry.grouping.parameterization", "sentry.hybridcloud.*", "sentry.ingest.slicing", @@ -470,6 +442,7 @@ module = [ "sentry.issues.endpoints.organization_searches", "sentry.issues.endpoints.project_events", "sentry.issues.endpoints.project_stacktrace_link", + "sentry.issues.endpoints.related_issues", "sentry.issues.endpoints.shared_group_details", "sentry.issues.endpoints.team_groups_old", "sentry.issues.escalating_group_forecast", @@ -494,7 +467,6 @@ module = [ "sentry.issues.update_inbox", "sentry.lang.java.processing", "sentry.llm.*", - "sentry.mediators.sentry_app_installations.installation_notifier", "sentry.migrations.*", "sentry.models.event", "sentry.models.eventattachment", @@ -507,6 +479,7 @@ module = [ "sentry.nodestore.filesystem.backend", "sentry.nodestore.models", "sentry.organizations.*", + "sentry.ownership.*", "sentry.plugins.base.response", "sentry.plugins.base.view", "sentry.profiles.*", @@ -559,10 +532,12 @@ module = [ "sentry.utils.uwsgi", "sentry.utils.zip", "sentry.web.frontend.auth_provider_login", + "sentry.web.frontend.cli", "sentry.web.frontend.csv", "sentry_plugins.base", - "tests.sentry.api.endpoints.issues.*", + "tests.sentry.deletions.test_group", "tests.sentry.event_manager.test_event_manager", + "tests.sentry.grouping.ingest.test_seer", "tests.sentry.grouping.test_fingerprinting", "tests.sentry.hybridcloud.*", "tests.sentry.issues", @@ -571,6 +546,7 @@ module = [ "tests.sentry.issues.endpoints.test_organization_group_search_views", "tests.sentry.issues.endpoints.test_organization_searches", "tests.sentry.issues.endpoints.test_project_stacktrace_link", + "tests.sentry.issues.endpoints.test_related_issues", "tests.sentry.issues.endpoints.test_source_map_debug", "tests.sentry.issues.test_attributes", "tests.sentry.issues.test_escalating", @@ -592,11 +568,13 @@ module = [ "tests.sentry.issues.test_status_change", "tests.sentry.issues.test_status_change_consumer", "tests.sentry.issues.test_update_inbox", + "tests.sentry.ownership.*", "tests.sentry.ratelimits.test_leaky_bucket", "tests.sentry.relay.config.test_metric_extraction", "tests.sentry.tasks.test_on_demand_metrics", "tests.sentry.types.test_actor", "tests.sentry.types.test_region", + "tests.sentry.web.frontend.test_cli", "tools.*", ] disallow_any_generics = true diff --git a/requirements-base.txt b/requirements-base.txt index 2bde86c72cf6a..4d10fffb82e43 100644 --- a/requirements-base.txt +++ b/requirements-base.txt @@ -67,17 +67,17 @@ rfc3986-validator>=0.1.1 sentry-arroyo>=2.16.5 sentry-kafka-schemas>=0.1.111 sentry-ophio==1.0.0 -sentry-protos>=0.1.21 
+sentry-protos>=0.1.26 sentry-redis-tools>=0.1.7 -sentry-relay>=0.9.1 -sentry-sdk>=2.12.0 +sentry-relay>=0.9.2 +sentry-sdk>=2.17.0 slack-sdk>=3.27.2 -snuba-sdk>=3.0.38 +snuba-sdk>=3.0.43 simplejson>=3.17.6 sqlparse>=0.4.4 statsd>=3.3 structlog>=22 -symbolic==12.10.0 +symbolic==12.12.0 tiktoken>=0.6.0 tldextract>=5.1.2 toronado>=0.1.0 diff --git a/requirements-dev-frozen.txt b/requirements-dev-frozen.txt index c52f5eae48b99..fea5d1a38d28d 100644 --- a/requirements-dev-frozen.txt +++ b/requirements-dev-frozen.txt @@ -32,7 +32,7 @@ confluent-kafka==2.3.0 covdefaults==2.3.0 coverage==7.4.1 croniter==1.3.10 -cryptography==42.0.4 +cryptography==43.0.1 cssselect==1.0.3 cssutils==2.9.0 datadog==0.49.1 @@ -42,7 +42,7 @@ django==5.1.1 django-crispy-forms==1.14.0 django-csp==3.8 django-pg-zero-downtime-migrations==0.13 -django-stubs-ext==5.0.4 +django-stubs-ext==5.1.0 djangorestframework==3.15.2 docker==6.1.3 drf-spectacular==0.26.3 @@ -73,11 +73,11 @@ grpcio==1.60.1 grpcio-status==1.60.1 h11==0.13.0 hiredis==2.3.2 -honcho==1.1.0 +honcho==2.0.0 httpcore==1.0.2 httpx==0.25.2 -identify==2.5.24 -idna==2.10 +identify==2.6.1 +idna==3.7 inflection==0.5.1 iniconfig==1.1.1 iso3166==2.1.1 @@ -103,7 +103,7 @@ msgpack==1.0.7 msgpack-types==0.2.0 mypy==1.11.2 mypy-extensions==1.0.0 -nodeenv==1.8.0 +nodeenv==1.9.1 oauthlib==3.1.0 openai==1.3.5 openapi-core==0.18.2 @@ -125,7 +125,7 @@ pillow==10.2.0 pip-tools==7.1.0 platformdirs==4.2.0 pluggy==1.5.0 -pre-commit==3.3.2 +pre-commit==4.0.0 progressbar2==3.41.0 prompt-toolkit==3.0.41 proto-plus==1.24.0 @@ -149,7 +149,7 @@ pytest-django==4.9.0 pytest-fail-slow==0.3.0 pytest-json-report==1.5.0 pytest-metadata==3.1.1 -pytest-rerunfailures==11.0 +pytest-rerunfailures==14.0 pytest-sentry==0.3.0 pytest-xdist==3.0.2 python-dateutil==2.9.0 @@ -179,28 +179,28 @@ s3transfer==0.10.0 selenium==4.16.0 sentry-arroyo==2.16.5 sentry-cli==2.16.0 -sentry-devenv==1.10.2 -sentry-forked-django-stubs==5.0.4.post2 -sentry-forked-djangorestframework-stubs==3.15.1.post1 +sentry-devenv==1.13.0 +sentry-forked-django-stubs==5.1.0.post2 +sentry-forked-djangorestframework-stubs==3.15.1.post2 sentry-kafka-schemas==0.1.111 sentry-ophio==1.0.0 -sentry-protos==0.1.21 +sentry-protos==0.1.26 sentry-redis-tools==0.1.7 -sentry-relay==0.9.1 -sentry-sdk==2.12.0 +sentry-relay==0.9.2 +sentry-sdk==2.17.0 sentry-usage-accountant==0.0.10 simplejson==3.17.6 six==1.16.0 slack-sdk==3.27.2 sniffio==1.2.0 -snuba-sdk==3.0.39 +snuba-sdk==3.0.43 sortedcontainers==2.4.0 soupsieve==2.3.2.post1 sqlparse==0.5.0 statsd==3.3.0 stripe==3.1.0 structlog==22.1.0 -symbolic==12.10.0 +symbolic==12.12.0 tiktoken==0.6.0 time-machine==2.13.0 tldextract==5.1.2 diff --git a/requirements-dev.txt b/requirements-dev.txt index 61a113efa780e..cf3b0fbee4de1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,11 +1,11 @@ --index-url https://pypi.devinfra.sentry.io/simple -sentry-devenv>=1.10.2 +sentry-devenv>=1.13.0 covdefaults>=2.3.0 docker>=6 time-machine>=2.13.0 -honcho>=1.1.0 +honcho>=2 openapi-core>=0.18.2 openapi-pydantic>=0.4.0 pytest>=8.1 @@ -13,7 +13,7 @@ pytest-cov>=4.0.0 pytest-django>=4.9.0 pytest-fail-slow>=0.3.0 pytest-json-report>=1.5.0 -pytest-rerunfailures>=11 +pytest-rerunfailures>=14 pytest-sentry>=0.3.0 pytest-xdist>=3 responses>=0.23.1 @@ -21,7 +21,7 @@ selenium>=4.16.0 sentry-cli>=2.16.0 # pre-commit dependencies -pre-commit>=3.3 +pre-commit>=4 black>=22.10.0 flake8>=7 flake8-bugbear>=22.10 @@ -35,8 +35,8 @@ pip-tools>=7.1.0 packaging>=21.3 # for type checking -sentry-forked-django-stubs>=5.0.4.post2 
-sentry-forked-djangorestframework-stubs>=3.15.1.post1 +sentry-forked-django-stubs>=5.1.0.post2 +sentry-forked-djangorestframework-stubs>=3.15.1.post2 lxml-stubs msgpack-types>=0.2.0 mypy>=1.11.2 diff --git a/requirements-frozen.txt b/requirements-frozen.txt index b58a5fe92b673..7a34edf710bb2 100644 --- a/requirements-frozen.txt +++ b/requirements-frozen.txt @@ -27,7 +27,7 @@ click-plugins==1.1.1 click-repl==0.3.0 confluent-kafka==2.3.0 croniter==1.3.10 -cryptography==42.0.4 +cryptography==43.0.1 cssselect==1.0.3 cssutils==2.9.0 datadog==0.49.1 @@ -63,7 +63,7 @@ h11==0.14.0 hiredis==2.3.2 httpcore==1.0.2 httpx==0.25.2 -idna==2.10 +idna==3.7 inflection==0.5.1 iso3166==2.1.1 isodate==0.6.1 @@ -125,22 +125,22 @@ s3transfer==0.10.0 sentry-arroyo==2.16.5 sentry-kafka-schemas==0.1.111 sentry-ophio==1.0.0 -sentry-protos==0.1.21 +sentry-protos==0.1.26 sentry-redis-tools==0.1.7 -sentry-relay==0.9.1 -sentry-sdk==2.12.0 +sentry-relay==0.9.2 +sentry-sdk==2.17.0 sentry-usage-accountant==0.0.10 simplejson==3.17.6 six==1.16.0 slack-sdk==3.27.2 sniffio==1.3.0 -snuba-sdk==3.0.39 +snuba-sdk==3.0.43 soupsieve==2.3.2.post1 sqlparse==0.5.0 statsd==3.3.0 stripe==3.1.0 structlog==22.1.0 -symbolic==12.10.0 +symbolic==12.12.0 tiktoken==0.6.0 tldextract==5.1.2 toronado==0.1.0 diff --git a/scripts/test.js b/scripts/test.js index d73f8696933f8..156608c4ceeb1 100644 --- a/scripts/test.js +++ b/scripts/test.js @@ -6,9 +6,6 @@ process.env.NODE_ENV = 'test'; process.env.PUBLIC_URL = ''; process.env.TZ = 'America/New_York'; -// Marker to indicate that we've correctly ran with `yarn test`. -process.env.USING_YARN_TEST = true; - // Makes the script crash on unhandled rejections instead of silently // ignoring them. In the future, promise rejections that are not handled will // terminate the Node.js process with a non-zero exit code. diff --git a/setup.cfg b/setup.cfg index 16372c6641425..fa329dc0dff63 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = sentry -version = 24.10.0.dev0 +version = 24.11.0.dev0 description = A realtime logging and aggregation server. long_description = file: README.md long_description_content_type = text/markdown diff --git a/src/flagpole/conditions.py b/src/flagpole/conditions.py index ff3e8c7404cd3..ac530ffbeb6de 100644 --- a/src/flagpole/conditions.py +++ b/src/flagpole/conditions.py @@ -20,7 +20,7 @@ class ConditionOperatorKind(str, Enum): """Provided a single value, check if the property (a list) is not included""" EQUALS = "equals" - """Comprare a value to another. Values are compared with types""" + """Compare a value to another. Values are compared with types""" NOT_EQUALS = "not_equals" """Compare a value to not be equal to another. 
Values are compared with types""" diff --git a/src/sentry/analytics/events/alert_created.py b/src/sentry/analytics/events/alert_created.py index d9f743dcdac9d..c00ac0b2f4038 100644 --- a/src/sentry/analytics/events/alert_created.py +++ b/src/sentry/analytics/events/alert_created.py @@ -15,6 +15,7 @@ class AlertCreatedEvent(analytics.Event): analytics.Attribute("alert_rule_ui_component", required=False), analytics.Attribute("duplicate_rule", required=False), analytics.Attribute("wizard_v3", required=False), + analytics.Attribute("query_type", required=False), ) diff --git a/src/sentry/analytics/events/onboarding_complete.py b/src/sentry/analytics/events/onboarding_complete.py new file mode 100644 index 0000000000000..59b2924442aa8 --- /dev/null +++ b/src/sentry/analytics/events/onboarding_complete.py @@ -0,0 +1,14 @@ +from sentry import analytics + + +class OnboardingCompleteEvent(analytics.Event): + type = "onboarding.complete" + + attributes = ( + analytics.Attribute("user_id"), + analytics.Attribute("organization_id"), + analytics.Attribute("referrer"), + ) + + +analytics.register(OnboardingCompleteEvent) diff --git a/src/sentry/api/analytics.py b/src/sentry/api/analytics.py index 339c9bc5bc458..e875453f138fe 100644 --- a/src/sentry/api/analytics.py +++ b/src/sentry/api/analytics.py @@ -33,6 +33,25 @@ class GroupSimilarIssuesEmbeddingsCountEvent(analytics.Event): ) +class DevToolbarApiRequestEvent(analytics.Event): + type = "devtoolbar.api_request" + + attributes = ( + analytics.Attribute("view_name"), + analytics.Attribute("route"), + analytics.Attribute("query_string", required=False), + analytics.Attribute("origin", required=False), + analytics.Attribute("method"), + analytics.Attribute("status_code", type=int), + analytics.Attribute("organization_id", type=int, required=False), + analytics.Attribute("organization_slug", required=False), + analytics.Attribute("project_id", type=int, required=False), + analytics.Attribute("project_slug", required=False), + analytics.Attribute("user_id", type=int, required=False), + ) + + analytics.register(OrganizationSavedSearchCreatedEvent) analytics.register(OrganizationSavedSearchDeletedEvent) analytics.register(GroupSimilarIssuesEmbeddingsCountEvent) +analytics.register(DevToolbarApiRequestEvent) diff --git a/src/sentry/api/api_owners.py b/src/sentry/api/api_owners.py index da89e1e004e22..60e5d1884f26a 100644 --- a/src/sentry/api/api_owners.py +++ b/src/sentry/api/api_owners.py @@ -13,6 +13,7 @@ class ApiOwner(Enum): ECOSYSTEM = "ecosystem" ENTERPRISE = "enterprise" FEEDBACK = "feedback-backend" + FLAG = "replay-backend" HYBRID_CLOUD = "hybrid-cloud" INTEGRATIONS = "product-owners-settings-integrations" ISSUES = "issues" @@ -27,4 +28,3 @@ class ApiOwner(Enum): TELEMETRY_EXPERIENCE = "telemetry-experience" UNOWNED = "unowned" WEB_FRONTEND_SDKS = "team-web-sdk-frontend" - REMOTE_CONFIG = "replay-backend" diff --git a/src/sentry/api/base.py b/src/sentry/api/base.py index 962b733c078b7..52b49e991ea33 100644 --- a/src/sentry/api/base.py +++ b/src/sentry/api/base.py @@ -394,7 +394,7 @@ def dispatch(self, request: Request, *args, **kwargs) -> Response: Identical to rest framework's dispatch except we add the ability to convert arguments (for common URL params). 
""" - with sentry_sdk.start_span(op="base.dispatch.setup", description=type(self).__name__): + with sentry_sdk.start_span(op="base.dispatch.setup", name=type(self).__name__): self.args = args self.kwargs = kwargs request = self.initialize_request(request, *args, **kwargs) @@ -415,7 +415,7 @@ def dispatch(self, request: Request, *args, **kwargs) -> Response: origin = None try: - with sentry_sdk.start_span(op="base.dispatch.request", description=type(self).__name__): + with sentry_sdk.start_span(op="base.dispatch.request", name=type(self).__name__): if origin: if request.auth: allowed_origins = request.auth.get_allowed_origins() @@ -449,7 +449,7 @@ def dispatch(self, request: Request, *args, **kwargs) -> Response: with sentry_sdk.start_span( op="base.dispatch.execute", - description=".".join( + name=".".join( getattr(part, "__name__", None) or str(part) for part in (type(self), handler) ), ) as span: @@ -469,7 +469,7 @@ def dispatch(self, request: Request, *args, **kwargs) -> Response: if duration < (settings.SENTRY_API_RESPONSE_DELAY / 1000.0): with sentry_sdk.start_span( op="base.dispatch.sleep", - description=type(self).__name__, + name=type(self).__name__, ) as span: span.set_data("SENTRY_API_RESPONSE_DELAY", settings.SENTRY_API_RESPONSE_DELAY) time.sleep(settings.SENTRY_API_RESPONSE_DELAY / 1000.0 - duration) @@ -556,7 +556,7 @@ def paginate( cursor = self.get_cursor_from_request(request, cursor_cls) with sentry_sdk.start_span( op="base.paginate.get_result", - description=type(self).__name__, + name=type(self).__name__, ) as span: annotate_span_with_pagination_args(span, per_page) paginator = get_paginator(paginator, paginator_cls, paginator_kwargs) @@ -576,7 +576,7 @@ def paginate( if on_results: with sentry_sdk.start_span( op="base.paginate.on_results", - description=type(self).__name__, + name=type(self).__name__, ): results = on_results(cursor_result.results) else: diff --git a/src/sentry/api/bases/__init__.py b/src/sentry/api/bases/__init__.py index eeb0925009617..bf18019cc99a8 100644 --- a/src/sentry/api/bases/__init__.py +++ b/src/sentry/api/bases/__init__.py @@ -4,5 +4,4 @@ from .organizationmember import * # NOQA from .project import * # NOQA from .project_transaction_threshold_override import * # NOQA -from .sentryapps import * # NOQA from .team import * # NOQA diff --git a/src/sentry/api/bases/organization_events.py b/src/sentry/api/bases/organization_events.py index 4ddd5bd2469e5..3ef920d04efbf 100644 --- a/src/sentry/api/bases/organization_events.py +++ b/src/sentry/api/bases/organization_events.py @@ -117,7 +117,7 @@ def get_snuba_params( quantize_date_params: bool = True, ) -> SnubaParams: """Returns params to make snuba queries with""" - with sentry_sdk.start_span(op="discover.endpoint", description="filter_params(dataclass)"): + with sentry_sdk.start_span(op="discover.endpoint", name="filter_params(dataclass)"): if ( len(self.get_field_list(organization, request)) + len(self.get_equation_list(organization, request)) @@ -317,7 +317,7 @@ def handle_results_with_meta( standard_meta: bool | None = False, dataset: Any | None = None, ) -> dict[str, Any]: - with sentry_sdk.start_span(op="discover.endpoint", description="base.handle_results"): + with sentry_sdk.start_span(op="discover.endpoint", name="base.handle_results"): data = self.handle_data(request, organization, project_ids, results.get("data")) meta = results.get("meta", {}) fields_meta = meta.get("fields", {}) @@ -424,9 +424,7 @@ def get_event_stats_data( dataset: Any | None = None, ) -> dict[str, Any]: with 
handle_query_errors(): - with sentry_sdk.start_span( - op="discover.endpoint", description="base.stats_query_creation" - ): + with sentry_sdk.start_span(op="discover.endpoint", name="base.stats_query_creation"): _columns = [query_column] # temporary change to make topN query work for multi-axes requests if additional_query_column is not None: @@ -466,14 +464,14 @@ def get_event_stats_data( raise ValidationError("Comparison period is outside your retention window") query_columns = get_query_columns(columns, rollup) - with sentry_sdk.start_span(op="discover.endpoint", description="base.stats_query"): + with sentry_sdk.start_span(op="discover.endpoint", name="base.stats_query"): result = get_event_stats( query_columns, query, snuba_params, rollup, zerofill_results, comparison_delta ) serializer = SnubaTSResultSerializer(organization, None, request.user) - with sentry_sdk.start_span(op="discover.endpoint", description="base.stats_serialization"): + with sentry_sdk.start_span(op="discover.endpoint", name="base.stats_serialization"): # When the request is for top_events, result can be a SnubaTSResult in the event that # there were no top events found. In this case, result contains a zerofilled series # that acts as a placeholder. diff --git a/src/sentry/api/bases/sentryapps.py b/src/sentry/api/bases/sentryapps.py index 242c81858f2f2..b1e5512dce5ed 100644 --- a/src/sentry/api/bases/sentryapps.py +++ b/src/sentry/api/bases/sentryapps.py @@ -1,498 +1,13 @@ -from __future__ import annotations - -import logging -from functools import wraps -from typing import Any - -from django.http import Http404 -from rest_framework.exceptions import PermissionDenied -from rest_framework.permissions import BasePermission -from rest_framework.request import Request -from rest_framework.response import Response -from rest_framework.serializers import ValidationError - -from sentry.api.authentication import ClientIdSecretAuthentication -from sentry.api.base import Endpoint -from sentry.api.permissions import SentryPermission, StaffPermissionMixin -from sentry.auth.staff import is_active_staff -from sentry.auth.superuser import is_active_superuser, superuser_has_permission -from sentry.coreapi import APIError -from sentry.integrations.api.bases.integration import PARANOID_GET -from sentry.middleware.stats import add_request_metric_tags -from sentry.models.organization import OrganizationStatus -from sentry.organizations.services.organization import ( - RpcUserOrganizationContext, - organization_service, +from sentry.sentry_apps.api.bases.sentryapps import ( + RegionSentryAppBaseEndpoint, + SentryAppBaseEndpoint, + SentryAppInstallationBaseEndpoint, + SentryAppInstallationsBaseEndpoint, ) -from sentry.sentry_apps.models.sentry_app import SentryApp -from sentry.sentry_apps.services.app import RpcSentryApp, app_service -from sentry.users.services.user import RpcUser -from sentry.users.services.user.service import user_service -from sentry.utils.sdk import Scope -from sentry.utils.strings import to_single_line_str - -COMPONENT_TYPES = ["stacktrace-link", "issue-link"] - -logger = logging.getLogger(__name__) - - -def catch_raised_errors(func): - @wraps(func) - def wrapped(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except APIError as e: - return Response({"detail": e.msg}, status=400) - - return wrapped - - -def ensure_scoped_permission(request, allowed_scopes): - """ - Verifies the User making the request has at least one required scope for - the endpoint being requested. 
- - If no scopes were specified in a ``scope_map``, it means the endpoint should - not be accessible. That is, this function expects every accessible endpoint - to have a list of scopes. - - That list of scopes may be empty, implying that the User does not need any - specific scope and the endpoint is public. - """ - # If no scopes were found at all, the endpoint should not be accessible. - if allowed_scopes is None: - return False - - # If there are no scopes listed, it implies a public endpoint. - if len(allowed_scopes) == 0: - return True - - return any(request.access.has_scope(s) for s in set(allowed_scopes)) - - -def add_integration_platform_metric_tag(func): - @wraps(func) - def wrapped(self, *args, **kwargs): - add_request_metric_tags(self.request, integration_platform=True) - return func(self, *args, **kwargs) - - return wrapped - - -class SentryAppsPermission(SentryPermission): - scope_map = { - "GET": PARANOID_GET, - "POST": ("org:write", "org:admin"), - } - - def has_object_permission(self, request: Request, view, context: RpcUserOrganizationContext): - if not hasattr(request, "user") or not request.user: - return False - - self.determine_access(request, context) - - if superuser_has_permission(request): - return True - - # User must be a part of the Org they're trying to create the app in. - if context.organization.status != OrganizationStatus.ACTIVE or not context.member: - raise Http404 - - return ensure_scoped_permission(request, self.scope_map.get(request.method)) - - -class SentryAppsAndStaffPermission(StaffPermissionMixin, SentryAppsPermission): - """Allows staff to access the GET method of sentry apps endpoints.""" - - staff_allowed_methods = {"GET"} - - -class IntegrationPlatformEndpoint(Endpoint): - def dispatch(self, request, *args, **kwargs): - add_request_metric_tags(request, integration_platform=True) - return super().dispatch(request, *args, **kwargs) - - -class SentryAppsBaseEndpoint(IntegrationPlatformEndpoint): - permission_classes: tuple[type[BasePermission], ...] = (SentryAppsAndStaffPermission,) - - def _get_organization_slug(self, request: Request): - organization_slug = request.json_body.get("organization") - if not organization_slug or not isinstance(organization_slug, str): - error_message = "Please provide a valid value for the 'organization' field." - raise ValidationError({"organization": to_single_line_str(error_message)}) - return organization_slug - - def _get_organization_for_superuser_or_staff( - self, user: RpcUser, organization_slug: str - ) -> RpcUserOrganizationContext: - context = organization_service.get_organization_by_slug( - slug=organization_slug, only_visible=False, user_id=user.id - ) - - if context is None: - error_message = f"Organization '{organization_slug}' does not exist." - raise ValidationError({"organization": to_single_line_str(error_message)}) - - return context - - def _get_organization_for_user( - self, user: RpcUser, organization_slug: str - ) -> RpcUserOrganizationContext: - context = organization_service.get_organization_by_slug( - slug=organization_slug, only_visible=True, user_id=user.id - ) - if context is None or context.member is None: - error_message = f"User does not belong to the '{organization_slug}' organization." 
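A minimal, self-contained sketch of the scope contract described in the docstring above (a missing scope_map entry denies, an empty tuple is public, otherwise any one matching scope suffices); FakeAccess is a hypothetical stand-in for request.access:

    class FakeAccess:
        def __init__(self, scopes: set[str]) -> None:
            self.scopes = scopes

        def has_scope(self, scope: str) -> bool:
            return scope in self.scopes

    def scoped_permission(access: FakeAccess, allowed_scopes) -> bool:
        if allowed_scopes is None:
            return False  # method absent from the scope_map: never accessible
        if len(allowed_scopes) == 0:
            return True  # empty tuple: public, no scope required
        return any(access.has_scope(s) for s in set(allowed_scopes))

    assert scoped_permission(FakeAccess({"org:read"}), None) is False
    assert scoped_permission(FakeAccess(set()), ()) is True
    assert scoped_permission(FakeAccess({"org:write"}), ("org:write", "org:admin")) is True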
- raise PermissionDenied(to_single_line_str(error_message)) - return context - - def _get_org_context(self, request: Request) -> RpcUserOrganizationContext: - organization_slug = self._get_organization_slug(request) - if is_active_superuser(request) or is_active_staff(request): - return self._get_organization_for_superuser_or_staff(request.user, organization_slug) - else: - return self._get_organization_for_user(request.user, organization_slug) - - def convert_args(self, request: Request, *args, **kwargs): - """ - This baseclass is the SentryApp collection endpoints: - - [GET, POST] /sentry-apps - - The GET endpoint is public and doesn't require (or handle) any query - params or request body. - - The POST endpoint is for creating a Sentry App. Part of that creation - is associating it with the Organization that it's created within. - - So in the case of POST requests, we want to pull the Organization out - of the request body so that we can ensure the User making the request - has access to it. - - Since ``convert_args`` is conventionally where you materialize model - objects from URI params, we're applying the same logic for a param in - the request body. - """ - if not request.json_body: - return (args, kwargs) - - context = self._get_org_context(request) - self.check_object_permissions(request, context) - kwargs["organization"] = context.organization - - return (args, kwargs) - - -class SentryAppPermission(SentryPermission): - unpublished_scope_map = { - "GET": ("org:read", "org:integrations", "org:write", "org:admin"), - "PUT": ("org:write", "org:admin"), - "POST": ("org:admin",), # used for publishing an app - "DELETE": ("org:admin",), - } - - published_scope_map = { - "GET": PARANOID_GET, - "PUT": ("org:write", "org:admin"), - "POST": ("org:admin",), - "DELETE": ("org:admin",), - } - - @property - def scope_map(self): - return self.published_scope_map - - def has_object_permission(self, request: Request, view, sentry_app: RpcSentryApp | SentryApp): - if not hasattr(request, "user") or not request.user: - return False - - owner_app = organization_service.get_organization_by_id( - id=sentry_app.owner_id, user_id=request.user.id - ) - self.determine_access(request, owner_app) - - if superuser_has_permission(request): - return True - - organizations = ( - user_service.get_organizations(user_id=request.user.id) - if request.user.id is not None - else () - ) - # if app is unpublished, user must be in the Org who owns the app. - if not sentry_app.is_published: - if not any(sentry_app.owner_id == org.id for org in organizations): - raise Http404 - - # TODO(meredith): make a better way to allow for public - # endpoints. we can't use ensure_scoped_permission now - # that the public endpoint isn't denoted by '()' - if sentry_app.is_published and request.method == "GET": - return True - - return ensure_scoped_permission( - request, self._scopes_for_sentry_app(sentry_app).get(request.method) - ) - - def _scopes_for_sentry_app(self, sentry_app): - if sentry_app.is_published: - return self.published_scope_map - else: - return self.unpublished_scope_map - - -class SentryAppAndStaffPermission(StaffPermissionMixin, SentryAppPermission): - """Allows staff to access sentry app endpoints. Note that this is used for - endpoints acting on a single sentry app only.""" - - pass - - -class SentryAppBaseEndpoint(IntegrationPlatformEndpoint): - permission_classes: tuple[type[BasePermission], ...] 
= (SentryAppPermission,) - def convert_args( - self, request: Request, sentry_app_id_or_slug: int | str, *args: Any, **kwargs: Any - ): - try: - sentry_app = SentryApp.objects.get(slug__id_or_slug=sentry_app_id_or_slug) - except SentryApp.DoesNotExist: - raise Http404 - - self.check_object_permissions(request, sentry_app) - - Scope.get_isolation_scope().set_tag("sentry_app", sentry_app.slug) - - kwargs["sentry_app"] = sentry_app - return (args, kwargs) - - -class RegionSentryAppBaseEndpoint(IntegrationPlatformEndpoint): - def convert_args( - self, request: Request, sentry_app_id_or_slug: int | str, *args: Any, **kwargs: Any - ): - if str(sentry_app_id_or_slug).isdecimal(): - sentry_app = app_service.get_sentry_app_by_id(id=int(sentry_app_id_or_slug)) - else: - sentry_app = app_service.get_sentry_app_by_slug(slug=sentry_app_id_or_slug) - if sentry_app is None: - raise Http404 - - self.check_object_permissions(request, sentry_app) - - Scope.get_isolation_scope().set_tag("sentry_app", sentry_app.slug) - - kwargs["sentry_app"] = sentry_app - return (args, kwargs) - - -class SentryAppInstallationsPermission(SentryPermission): - scope_map = { - "GET": ("org:read", "org:integrations", "org:write", "org:admin"), - "POST": ("org:integrations", "org:write", "org:admin"), - } - - def has_object_permission(self, request: Request, view, organization): - if not hasattr(request, "user") or not request.user: - return False - - self.determine_access(request, organization) - - if superuser_has_permission(request): - return True - - organizations = ( - user_service.get_organizations(user_id=request.user.id) - if request.user.id is not None - else () - ) - if not any(organization.id == org.id for org in organizations): - raise Http404 - - return ensure_scoped_permission(request, self.scope_map.get(request.method)) - - -class SentryAppInstallationsBaseEndpoint(IntegrationPlatformEndpoint): - permission_classes = (SentryAppInstallationsPermission,) - - def convert_args(self, request: Request, organization_id_or_slug, *args, **kwargs): - extra_args = {} - # We need to pass user_id if the user is not a superuser - if not is_active_superuser(request): - extra_args["user_id"] = request.user.id - - if str(organization_id_or_slug).isdecimal(): - organization = organization_service.get_org_by_id( - id=int(organization_id_or_slug), **extra_args - ) - else: - organization = organization_service.get_org_by_slug( - slug=str(organization_id_or_slug), **extra_args - ) - - if organization is None: - raise Http404 - self.check_object_permissions(request, organization) - - kwargs["organization"] = organization - return (args, kwargs) - - -class SentryAppInstallationPermission(SentryPermission): - scope_map = { - "GET": ("org:read", "org:integrations", "org:write", "org:admin"), - "DELETE": ("org:integrations", "org:write", "org:admin"), - # NOTE(mn): The only POST endpoint right now is to create External - # Issues, which uses this baseclass since it's nested under an - # installation. - # - # The scopes below really only make sense for that endpoint. Any other - # nested endpoints will probably need different scopes - figure out how - # to deal with that when it happens. 
- "POST": ("org:integrations", "event:write", "event:admin"), - } - - def has_permission(self, request: Request, *args, **kwargs): - # To let the app mark the installation as installed, we don't care about permissions - if ( - hasattr(request, "user") - and hasattr(request.user, "is_sentry_app") - and request.user.is_sentry_app - and request.method == "PUT" - ): - return True - return super().has_permission(request, *args, **kwargs) - - def has_object_permission(self, request: Request, view, installation): - if not hasattr(request, "user") or not request.user: - return False - - self.determine_access(request, installation.organization_id) - - if superuser_has_permission(request): - return True - - # if user is an app, make sure it's for that same app - if request.user.is_sentry_app: - return request.user.id == installation.sentry_app.proxy_user_id - - org_context = organization_service.get_organization_by_id( - id=installation.organization_id, - user_id=request.user.id, - include_teams=False, - include_projects=False, - ) - if ( - org_context.member is None - or org_context.organization.status != OrganizationStatus.ACTIVE - ): - raise Http404 - - return ensure_scoped_permission(request, self.scope_map.get(request.method)) - - -class SentryAppInstallationBaseEndpoint(IntegrationPlatformEndpoint): - permission_classes: tuple[type[BasePermission], ...] = (SentryAppInstallationPermission,) - - def convert_args(self, request: Request, uuid, *args, **kwargs): - installations = app_service.get_many(filter=dict(uuids=[uuid])) - installation = installations[0] if installations else None - if installation is None: - raise Http404 - - self.check_object_permissions(request, installation) - - Scope.get_isolation_scope().set_tag("sentry_app_installation", installation.uuid) - - kwargs["installation"] = installation - return (args, kwargs) - - -class SentryAppInstallationExternalIssuePermission(SentryAppInstallationPermission): - scope_map = { - "POST": ("event:read", "event:write", "event:admin"), - "DELETE": ("event:admin",), - } - - -class SentryAppInstallationExternalIssueBaseEndpoint(SentryAppInstallationBaseEndpoint): - permission_classes = (SentryAppInstallationExternalIssuePermission,) - - -class SentryAppAuthorizationsPermission(SentryPermission): - def has_object_permission(self, request: Request, view, installation): - if not hasattr(request, "user") or not request.user: - return False - - installation_org_context = organization_service.get_organization_by_id( - id=installation.organization_id, user_id=request.user.id - ) - self.determine_access(request, installation_org_context) - - if not request.user.is_sentry_app: - return False - - # Request must be made as the app's Proxy User, using their Client ID - # and Secret. 
- return request.user.id == installation.sentry_app.proxy_user_id - - -class SentryAppAuthorizationsBaseEndpoint(SentryAppInstallationBaseEndpoint): - authentication_classes = (ClientIdSecretAuthentication,) - permission_classes = (SentryAppAuthorizationsPermission,) - - -class SentryInternalAppTokenPermission(SentryPermission): - scope_map = { - "GET": ("org:write", "org:admin"), - "POST": ("org:write", "org:admin"), - "DELETE": ("org:write", "org:admin"), - } - - def has_object_permission(self, request: Request, view, sentry_app): - if not hasattr(request, "user") or not request.user: - return False - - owner_app = organization_service.get_organization_by_id( - id=sentry_app.owner_id, user_id=request.user.id - ) - self.determine_access(request, owner_app) - - if superuser_has_permission(request): - return True - - return ensure_scoped_permission(request, self.scope_map.get(request.method)) - - -class SentryAppStatsPermission(SentryPermission): - scope_map = { - "GET": ("org:read", "org:integrations", "org:write", "org:admin"), - # Anyone logged in can increment the stats, so leave the scopes empty - # Note: this only works for session-based auth so you cannot increment stats through API - "POST": (), - } - - def has_object_permission(self, request: Request, view, sentry_app: SentryApp | RpcSentryApp): - if not hasattr(request, "user") or not request.user: - return False - - owner_app = organization_service.get_organization_by_id( - id=sentry_app.owner_id, user_id=request.user.id - ) - if owner_app is None: - logger.error( - "sentry_app_stats.permission_org_not_found", - extra={ - "sentry_app_id": sentry_app.id, - "owner_org_id": sentry_app.owner_id, - "user_id": request.user.id, - }, - ) - return False - self.determine_access(request, owner_app) - - if is_active_superuser(request): - return True - - return ensure_scoped_permission(request, self.scope_map.get(request.method)) +__all__ = ( + "SentryAppBaseEndpoint", + "RegionSentryAppBaseEndpoint", + "SentryAppInstallationBaseEndpoint", + "SentryAppInstallationsBaseEndpoint", +) diff --git a/src/sentry/api/decorators.py b/src/sentry/api/decorators.py index 921b6953ac952..ef28203d67841 100644 --- a/src/sentry/api/decorators.py +++ b/src/sentry/api/decorators.py @@ -19,11 +19,7 @@ def is_considered_sudo(request: Request) -> bool: or is_api_key_auth(request.auth) or is_api_token_auth(request.auth) or is_org_auth_token_auth(request.auth) - or ( - request.user.is_authenticated - and not isinstance(request.user, AnonymousUser) - and not request.user.has_usable_password() - ) + or (request.user.is_authenticated and not request.user.has_usable_password()) ) diff --git a/src/sentry/api/endpoints/admin_project_configs.py b/src/sentry/api/endpoints/admin_project_configs.py index a33b9a6d5a259..3b46ff73c17ac 100644 --- a/src/sentry/api/endpoints/admin_project_configs.py +++ b/src/sentry/api/endpoints/admin_project_configs.py @@ -46,7 +46,7 @@ def get(self, request: Request) -> Response: else: configs[key] = None - # TODO if we don't think we'll add anything to the endpoint + # TODO: if we don't think we'll add anything to the endpoint # we may as well return just the configs return Response({"configs": configs}, status=200) diff --git a/src/sentry/api/endpoints/api_application_details.py b/src/sentry/api/endpoints/api_application_details.py index 6eae213e37d30..73f6dadeeba95 100644 --- a/src/sentry/api/endpoints/api_application_details.py +++ b/src/sentry/api/endpoints/api_application_details.py @@ -10,8 +10,8 @@ from sentry.api.base import Endpoint, 
control_silo_endpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize +from sentry.deletions.models.scheduleddeletion import ScheduledDeletion from sentry.models.apiapplication import ApiApplication, ApiApplicationStatus -from sentry.models.scheduledeletion import ScheduledDeletion class ApiApplicationSerializer(serializers.Serializer): diff --git a/src/sentry/api/endpoints/debug_files.py b/src/sentry/api/endpoints/debug_files.py index e27fd4f2efe3f..f623e71a8e865 100644 --- a/src/sentry/api/endpoints/debug_files.py +++ b/src/sentry/api/endpoints/debug_files.py @@ -15,7 +15,7 @@ from symbolic.debuginfo import normalize_debug_id from symbolic.exceptions import SymbolicError -from sentry import ratelimits, roles +from sentry import ratelimits from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -39,6 +39,7 @@ from sentry.models.project import Project from sentry.models.release import Release, get_artifact_counts from sentry.models.releasefile import ReleaseFile +from sentry.roles import organization_roles from sentry.tasks.assemble import ( AssembleTask, ChunkFileState, @@ -53,7 +54,7 @@ _release_suffix = re.compile(r"^(.*)\s+\(([^)]+)\)\s*$") -def upload_from_request(request, project): +def upload_from_request(request: Request, project: Project): if "file" not in request.data: return Response({"detail": "Missing uploaded file"}, status=400) fileobj = request.data["file"] @@ -61,7 +62,7 @@ def upload_from_request(request, project): return Response(serialize(files, request.user), status=201) -def has_download_permission(request, project): +def has_download_permission(request: Request, project: Project): if is_system_auth(request.auth) or is_active_superuser(request): return True @@ -72,7 +73,7 @@ def has_download_permission(request, project): required_role = organization.get_option("sentry:debug_files_role") or DEBUG_FILES_ROLE_DEFAULT if request.user.is_sentry_app: - if roles.get(required_role).priority > roles.get("member").priority: + if organization_roles.can_manage("member", required_role): return request.access.has_scope("project:write") else: return request.access.has_scope("project:read") @@ -86,7 +87,12 @@ def has_download_permission(request, project): except OrganizationMember.DoesNotExist: return False - return roles.get(current_role).priority >= roles.get(required_role).priority + if organization_roles.can_manage(current_role, required_role): + return True + + # There's an edge case where a team admin is an org member but the required + # role is org admin. In that case, the team admin should be able to download. 
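The debug-files hunk above swaps a manual roles.get(...).priority comparison for organization_roles.can_manage and adds a team-admin fallback. A sketch of the resulting member branch, assuming can_manage(a, b) is true when role a is at least as privileged as role b:

    from sentry.roles import organization_roles

    def member_can_download(current_role: str, required_role: str, request, project) -> bool:
        # can_manage replaces the old comparison:
        #   roles.get(current_role).priority >= roles.get(required_role).priority
        if organization_roles.can_manage(current_role, required_role):
            return True
        # Team admins only hold the org-level "member" role, so when the required
        # role is "admin", project-level write access stands in for the org role.
        return required_role == "admin" and request.access.has_project_scope(
            project, "project:write"
        )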
+ return required_role == "admin" and request.access.has_project_scope(project, "project:write") def _has_delete_permission(access: Access, project: Project) -> bool: @@ -104,7 +110,7 @@ class ProguardArtifactReleasesEndpoint(ProjectEndpoint): } permission_classes = (ProjectReleasePermission,) - def post(self, request: Request, project) -> Response: + def post(self, request: Request, project: Project) -> Response: release_name = request.data.get("release_name") proguard_uuid = request.data.get("proguard_uuid") @@ -153,7 +159,7 @@ def post(self, request: Request, project) -> Response: status=status.HTTP_409_CONFLICT, ) - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: """ List a Project's Proguard Associated Releases ```````````````````````````````````````` @@ -189,7 +195,7 @@ class DebugFilesEndpoint(ProjectEndpoint): } permission_classes = (ProjectReleasePermission,) - def download(self, debug_file_id, project): + def download(self, debug_file_id, project: Project): rate_limited = ratelimits.backend.is_limited( project=project, key=f"rl:DSymFilesEndpoint:download:{debug_file_id}:{project.id}", @@ -223,7 +229,7 @@ def download(self, debug_file_id, project): except OSError: raise Http404 - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: """ List a Project's Debug Information Files ```````````````````````````````````````` @@ -240,7 +246,7 @@ def get(self, request: Request, project) -> Response: :auth: required """ download_requested = request.GET.get("id") is not None - if download_requested and (has_download_permission(request, project)): + if download_requested and has_download_permission(request, project): return self.download(request.GET.get("id"), project) elif download_requested: return Response(status=403) @@ -335,7 +341,7 @@ def delete(self, request: Request, project: Project) -> Response: return Response(status=404) - def post(self, request: Request, project) -> Response: + def post(self, request: Request, project: Project) -> Response: """ Upload a New File ````````````````` @@ -367,7 +373,7 @@ class UnknownDebugFilesEndpoint(ProjectEndpoint): } permission_classes = (ProjectReleasePermission,) - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: checksums = request.GET.getlist("checksums") missing = ProjectDebugFile.objects.find_missing(checksums, project=project) return Response({"missing": missing}) @@ -382,7 +388,7 @@ class AssociateDSymFilesEndpoint(ProjectEndpoint): permission_classes = (ProjectReleasePermission,) # Legacy endpoint, kept for backwards compatibility - def post(self, request: Request, project) -> Response: + def post(self, request: Request, project: Project) -> Response: return Response({"associatedDsymFiles": []}) @@ -394,7 +400,7 @@ class DifAssembleEndpoint(ProjectEndpoint): } permission_classes = (ProjectReleasePermission,) - def post(self, request: Request, project) -> Response: + def post(self, request: Request, project: Project) -> Response: """ Assemble one or multiple chunks (FileBlob) into debug files ```````````````````````````````````````````````````````````` @@ -517,7 +523,7 @@ class SourceMapsEndpoint(ProjectEndpoint): } permission_classes = (ProjectReleasePermission,) - def get(self, request: Request, project) -> Response: + def get(self, request: Request, project: Project) -> Response: """ List a Project's Source Map Archives 
```````````````````````````````````` @@ -549,7 +555,7 @@ def get(self, request: Request, project) -> Response: queryset = queryset.filter(query_q) - def expose_release(release, count): + def expose_release(release, count: int): return { "type": "release", "id": release["id"], @@ -581,7 +587,7 @@ def serialize_results(results): on_results=serialize_results, ) - def delete(self, request: Request, project) -> Response: + def delete(self, request: Request, project: Project) -> Response: """ Delete an Archive ``````````````````````````````````````````````````` diff --git a/src/sentry/api/endpoints/event_ai_suggested_fix.py b/src/sentry/api/endpoints/event_ai_suggested_fix.py index d5d32e1b3c9f8..b856ecf96bca4 100644 --- a/src/sentry/api/endpoints/event_ai_suggested_fix.py +++ b/src/sentry/api/endpoints/event_ai_suggested_fix.py @@ -297,8 +297,6 @@ class EventAiSuggestedFixEndpoint(ProjectEndpoint): publish_status = { "GET": ApiPublishStatus.PRIVATE, } - # go away - private = True enforce_rate_limit = True rate_limits = { "GET": { diff --git a/src/sentry/api/endpoints/group_ai_autofix.py b/src/sentry/api/endpoints/group_ai_autofix.py index 7f2835bbeca01..74a1a07e81677 100644 --- a/src/sentry/api/endpoints/group_ai_autofix.py +++ b/src/sentry/api/endpoints/group_ai_autofix.py @@ -39,8 +39,6 @@ class GroupAutofixEndpoint(GroupEndpoint): "GET": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - # go away - private = True enforce_rate_limit = True rate_limits = { "POST": { @@ -87,6 +85,7 @@ def _call_autofix( serialized_event: dict[str, Any], instruction: str, timeout_secs: int, + pr_to_comment_on_url: str | None = None, ): path = "/v1/automation/autofix/start" body = orjson.dumps( @@ -116,7 +115,8 @@ def _call_autofix( "organizations:autofix-disable-codebase-indexing", group.organization, actor=user, - ) + ), + "comment_on_pr_with_url": pr_to_comment_on_url, }, }, option=orjson.OPT_NON_STR_KEYS, @@ -191,6 +191,7 @@ def post(self, request: Request, group: Group) -> Response: serialized_event, data.get("instruction", data.get("additional_context", "")), TIMEOUT_SECONDS, + data.get("pr_to_comment_on_url", None), # support optional PR id for copilot ) except Exception as e: logger.exception( diff --git a/src/sentry/api/endpoints/group_ai_summary.py b/src/sentry/api/endpoints/group_ai_summary.py index 350d6054feaea..43660b92ccb02 100644 --- a/src/sentry/api/endpoints/group_ai_summary.py +++ b/src/sentry/api/endpoints/group_ai_summary.py @@ -44,7 +44,6 @@ class GroupAiSummaryEndpoint(GroupEndpoint): "POST": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - private = True enforce_rate_limit = True rate_limits = { "POST": { diff --git a/src/sentry/api/endpoints/group_autofix_setup_check.py b/src/sentry/api/endpoints/group_autofix_setup_check.py index a7d23cadba360..6f0d2e05a5347 100644 --- a/src/sentry/api/endpoints/group_autofix_setup_check.py +++ b/src/sentry/api/endpoints/group_autofix_setup_check.py @@ -17,6 +17,7 @@ get_project_codebase_indexing_status, ) from sentry.autofix.utils import get_autofix_repos_from_project_code_mappings +from sentry.constants import ObjectStatus from sentry.integrations.services.integration import integration_service from sentry.integrations.utils.code_mapping import get_sorted_code_mapping_configs from sentry.models.group import Group @@ -44,7 +45,7 @@ def get_autofix_integration_setup_problems( organization_integration = organization_integrations[0] if organization_integrations else None integration = organization_integration and 
integration_service.get_integration( - organization_integration_id=organization_integration.id + organization_integration_id=organization_integration.id, status=ObjectStatus.ACTIVE ) installation = integration and integration.get_installation(organization_id=organization.id) @@ -102,7 +103,6 @@ class GroupAutofixSetupCheck(GroupEndpoint): "GET": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - private = True def get(self, request: Request, group: Group) -> Response: """ diff --git a/src/sentry/api/endpoints/group_autofix_update.py b/src/sentry/api/endpoints/group_autofix_update.py index 89545a207e6a8..60906134faad6 100644 --- a/src/sentry/api/endpoints/group_autofix_update.py +++ b/src/sentry/api/endpoints/group_autofix_update.py @@ -26,7 +26,6 @@ class GroupAutofixUpdateEndpoint(GroupEndpoint): "POST": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - private = True def post(self, request: Request, group: Group) -> Response: """ diff --git a/src/sentry/api/endpoints/group_external_issue_details.py b/src/sentry/api/endpoints/group_external_issue_details.py index 13a730a620fcd..1e04086f98319 100644 --- a/src/sentry/api/endpoints/group_external_issue_details.py +++ b/src/sentry/api/endpoints/group_external_issue_details.py @@ -5,7 +5,7 @@ from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint from sentry.api.bases.group import GroupEndpoint -from sentry.models.platformexternalissue import PlatformExternalIssue +from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue @region_silo_endpoint diff --git a/src/sentry/api/endpoints/group_external_issues.py b/src/sentry/api/endpoints/group_external_issues.py index b1c812c432d81..71ecceb05b00b 100644 --- a/src/sentry/api/endpoints/group_external_issues.py +++ b/src/sentry/api/endpoints/group_external_issues.py @@ -5,7 +5,7 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.group import GroupEndpoint from sentry.api.serializers import serialize -from sentry.models.platformexternalissue import PlatformExternalIssue +from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue @region_silo_endpoint diff --git a/src/sentry/api/endpoints/integrations/sentry_apps/__init__.py b/src/sentry/api/endpoints/integrations/sentry_apps/__init__.py index a3654763c8be0..7f0a6d8ce27d9 100644 --- a/src/sentry/api/endpoints/integrations/sentry_apps/__init__.py +++ b/src/sentry/api/endpoints/integrations/sentry_apps/__init__.py @@ -1,11 +1,5 @@ -from .internal_app_token.details import SentryInternalAppTokenDetailsEndpoint -from .internal_app_token.index import SentryInternalAppTokensEndpoint -from .stats.details import SentryAppStatsEndpoint -from .stats.index import SentryAppsStatsEndpoint - -__all__ = ( - "SentryAppsStatsEndpoint", - "SentryAppStatsEndpoint", - "SentryInternalAppTokenDetailsEndpoint", - "SentryInternalAppTokensEndpoint", +from sentry.sentry_apps.api.endpoints.sentry_internal_app_token_details import ( + SentryInternalAppTokenDetailsEndpoint, ) + +__all__ = ("SentryInternalAppTokenDetailsEndpoint",) diff --git a/src/sentry/api/endpoints/internal/feature_flags.py b/src/sentry/api/endpoints/internal/feature_flags.py index f1917b398f802..04775415f41a3 100644 --- a/src/sentry/api/endpoints/internal/feature_flags.py +++ b/src/sentry/api/endpoints/internal/feature_flags.py @@ -46,7 +46,7 @@ def put(self, request: Request) -> Response: for valid_flag in valid_feature_flags: match_found = False new_string = ( - 
f'\nSENTRY_FEATURES["{valid_flag}"]={request.data.get(valid_flag,False)}\n' + f'\nSENTRY_FEATURES["{valid_flag}"]={request.data.get(valid_flag, False)}\n' ) # Search for the string match and update lines for i, line in enumerate(lines): diff --git a/src/sentry/api/endpoints/notification_defaults.py b/src/sentry/api/endpoints/notification_defaults.py index 63a3baf3019ac..2a9684f9143b1 100644 --- a/src/sentry/api/endpoints/notification_defaults.py +++ b/src/sentry/api/endpoints/notification_defaults.py @@ -17,7 +17,6 @@ class NotificationDefaultsEndpoints(Endpoint): } owner = ApiOwner.ALERTS_NOTIFICATIONS permission_classes = () - private = True def get(self, request: Request) -> Response: """ diff --git a/src/sentry/api/endpoints/organization_access_request_details.py b/src/sentry/api/endpoints/organization_access_request_details.py index 4dfd8c1dda50b..06e89a732f592 100644 --- a/src/sentry/api/endpoints/organization_access_request_details.py +++ b/src/sentry/api/endpoints/organization_access_request_details.py @@ -1,3 +1,5 @@ +import logging + from django.db import IntegrityError, router, transaction from rest_framework import serializers from rest_framework.request import Request @@ -11,8 +13,11 @@ from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize from sentry.models.organizationaccessrequest import OrganizationAccessRequest +from sentry.models.organizationmember import OrganizationMember from sentry.models.organizationmemberteam import OrganizationMemberTeam +logger = logging.getLogger(__name__) + class AccessRequestPermission(OrganizationPermission): scope_map = { @@ -71,8 +76,8 @@ def _can_access(self, request: Request, access_request): def get(self, request: Request, organization) -> Response: """ - Get list of requests to join org/team - + Get a list of requests to join org/team. + If any requests are redundant (user already joined the team), they are not returned. """ if request.access.has_scope("org:write"): access_requests = list( @@ -80,7 +85,7 @@ def get(self, request: Request, organization) -> Response: team__organization=organization, member__user_is_active=True, member__user_id__isnull=False, - ).select_related("team") + ).select_related("team", "member") ) elif request.access.has_scope("team:write") and request.access.team_ids_with_membership: @@ -89,20 +94,28 @@ def get(self, request: Request, organization) -> Response: member__user_is_active=True, member__user_id__isnull=False, team__id__in=request.access.team_ids_with_membership, - ).select_related("team") + ).select_related("team", "member") ) else: # Return empty response if user does not have access return Response([]) - return Response(serialize(access_requests, request.user)) + teams_by_user = OrganizationMember.objects.get_teams_by_user(organization=organization) + + # We omit any requests which are now redundant (i.e. the user joined that team some other way) + valid_access_requests = [ + access_request + for access_request in access_requests + if access_request.member.user_id is not None + and access_request.team_id not in teams_by_user[access_request.member.user_id] + ] + + return Response(serialize(valid_access_requests, request.user)) def put(self, request: Request, organization, request_id) -> Response: """ Approve or deny a request - Approve or deny a request. 
- {method} {path} """ diff --git a/src/sentry/api/endpoints/organization_dashboards.py b/src/sentry/api/endpoints/organization_dashboards.py index 481c528d32065..5cc723e6a8a49 100644 --- a/src/sentry/api/endpoints/organization_dashboards.py +++ b/src/sentry/api/endpoints/organization_dashboards.py @@ -50,9 +50,25 @@ def has_object_permission(self, request: Request, view, obj): return super().has_object_permission(request, view, obj) if isinstance(obj, Dashboard): - for project in obj.projects.all(): - if not request.access.has_project_access(project): - return False + # 1. Dashboard contains certain projects + if obj.projects.exists(): + return request.access.has_projects_access(obj.projects.all()) + + # 2. Dashboard covers all projects or all my projects + + # allow when Open Membership + if obj.organization.flags.allow_joinleave: + return True + + # allow for Managers and Owners + if request.access.has_scope("org:write"): + return True + + # allow for creator + if request.user.id == obj.created_by_id: + return True + + return False return True diff --git a/src/sentry/api/endpoints/organization_details.py b/src/sentry/api/endpoints/organization_details.py index fb412016f4cf1..258f3202a386f 100644 --- a/src/sentry/api/endpoints/organization_details.py +++ b/src/sentry/api/endpoints/organization_details.py @@ -62,9 +62,11 @@ SAFE_FIELDS_DEFAULT, SCRAPE_JAVASCRIPT_DEFAULT, SENSITIVE_FIELDS_DEFAULT, + TARGET_SAMPLE_RATE_DEFAULT, UPTIME_AUTODETECTION, ) from sentry.datascrubbing import validate_pii_config_update, validate_pii_selectors +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.hybridcloud.rpc import IDEMPOTENCY_KEY_LENGTH from sentry.integrations.utils.codecov import has_codecov_integration from sentry.lang.native.utils import ( @@ -75,7 +77,6 @@ from sentry.models.avatars.organization_avatar import OrganizationAvatar from sentry.models.options.organization_option import OrganizationOption from sentry.models.organization import Organization, OrganizationStatus -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.organizations.services.organization import organization_service from sentry.organizations.services.organization.model import ( RpcOrganization, @@ -215,6 +216,7 @@ METRICS_ACTIVATE_LAST_FOR_GAUGES_DEFAULT, ), ("uptimeAutodetection", "sentry:uptime_autodetection", bool, UPTIME_AUTODETECTION), + ("targetSampleRate", "sentry:target_sample_rate", float, TARGET_SAMPLE_RATE_DEFAULT), ) DELETION_STATUSES = frozenset( @@ -276,6 +278,7 @@ class OrganizationSerializer(BaseOrganizationSerializer): relayPiiConfig = serializers.CharField(required=False, allow_blank=True, allow_null=True) apdexThreshold = serializers.IntegerField(min_value=1, required=False) uptimeAutodetection = serializers.BooleanField(required=False) + targetSampleRate = serializers.FloatField(required=False) @cached_property def _has_legacy_rate_limits(self): @@ -365,6 +368,25 @@ def validate_projectRateLimit(self, value): ) return value + def validate_targetSampleRate(self, value): + from sentry import features + + organization = self.context["organization"] + request = self.context["request"] + has_dynamic_sampling_custom = features.has( + "organizations:dynamic-sampling-custom", organization, actor=request.user + ) + if not has_dynamic_sampling_custom: + raise serializers.ValidationError( + "Organization does not have the custom dynamic sample rate feature enabled." 
+ ) + + if not 0.0 <= value <= 1.0: + raise serializers.ValidationError( + "The targetSampleRate option must be in the range [0:1]" + ) + return value + def validate(self, attrs): attrs = super().validate(attrs) if attrs.get("avatarType") == "upload": diff --git a/src/sentry/api/endpoints/organization_environments.py b/src/sentry/api/endpoints/organization_environments.py index 2b7e261415597..2176808a485e4 100644 --- a/src/sentry/api/endpoints/organization_environments.py +++ b/src/sentry/api/endpoints/organization_environments.py @@ -1,5 +1,3 @@ -from typing import TypedDict - from drf_spectacular.utils import extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -10,6 +8,7 @@ from sentry.api.bases import OrganizationEndpoint from sentry.api.helpers.environments import environment_visibility_filter_options from sentry.api.serializers import serialize +from sentry.api.serializers.models.environment import EnvironmentSerializerResponse from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN from sentry.apidocs.examples.environment_examples import EnvironmentExamples from sentry.apidocs.parameters import EnvironmentParams, GlobalParams @@ -17,11 +16,6 @@ from sentry.models.environment import Environment, EnvironmentProject -class OrganizationEnvironmentResponseType(TypedDict): - id: int - name: str - - @extend_schema(tags=["Environments"]) @region_silo_endpoint class OrganizationEnvironmentsEndpoint(OrganizationEndpoint): @@ -35,7 +29,7 @@ class OrganizationEnvironmentsEndpoint(OrganizationEndpoint): parameters=[GlobalParams.ORG_ID_OR_SLUG, EnvironmentParams.VISIBILITY], responses={ 200: inline_sentry_response_serializer( - "OrganizationEnvironmentResponse", list[OrganizationEnvironmentResponseType] + "OrganizationEnvironmentResponse", list[EnvironmentSerializerResponse] ), 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, diff --git a/src/sentry/api/endpoints/organization_events.py b/src/sentry/api/endpoints/organization_events.py index 672c4441708ea..3930d2130239f 100644 --- a/src/sentry/api/endpoints/organization_events.py +++ b/src/sentry/api/endpoints/organization_events.py @@ -59,13 +59,36 @@ class DiscoverDatasetSplitException(Exception): Referrer.API_DASHBOARDS_BIGNUMBERWIDGET.value, Referrer.API_DISCOVER_TRANSACTIONS_LIST.value, Referrer.API_DISCOVER_QUERY_TABLE.value, + Referrer.API_PERFORMANCE_BROWSER_RESOURCE_MAIN_TABLE.value, + Referrer.API_PERFORMANCE_BROWSER_RESOURCES_PAGE_SELECTOR.value, + Referrer.API_PERFORMANCE_BROWSER_WEB_VITALS_PROJECT.value, + Referrer.API_PERFORMANCE_BROWSER_WEB_VITALS_PROJECT_SCORES.value, + Referrer.API_PERFORMANCE_BROWSER_WEB_VITALS_TRANSACTION.value, + Referrer.API_PERFORMANCE_BROWSER_WEB_VITALS_TRANSACTIONS_SCORES.value, + Referrer.API_PERFORMANCE_CACHE_LANDING_CACHE_TRANSACTION_LIST.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_APDEX_AREA.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_HIGHEST_CACHE_MISS_RATE_TRANSACTIONS.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_MOST_FROZEN_FRAMES.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_MOST_SLOW_FRAMES.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_MOST_TIME_CONSUMING_DOMAINS.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_MOST_TIME_CONSUMING_RESOURCES.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_MOST_TIME_SPENT_DB_QUERIES.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_SLOW_DB_OPS.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_SLOW_HTTP_OPS.value, + 
Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_SLOW_RESOURCE_OPS.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_SLOW_SCREENS_BY_TTID.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_TPM_AREA.value, + Referrer.API_PERFORMANCE_GENERIC_WIDGET_CHART_USER_MISERY_AREA.value, Referrer.API_PERFORMANCE_VITALS_CARDS.value, Referrer.API_PERFORMANCE_LANDING_TABLE.value, - Referrer.API_PERFORMANCE_TRANSACTION_SUMMARY.value, + Referrer.API_PERFORMANCE_TRANSACTION_EVENTS.value, + Referrer.API_PERFORMANCE_TRANSACTION_NAME_SEARCH_BAR.value, Referrer.API_PERFORMANCE_TRANSACTION_SPANS.value, + Referrer.API_PERFORMANCE_TRANSACTION_SUMMARY.value, Referrer.API_PERFORMANCE_STATUS_BREAKDOWN.value, Referrer.API_PERFORMANCE_VITAL_DETAIL.value, Referrer.API_PERFORMANCE_DURATIONPERCENTILECHART.value, + Referrer.API_PERFORMANCE_TRACE_TRACE_DRAWER_TRANSACTION_CACHE_METRICS.value, Referrer.API_PERFORMANCE_TRANSACTIONS_STATISTICAL_DETECTOR_ROOT_CAUSE_ANALYSIS.value, Referrer.API_PROFILING_LANDING_TABLE.value, Referrer.API_PROFILING_LANDING_FUNCTIONS_CARD.value, @@ -83,12 +106,15 @@ class DiscoverDatasetSplitException(Exception): Referrer.API_TRACE_VIEW_ERRORS_VIEW.value, Referrer.API_TRACE_VIEW_HOVER_CARD.value, Referrer.API_ISSUES_ISSUE_EVENTS.value, + Referrer.API_STARFISH_DATABASE_SYSTEM_SELECTOR.value, Referrer.API_STARFISH_ENDPOINT_LIST.value, + Referrer.API_STARFISH_FULL_SPAN_FROM_TRACE.value, Referrer.API_STARFISH_GET_SPAN_ACTIONS.value, Referrer.API_STARFISH_GET_SPAN_DOMAINS.value, Referrer.API_STARFISH_GET_SPAN_OPERATIONS.value, Referrer.API_STARFISH_SIDEBAR_SPAN_METRICS.value, Referrer.API_STARFISH_SPAN_CATEGORY_BREAKDOWN.value, + Referrer.API_STARFISH_SPAN_DESCRIPTION.value, Referrer.API_STARFISH_SPAN_LIST.value, Referrer.API_STARFISH_SPAN_SUMMARY_P95.value, Referrer.API_STARFISH_SPAN_SUMMARY_PAGE.value, @@ -379,11 +405,12 @@ def get(self, request: Request, organization) -> Response: if request.auth: referrer = API_TOKEN_REFERRER elif referrer not in ALLOWED_EVENTS_REFERRERS: - with sentry_sdk.isolation_scope() as scope: - scope.set_tag("forbidden_referrer", referrer) - sentry_sdk.capture_message( - "Forbidden Referrer. If this is intentional, add it to `ALLOWED_EVENTS_REFERRERS`" - ) + if referrer: + with sentry_sdk.isolation_scope() as scope: + scope.set_tag("forbidden_referrer", referrer) + sentry_sdk.capture_message( + "Forbidden Referrer. 
If this is intentional, add it to `ALLOWED_EVENTS_REFERRERS`" + ) referrer = Referrer.API_ORGANIZATION_EVENTS.value def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]: diff --git a/src/sentry/api/endpoints/organization_events_anomalies.py b/src/sentry/api/endpoints/organization_events_anomalies.py index ea1cc23c5c510..4f5dfc081fe4a 100644 --- a/src/sentry/api/endpoints/organization_events_anomalies.py +++ b/src/sentry/api/endpoints/organization_events_anomalies.py @@ -6,6 +6,7 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint +from sentry.api.bases.organization import OrganizationAlertRulePermission from sentry.api.bases.organization_events import OrganizationEventsV2EndpointBase from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.paginator import OffsetPaginator @@ -32,6 +33,7 @@ class OrganizationEventsAnomaliesEndpoint(OrganizationEventsV2EndpointBase): publish_status = { "POST": ApiPublishStatus.EXPERIMENTAL, } + permission_classes = (OrganizationAlertRulePermission,) @extend_schema( operation_id="Identify anomalies in historical data", @@ -68,7 +70,9 @@ def post(self, request: Request, organization: Organization) -> Response: """ Return a list of anomalies for a time series of historical event data. """ - if not features.has("organizations:anomaly-detection-alerts", organization): + if not features.has( + "organizations:anomaly-detection-alerts", organization + ) and not features.has("organizations:anomaly-detection-rollout", organization): raise ResourceDoesNotExist("Your organization does not have access to this feature.") historical_data = self._format_historical_data(request.data.get("historical_data")) diff --git a/src/sentry/api/endpoints/organization_events_facets.py b/src/sentry/api/endpoints/organization_events_facets.py index 1704d7086a485..0879fc0700f11 100644 --- a/src/sentry/api/endpoints/organization_events_facets.py +++ b/src/sentry/api/endpoints/organization_events_facets.py @@ -32,7 +32,7 @@ def get(self, request: Request, organization) -> Response: update_snuba_params_with_timestamp(request, snuba_params, timestamp_key="traceTimestamp") def data_fn(offset, limit): - with sentry_sdk.start_span(op="discover.endpoint", description="discover_query"): + with sentry_sdk.start_span(op="discover.endpoint", name="discover_query"): with handle_query_errors(): facets = discover.get_facets( query=request.GET.get("query"), @@ -42,9 +42,7 @@ def data_fn(offset, limit): cursor=offset, ) - with sentry_sdk.start_span( - op="discover.endpoint", description="populate_results" - ) as span: + with sentry_sdk.start_span(op="discover.endpoint", name="populate_results") as span: span.set_data("facet_count", len(facets or [])) resp = defaultdict(lambda: {"key": "", "topValues": []}) for row in facets: diff --git a/src/sentry/api/endpoints/organization_events_facets_performance.py b/src/sentry/api/endpoints/organization_events_facets_performance.py index 4ee603dd1d4fa..c01dca77371f2 100644 --- a/src/sentry/api/endpoints/organization_events_facets_performance.py +++ b/src/sentry/api/endpoints/organization_events_facets_performance.py @@ -90,7 +90,7 @@ def get(self, request: Request, organization) -> Response: tag_key = TAG_ALIASES.get(tag_key) def data_fn(offset, limit): - with sentry_sdk.start_span(op="discover.endpoint", description="discover_query"): + with sentry_sdk.start_span(op="discover.endpoint", name="discover_query"): referrer = 
"api.organization-events-facets-performance.top-tags" tag_data = query_tag_data( filter_query=filter_query, @@ -178,7 +178,7 @@ def get(self, request: Request, organization) -> Response: tag_key = TAG_ALIASES.get(tag_key) def data_fn(offset, limit, raw_limit): - with sentry_sdk.start_span(op="discover.endpoint", description="discover_query"): + with sentry_sdk.start_span(op="discover.endpoint", name="discover_query"): referrer = "api.organization-events-facets-performance-histogram" top_tags = query_top_tags( tag_key=tag_key, @@ -269,9 +269,7 @@ def query_tag_data( :return: Returns the row with aggregate and count if the query was successful Returns None if query was not successful which causes the endpoint to return early """ - with sentry_sdk.start_span( - op="discover.discover", description="facets.filter_transform" - ) as span: + with sentry_sdk.start_span(op="discover.discover", name="facets.filter_transform") as span: span.set_data("query", filter_query) tag_query = DiscoverQueryBuilder( dataset=Dataset.Discover, @@ -289,7 +287,7 @@ def query_tag_data( Condition(tag_query.resolve_column(aggregate_column), Op.IS_NOT_NULL) ) - with sentry_sdk.start_span(op="discover.discover", description="facets.frequent_tags"): + with sentry_sdk.start_span(op="discover.discover", name="facets.frequent_tags"): # Get the average and count to use to filter the next request to facets tag_data = tag_query.run_query(f"{referrer}.all_transactions") @@ -324,7 +322,7 @@ def query_top_tags( """ translated_aggregate_column = discover.resolve_discover_column(aggregate_column) - with sentry_sdk.start_span(op="discover.discover", description="facets.top_tags"): + with sentry_sdk.start_span(op="discover.discover", name="facets.top_tags"): if not orderby: orderby = ["-count"] @@ -399,9 +397,7 @@ def query_facet_performance( tag_key_limit = limit if tag_key else 1 - with sentry_sdk.start_span( - op="discover.discover", description="facets.filter_transform" - ) as span: + with sentry_sdk.start_span(op="discover.discover", name="facets.filter_transform") as span: span.set_data("query", filter_query) tag_query = DiscoverQueryBuilder( dataset=Dataset.Discover, @@ -452,7 +448,7 @@ def query_facet_performance( ["trace", "trace.ctx", "trace.span", "project", "browser", "celery_task_id", "url"], ) - with sentry_sdk.start_span(op="discover.discover", description="facets.aggregate_tags"): + with sentry_sdk.start_span(op="discover.discover", name="facets.aggregate_tags"): span.set_data("sample_rate", sample_rate) span.set_data("target_sample", target_sample) aggregate_comparison = transaction_aggregate * 1.005 if transaction_aggregate else 0 diff --git a/src/sentry/api/endpoints/organization_events_has_measurements.py b/src/sentry/api/endpoints/organization_events_has_measurements.py index 96e18e20ece72..6c359059d991f 100644 --- a/src/sentry/api/endpoints/organization_events_has_measurements.py +++ b/src/sentry/api/endpoints/organization_events_has_measurements.py @@ -58,7 +58,7 @@ def get(self, request: Request, organization) -> Response: if not self.has_feature(organization, request): return Response(status=404) - with sentry_sdk.start_span(op="discover.endpoint", description="parse params"): + with sentry_sdk.start_span(op="discover.endpoint", name="parse params"): try: # This endpoint only allows for a single project + transaction, so no need # to check `global-views`. 
diff --git a/src/sentry/api/endpoints/organization_events_histogram.py b/src/sentry/api/endpoints/organization_events_histogram.py index 86a244f0a2eed..aa05de8b26383 100644 --- a/src/sentry/api/endpoints/organization_events_histogram.py +++ b/src/sentry/api/endpoints/organization_events_histogram.py @@ -72,7 +72,7 @@ def get(self, request: Request, organization) -> Response: sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced) - with sentry_sdk.start_span(op="discover.endpoint", description="histogram"): + with sentry_sdk.start_span(op="discover.endpoint", name="histogram"): serializer = HistogramSerializer(data=request.GET) if serializer.is_valid(): data = serializer.validated_data diff --git a/src/sentry/api/endpoints/organization_events_meta.py b/src/sentry/api/endpoints/organization_events_meta.py index 87b74bee64340..3b3f8f5375ec2 100644 --- a/src/sentry/api/endpoints/organization_events_meta.py +++ b/src/sentry/api/endpoints/organization_events_meta.py @@ -66,7 +66,7 @@ def get(self, request: Request, organization) -> Response: except NoProjects: return Response([]) - with sentry_sdk.start_span(op="discover.endpoint", description="find_lookup_keys") as span: + with sentry_sdk.start_span(op="discover.endpoint", name="find_lookup_keys") as span: possible_keys = ["transaction"] lookup_keys = {key: request.query_params.get(key) for key in possible_keys} @@ -79,7 +79,7 @@ def get(self, request: Request, organization) -> Response: ) with handle_query_errors(): - with sentry_sdk.start_span(op="discover.endpoint", description="filter_creation"): + with sentry_sdk.start_span(op="discover.endpoint", name="filter_creation"): projects = self.get_projects(request, organization) query_kwargs = build_query_params_from_request( request, organization, projects, snuba_params.environments @@ -99,10 +99,10 @@ def get(self, request: Request, organization) -> Response: query_kwargs["actor"] = request.user - with sentry_sdk.start_span(op="discover.endpoint", description="issue_search"): + with sentry_sdk.start_span(op="discover.endpoint", name="issue_search"): results_cursor = search.backend.query(**query_kwargs) - with sentry_sdk.start_span(op="discover.endpoint", description="serialize_results") as span: + with sentry_sdk.start_span(op="discover.endpoint", name="serialize_results") as span: results = list(results_cursor) span.set_data("result_length", len(results)) context = serialize( diff --git a/src/sentry/api/endpoints/organization_events_spans_histogram.py b/src/sentry/api/endpoints/organization_events_spans_histogram.py index af54c6ecbfc88..aef74605cf470 100644 --- a/src/sentry/api/endpoints/organization_events_spans_histogram.py +++ b/src/sentry/api/endpoints/organization_events_spans_histogram.py @@ -55,7 +55,7 @@ def get(self, request: Request, organization) -> Response: except NoProjects: return Response({}) - with sentry_sdk.start_span(op="discover.endpoint", description="spans_histogram"): + with sentry_sdk.start_span(op="discover.endpoint", name="spans_histogram"): serializer = SpansHistogramSerializer(data=request.GET) if serializer.is_valid(): data = serializer.validated_data diff --git a/src/sentry/api/endpoints/organization_events_spans_performance.py b/src/sentry/api/endpoints/organization_events_spans_performance.py index 727c15d8e7d7d..3a359569974cc 100644 --- a/src/sentry/api/endpoints/organization_events_spans_performance.py +++ b/src/sentry/api/endpoints/organization_events_spans_performance.py @@ -333,9 +333,7 @@ def get_event_stats( zerofill_results: bool, 
comparison_delta: datetime | None = None, ) -> SnubaTSResult: - with sentry_sdk.start_span( - op="discover.discover", description="timeseries.filter_transform" - ): + with sentry_sdk.start_span(op="discover.discover", name="timeseries.filter_transform"): builder = TimeseriesQueryBuilder( Dataset.Discover, {}, @@ -372,9 +370,7 @@ def get_event_stats( snql_query, "api.organization-events-spans-performance-stats" ) - with sentry_sdk.start_span( - op="discover.discover", description="timeseries.transform_results" - ): + with sentry_sdk.start_span(op="discover.discover", name="timeseries.transform_results"): result = discover.zerofill( results["data"], snuba_params.start_date, diff --git a/src/sentry/api/endpoints/organization_events_stats.py b/src/sentry/api/endpoints/organization_events_stats.py index f46ea78cd5fc9..0a11b698e8c81 100644 --- a/src/sentry/api/endpoints/organization_events_stats.py +++ b/src/sentry/api/endpoints/organization_events_stats.py @@ -183,7 +183,7 @@ def check_if_results_have_data(self, results: SnubaTSResult | dict[str, SnubaTSR def get(self, request: Request, organization: Organization) -> Response: query_source = self.get_request_source(request) - with sentry_sdk.start_span(op="discover.endpoint", description="filter_params") as span: + with sentry_sdk.start_span(op="discover.endpoint", name="filter_params") as span: span.set_data("organization", organization) top_events = 0 diff --git a/src/sentry/api/endpoints/organization_events_trace.py b/src/sentry/api/endpoints/organization_events_trace.py index 5d509b985534c..92f9f881500a9 100644 --- a/src/sentry/api/endpoints/organization_events_trace.py +++ b/src/sentry/api/endpoints/organization_events_trace.py @@ -38,7 +38,7 @@ from sentry.utils.numbers import base32_encode, format_grouped_length from sentry.utils.sdk import set_measurement from sentry.utils.snuba import bulk_snuba_queries -from sentry.utils.validators import INVALID_ID_DETAILS, is_event_id +from sentry.utils.validators import INVALID_ID_DETAILS, is_event_id, is_span_id logger: logging.Logger = logging.getLogger(__name__) MAX_TRACE_SIZE: int = 100 @@ -218,7 +218,7 @@ def __init__( @property def nodestore_event(self) -> Event | GroupEvent | None: if self._nodestore_event is None and not self.fetched_nodestore: - with sentry_sdk.start_span(op="nodestore", description="get_event_by_id"): + with sentry_sdk.start_span(op="nodestore", name="get_event_by_id"): self.fetched_nodestore = True self._nodestore_event = eventstore.backend.get_event_by_id( self.event["project.id"], self.event["id"] @@ -618,7 +618,11 @@ def query_trace_data( # id is just for consistent results transaction_orderby = ["-root", "timestamp", "id"] if event_id is not None: - transaction_columns.append(f'to_other(id, "{event_id}", 0, 1) AS target') + # Already validated to be one of the two + if is_event_id(event_id): + transaction_columns.append(f'to_other(id, "{event_id}", 0, 1) AS target') + else: + transaction_columns.append(f'to_other(trace.span, "{event_id}", 0, 1) AS target') # Target is the event_id the frontend plans to render, we try to sort it to the top so it loads even if its not # within the query limit, needs to be the first orderby cause it takes precedence over finding the root transaction_orderby.insert(0, "-target") @@ -758,7 +762,7 @@ def build_span_query(trace_id: str, spans_params: SnubaParams, query_spans: list sentry_sdk.set_measurement("trace_view.spans.span_minimum", span_minimum) sentry_sdk.set_tag("trace_view.split_by_char.optimization", len(query_spans) > 
span_minimum) if len(query_spans) > span_minimum: - # TODO because we're not doing an IN on a list of literals, snuba will not optimize the query with the HexInt + # TODO: because we're not doing an IN on a list of literals, snuba will not optimize the query with the HexInt # column processor which means we won't be taking advantage of the span_id index but if we only do this when we # have a lot of query_spans we should have a great performance improvement still. Once we do that we can simplify # this code and always apply this optimization @@ -790,7 +794,7 @@ def augment_transactions_with_spans( query_source: QuerySource | None = QuerySource.SENTRY_BACKEND, ) -> Sequence[SnubaTransaction]: """Augment the list of transactions with parent, error and problem data""" - with sentry_sdk.start_span(op="augment.transactions", description="setup"): + with sentry_sdk.start_span(op="augment.transactions", name="setup"): trace_parent_spans = set() # parent span ids of segment spans transaction_problem_map: dict[str, SnubaTransaction] = {} problem_project_map: dict[int, list[str]] = {} @@ -819,7 +823,7 @@ def augment_transactions_with_spans( else: sentry_sdk.set_tag("trace_view.missing_timestamp_constraints", True) - with sentry_sdk.start_span(op="augment.transactions", description="get transaction span ids"): + with sentry_sdk.start_span(op="augment.transactions", name="get transaction span ids"): for index, transaction in enumerate(transactions): transaction["occurrence_spans"] = [] transaction["issue_occurrences"] = [] @@ -839,7 +843,7 @@ def augment_transactions_with_spans( # parent span ids of the segment spans trace_parent_spans.add(transaction["trace.parent_span"]) - with sentry_sdk.start_span(op="augment.transactions", description="get perf issue span ids"): + with sentry_sdk.start_span(op="augment.transactions", name="get perf issue span ids"): for problem_project, occurrences in problem_project_map.items(): if occurrences: issue_occurrences.extend( @@ -855,7 +859,7 @@ def augment_transactions_with_spans( set(problem.evidence_data["offender_span_ids"]) ) - with sentry_sdk.start_span(op="augment.transactions", description="create query params"): + with sentry_sdk.start_span(op="augment.transactions", name="create query params"): query_spans = {*trace_parent_spans, *error_spans, *occurrence_spans} if "" in query_spans: query_spans.remove("") @@ -920,7 +924,7 @@ def augment_transactions_with_spans( parent["span_id"] = pad_span_id(parent["span_id"]) parent_map[parent["span_id"]] = parent - with sentry_sdk.start_span(op="augment.transactions", description="linking transactions"): + with sentry_sdk.start_span(op="augment.transactions", name="linking transactions"): for transaction in transactions: # For a given transaction, if parent span id exists in the transaction (so this is # not a root span), see if the indexed spans data can tell us what the parent @@ -929,7 +933,7 @@ def augment_transactions_with_spans( parent = parent_map.get(transaction["trace.parent_span"]) if parent is not None: transaction["trace.parent_transaction"] = parent["transaction.id"] - with sentry_sdk.start_span(op="augment.transactions", description="linking perf issues"): + with sentry_sdk.start_span(op="augment.transactions", name="linking perf issues"): for problem in issue_occurrences: for span_id in problem.evidence_data["offender_span_ids"]: parent = parent_map.get(span_id) @@ -938,7 +942,7 @@ def augment_transactions_with_spans( occurrence = parent.copy() occurrence["problem"] = problem
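                    # pair the offending span with its problem; the copy is collected onto
                    # the owning transaction's occurrence_spans in the next statement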
transaction_problem["occurrence_spans"].append(occurrence) - with sentry_sdk.start_span(op="augment.transactions", description="linking errors"): + with sentry_sdk.start_span(op="augment.transactions", name="linking errors"): for error in errors: parent = parent_map.get(error["trace.span"]) error["trace.transaction"] = parent["transaction.id"] if parent is not None else None @@ -1062,10 +1066,12 @@ def get(self, request: Request, organization: Organization, trace_id: str) -> Ht if detailed and use_spans: raise ParseError("Cannot return a detailed response while using spans") limit = min(int(request.GET.get("limit", MAX_TRACE_SIZE)), 10_000) - event_id = request.GET.get("event_id") or request.GET.get("eventId") + event_id = ( + request.GET.get("targetId") or request.GET.get("event_id") or request.GET.get("eventId") + ) # Only need to validate event_id as trace_id is validated in the URL - if event_id and not is_event_id(event_id): + if event_id and not (is_event_id(event_id) or is_span_id(event_id)): return Response({"detail": INVALID_ID_DETAILS.format("Event ID")}, status=400) query_source = self.get_request_source(request) @@ -1233,7 +1239,7 @@ def serialize( current_generation: int | None = None root_id: str | None = None - with sentry_sdk.start_span(op="building.trace", description="light trace"): + with sentry_sdk.start_span(op="building.trace", name="light trace"): # Check if the event is an orphan_error if not snuba_event or not nodestore_event: orphan_error = find_event( @@ -1438,7 +1444,7 @@ def serialize( to_check.append(root) iteration = 0 - with sentry_sdk.start_span(op="building.trace", description="full trace"): + with sentry_sdk.start_span(op="building.trace", name="full trace"): has_orphans = False while parent_map or to_check: @@ -1613,7 +1619,7 @@ def serialize_with_spans( if detailed: raise ParseError("Cannot return a detailed response using Spans") - with sentry_sdk.start_span(op="serialize", description="create parent map"): + with sentry_sdk.start_span(op="serialize", name="create parent map"): parent_to_children_event_map = defaultdict(list) serialized_transactions: list[TraceEvent] = [] for transaction in transactions: @@ -1642,7 +1648,7 @@ def serialize_with_spans( else: orphan_errors.append(error) - with sentry_sdk.start_span(op="serialize", description="associate children"): + with sentry_sdk.start_span(op="serialize", name="associate children"): for trace_event in serialized_transactions: event_id = trace_event.event["id"] if event_id in parent_to_children_event_map: @@ -1653,7 +1659,7 @@ def serialize_with_spans( parent_error_map.pop(event_id), key=lambda k: k["timestamp"] ) - with sentry_sdk.start_span(op="serialize", description="more orphans"): + with sentry_sdk.start_span(op="serialize", name="more orphans"): visited_transactions_ids: set[str] = { root_trace.event["id"] for root_trace in root_traces } @@ -1666,7 +1672,7 @@ def serialize_with_spans( for child in serialized_transaction.children: visited_transactions_ids.add(child.event["id"]) - with sentry_sdk.start_span(op="serialize", description="sort"): + with sentry_sdk.start_span(op="serialize", name="sort"): # Sort the results so they're consistent orphan_errors.sort(key=lambda k: k["timestamp"]) root_traces.sort(key=child_sort_key) @@ -1688,7 +1694,7 @@ def serialize_with_spans( if serialized_orphan is not None: result_transactions.append(serialized_orphan) - with sentry_sdk.start_span(op="serialize", description="to dict"): + with sentry_sdk.start_span(op="serialize", name="to dict"): return { 
"transactions": result_transactions, "orphan_errors": [self.serialize_error(error) for error in orphan_errors], diff --git a/src/sentry/api/endpoints/organization_events_trends.py b/src/sentry/api/endpoints/organization_events_trends.py index 63274f5cc45e0..e2cded11a2ee8 100644 --- a/src/sentry/api/endpoints/organization_events_trends.py +++ b/src/sentry/api/endpoints/organization_events_trends.py @@ -54,7 +54,7 @@ class TrendColumns(TypedDict): TREND_TYPES = [IMPROVED, REGRESSION] -# TODO move this to the builder file and introduce a top-events version instead +# TODO: move this to the builder file and introduce a top-events version instead class TrendQueryBuilder(DiscoverQueryBuilder): def convert_aggregate_filter_to_condition( self, aggregate_filter: AggregateFilter @@ -431,7 +431,7 @@ def get(self, request: Request, organization) -> Response: except NoProjects: return Response([]) - with sentry_sdk.start_span(op="discover.endpoint", description="trend_dates"): + with sentry_sdk.start_span(op="discover.endpoint", name="trend_dates"): middle_date = request.GET.get("middle") if middle_date: try: diff --git a/src/sentry/api/endpoints/organization_events_trends_v2.py b/src/sentry/api/endpoints/organization_events_trends_v2.py index e293806d073fa..95ca60a010f0a 100644 --- a/src/sentry/api/endpoints/organization_events_trends_v2.py +++ b/src/sentry/api/endpoints/organization_events_trends_v2.py @@ -177,7 +177,7 @@ def get_timeseries(top_events, _, rollup, zerofill_results): results[result_key]["data"].append(row) else: discarded += 1 - # TODO filter out entries that don't have transaction or trend_function + # TODO: filter out entries that don't have transaction or trend_function logger.warning( "trends.top-events.timeseries.key-mismatch", extra={ diff --git a/src/sentry/api/endpoints/organization_events_vitals.py b/src/sentry/api/endpoints/organization_events_vitals.py index 0dc36ffed38ee..c308e82810fcd 100644 --- a/src/sentry/api/endpoints/organization_events_vitals.py +++ b/src/sentry/api/endpoints/organization_events_vitals.py @@ -31,7 +31,7 @@ def get(self, request: Request, organization) -> Response: if not self.has_feature(organization, request): return Response(status=404) - with sentry_sdk.start_span(op="discover.endpoint", description="parse params"): + with sentry_sdk.start_span(op="discover.endpoint", name="parse params"): try: snuba_params = self.get_snuba_params(request, organization) except NoProjects: diff --git a/src/sentry/api/endpoints/organization_measurements_meta.py b/src/sentry/api/endpoints/organization_measurements_meta.py index 8346fb3f82663..bbca2d6a7b9a2 100644 --- a/src/sentry/api/endpoints/organization_measurements_meta.py +++ b/src/sentry/api/endpoints/organization_measurements_meta.py @@ -33,7 +33,7 @@ def get(self, request: Request, organization: Organization) -> Response: use_case_id=UseCaseID.TRANSACTIONS, ) - with start_span(op="transform", description="metric meta"): + with start_span(op="transform", name="metric meta"): result = { item["name"]: { "functions": METRIC_FUNCTION_LIST_BY_TYPE[item["type"]], diff --git a/src/sentry/api/endpoints/organization_member/details.py b/src/sentry/api/endpoints/organization_member/details.py index 57db9ef68552d..108d5b7eb0239 100644 --- a/src/sentry/api/endpoints/organization_member/details.py +++ b/src/sentry/api/endpoints/organization_member/details.py @@ -508,15 +508,16 @@ def delete( ) with transaction.atomic(router.db_for_write(OrganizationMember)): - # Delete any invite requests and pending invites by the deleted 
member - existing_invites = OrganizationMember.objects.filter( - Q(invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value) - | Q(token__isnull=False), - inviter_id=member.user_id, - organization=organization, - ) - for om in existing_invites: - om.delete() + if member.user_id: + # Delete any invite requests and pending invites by the deleted member + existing_invites = OrganizationMember.objects.filter( + Q(invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value) + | Q(token__isnull=False), + inviter_id=member.user_id, + organization=organization, + ) + for om in existing_invites: + om.delete() self.create_audit_entry( request=request, diff --git a/src/sentry/api/endpoints/organization_metrics_tag_details.py b/src/sentry/api/endpoints/organization_metrics_tag_details.py index 58ccb30864611..01b4e3d4dba63 100644 --- a/src/sentry/api/endpoints/organization_metrics_tag_details.py +++ b/src/sentry/api/endpoints/organization_metrics_tag_details.py @@ -41,7 +41,7 @@ def get(self, request: Request, organization: Organization, tag_name: str) -> Re for project in projects ): if len(metric_names) == 1 and metric_names[0].startswith("d:eap"): - # TODO hack for EAP, hardcode some metric names + # TODO: hack for EAP, hardcode some metric names if tag_name == "color": return Response( [ diff --git a/src/sentry/api/endpoints/organization_metrics_tags.py b/src/sentry/api/endpoints/organization_metrics_tags.py index 041eb403727ac..52d0cde0c3a01 100644 --- a/src/sentry/api/endpoints/organization_metrics_tags.py +++ b/src/sentry/api/endpoints/organization_metrics_tags.py @@ -58,7 +58,7 @@ def get(self, request: Request, organization: Organization) -> Response: for project in projects ): if metric_name.startswith("d:eap"): - # TODO hack for EAP, return a fixed list + # TODO: hack for EAP, return a fixed list return Response([Tag(key="color"), Tag(key="location")]) try: diff --git a/src/sentry/api/endpoints/organization_on_demand_metrics_estimation_stats.py b/src/sentry/api/endpoints/organization_on_demand_metrics_estimation_stats.py index fc9f1c3c4fe67..1f5901dfe5f28 100644 --- a/src/sentry/api/endpoints/organization_on_demand_metrics_estimation_stats.py +++ b/src/sentry/api/endpoints/organization_on_demand_metrics_estimation_stats.py @@ -66,9 +66,7 @@ def get(self, request: Request, organization: Organization) -> Response: if measurement is None: return Response({"detail": "missing required parameter yAxis"}, status=400) - with sentry_sdk.start_span( - op="discover.metrics.endpoint", description="get_full_metrics" - ) as span: + with sentry_sdk.start_span(op="discover.metrics.endpoint", name="get_full_metrics") as span: span.set_data("organization", organization) try: diff --git a/src/sentry/api/endpoints/organization_releases.py b/src/sentry/api/endpoints/organization_releases.py index de668f2730237..3fe2cab0ef644 100644 --- a/src/sentry/api/endpoints/organization_releases.py +++ b/src/sentry/api/endpoints/organization_releases.py @@ -42,6 +42,7 @@ SEMVER_PACKAGE_ALIAS, ) from sentry.search.events.filter import handle_operator_negation, parse_semver +from sentry.search.utils import get_latest_release from sentry.signals import release_created from sentry.snuba.sessions import STATS_PERIODS from sentry.types.activity import ActivityType @@ -101,6 +102,13 @@ def _filter_releases_by_query(queryset, organization, query, filter_params): query_q = ~Q(version__in=raw_value) elif search_filter.operator == "IN": query_q = Q(version__in=raw_value) + elif raw_value == "latest": + latest_releases = 
get_latest_release( + projects=filter_params["project_id"], + environments=filter_params.get("environment"), + organization_id=organization.id, + ) + query_q = Q(version__in=latest_releases) else: query_q = Q(version=search_filter.value.value) @@ -238,7 +246,7 @@ def get_projects(self, request: Request, organization, project_ids=None, project organization, project_ids=project_ids, project_slugs=project_slugs, - include_all_accessible="GET" != request.method, + include_all_accessible=False, ) def get(self, request: Request, organization) -> Response: diff --git a/src/sentry/api/endpoints/organization_sessions.py b/src/sentry/api/endpoints/organization_sessions.py index a247080b9b1e3..372c1e58b4012 100644 --- a/src/sentry/api/endpoints/organization_sessions.py +++ b/src/sentry/api/endpoints/organization_sessions.py @@ -82,9 +82,7 @@ def get(self, request: Request, organization) -> Response: def data_fn(offset: int, limit: int) -> SessionsQueryResult: with self.handle_query_errors(): - with sentry_sdk.start_span( - op="sessions.endpoint", description="build_sessions_query" - ): + with sentry_sdk.start_span(op="sessions.endpoint", name="build_sessions_query"): request_limit = None if request.GET.get("per_page") is not None: request_limit = limit diff --git a/src/sentry/api/endpoints/organization_spans_aggregation.py b/src/sentry/api/endpoints/organization_spans_aggregation.py index 8a4031f75c1bd..dbb7ec1a6d5ed 100644 --- a/src/sentry/api/endpoints/organization_spans_aggregation.py +++ b/src/sentry/api/endpoints/organization_spans_aggregation.py @@ -410,7 +410,7 @@ def get(self, request: Request, organization: Organization) -> Response: ) with sentry_sdk.start_span( - op="span.aggregation", description="AggregateIndexedSpans.build_aggregate_span_tree" + op="span.aggregation", name="AggregateIndexedSpans.build_aggregate_span_tree" ): aggregated_tree = AggregateIndexedSpans().build_aggregate_span_tree(results) @@ -442,7 +442,7 @@ def get(self, request: Request, organization: Organization) -> Response: ) with sentry_sdk.start_span( - op="span.aggregation", description="AggregateNodestoreSpans.build_aggregate_span_tree" + op="span.aggregation", name="AggregateNodestoreSpans.build_aggregate_span_tree" ): aggregated_tree = AggregateNodestoreSpans().build_aggregate_span_tree(events) diff --git a/src/sentry/api/endpoints/organization_spans_fields.py b/src/sentry/api/endpoints/organization_spans_fields.py index d3697277dbae7..6fe1203765fea 100644 --- a/src/sentry/api/endpoints/organization_spans_fields.py +++ b/src/sentry/api/endpoints/organization_spans_fields.py @@ -7,6 +7,8 @@ from rest_framework.request import Request from rest_framework.response import Response from sentry_protos.snuba.v1alpha.endpoint_tags_list_pb2 import ( + AttributeValuesRequest, + AttributeValuesResponse, TraceItemAttributesRequest, TraceItemAttributesResponse, ) @@ -20,6 +22,7 @@ from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint from sentry.api.bases import NoProjects, OrganizationEventsV2EndpointBase +from sentry.api.event_search import translate_escape_sequences from sentry.api.paginator import ChainPaginator from sentry.api.serializers import serialize from sentry.api.utils import handle_query_errors @@ -29,7 +32,7 @@ from sentry.snuba.dataset import Dataset from sentry.snuba.referrer import Referrer from sentry.tagstore.types import TagKey, TagValue -from sentry.utils import snuba +from sentry.utils import snuba_rpc # This causes problems if a user sends an 
attribute with any of these values # but the meta table currently can't handle that anyways @@ -55,6 +58,13 @@ class OrganizationSpansFieldsEndpointSerializer(serializers.Serializer): ) type = serializers.ChoiceField(["string", "number"], required=False) + def validate_type(self, value): + if value == "string": + return AttributeKey.Type.TYPE_STRING + if value == "number": + return AttributeKey.Type.TYPE_FLOAT + raise NotImplementedError + def validate(self, attrs): if attrs["dataset"] == "spans" and attrs.get("type") is None: raise ParseError(detail='type is required when using dataset="spans"') @@ -112,9 +122,9 @@ def get(self, request: Request, organization) -> Response: ), limit=max_span_tags, offset=0, - type=AttributeKey.Type.TYPE_STRING, + type=serialized["type"], ) - rpc_response = snuba.rpc(rpc_request, TraceItemAttributesResponse) + rpc_response = snuba_rpc.rpc(rpc_request, TraceItemAttributesResponse) paginator = ChainPaginator( [ @@ -195,6 +205,68 @@ def get(self, request: Request, organization, key: str) -> Response: max_span_tag_values = options.get("performance.spans-tags-values.max") + serializer = OrganizationSpansFieldsEndpointSerializer(data=request.GET) + if not serializer.is_valid(): + return Response(serializer.errors, status=400) + serialized = serializer.validated_data + + if serialized["dataset"] == "spans" and features.has( + "organizations:visibility-explore-dataset", organization, actor=request.user + ): + start_timestamp = Timestamp() + start_timestamp.FromDatetime( + snuba_params.start_date.replace(hour=0, minute=0, second=0, microsecond=0) + ) + + end_timestamp = Timestamp() + end_timestamp.FromDatetime( + snuba_params.end_date.replace(hour=0, minute=0, second=0, microsecond=0) + + timedelta(days=1) + ) + + query = translate_escape_sequences(request.GET.get("query", "")) + rpc_request = AttributeValuesRequest( + meta=RequestMeta( + organization_id=organization.id, + cogs_category="performance", + referrer=Referrer.API_SPANS_TAG_VALUES_RPC.value, + project_ids=snuba_params.project_ids, + start_timestamp=start_timestamp, + end_timestamp=end_timestamp, + trace_item_name=TraceItemName.TRACE_ITEM_NAME_EAP_SPANS, + ), + name=key, + value_substring_match=query, + limit=max_span_tag_values, + offset=0, + ) + rpc_response = snuba_rpc.rpc(rpc_request, AttributeValuesResponse) + + paginator = ChainPaginator( + [ + [ + TagValue( + key=key, + value=tag_value, + times_seen=None, + first_seen=None, + last_seen=None, + ) + for tag_value in rpc_response.values + if tag_value + ] + ], + max_limit=max_span_tag_values, + ) + + return self.paginate( + request=request, + paginator=paginator, + on_results=lambda results: serialize(results, request.user), + default_per_page=max_span_tag_values, + max_per_page=max_span_tag_values, + ) + executor = SpanFieldValuesAutocompletionExecutor( snuba_params=snuba_params, key=key, @@ -339,7 +411,7 @@ def get_autocomplete_query_base(self) -> BaseQueryBuilder: def get_autocomplete_results(self, query: BaseQueryBuilder) -> list[TagValue]: with handle_query_errors(): - results = query.process_results(query.run_query(Referrer.API_SPANS_TAG_KEYS.value)) + results = query.process_results(query.run_query(Referrer.API_SPANS_TAG_VALUES.value)) return [ TagValue( diff --git a/src/sentry/api/endpoints/organization_stats_summary.py b/src/sentry/api/endpoints/organization_stats_summary.py index d81651da576be..311ca67a2c8c1 100644 --- a/src/sentry/api/endpoints/organization_stats_summary.py +++ b/src/sentry/api/endpoints/organization_stats_summary.py @@ 
-142,16 +142,14 @@ def get(self, request: Request, organization) -> HttpResponse: """ with self.handle_query_errors(): tenant_ids = {"organization_id": organization.id} - with sentry_sdk.start_span(op="outcomes.endpoint", description="build_outcomes_query"): + with sentry_sdk.start_span(op="outcomes.endpoint", name="build_outcomes_query"): query = self.build_outcomes_query( request, organization, ) - with sentry_sdk.start_span(op="outcomes.endpoint", description="run_outcomes_query"): + with sentry_sdk.start_span(op="outcomes.endpoint", name="run_outcomes_query"): result_totals = run_outcomes_query_totals(query, tenant_ids=tenant_ids) - with sentry_sdk.start_span( - op="outcomes.endpoint", description="massage_outcomes_result" - ): + with sentry_sdk.start_span(op="outcomes.endpoint", name="massage_outcomes_result"): projects, result = massage_sessions_result_summary( query, result_totals, request.GET.getlist("outcome") ) diff --git a/src/sentry/api/endpoints/organization_stats_v2.py b/src/sentry/api/endpoints/organization_stats_v2.py index 833879efc119e..9627e04d077b8 100644 --- a/src/sentry/api/endpoints/organization_stats_v2.py +++ b/src/sentry/api/endpoints/organization_stats_v2.py @@ -166,21 +166,19 @@ def get(self, request: Request, organization) -> Response: with self.handle_query_errors(): tenant_ids = {"organization_id": organization.id} - with sentry_sdk.start_span(op="outcomes.endpoint", description="build_outcomes_query"): + with sentry_sdk.start_span(op="outcomes.endpoint", name="build_outcomes_query"): query = self.build_outcomes_query( request, organization, ) - with sentry_sdk.start_span(op="outcomes.endpoint", description="run_outcomes_query"): + with sentry_sdk.start_span(op="outcomes.endpoint", name="run_outcomes_query"): result_totals = run_outcomes_query_totals(query, tenant_ids=tenant_ids) result_timeseries = ( None if "project_id" in query.query_groupby else run_outcomes_query_timeseries(query, tenant_ids=tenant_ids) ) - with sentry_sdk.start_span( - op="outcomes.endpoint", description="massage_outcomes_result" - ): + with sentry_sdk.start_span(op="outcomes.endpoint", name="massage_outcomes_result"): result = massage_outcomes_result(query, result_totals, result_timeseries) return Response(result, status=200) diff --git a/src/sentry/api/endpoints/organization_tags.py b/src/sentry/api/endpoints/organization_tags.py index 4a99e726ce231..197210992ecd4 100644 --- a/src/sentry/api/endpoints/organization_tags.py +++ b/src/sentry/api/endpoints/organization_tags.py @@ -37,7 +37,7 @@ def get(self, request: Request, organization) -> Response: else: dataset = Dataset.Discover - with sentry_sdk.start_span(op="tagstore", description="get_tag_keys_for_projects"): + with sentry_sdk.start_span(op="tagstore", name="get_tag_keys_for_projects"): with handle_query_errors(): results = tagstore.backend.get_tag_keys_for_projects( filter_params["project_id"], diff --git a/src/sentry/api/endpoints/organization_teams.py b/src/sentry/api/endpoints/organization_teams.py index 505c6bc3d76fb..5b7f1f6bea456 100644 --- a/src/sentry/api/endpoints/organization_teams.py +++ b/src/sentry/api/endpoints/organization_teams.py @@ -18,6 +18,7 @@ from sentry.apidocs.examples.team_examples import TeamExamples from sentry.apidocs.parameters import CursorQueryParam, GlobalParams, TeamParams from sentry.apidocs.utils import inline_sentry_response_serializer +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH from sentry.integrations.models.external_actor import ExternalActor from 
sentry.models.organizationmember import OrganizationMember from sentry.models.organizationmemberteam import OrganizationMemberTeam @@ -44,7 +45,7 @@ class TeamPostSerializer(serializers.Serializer): slug = SentrySerializerSlugField( help_text="""Uniquely identifies a team and is used for the interface. If not provided, it is automatically generated from the name.""", - max_length=50, + max_length=DEFAULT_SLUG_MAX_LENGTH, required=False, allow_null=True, ) diff --git a/src/sentry/api/endpoints/organization_traces.py b/src/sentry/api/endpoints/organization_traces.py index abfa434f5ecbc..4018865ed536f 100644 --- a/src/sentry/api/endpoints/organization_traces.py +++ b/src/sentry/api/endpoints/organization_traces.py @@ -7,6 +7,7 @@ from typing import Any, Literal, NotRequired, TypedDict import sentry_sdk +from django.utils import timezone from rest_framework import serializers from rest_framework.exceptions import ParseError, ValidationError from rest_framework.request import Request @@ -27,7 +28,9 @@ from sentry.search.events.builder.base import BaseQueryBuilder from sentry.search.events.builder.discover import DiscoverQueryBuilder from sentry.search.events.builder.spans_indexed import ( + SpansEAPQueryBuilder, SpansIndexedQueryBuilder, + TimeseriesSpanEAPIndexedQueryBuilder, TimeseriesSpanIndexedQueryBuilder, ) from sentry.search.events.constants import TIMEOUT_SPAN_ERROR_MESSAGE @@ -81,6 +84,9 @@ class TraceResult(TypedDict): class OrganizationTracesSerializer(serializers.Serializer): + dataset = serializers.ChoiceField( + ["spans", "spansIndexed"], required=False, default="spansIndexed" + ) metricsMax = serializers.FloatField(required=False) metricsMin = serializers.FloatField(required=False) metricsOp = serializers.CharField(required=False) @@ -91,6 +97,24 @@ class OrganizationTracesSerializer(serializers.Serializer): query = serializers.ListField( required=False, allow_empty=True, child=serializers.CharField(allow_blank=True) ) + sort = serializers.CharField(required=False) + + def validate_dataset(self, value): + if value == "spans": + return Dataset.EventsAnalyticsPlatform + if value == "spansIndexed": + return Dataset.SpansIndexed + raise ParseError(detail=f"Unsupported dataset: {value}") + + def validate(self, data): + if data["dataset"] == Dataset.EventsAnalyticsPlatform: + sort = data.get("sort") + if sort is not None: + sort_field = sort[1:] if sort.startswith("-") else sort + + if sort_field not in {"timestamp"}: + raise ParseError(detail=f"Unsupported sort: {sort}") + return data @contextmanager @@ -124,14 +148,21 @@ def get(self, request: Request, organization: Organization) -> Response: except NoProjects: return Response(status=404) + buffer = options.get("performance.traces.trace-explorer-skip-recent-seconds") + now = timezone.now() - timedelta(seconds=buffer) + assert snuba_params.end is not None + snuba_params.end = min(snuba_params.end, now) + serializer = OrganizationTracesSerializer(data=request.GET) if not serializer.is_valid(): return Response(serializer.errors, status=400) serialized = serializer.validated_data executor = TracesExecutor( + dataset=serialized["dataset"], snuba_params=snuba_params, user_queries=serialized.get("query", []), + sort=serialized.get("sort"), metrics_max=serialized.get("metricsMax"), metrics_min=serialized.get("metricsMin"), metrics_operation=serialized.get("metricsOp"), @@ -163,6 +194,9 @@ def get(self, request: Request, organization: Organization) -> Response: class OrganizationTraceSpansSerializer(serializers.Serializer): + dataset = 
serializers.ChoiceField( + ["spans", "spansIndexed"], required=False, default="spansIndexed" + ) metricsMax = serializers.FloatField(required=False) metricsMin = serializers.FloatField(required=False) metricsOp = serializers.CharField(required=False) @@ -175,6 +209,13 @@ class OrganizationTraceSpansSerializer(serializers.Serializer): required=False, allow_empty=True, child=serializers.CharField(allow_blank=True) ) + def validate_dataset(self, value): + if value == "spans": + return Dataset.EventsAnalyticsPlatform + if value == "spansIndexed": + return Dataset.SpansIndexed + raise ParseError(detail=f"Unsupported dataset: {value}") + @region_silo_endpoint class OrganizationTraceSpansEndpoint(OrganizationTracesEndpointBase): @@ -197,6 +238,7 @@ def get(self, request: Request, organization: Organization, trace_id: str) -> Re serialized = serializer.validated_data executor = TraceSpansExecutor( + dataset=serialized["dataset"], snuba_params=snuba_params, trace_id=trace_id, fields=serialized["field"], @@ -224,11 +266,21 @@ def get(self, request: Request, organization: Organization, trace_id: str) -> Re class OrganizationTracesStatsSerializer(serializers.Serializer): + dataset = serializers.ChoiceField( + ["spans", "spansIndexed"], required=False, default="spansIndexed" + ) query = serializers.ListField( required=False, allow_empty=True, child=serializers.CharField(allow_blank=True) ) yAxis = serializers.ListField(required=True, child=serializers.CharField()) + def validate_dataset(self, value): + if value == "spans": + return Dataset.EventsAnalyticsPlatform + if value == "spansIndexed": + return Dataset.SpansIndexed + raise ParseError(detail=f"Unsupported dataset: {value}") + @region_silo_endpoint class OrganizationTracesStatsEndpoint(OrganizationTracesEndpointBase): @@ -271,6 +323,7 @@ def get_event_stats( comparison_delta: timedelta | None, ) -> SnubaTSResult: executor = TraceStatsExecutor( + dataset=serialized["dataset"], snuba_params=snuba_params, columns=serialized["yAxis"], user_queries=serialized.get("query", []), @@ -300,8 +353,10 @@ class TracesExecutor: def __init__( self, *, + dataset: Dataset, snuba_params: SnubaParams, user_queries: list[str], + sort: str | None, metrics_max: float | None, metrics_min: float | None, metrics_operation: str | None, @@ -311,8 +366,10 @@ def __init__( breakdown_slices: int, get_all_projects: Callable[[], list[Project]], ): + self.dataset = dataset self.snuba_params = snuba_params - self.user_queries = process_user_queries(snuba_params, user_queries) + self.user_queries = process_user_queries(snuba_params, user_queries, dataset) + self.sort = sort self.metrics_max = metrics_max self.metrics_min = metrics_min self.metrics_operation = metrics_operation @@ -338,11 +395,11 @@ def _execute(self): self.snuba_params, ) - self.refine_params(min_timestamp, max_timestamp) - if not trace_ids: return [] + self.refine_params(min_timestamp, max_timestamp) + with handle_span_query_errors(): snuba_params = self.params_with_all_projects() @@ -384,6 +441,9 @@ def _execute(self): traces_breakdown_projects_results=traces_breakdown_projects_results, ) + ordering = {trace_id: i for i, trace_id in enumerate(trace_ids)} + data.sort(key=lambda trace: ordering[trace["trace"]]) + return data def refine_params(self, min_timestamp: datetime, max_timestamp: datetime): @@ -564,21 +624,99 @@ def get_traces_matching_span_conditions_in_traces( def get_traces_matching_span_conditions_query( self, snuba_params: SnubaParams, - sort: str | None = None, + ) -> tuple[BaseQueryBuilder, str]: + 
if self.dataset == Dataset.EventsAnalyticsPlatform: + return self.get_traces_matching_span_conditions_query_eap(snuba_params) + return self.get_traces_matching_span_conditions_query_indexed(snuba_params) + + def get_traces_matching_span_conditions_query_eap( + self, + snuba_params: SnubaParams, ) -> tuple[BaseQueryBuilder, str]: if len(self.user_queries) < 2: timestamp_column = "timestamp" else: timestamp_column = "min(timestamp)" - if sort == "-timestamp": + if self.sort == "-timestamp": orderby = [f"-{timestamp_column}"] + elif self.sort == "timestamp": + orderby = [timestamp_column] else: # The orderby is intentionally `None` here as this query is much faster # if we let Clickhouse decide which order to return the results in. # This also means we cannot order by any columns or paginate. orderby = None + if len(self.user_queries) < 2: + # Optimization: If there is only a condition for a single span, + # we can take the fast path and query without using aggregates. + query = SpansEAPQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=snuba_params, + query=None, + selected_columns=["trace", timestamp_column], + orderby=orderby, + limit=self.limit, + limitby=("trace", 1), + config=QueryBuilderConfig( + transform_alias_to_input_format=True, + ), + ) + + for where in self.user_queries.values(): + query.where.extend(where) + else: + query = SpansEAPQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=snuba_params, + query=None, + selected_columns=["trace", timestamp_column], + orderby=orderby, + limit=self.limit, + limitby=("trace", 1), + config=QueryBuilderConfig( + auto_aggregations=True, + transform_alias_to_input_format=True, + ), + ) + + trace_conditions = [] + for where in self.user_queries.values(): + if len(where) == 1: + trace_conditions.extend(where) + elif len(where) > 1: + trace_conditions.append(BooleanCondition(op=BooleanOp.AND, conditions=where)) + + # Transform the condition into it's aggregate form so it can be used to + # match on the trace. + new_condition = generate_trace_condition(where) + if new_condition: + query.having.append(new_condition) + + if len(trace_conditions) == 1: + # This should never happen since it should use a flat query + # but handle it just in case. + query.where.extend(trace_conditions) + elif len(trace_conditions) > 1: + query.where.append(BooleanCondition(op=BooleanOp.OR, conditions=trace_conditions)) + + if options.get("performance.traces.trace-explorer-skip-floating-spans"): + query.add_conditions([Condition(Column("segment_id"), Op.NEQ, "00")]) + + return query, timestamp_column + + def get_traces_matching_span_conditions_query_indexed( + self, + snuba_params: SnubaParams, + ) -> tuple[BaseQueryBuilder, str]: + if len(self.user_queries) < 2: + timestamp_column = "timestamp" + else: + timestamp_column = "min(timestamp)" + if len(self.user_queries) < 2: # Optimization: If there is only a condition for a single span, # we can take the fast path and query without using aggregates. 
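The two EAP builders above share one composition rule, which reappears in the metas, timeseries, and user-spans queries further down: conditions parsed from a single user query are AND-ed together, and the per-query groups are then OR-ed, so a span qualifies a trace by matching any one complete user query. A self-contained sketch of that rule using snuba_sdk, with toy column names that are illustrative rather than the real EAP schema:

    from snuba_sdk import BooleanCondition, BooleanOp, Column, Condition, Op

    def combine_user_queries(groups):
        """groups[i] is the list of WHERE conditions parsed from user query i."""
        trace_conditions = []
        for where in groups:
            if len(where) == 1:
                trace_conditions.extend(where)
            elif len(where) > 1:
                # AND together the conditions that came from the same user query
                trace_conditions.append(BooleanCondition(op=BooleanOp.AND, conditions=where))
        if not trace_conditions:
            return None  # no user queries: nothing to filter on
        if len(trace_conditions) == 1:
            return trace_conditions[0]
        # OR the per-query groups: any one full user query may match
        return BooleanCondition(op=BooleanOp.OR, conditions=trace_conditions)

    # yields (op = "db" AND duration > 100) OR (op = "http.client")
    combined = combine_user_queries(
        [
            [Condition(Column("op"), Op.EQ, "db"), Condition(Column("duration"), Op.GT, 100)],
            [Condition(Column("op"), Op.EQ, "http.client")],
        ]
    )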
@@ -588,7 +726,6 @@ def get_traces_matching_span_conditions_query( snuba_params=snuba_params, query=None, selected_columns=["trace", timestamp_column], - orderby=orderby, limit=self.limit, limitby=("trace", 1), config=QueryBuilderConfig( @@ -605,7 +742,6 @@ def get_traces_matching_span_conditions_query( snuba_params=snuba_params, query=None, selected_columns=["trace", timestamp_column], - orderby=orderby, limit=self.limit, config=QueryBuilderConfig( auto_aggregations=True, @@ -759,6 +895,50 @@ def get_traces_breakdown_projects_query( self, snuba_params: SnubaParams, trace_ids: list[str], + ) -> tuple[BaseQueryBuilder, Referrer]: + if self.dataset == Dataset.EventsAnalyticsPlatform: + return self.get_traces_breakdown_projects_query_eap(snuba_params, trace_ids) + return self.get_traces_breakdown_projects_query_indexed(snuba_params, trace_ids) + + def get_traces_breakdown_projects_query_eap( + self, + snuba_params: SnubaParams, + trace_ids: list[str], + ) -> tuple[BaseQueryBuilder, Referrer]: + query = SpansEAPQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=snuba_params, + query="is_transaction:1", + selected_columns=[ + "trace", + "project", + "sdk.name", + "span.op", + "parent_span", + "transaction", + "precise.start_ts", + "precise.finish_ts", + ], + orderby=["precise.start_ts", "-precise.finish_ts"], + # limit the number of segments we fetch per trace so a single + # large trace does not result in the rest being blank + limitby=("trace", int(MAX_SNUBA_RESULTS / len(trace_ids))), + limit=MAX_SNUBA_RESULTS, + config=QueryBuilderConfig( + transform_alias_to_input_format=True, + ), + ) + + # restrict the query to just this subset of trace ids + query.add_conditions([Condition(Column("trace_id"), Op.IN, trace_ids)]) + + return query, Referrer.API_TRACE_EXPLORER_TRACES_BREAKDOWNS + + def get_traces_breakdown_projects_query_indexed( + self, + snuba_params: SnubaParams, + trace_ids: list[str], ) -> tuple[BaseQueryBuilder, Referrer]: query = SpansIndexedQueryBuilder( Dataset.SpansIndexed, @@ -794,6 +974,74 @@ def get_traces_metas_query( self, snuba_params: SnubaParams, trace_ids: list[str], + ) -> tuple[BaseQueryBuilder, Referrer]: + if self.dataset == Dataset.EventsAnalyticsPlatform: + return self.get_traces_metas_query_eap(snuba_params, trace_ids) + return self.get_traces_metas_query_indexed(snuba_params, trace_ids) + + def get_traces_metas_query_eap( + self, + snuba_params: SnubaParams, + trace_ids: list[str], + ) -> tuple[BaseQueryBuilder, Referrer]: + query = SpansEAPQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=snuba_params, + query=None, + selected_columns=[ + "trace", + "count()", + "first_seen()", + "last_seen()", + ], + limit=len(trace_ids), + config=QueryBuilderConfig( + functions_acl=["first_seen", "last_seen"], + transform_alias_to_input_format=True, + ), + ) + + # restrict the query to just this subset of trace ids + query.add_conditions([Condition(Column("trace_id"), Op.IN, trace_ids)]) + + """ + We want to get a count of the number of matching spans. To do this, we have to + translate the user queries into conditions, and get a count of spans that match + any one of the user queries. 
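+        With no user queries this falls back to a plain count(); a single user
+        query becomes countIf(<its condition>); several become countIf(or(...)),
+        mirroring the OR-of-ANDs shape used when matching traces.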
+ """ + + # Translate each user query into a condition to match one + trace_conditions = [] + for where in self.user_queries.values(): + trace_condition = format_as_trace_conditions(where) + if not trace_condition: + continue + elif len(trace_condition) == 1: + trace_conditions.append(trace_condition[0]) + else: + trace_conditions.append(Function("and", trace_condition)) + + # Join all the user queries together into a single one where at least 1 have + # to be true. + if not trace_conditions: + query.columns.append(Function("count", [], MATCHING_COUNT_ALIAS)) + elif len(trace_conditions) == 1: + query.columns.append(Function("countIf", trace_conditions, MATCHING_COUNT_ALIAS)) + else: + query.columns.append( + Function("countIf", [Function("or", trace_conditions)], MATCHING_COUNT_ALIAS) + ) + + if options.get("performance.traces.trace-explorer-skip-floating-spans"): + query.add_conditions([Condition(Column("segment_id"), Op.NEQ, "00")]) + + return query, Referrer.API_TRACE_EXPLORER_TRACES_META + + def get_traces_metas_query_indexed( + self, + snuba_params: SnubaParams, + trace_ids: list[str], ) -> tuple[BaseQueryBuilder, Referrer]: query = SpansIndexedQueryBuilder( Dataset.SpansIndexed, @@ -898,6 +1146,7 @@ class TraceSpansExecutor: def __init__( self, *, + dataset: Dataset, snuba_params: SnubaParams, trace_id: str, fields: list[str], @@ -909,10 +1158,11 @@ def __init__( metrics_query: str | None, mri: str | None, ): + self.dataset = dataset self.snuba_params = snuba_params self.trace_id = trace_id self.fields = fields - self.user_queries = process_user_queries(snuba_params, user_queries) + self.user_queries = process_user_queries(snuba_params, user_queries, dataset) self.metrics_max = metrics_max self.metrics_min = metrics_min self.metrics_operation = metrics_operation @@ -990,6 +1240,76 @@ def get_user_spans_query( span_keys: list[SpanKey] | None, limit: int, offset: int, + ) -> BaseQueryBuilder: + if self.dataset == Dataset.EventsAnalyticsPlatform: + # span_keys is not supported in EAP mode because that's a legacy + # code path to support metrics that no longer exists + return self.get_user_spans_query_eap(snuba_params, limit, offset) + return self.get_user_spans_query_indexed(snuba_params, span_keys, limit, offset) + + def get_user_spans_query_eap( + self, + snuba_params: SnubaParams, + limit: int, + offset: int, + ) -> BaseQueryBuilder: + user_spans_query = SpansEAPQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=snuba_params, + query=None, # Note: conditions are added below + selected_columns=self.fields, + orderby=self.sort, + limit=limit, + offset=offset, + config=QueryBuilderConfig( + transform_alias_to_input_format=True, + ), + ) + + user_conditions = [] + + for where in self.user_queries.values(): + user_conditions.append(where) + + # First make sure that we only return spans from the trace specified + user_spans_query.add_conditions([Condition(Column("trace_id"), Op.EQ, self.trace_id)]) + + conditions = [] + + # Next we have to turn the user queries into the appropriate conditions in + # the SnQL that we produce. + + # There are multiple sets of user conditions that needs to be satisfied + # and if a span satisfy any of them, it should be considered. + # + # To handle this use case, we want to OR all the user specified + # conditions together in this query. 
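+        # For example, the queries ["span.op:db span.duration:>100ms",
+        # "span.op:http.client"] match spans satisfying either complete query,
+        # never a mix of conditions drawn from both.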
+ for where in user_conditions: + if len(where) > 1: + conditions.append(BooleanCondition(op=BooleanOp.AND, conditions=where)) + elif len(where) == 1: + conditions.append(where[0]) + + if len(conditions) > 1: + # More than 1 set of conditions were specified, we want to show + # spans that match any 1 of them so join the conditions with `OR`s. + user_spans_query.add_conditions( + [BooleanCondition(op=BooleanOp.OR, conditions=conditions)] + ) + elif len(conditions) == 1: + # Only 1 set of user conditions were specified, simply insert them into + # the final query. + user_spans_query.add_conditions([conditions[0]]) + + return user_spans_query + + def get_user_spans_query_indexed( + self, + snuba_params: SnubaParams, + span_keys: list[SpanKey] | None, + limit: int, + offset: int, ) -> BaseQueryBuilder: user_spans_query = SpansIndexedQueryBuilder( Dataset.SpansIndexed, @@ -1098,15 +1418,17 @@ class TraceStatsExecutor: def __init__( self, *, + dataset: Dataset, snuba_params: SnubaParams, columns: list[str], user_queries: list[str], rollup: int, zerofill_results: bool, ): + self.dataset = dataset self.snuba_params = snuba_params self.columns = columns - self.user_queries = process_user_queries(snuba_params, user_queries) + self.user_queries = process_user_queries(snuba_params, user_queries, dataset) self.rollup = rollup self.zerofill_results = zerofill_results @@ -1137,6 +1459,39 @@ def execute(self) -> SnubaTSResult: ) def get_timeseries_query(self) -> BaseQueryBuilder: + if self.dataset == Dataset.EventsAnalyticsPlatform: + return self.get_timeseries_query_eap() + return self.get_timeseries_query_indexed() + + def get_timeseries_query_eap(self) -> BaseQueryBuilder: + query = TimeseriesSpanEAPIndexedQueryBuilder( + Dataset.EventsAnalyticsPlatform, + params={}, + snuba_params=self.snuba_params, + interval=self.rollup, + query=None, + selected_columns=self.columns, + ) + + trace_conditions = [] + + for where in self.user_queries.values(): + if len(where) == 1: + trace_conditions.extend(where) + elif len(where) > 1: + trace_conditions.append(BooleanCondition(op=BooleanOp.AND, conditions=where)) + + if len(trace_conditions) == 1: + query.where.extend(trace_conditions) + elif len(trace_conditions) > 1: + query.where.append(BooleanCondition(op=BooleanOp.OR, conditions=trace_conditions)) + + if options.get("performance.traces.trace-explorer-skip-floating-spans"): + query.add_conditions([Condition(Column("segment_id"), Op.NEQ, "00")]) + + return query + + def get_timeseries_query_indexed(self) -> BaseQueryBuilder: query = TimeseriesSpanIndexedQueryBuilder( Dataset.SpansIndexed, params={}, @@ -1468,18 +1823,33 @@ def stack_clear(trace, until=None): def process_user_queries( snuba_params: SnubaParams, user_queries: list[str], + dataset: Dataset = Dataset.SpansIndexed, ) -> dict[str, list[list[WhereType]]]: with handle_span_query_errors(): - builder = SpansIndexedQueryBuilder( - Dataset.SpansIndexed, - params={}, - snuba_params=snuba_params, - query=None, # Note: conditions are added below - selected_columns=[], - config=QueryBuilderConfig( - transform_alias_to_input_format=True, - ), - ) + if dataset == Dataset.EventsAnalyticsPlatform: + span_indexed_builder = SpansEAPQueryBuilder( + dataset, + params={}, + snuba_params=snuba_params, + query=None, # Note: conditions are added below + selected_columns=[], + config=QueryBuilderConfig( + transform_alias_to_input_format=True, + ), + ) + resolve_conditions = span_indexed_builder.resolve_conditions + else: + span_eap_builder = SpansIndexedQueryBuilder( + 
dataset, + params={}, + snuba_params=snuba_params, + query=None, # Note: conditions are added below + selected_columns=[], + config=QueryBuilderConfig( + transform_alias_to_input_format=True, + ), + ) + resolve_conditions = span_eap_builder.resolve_conditions queries: dict[str, list[list[WhereType]]] = {} @@ -1492,7 +1862,7 @@ def process_user_queries( # We want to ignore all the aggregate conditions here because we're strictly # searching on span attributes, not aggregates - where, _ = builder.resolve_conditions(user_query) + where, _ = resolve_conditions(user_query) queries[user_query] = where set_measurement("user_queries_count", len(queries)) diff --git a/src/sentry/api/endpoints/organization_user_reports.py b/src/sentry/api/endpoints/organization_user_reports.py index a083d32db550c..705f072e3b885 100644 --- a/src/sentry/api/endpoints/organization_user_reports.py +++ b/src/sentry/api/endpoints/organization_user_reports.py @@ -1,8 +1,10 @@ +from datetime import UTC, datetime, timedelta from typing import NotRequired, TypedDict from rest_framework.request import Request from rest_framework.response import Response +from sentry import quotas from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -13,6 +15,7 @@ from sentry.api.serializers import serialize from sentry.api.serializers.models import UserReportWithGroupSerializer from sentry.models.userreport import UserReport +from sentry.utils.dates import epoch class _PaginateKwargs(TypedDict): @@ -56,6 +59,10 @@ def get(self, request: Request, organization) -> Response: queryset = queryset.filter( date_added__range=(filter_params["start"], filter_params["end"]) ) + else: + retention = quotas.backend.get_event_retention(organization=organization) + start = datetime.now(UTC) - timedelta(days=retention) if retention else epoch + queryset = queryset.filter(date_added__gte=start) status = request.GET.get("status", "unresolved") paginate_kwargs: _PaginateKwargs = {} diff --git a/src/sentry/api/endpoints/project_autofix_codebase_index_status.py b/src/sentry/api/endpoints/project_autofix_codebase_index_status.py index 0072bffe0865f..7ed2bb1fde436 100644 --- a/src/sentry/api/endpoints/project_autofix_codebase_index_status.py +++ b/src/sentry/api/endpoints/project_autofix_codebase_index_status.py @@ -22,7 +22,6 @@ class ProjectAutofixCodebaseIndexStatusEndpoint(ProjectEndpoint): "GET": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - private = True def get(self, request: Request, project: Project) -> Response: """ diff --git a/src/sentry/api/endpoints/project_autofix_create_codebase_index.py b/src/sentry/api/endpoints/project_autofix_create_codebase_index.py index 7db0402b44cc7..c79dd86ef3e94 100644 --- a/src/sentry/api/endpoints/project_autofix_create_codebase_index.py +++ b/src/sentry/api/endpoints/project_autofix_create_codebase_index.py @@ -33,7 +33,6 @@ class ProjectAutofixCreateCodebaseIndexEndpoint(ProjectEndpoint): "POST": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ML_AI - private = True permission_classes = (ProjectAutofixCreateCodebaseIndexPermission,) diff --git a/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py b/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py index f3ba17d9bde69..a97aec8997611 100644 --- a/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py +++ 
b/src/sentry/api/endpoints/project_backfill_similar_issues_embeddings_records.py @@ -8,6 +8,7 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectEndpoint from sentry.auth.superuser import is_active_superuser +from sentry.models.project import Project from sentry.tasks.embeddings_grouping.backfill_seer_grouping_records_for_project import ( backfill_seer_grouping_records_for_project, ) @@ -20,7 +21,7 @@ class ProjectBackfillSimilarIssuesEmbeddingsRecords(ProjectEndpoint): "POST": ApiPublishStatus.PRIVATE, } - def post(self, request: Request, project) -> Response: + def post(self, request: Request, project: Project) -> Response: if not features.has("projects:similarity-embeddings-backfill", project): return Response(status=404) diff --git a/src/sentry/api/endpoints/project_details.py b/src/sentry/api/endpoints/project_details.py index ba3c4fca0fee5..7f85df1a955d4 100644 --- a/src/sentry/api/endpoints/project_details.py +++ b/src/sentry/api/endpoints/project_details.py @@ -30,6 +30,7 @@ from sentry.apidocs.parameters import GlobalParams from sentry.constants import RESERVED_PROJECT_SLUGS, ObjectStatus from sentry.datascrubbing import validate_pii_config_update, validate_pii_selectors +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.dynamic_sampling import get_supported_biases_ids, get_user_biases from sentry.grouping.enhancer import Enhancements from sentry.grouping.enhancer.exceptions import InvalidEnhancerConfig @@ -44,10 +45,9 @@ ) from sentry.lang.native.utils import STORE_CRASH_REPORTS_MAX, convert_crashreport_count from sentry.models.group import Group, GroupStatus -from sentry.models.project import Project +from sentry.models.project import PROJECT_SLUG_MAX_LENGTH, Project from sentry.models.projectbookmark import ProjectBookmark from sentry.models.projectredirect import ProjectRedirect -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.notifications.utils import has_alert_integration from sentry.tasks.delete_seer_grouping_records import call_seer_delete_project_grouping_records @@ -122,8 +122,6 @@ class ProjectMemberSerializer(serializers.Serializer): "performanceIssueCreationRate", "performanceIssueCreationThroughPlatform", "performanceIssueSendToPlatform", - "highlightContext", - "highlightTags", "uptimeAutodetection", ] ) @@ -135,7 +133,7 @@ class ProjectAdminSerializer(ProjectMemberSerializer): ) slug = SentrySerializerSlugField( help_text="Uniquely identifies a project and is used for the interface.", - max_length=50, + max_length=PROJECT_SLUG_MAX_LENGTH, required=False, ) platform = serializers.CharField( @@ -168,14 +166,16 @@ class ProjectAdminSerializer(ProjectMemberSerializer): ) highlightContext = HighlightContextField( required=False, - help_text="A JSON mapping of context types to lists of strings for their keys. E.g. {'user': ['id', 'email']}", + help_text="""A JSON mapping of context types to lists of strings for their keys. +E.g. `{'user': ['id', 'email']}`""", ) highlightTags = ListField( child=serializers.CharField(), required=False, - help_text="A list of strings with tag keys to highlight on this project's issues. E.g. ['release', 'environment']", + help_text="""A list of strings with tag keys to highlight on this project's issues. +E.g. 
`['release', 'environment']`""", ) - # TODO: Add help_text to all the fields for public documentation + # TODO: Add help_text to all the fields for public documentation, then remove them from 'exclude_fields' team = serializers.RegexField(r"^[a-z0-9_\-]+$", max_length=50) digestsMinDelay = serializers.IntegerField(min_value=60, max_value=3600) digestsMaxDelay = serializers.IntegerField(min_value=60, max_value=3600) diff --git a/src/sentry/api/endpoints/project_docs_platform.py b/src/sentry/api/endpoints/project_docs_platform.py deleted file mode 100644 index 4de3d9d06c7a1..0000000000000 --- a/src/sentry/api/endpoints/project_docs_platform.py +++ /dev/null @@ -1,67 +0,0 @@ -from django.urls import reverse -from rest_framework.request import Request -from rest_framework.response import Response - -from sentry.api.api_owners import ApiOwner -from sentry.api.api_publish_status import ApiPublishStatus -from sentry.api.base import region_silo_endpoint -from sentry.api.bases.project import ProjectEndpoint -from sentry.api.exceptions import ResourceDoesNotExist -from sentry.models.projectkey import ProjectKey -from sentry.utils.http import absolute_uri -from sentry.utils.integrationdocs import load_doc - - -def replace_keys(html, project_key): - if project_key is None: - return html - html = html.replace("___DSN___", project_key.dsn_private) - html = html.replace("___PUBLIC_DSN___", project_key.dsn_public) - html = html.replace("___PUBLIC_KEY___", project_key.public_key) - html = html.replace("___SECRET_KEY___", project_key.secret_key) - html = html.replace("___PROJECT_ID___", str(project_key.project_id)) - html = html.replace("___MINIDUMP_URL___", project_key.minidump_endpoint) - html = html.replace("___UNREAL_URL___", project_key.unreal_endpoint) - html = html.replace( - "___RELAY_CDN_URL___", - absolute_uri(reverse("sentry-js-sdk-loader", args=[project_key.public_key])), - ) - - # If we actually render this in the main UI we can also provide - # extra information about the project (org slug and project slug) - if "___PROJECT_NAME___" in html or "___ORG_NAME___" in html: - project = project_key.project - org = project.organization - html = html.replace("___ORG_NAME___", str(org.slug)) - html = html.replace("___PROJECT_NAME___", str(project.slug)) - - return html - - -@region_silo_endpoint -class ProjectDocsPlatformEndpoint(ProjectEndpoint): - publish_status = { - "GET": ApiPublishStatus.PRIVATE, - } - owner = ApiOwner.TELEMETRY_EXPERIENCE - - def get(self, request: Request, project, platform) -> Response: - data = load_doc(platform) - if not data: - raise ResourceDoesNotExist - keys = ("id", "name", "html", "link") - for key in keys: - if key not in data: - raise ResourceDoesNotExist - - project_key = ProjectKey.get_default(project) - - return Response( - { - "id": data["id"], - "name": data["name"], - "html": replace_keys(data["html"], project_key), - "link": data["link"], - "wizardSetup": data.get("wizard_setup", None), - } - ) diff --git a/src/sentry/api/endpoints/project_environments.py b/src/sentry/api/endpoints/project_environments.py index 80536ec9de6f9..687ebe9a8b73f 100644 --- a/src/sentry/api/endpoints/project_environments.py +++ b/src/sentry/api/endpoints/project_environments.py @@ -1,3 +1,4 @@ +from drf_spectacular.utils import OpenApiResponse, extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -6,33 +7,42 @@ from sentry.api.bases.project import ProjectEndpoint from sentry.api.helpers.environments import 
environment_visibility_filter_options from sentry.api.serializers import serialize +from sentry.api.serializers.models.environment import EnvironmentProjectSerializerResponse +from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED +from sentry.apidocs.examples.environment_examples import EnvironmentExamples +from sentry.apidocs.parameters import EnvironmentParams, GlobalParams +from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.models.environment import EnvironmentProject +@extend_schema(tags=["Environments"]) @region_silo_endpoint class ProjectEnvironmentsEndpoint(ProjectEndpoint): publish_status = { - "GET": ApiPublishStatus.UNKNOWN, + "GET": ApiPublishStatus.PUBLIC, } + @extend_schema( + operation_id="List a Project's Environments", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + GlobalParams.PROJECT_ID_OR_SLUG, + EnvironmentParams.VISIBILITY, + ], + responses={ + 200: inline_sentry_response_serializer( + "ListProjectEnvironments", list[EnvironmentProjectSerializerResponse] + ), + 400: OpenApiResponse(description="Invalid value for 'visibility'."), + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + examples=EnvironmentExamples.GET_PROJECT_ENVIRONMENTS, + ) def get(self, request: Request, project) -> Response: """ - List a Project's Environments - ``````````````````````````````` - - Return environments for a given project. - - :qparam string visibility: when omitted only visible environments are - returned. Set to ``"hidden"`` for only hidden - environments, or ``"all"`` for both hidden - and visible environments. - - :pparam string organization_id_or_slug: the id or slug of the organization the project - belongs to. - - :pparam string project_id_or_slug: the id or slug of the project. - - :auth: required + Lists a project's environments. 
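
A minimal sketch of how the documented `visibility` values can map onto a hidden/visible filter. Names and data shapes here are invented for illustration; this is not Sentry's actual `environment_visibility_filter_options` helper:

```python
from collections.abc import Callable

# Map each documented visibility value to a predicate over is_hidden.
VISIBILITY_FILTERS: dict[str, Callable[[bool], bool]] = {
    "visible": lambda is_hidden: not is_hidden,  # default when the param is omitted
    "hidden": lambda is_hidden: is_hidden,
    "all": lambda is_hidden: True,
}


def filter_environments(envs: list[dict], visibility: str = "visible") -> list[dict]:
    """Return environments matching the requested visibility; reject bad values."""
    try:
        predicate = VISIBILITY_FILTERS[visibility]
    except KeyError:
        # The endpoint responds with HTTP 400 ("Invalid value for 'visibility'.")
        raise ValueError("Invalid value for 'visibility'.")
    return [env for env in envs if predicate(env["isHidden"])]


envs = [{"name": "production", "isHidden": False}, {"name": "dev", "isHidden": True}]
assert [e["name"] for e in filter_environments(envs)] == ["production"]
assert [e["name"] for e in filter_environments(envs, "all")] == ["production", "dev"]
```
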
""" queryset = ( diff --git a/src/sentry/api/endpoints/project_ownership.py b/src/sentry/api/endpoints/project_ownership.py index ea63426734b17..7e7a05c421340 100644 --- a/src/sentry/api/endpoints/project_ownership.py +++ b/src/sentry/api/endpoints/project_ownership.py @@ -21,8 +21,9 @@ from sentry.signals import ownership_rule_created from sentry.utils.audit import create_audit_entry -MAX_RAW_LENGTH = 100_000 -HIGHER_MAX_RAW_LENGTH = 250_000 +DEFAULT_MAX_RAW_LENGTH = 100_000 +LARGE_MAX_RAW_LENGTH = 250_000 +XLARGE_MAX_RAW_LENGTH = 750_000 class ProjectOwnershipRequestSerializer(serializers.Serializer): @@ -62,11 +63,12 @@ def _validate_no_codeowners(rules): ) def get_max_length(self): - if features.has( - "organizations:higher-ownership-limit", self.context["ownership"].project.organization - ): - return HIGHER_MAX_RAW_LENGTH - return MAX_RAW_LENGTH + organization = self.context["ownership"].project.organization + if features.has("organizations:ownership-size-limit-xlarge", organization): + return XLARGE_MAX_RAW_LENGTH + if features.has("organizations:ownership-size-limit-large", organization): + return LARGE_MAX_RAW_LENGTH + return DEFAULT_MAX_RAW_LENGTH def validate_autoAssignment(self, value): if value not in [ diff --git a/src/sentry/api/endpoints/project_rule_actions.py b/src/sentry/api/endpoints/project_rule_actions.py index 36bad42c07976..73ba633945a4f 100644 --- a/src/sentry/api/endpoints/project_rule_actions.py +++ b/src/sentry/api/endpoints/project_rule_actions.py @@ -1,5 +1,6 @@ import logging +import sentry_sdk from rest_framework.exceptions import ValidationError from rest_framework.request import Request from rest_framework.response import Response @@ -13,7 +14,7 @@ from sentry.eventstore.models import GroupEvent from sentry.models.rule import Rule from sentry.rules.processing.processor import activate_downstream_actions -from sentry.shared_integrations.exceptions import IntegrationError +from sentry.shared_integrations.exceptions import IntegrationFormError from sentry.utils.safe import safe_execute from sentry.utils.samples import create_sample_event @@ -97,7 +98,7 @@ def execute_future_on_test_event( # safe_execute logs these as exceptions, which can result in # noisy sentry issues, so log with a warning instead. - if isinstance(exc, IntegrationError): + if isinstance(exc, IntegrationFormError): logger.warning( "%s.test_alert.integration_error", callback_name, extra={"exc": exc} ) @@ -110,7 +111,12 @@ def execute_future_on_test_event( logger.warning( "%s.test_alert.unexpected_exception", callback_name, exc_info=True ) - break + error_id = sentry_sdk.capture_exception(exc) + action_exceptions.append( + f"An unexpected error occurred. 
Error ID: '{error_id}'" + ) + + break status = None data = None diff --git a/src/sentry/api/endpoints/project_rule_details.py b/src/sentry/api/endpoints/project_rule_details.py index 9d655541784a8..009248ae51b0e 100644 --- a/src/sentry/api/endpoints/project_rule_details.py +++ b/src/sentry/api/endpoints/project_rule_details.py @@ -26,13 +26,13 @@ from sentry.apidocs.examples.issue_alert_examples import IssueAlertExamples from sentry.apidocs.parameters import GlobalParams, IssueAlertParams from sentry.constants import ObjectStatus +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.integrations.jira.actions.create_ticket import JiraCreateTicketAction from sentry.integrations.jira_server.actions.create_ticket import JiraServerCreateTicketAction from sentry.integrations.slack.tasks.find_channel_id_for_rule import find_channel_id_for_rule from sentry.integrations.slack.utils.rule_status import RedisRuleStatus from sentry.mediators.project_rules.updater import Updater from sentry.models.rule import NeglectedRule, RuleActivity, RuleActivityType -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.rules.actions import trigger_sentry_app_action_creators_for_issues from sentry.rules.actions.utils import get_changed_data, get_updated_rule_data from sentry.signals import alert_rule_edited diff --git a/src/sentry/api/endpoints/project_rules.py b/src/sentry/api/endpoints/project_rules.py index fefae983ae090..4658c4ace2488 100644 --- a/src/sentry/api/endpoints/project_rules.py +++ b/src/sentry/api/endpoints/project_rules.py @@ -505,8 +505,8 @@ class ProjectRulesPostSerializer(serializers.Serializer): - `workspace` - The integration ID associated with the Slack workspace. - `channel` - The name of the channel to send the notification to (e.g., #critical, Jane Schmidt). - `channel_id` (optional) - The ID of the channel to send the notification to. -- `tags` - A string of tags to show in the notification, separated by commas (e.g., "environment, user, my_tag"). -- `notes` - Text to show alongside the notification. To @ a user, include their user id like `@`. To include a clickable link, format the link and title like ``. +- `tags` (optional) - A string of tags to show in the notification, separated by commas (e.g., "environment, user, my_tag"). +- `notes` (optional) - Text to show alongside the notification. To @ a user, include their user id like `@`. To include a clickable link, format the link and title like ``. ```json { "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction", @@ -531,7 +531,7 @@ class ProjectRulesPostSerializer(serializers.Serializer): **Send a Discord notification** - `server` - The integration ID associated with the Discord server. - `channel_id` - The ID of the channel to send the notification to. -- `tags` - A string of tags to show in the notification, separated by commas (e.g., "environment, user, my_tag"). +- `tags` (optional) - A string of tags to show in the notification, separated by commas (e.g., "environment, user, my_tag"). 
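
Since `tags` and `notes` are now optional, a client can omit them entirely when building one of these actions. A hedged sketch of assembling a Slack action payload with unset optional keys dropped; the action `id` string comes from the docs above, all other values are examples. The documented Discord JSON example continues below.

```python
def slack_notify_action(
    workspace: int,
    channel: str,
    tags: str | None = None,
    notes: str | None = None,
) -> dict:
    """Build a Slack notify action dict, omitting unset optional fields."""
    action: dict = {
        "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
        "workspace": workspace,
        "channel": channel,
    }
    if tags is not None:  # optional per the field list above
        action["tags"] = tags
    if notes is not None:  # optional per the field list above
        action["notes"] = notes
    return action


print(slack_notify_action(42, "#critical", tags="environment, user"))
```
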
```json { "id": "sentry.integrations.discord.notify_action.DiscordNotifyServiceAction", diff --git a/src/sentry/api/endpoints/project_rules_configuration.py b/src/sentry/api/endpoints/project_rules_configuration.py index ce4e6c568bfb1..dec553ff70265 100644 --- a/src/sentry/api/endpoints/project_rules_configuration.py +++ b/src/sentry/api/endpoints/project_rules_configuration.py @@ -31,9 +31,6 @@ def get(self, request: Request, project) -> Response: can_create_tickets = features.has( "organizations:integrations-ticket-rules", project.organization ) - has_latest_adopted_release = features.has( - "organizations:latest-adopted-release-filter", project.organization - ) # TODO: conditions need to be based on actions for rule_type, rule_cls in rules: @@ -76,12 +73,6 @@ def get(self, request: Request, project) -> Response: if rule_type.startswith("condition/"): condition_list.append(context) elif rule_type.startswith("filter/"): - if ( - context["id"] - == "sentry.rules.filters.latest_adopted_release_filter.LatestAdoptedReleaseFilter" - and not has_latest_adopted_release - ): - continue filter_list.append(context) elif rule_type.startswith("action/"): action_list.append(context) diff --git a/src/sentry/api/endpoints/project_servicehook_details.py b/src/sentry/api/endpoints/project_servicehook_details.py index d44bc117494d1..8c1b1b60f5469 100644 --- a/src/sentry/api/endpoints/project_servicehook_details.py +++ b/src/sentry/api/endpoints/project_servicehook_details.py @@ -10,8 +10,9 @@ from sentry.api.bases.project import ProjectEndpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.api.serializers import serialize -from sentry.api.validators import ServiceHookValidator from sentry.constants import ObjectStatus +from sentry.sentry_apps.api.parsers.servicehook import ServiceHookValidator +from sentry.sentry_apps.api.serializers.servicehook import ServiceHookSerializer from sentry.sentry_apps.models.servicehook import ServiceHook @@ -42,7 +43,7 @@ def get(self, request: Request, project, hook_id) -> Response: hook = ServiceHook.objects.get(project_id=project.id, guid=hook_id) except ServiceHook.DoesNotExist: raise ResourceDoesNotExist - return self.respond(serialize(hook, request.user)) + return self.respond(serialize(hook, request.user, ServiceHookSerializer())) def put(self, request: Request, project, hook_id) -> Response: """ @@ -95,7 +96,7 @@ def put(self, request: Request, project, hook_id) -> Response: data=hook.get_audit_log_data(), ) - return self.respond(serialize(hook, request.user)) + return self.respond(serialize(hook, request.user, ServiceHookSerializer())) def delete(self, request: Request, project, hook_id) -> Response: """ diff --git a/src/sentry/api/endpoints/project_servicehooks.py b/src/sentry/api/endpoints/project_servicehooks.py index 1c38a74a2b951..cc1af074094d4 100644 --- a/src/sentry/api/endpoints/project_servicehooks.py +++ b/src/sentry/api/endpoints/project_servicehooks.py @@ -10,8 +10,9 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectEndpoint from sentry.api.serializers import serialize -from sentry.api.validators import ServiceHookValidator from sentry.constants import ObjectStatus +from sentry.sentry_apps.api.parsers.servicehook import ServiceHookValidator +from sentry.sentry_apps.api.serializers.servicehook import ServiceHookSerializer from sentry.sentry_apps.models.servicehook import ServiceHook from sentry.sentry_apps.services.hook import hook_service @@ -65,7 +66,7 @@ def get(self, request: Request, 
project) -> Response: request=request, queryset=queryset, order_by="-id", - on_results=lambda x: serialize(x, request.user), + on_results=lambda x: serialize(x, request.user, ServiceHookSerializer()), ) def post(self, request: Request, project) -> Response: @@ -130,5 +131,6 @@ def post(self, request: Request, project) -> Response: ) return self.respond( - serialize(ServiceHook.objects.get(id=hook.id), request.user), status=201 + serialize(ServiceHook.objects.get(id=hook.id), request.user, ServiceHookSerializer()), + status=201, ) diff --git a/src/sentry/api/endpoints/project_user_reports.py b/src/sentry/api/endpoints/project_user_reports.py index 3660faf94254d..3b9b5a988c803 100644 --- a/src/sentry/api/endpoints/project_user_reports.py +++ b/src/sentry/api/endpoints/project_user_reports.py @@ -1,9 +1,11 @@ +from datetime import UTC, datetime, timedelta from typing import NotRequired, TypedDict from rest_framework import serializers from rest_framework.request import Request from rest_framework.response import Response +from sentry import quotas from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.authentication import DSNAuthentication @@ -17,6 +19,7 @@ from sentry.models.environment import Environment from sentry.models.projectkey import ProjectKey from sentry.models.userreport import UserReport +from sentry.utils.dates import epoch class UserReportSerializer(serializers.ModelSerializer): @@ -61,7 +64,11 @@ def get(self, request: Request, project) -> Response: except Environment.DoesNotExist: queryset = UserReport.objects.none() else: - queryset = UserReport.objects.filter(project_id=project.id, group_id__isnull=False) + retention = quotas.backend.get_event_retention(organization=project.organization) + start = datetime.now(UTC) - timedelta(days=retention) if retention else epoch + queryset = UserReport.objects.filter( + project_id=project.id, group_id__isnull=False, date_added__gte=start + ) if environment is not None: queryset = queryset.filter(environment_id=environment.id) @@ -113,7 +120,7 @@ def post(self, request: Request, project) -> Response: :param string comments: comments supplied by user """ if hasattr(request.auth, "project_id") and project.id != request.auth.project_id: - return self.respond(status=400) + return self.respond(status=401) serializer = UserReportSerializer(data=request.data) if not serializer.is_valid(): diff --git a/src/sentry/api/endpoints/secret_scanning/github.py b/src/sentry/api/endpoints/secret_scanning/github.py new file mode 100644 index 0000000000000..c96362be3b714 --- /dev/null +++ b/src/sentry/api/endpoints/secret_scanning/github.py @@ -0,0 +1,176 @@ +import hashlib +import logging + +import sentry_sdk +from django.http import HttpResponse +from django.utils import timezone +from django.utils.decorators import method_decorator +from django.views.decorators.csrf import csrf_exempt +from django.views.generic.base import View + +from sentry import options +from sentry.hybridcloud.models import ApiTokenReplica, OrgAuthTokenReplica +from sentry.models.apitoken import ApiToken +from sentry.models.orgauthtoken import OrgAuthToken +from sentry.organizations.absolute_url import generate_organization_url +from sentry.organizations.services.organization import organization_service +from sentry.types.token import AuthTokenType +from sentry.users.models.user import User +from sentry.utils import json, metrics +from sentry.utils.email import MessageBuilder +from sentry.utils.github import 
verify_signature +from sentry.utils.http import absolute_uri +from sentry.web.frontend.base import control_silo_view + +logger = logging.getLogger(__name__) + +TOKEN_TYPE_HUMAN_READABLE = { + AuthTokenType.USER: "User Auth Token", + AuthTokenType.ORG: "Organization Auth Token", +} + +REVOKE_URLS = { + AuthTokenType.USER: "/settings/account/api/auth-tokens/", + AuthTokenType.ORG: "/settings/auth-tokens/", +} + + +@control_silo_view +class SecretScanningGitHubEndpoint(View): + @method_decorator(csrf_exempt) + def dispatch(self, request, *args, **kwargs): + if request.method != "POST": + return HttpResponse(status=405) + + response = super().dispatch(request, *args, **kwargs) + metrics.incr( + "secret-scanning.github.webhooks", + 1, + tags={"status": response.status_code}, + skip_internal=False, + ) + return response + + def post(self, request): + if request.headers.get("Content-Type") != "application/json": + return HttpResponse( + json.dumps({"details": "invalid content type specified"}), status=400 + ) + + payload = request.body.decode("utf-8") + signature = request.headers.get("Github-Public-Key-Signature") + key_id = request.headers.get("Github-Public-Key-Identifier") + + try: + if options.get("secret-scanning.github.enable-signature-verification"): + verify_signature( + payload, + signature, + key_id, + "secret_scanning", + ) + except ValueError as e: + sentry_sdk.capture_exception(e) + return HttpResponse(json.dumps({"details": "invalid signature"}), status=400) + + secret_alerts = json.loads(payload) + response = [] + for secret_alert in secret_alerts: + alerted_token_str = secret_alert["token"] + hashed_alerted_token = hashlib.sha256(alerted_token_str.encode()).hexdigest() + + # no prefix tokens could indicate old user auth tokens with no prefixes + token_type = AuthTokenType.USER + if alerted_token_str.startswith(AuthTokenType.ORG): + token_type = AuthTokenType.ORG + elif alerted_token_str.startswith((AuthTokenType.USER_APP, AuthTokenType.INTEGRATION)): + # TODO: add support for other token types + return HttpResponse( + json.dumps({"details": "auth token type is not implemented"}), status=501 + ) + + try: + token: ApiToken | OrgAuthToken + + if token_type == AuthTokenType.USER: + token = ApiToken.objects.get(hashed_token=hashed_alerted_token) + + if token_type == AuthTokenType.ORG: + token = OrgAuthToken.objects.get( + token_hashed=hashed_alerted_token, date_deactivated=None + ) + + extra = { + "exposed_source": secret_alert["source"], + "exposed_url": secret_alert["url"], + "hashed_token": hashed_alerted_token, + "token_type": token_type, + } + logger.info("found an exposed auth token", extra=extra) + + # TODO: mark an API token as exposed in the database + + # TODO: expose this option in the UI + revoke_action_enabled = False + if revoke_action_enabled: + # TODO: revoke token + pass + + # Send an email + url_prefix = options.get("system.url-prefix") + if isinstance(token, ApiToken): + # for user token, send an alert to the token owner + users = User.objects.filter(id=token.user_id) + elif isinstance(token, OrgAuthToken): + # for org token, send an alert to all organization owners + organization = organization_service.get(id=token.organization_id) + if organization is None: + continue + + owner_members = organization_service.get_organization_owner_members( + organization_id=organization.id + ) + user_ids = [om.user_id for om in owner_members] + users = User.objects.filter(id__in=user_ids) + + url_prefix = generate_organization_url(organization.slug) + + token_type_human_readable = 
TOKEN_TYPE_HUMAN_READABLE.get(token_type, "Auth Token") + + revoke_url = absolute_uri(REVOKE_URLS.get(token_type, "/"), url_prefix=url_prefix) + + context = { + "datetime": timezone.now(), + "token_name": token.name, + "token_type": token_type_human_readable, + "token_redacted": f"{token_type}...{token.token_last_characters}", + "hashed_token": hashed_alerted_token, + "exposed_source": secret_alert["source"], + "exposed_url": secret_alert["url"], + "revoke_url": revoke_url, + } + + subject = f"Action Required: {token_type_human_readable} Exposed" + msg = MessageBuilder( + subject="{}{}".format(options.get("mail.subject-prefix"), subject), + template="sentry/emails/secret-scanning/body.txt", + html_template="sentry/emails/secret-scanning/body.html", + type="user.secret-scanning-alert", + context=context, + ) + msg.send_async([u.username for u in users]) + except ( + ApiToken.DoesNotExist, + ApiTokenReplica.DoesNotExist, + OrgAuthToken.DoesNotExist, + OrgAuthTokenReplica.DoesNotExist, + ): + response.append( + { + "token_hash": hashed_alerted_token, + "token_type": secret_alert["type"], + "label": "false_positive", + } + ) + + return HttpResponse(json.dumps(response), status=200) diff --git a/src/sentry/api/endpoints/seer_rpc.py b/src/sentry/api/endpoints/seer_rpc.py index f468d0f5ae5a3..8ad06295ed6c8 100644 --- a/src/sentry/api/endpoints/seer_rpc.py +++ b/src/sentry/api/endpoints/seer_rpc.py @@ -17,6 +17,7 @@ from rest_framework.response import Response from sentry_sdk import Scope, capture_exception +from sentry import options from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.authentication import AuthenticationSiloLimit, StandardAuthentication @@ -153,8 +154,9 @@ def get_organization_slug(*, org_id: int) -> dict: def get_organization_autofix_consent(*, org_id: int) -> dict: org: Organization = Organization.objects.get(id=org_id) consent = org.get_option("sentry:gen_ai_consent", False) + github_extension_enabled = org_id in options.get("github-extension.enabled-orgs") return { - "consent": consent, + "consent": consent or github_extension_enabled, } diff --git a/src/sentry/api/endpoints/team_details.py b/src/sentry/api/endpoints/team_details.py index 87d276d1bb279..158720816c691 100644 --- a/src/sentry/api/endpoints/team_details.py +++ b/src/sentry/api/endpoints/team_details.py @@ -23,14 +23,15 @@ ) from sentry.apidocs.examples.team_examples import TeamExamples from sentry.apidocs.parameters import GlobalParams, TeamParams -from sentry.models.scheduledeletion import RegionScheduledDeletion +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.models.team import Team, TeamStatus @extend_schema_serializer(exclude_fields=["name"]) class TeamDetailsSerializer(CamelSnakeModelSerializer): slug = SentrySerializerSlugField( - max_length=50, + max_length=DEFAULT_SLUG_MAX_LENGTH, help_text="Uniquely identifies a team. 
This must be available.",
    )

@@ -55,6 +56,13 @@ class TeamDetailsEndpoint(TeamEndpoint):
         "GET": ApiPublishStatus.PUBLIC,
         "PUT": ApiPublishStatus.PUBLIC,
     }
+    # OrganizationSCIMTeamDetails inherits this endpoint, but toggles this setting
+    _allow_idp_changes = False
+
+    def can_modify_idp_team(self, team: Team):
+        if not team.idp_provisioned:
+            return True
+        return self._allow_idp_changes

     @extend_schema(
         operation_id="Retrieve a Team",
@@ -106,6 +114,13 @@ def put(self, request: Request, team) -> Response:
         Update various attributes and configurable settings for the given team.
         """
+
+        if not self.can_modify_idp_team(team):
+            return Response(
+                {"detail": "This team is managed through your organization's identity provider."},
+                status=403,
+            )
+
         serializer = TeamDetailsSerializer(team, data=request.data, partial=True)
         if serializer.is_valid():
             team = serializer.save()
@@ -140,6 +155,13 @@ def delete(self, request: Request, team) -> Response:
         **Note:** Deletion happens asynchronously and therefore is not immediate.
         Teams will have their slug released while waiting for deletion.
         """
+
+        if not self.can_modify_idp_team(team):
+            return Response(
+                {"detail": "This team is managed through your organization's identity provider."},
+                status=403,
+            )
+
         suffix = uuid4().hex
         new_slug = f"{team.slug}-{suffix}"[0:50]
         try:
diff --git a/src/sentry/api/endpoints/team_projects.py b/src/sentry/api/endpoints/team_projects.py
index 73ce44a11de87..98fadbfd2a714 100644
--- a/src/sentry/api/endpoints/team_projects.py
+++ b/src/sentry/api/endpoints/team_projects.py
@@ -22,7 +22,7 @@
 from sentry.apidocs.parameters import CursorQueryParam, GlobalParams
 from sentry.apidocs.utils import inline_sentry_response_serializer
 from sentry.constants import RESERVED_PROJECT_SLUGS, ObjectStatus
-from sentry.models.project import Project
+from sentry.models.project import PROJECT_SLUG_MAX_LENGTH, Project
 from sentry.models.team import Team
 from sentry.seer.similarity.utils import project_is_seer_eligible
 from sentry.signals import project_created
@@ -38,7 +38,7 @@ class ProjectPostSerializer(serializers.Serializer):
     slug = SentrySerializerSlugField(
         help_text="""Uniquely identifies a project and is used for the interface. 
If not provided, it is automatically generated from the name.""", - max_length=50, + max_length=PROJECT_SLUG_MAX_LENGTH, required=False, allow_null=True, ) diff --git a/src/sentry/api/endpoints/user_notification_settings_options.py b/src/sentry/api/endpoints/user_notification_settings_options.py index 20b0ce6089b12..a381d1eee7858 100644 --- a/src/sentry/api/endpoints/user_notification_settings_options.py +++ b/src/sentry/api/endpoints/user_notification_settings_options.py @@ -22,8 +22,6 @@ class UserNotificationSettingsOptionsEndpoint(UserEndpoint): "PUT": ApiPublishStatus.PRIVATE, } owner = ApiOwner.ALERTS_NOTIFICATIONS - # TODO(Steve): Make not private when we launch new system - private = True def get(self, request: Request, user: User) -> Response: """ diff --git a/src/sentry/api/endpoints/user_notification_settings_options_detail.py b/src/sentry/api/endpoints/user_notification_settings_options_detail.py index d39efa93cf8fe..53e73e039e850 100644 --- a/src/sentry/api/endpoints/user_notification_settings_options_detail.py +++ b/src/sentry/api/endpoints/user_notification_settings_options_detail.py @@ -17,8 +17,6 @@ class UserNotificationSettingsOptionsDetailEndpoint(UserEndpoint): "DELETE": ApiPublishStatus.PRIVATE, } owner = ApiOwner.ALERTS_NOTIFICATIONS - # TODO(Steve): Make not private when we launch new system - private = True def convert_args( self, diff --git a/src/sentry/api/endpoints/user_notification_settings_providers.py b/src/sentry/api/endpoints/user_notification_settings_providers.py index 945bf75477f4d..dff386dae2882 100644 --- a/src/sentry/api/endpoints/user_notification_settings_providers.py +++ b/src/sentry/api/endpoints/user_notification_settings_providers.py @@ -25,8 +25,6 @@ class UserNotificationSettingsProvidersEndpoint(UserEndpoint): "PUT": ApiPublishStatus.PRIVATE, } owner = ApiOwner.ALERTS_NOTIFICATIONS - # TODO(Steve): Make not private when we launch new system - private = True def get(self, request: Request, user: User) -> Response: """ diff --git a/src/sentry/api/endpoints/warmup.py b/src/sentry/api/endpoints/warmup.py new file mode 100644 index 0000000000000..4d989835f992c --- /dev/null +++ b/src/sentry/api/endpoints/warmup.py @@ -0,0 +1,20 @@ +from rest_framework.request import Request +from rest_framework.response import Response + +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import Endpoint, all_silo_endpoint +from sentry.ratelimits.config import RateLimitConfig + + +@all_silo_endpoint +class WarmupEndpoint(Endpoint): + publish_status = { + "GET": ApiPublishStatus.PRIVATE, + } + owner = ApiOwner.UNOWNED + permission_classes = () + rate_limits = RateLimitConfig(group="INTERNAL") + + def get(self, request: Request) -> Response: + return Response(200) diff --git a/src/sentry/api/event_search.py b/src/sentry/api/event_search.py index 7aba67336a779..7341c7bcb7dc7 100644 --- a/src/sentry/api/event_search.py +++ b/src/sentry/api/event_search.py @@ -571,13 +571,14 @@ def create_from(cls, search_config: SearchConfig, **overrides): class SearchVisitor(NodeVisitor): unwrapped_exceptions = (InvalidSearchQuery,) - def __init__(self, config=None, params=None, builder=None): + def __init__(self, config=None, params=None, builder=None, get_field_type=None): super().__init__() if config is None: config = SearchConfig() self.config = config self.params = params if params is not None else {} + self.get_field_type = get_field_type if builder is None: # Avoid circular import from 
sentry.search.events.builder.discover import UnresolvedQuery @@ -590,6 +591,10 @@ def __init__(self, config=None, params=None, builder=None): ) else: self.builder = builder + if get_field_type is None: + self.get_field_type = self.builder.get_field_type + else: + self.get_field_type = get_field_type @cached_property def key_mappings_lookup(self): @@ -604,7 +609,7 @@ def is_numeric_key(self, key): key in self.config.numeric_keys or is_measurement(key) or is_span_op_breakdown(key) - or self.builder.get_field_type(key) == "number" + or self.get_field_type(key) == "number" or self.is_duration_key(key) ) @@ -614,11 +619,11 @@ def is_duration_key(self, key): key in self.config.duration_keys or is_duration_measurement(key) or is_span_op_breakdown(key) - or self.builder.get_field_type(key) in duration_types + or self.get_field_type(key) in duration_types ) def is_size_key(self, key): - return self.builder.get_field_type(key) in SIZE_UNITS + return self.get_field_type(key) in SIZE_UNITS def is_date_key(self, key): return key in self.config.date_keys @@ -1241,7 +1246,7 @@ def generic_visit(self, node, children): def parse_search_query( - query, config=None, params=None, builder=None, config_overrides=None + query, config=None, params=None, builder=None, config_overrides=None, get_field_type=None ) -> list[ SearchFilter ]: # TODO: use the `Sequence[QueryToken]` type and update the code that fails type checking. @@ -1264,4 +1269,6 @@ def parse_search_query( if config_overrides: config = SearchConfig.create_from(config, **config_overrides) - return SearchVisitor(config, params=params, builder=builder).visit(tree) + return SearchVisitor( + config, params=params, builder=builder, get_field_type=get_field_type + ).visit(tree) diff --git a/src/sentry/api/fields/sentry_slug.py b/src/sentry/api/fields/sentry_slug.py index 6301e9483eaff..24eecce61da29 100644 --- a/src/sentry/api/fields/sentry_slug.py +++ b/src/sentry/api/fields/sentry_slug.py @@ -4,6 +4,7 @@ from drf_spectacular.utils import extend_schema_field from rest_framework import serializers +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH from sentry.slug.errors import DEFAULT_SLUG_ERROR_MESSAGE, ORG_SLUG_ERROR_MESSAGE from sentry.slug.patterns import MIXED_SLUG_PATTERN, ORG_SLUG_PATTERN @@ -24,6 +25,7 @@ def __init__( self, error_messages=None, org_slug: bool = False, + max_length: int = DEFAULT_SLUG_MAX_LENGTH, *args, **kwargs, ): @@ -37,4 +39,6 @@ def __init__( pattern = ORG_SLUG_PATTERN error_messages["invalid"] = ORG_SLUG_ERROR_MESSAGE - super().__init__(pattern, error_messages=error_messages, *args, **kwargs) + super().__init__( + pattern, error_messages=error_messages, max_length=max_length, *args, **kwargs + ) diff --git a/src/sentry/api/helpers/actionable_items_helper.py b/src/sentry/api/helpers/actionable_items_helper.py index fccc127cf20c1..f66d12cb3cff3 100644 --- a/src/sentry/api/helpers/actionable_items_helper.py +++ b/src/sentry/api/helpers/actionable_items_helper.py @@ -38,6 +38,8 @@ class ActionPriority: EventError.INVALID_ENVIRONMENT: ActionPriority.LOW, EventError.NATIVE_BAD_DSYM: ActionPriority.LOW, EventError.NATIVE_MISSING_DSYM: ActionPriority.LOW, + EventError.NATIVE_INTERNAL_FAILURE: ActionPriority.LOW, + EventError.NATIVE_SYMBOLICATOR_FAILED: ActionPriority.LOW, EventError.NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM: ActionPriority.LOW, EventError.PAST_TIMESTAMP: ActionPriority.LOW, EventError.PROGUARD_MISSING_LINENO: ActionPriority.LOW, @@ -66,12 +68,10 @@ class ActionPriority: EventError.JS_SCRAPING_DISABLED, 
EventError.JS_TOO_MANY_REMOTE_SOURCES,
     EventError.MISSING_ATTRIBUTE,
-    EventError.NATIVE_INTERNAL_FAILURE,
     EventError.NATIVE_MISSING_SYMBOL,
     EventError.NATIVE_MISSING_SYSTEM_DSYM,
     EventError.NATIVE_NO_CRASHED_THREAD,
     EventError.NATIVE_SIMULATOR_FRAME,
-    EventError.NATIVE_SYMBOLICATOR_FAILED,
     EventError.NATIVE_UNKNOWN_IMAGE,
     EventError.UNKNOWN_ERROR,
     EventError.VALUE_TOO_LONG,
diff --git a/src/sentry/api/helpers/group_index/delete.py b/src/sentry/api/helpers/group_index/delete.py
index b2bd74552f181..d930632b674e1 100644
--- a/src/sentry/api/helpers/group_index/delete.py
+++ b/src/sentry/api/helpers/group_index/delete.py
@@ -10,13 +10,14 @@
 from rest_framework.request import Request
 from rest_framework.response import Response

-from sentry import audit_log, eventstream
+from sentry import audit_log, eventstream, features
 from sentry.api.base import audit_logger
 from sentry.deletions.tasks.groups import delete_groups as delete_groups_task
 from sentry.issues.grouptype import GroupCategory
 from sentry.models.group import Group, GroupStatus
 from sentry.models.grouphash import GroupHash
 from sentry.models.groupinbox import GroupInbox
+from sentry.models.organization import Organization
 from sentry.models.project import Project
 from sentry.signals import issue_deleted
 from sentry.tasks.delete_seer_grouping_records import call_delete_seer_grouping_records_by_hash
@@ -44,10 +45,24 @@ def delete_group_list(
     if not group_list:
         return

+    issue_platform_deletion_allowed = features.has(
+        "organizations:issue-platform-deletion", project.organization, actor=request.user
+    )
+
     # deterministic sort for sanity, and for very large deletions we'll
     # delete the "smaller" groups first
     group_list.sort(key=lambda g: (g.times_seen, g.id))
-    group_ids = [g.id for g in group_list]
+    group_ids = []
+    non_error_group_found = False
+    for g in group_list:
+        group_ids.append(g.id)
+        if not non_error_group_found and g.issue_category != GroupCategory.ERROR:
+            non_error_group_found = True
+
+    countdown = 3600
+    # With ClickHouse light deletes we want to get rid of the long delay
+    if issue_platform_deletion_allowed and non_error_group_found:
+        countdown = 0

     Group.objects.filter(id__in=group_ids).exclude(
         status__in=[GroupStatus.PENDING_DELETION, GroupStatus.DELETION_IN_PROGRESS]
@@ -73,7 +88,7 @@ def delete_group_list(
             "transaction_id": transaction_id,
             "eventstream_state": eventstream_state,
         },
-        countdown=3600,
+        countdown=countdown,
     )

     for group in group_list:
@@ -140,7 +155,12 @@ def delete_groups(
     if not group_list:
         return Response(status=204)

-    if any(group.issue_category != GroupCategory.ERROR for group in group_list):
+    org = Organization.objects.get_from_cache(id=organization_id)
+    issue_platform_deletion_allowed = features.has(
+        "organizations:issue-platform-deletion", org, actor=request.user
+    )
+    non_error_group_found = any(group.issue_category != GroupCategory.ERROR for group in group_list)
+    if not issue_platform_deletion_allowed and non_error_group_found:
         raise rest_framework.exceptions.ValidationError(detail="Only error issues can be deleted.")

     groups_by_project_id = defaultdict(list)
diff --git a/src/sentry/api/paginator.py b/src/sentry/api/paginator.py
index 61684a9161f3e..173ce25e87148 100644
--- a/src/sentry/api/paginator.py
+++ b/src/sentry/api/paginator.py
@@ -537,7 +537,7 @@ def get_result(self, limit, cursor=None):
             prev=Cursor(0, max(0, offset - limit), True, offset > 0),
             next=Cursor(0, max(0, offset + limit), False, has_more),
         )
-        # TODO use Cursor.value as the `end` argument to data_fn() so that
+        # 
TODO: use Cursor.value as the `end` argument to data_fn() so that # subsequent pages returned using these cursors are using the same end # date for queries, this should stop drift from new incoming events. diff --git a/src/sentry/api/serializers/base.py b/src/sentry/api/serializers/base.py index 7a8982affab98..98ccdc9362e94 100644 --- a/src/sentry/api/serializers/base.py +++ b/src/sentry/api/serializers/base.py @@ -61,10 +61,10 @@ def serialize( pass else: return objects - with sentry_sdk.start_span(op="serialize", description=type(serializer).__name__) as span: + with sentry_sdk.start_span(op="serialize", name=type(serializer).__name__) as span: span.set_data("Object Count", len(objects)) - with sentry_sdk.start_span(op="serialize.get_attrs", description=type(serializer).__name__): + with sentry_sdk.start_span(op="serialize.get_attrs", name=type(serializer).__name__): attrs = serializer.get_attrs( # avoid passing NoneType's to the serializer as they're allowed and # filtered out of serialize() @@ -73,7 +73,7 @@ def serialize( **kwargs, ) - with sentry_sdk.start_span(op="serialize.iterate", description=type(serializer).__name__): + with sentry_sdk.start_span(op="serialize.iterate", name=type(serializer).__name__): return [serializer(o, attrs=attrs.get(o, {}), user=user, **kwargs) for o in objects] diff --git a/src/sentry/api/serializers/models/__init__.py b/src/sentry/api/serializers/models/__init__.py index 24e2717084e9c..55cd1ce1872ce 100644 --- a/src/sentry/api/serializers/models/__init__.py +++ b/src/sentry/api/serializers/models/__init__.py @@ -4,7 +4,6 @@ from .apiauthorization import * # noqa: F401,F403 from .apikey import * # noqa: F401,F403 from .apitoken import * # noqa: F401,F403 -from .app_platform_event import * # noqa: F401,F403 from .auditlogentry import * # noqa: F401,F403 from .auth_provider import * # noqa: F401,F403 from .broadcast import * # noqa: F401,F403 @@ -37,7 +36,6 @@ from .organization_member.utils import * # noqa: F401,F403 from .organization_plugin import * # noqa: F401,F403 from .orgauthtoken import * # noqa: F401,F403 -from .platformexternalissue import * # noqa: F401,F403 from .plugin import * # noqa: F401,F403 from .project import * # noqa: F401,F403 from .project_key import * # noqa: F401,F403 @@ -57,11 +55,6 @@ from .role import * # noqa: F401,F403 from .rule import * # noqa: F401,F403 from .savedsearch import * # noqa: F401,F403 -from .sentry_app import * # noqa: F401,F403 -from .sentry_app_avatar import * # noqa: F401,F403 -from .sentry_app_component import * # noqa: F401,F403 -from .sentry_app_installation import * # noqa: F401,F403 -from .servicehook import * # noqa: F401,F403 from .tagvalue import * # noqa: F401,F403 from .team import * # noqa: F401,F403 from .user_social_auth import * # noqa: F401,F403 diff --git a/src/sentry/api/serializers/models/apiapplication.py b/src/sentry/api/serializers/models/apiapplication.py index d1cdcbbbb8e21..a2d4ab707d7ab 100644 --- a/src/sentry/api/serializers/models/apiapplication.py +++ b/src/sentry/api/serializers/models/apiapplication.py @@ -20,4 +20,5 @@ def serialize(self, obj, attrs, user, **kwargs): "termsUrl": obj.terms_url, "allowedOrigins": obj.get_allowed_origins(), "redirectUris": obj.get_redirect_uris(), + "scopes": obj.scopes, } diff --git a/src/sentry/api/serializers/models/dashboard.py b/src/sentry/api/serializers/models/dashboard.py index 50d53311004a0..47c25c81b3275 100644 --- a/src/sentry/api/serializers/models/dashboard.py +++ b/src/sentry/api/serializers/models/dashboard.py @@ -6,14 +6,15 @@ 
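
The serializer modules in this section all follow the same registry pattern, and the ServiceHook endpoints above pass an explicit serializer instance to override the registered default. A self-contained sketch of that pattern, with stand-in class names:

```python
registry: dict[type, object] = {}


def register(model: type):
    """Class decorator: make serializer_cls the default serializer for model."""
    def wrapper(serializer_cls: type):
        registry[model] = serializer_cls()
        return serializer_cls
    return wrapper


def serialize(obj, user=None, serializer=None):
    """Serialize obj with an explicit serializer, or fall back to the registered default."""
    chosen = serializer if serializer is not None else registry[type(obj)]
    return chosen.serialize(obj, attrs={}, user=user)


class Dashboard:
    """Stand-in model for illustration."""
    title = "Example"


@register(Dashboard)
class DashboardSerializer:
    def serialize(self, obj, attrs, user, **kwargs) -> dict:
        return {"title": obj.title}


assert serialize(Dashboard()) == {"title": "Example"}
```
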
from sentry import features from sentry.api.serializers import Serializer, register, serialize from sentry.constants import ALL_ACCESS_PROJECTS -from sentry.discover.models import DatasetSourcesTypes from sentry.models.dashboard import Dashboard +from sentry.models.dashboard_permissions import DashboardPermissions from sentry.models.dashboard_widget import ( DashboardWidget, DashboardWidgetDisplayTypes, DashboardWidgetQuery, DashboardWidgetQueryOnDemand, DashboardWidgetTypes, + DatasetSourcesTypes, ) from sentry.snuba.metrics.extraction import OnDemandMetricSpecVersioning from sentry.users.api.serializers.user import UserSerializerResponse @@ -41,6 +42,7 @@ class DashboardWidgetQueryResponse(TypedDict): widgetId: str onDemand: list[OnDemandResponse] isHidden: bool + selectedAggregate: int | None class ThresholdType(TypedDict): @@ -63,6 +65,10 @@ class DashboardWidgetResponse(TypedDict): layout: dict[str, int] +class DashboardPermissionsResponse(TypedDict): + is_creator_only_editable: bool + + @register(DashboardWidget) class DashboardWidgetSerializer(Serializer): def get_attrs(self, item_list, user, **kwargs): @@ -164,6 +170,15 @@ def serialize(self, obj, attrs, user, **kwargs) -> DashboardWidgetQueryResponse: "widgetId": str(obj.widget_id), "onDemand": attrs["onDemand"], "isHidden": obj.is_hidden, + "selectedAggregate": obj.selected_aggregate, + } + + +@register(DashboardPermissions) +class DashboardPermissionsSerializer(Serializer): + def serialize(self, obj, attrs, user, **kwargs) -> DashboardPermissionsResponse: + return { + "is_creator_only_editable": obj.is_creator_only_editable, } @@ -257,6 +272,7 @@ class DashboardDetailsResponse(DashboardDetailsResponseOptional): widgets: list[DashboardWidgetResponse] projects: list[int] filters: DashboardFilters + permissions: DashboardPermissionsResponse | None @register(Dashboard) @@ -292,6 +308,7 @@ def serialize(self, obj, attrs, user, **kwargs) -> DashboardDetailsResponse: "widgets": attrs["widgets"], "projects": [project.id for project in obj.projects.all()], "filters": {}, + "permissions": serialize(obj.permissions) if hasattr(obj, "permissions") else None, } if obj.filters is not None: diff --git a/src/sentry/api/serializers/models/environment.py b/src/sentry/api/serializers/models/environment.py index f4a1bc9440e1b..5f4413f45b901 100644 --- a/src/sentry/api/serializers/models/environment.py +++ b/src/sentry/api/serializers/models/environment.py @@ -1,5 +1,6 @@ from collections import namedtuple from datetime import timedelta +from typing import TypedDict from django.utils import timezone @@ -11,15 +12,28 @@ StatsPeriod = namedtuple("StatsPeriod", ("segments", "interval")) +class EnvironmentSerializerResponse(TypedDict): + id: str + name: str + + +class EnvironmentProjectSerializerResponse(TypedDict): + id: str + name: str + isHidden: bool + + @register(Environment) class EnvironmentSerializer(Serializer): - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj: Environment, attrs, user, **kwargs) -> EnvironmentSerializerResponse: return {"id": str(obj.id), "name": obj.name} @register(EnvironmentProject) class EnvironmentProjectSerializer(Serializer): - def serialize(self, obj, attrs, user, **kwargs): + def serialize( + self, obj: EnvironmentProject, attrs, user, **kwargs + ) -> EnvironmentProjectSerializerResponse: return { "id": str(obj.id), "name": obj.environment.name, diff --git a/src/sentry/api/serializers/models/event.py b/src/sentry/api/serializers/models/event.py index 264faaab6b30e..ccde2d14b2d63 100644 --- 
a/src/sentry/api/serializers/models/event.py +++ b/src/sentry/api/serializers/models/event.py @@ -4,7 +4,7 @@ from collections import defaultdict from collections.abc import Sequence from datetime import datetime, timezone -from typing import Any +from typing import Any, TypedDict, cast import sentry_sdk import sqlparse @@ -12,7 +12,10 @@ from sentry.api.serializers import Serializer, register, serialize from sentry.api.serializers.models.release import GroupEventReleaseSerializer -from sentry.eventstore.models import Event, GroupEvent +from sentry.api.serializers.models.userreport import UserReportSerializerResponse +from sentry.api.serializers.types import GroupEventReleaseSerializerResponse +from sentry.eventstore.models import BaseEvent, Event, GroupEvent +from sentry.interfaces.user import EventUserApiContext, User from sentry.models.eventattachment import EventAttachment from sentry.models.eventerror import EventError from sentry.models.release import Release @@ -20,7 +23,6 @@ from sentry.sdk_updates import SdkSetupState, get_suggested_updates from sentry.search.utils import convert_user_tag_to_query, map_device_class_level from sentry.stacktraces.processing import find_stacktraces_in_data -from sentry.users.models.user import User from sentry.utils.json import prune_empty_keys from sentry.utils.safe import get_path @@ -34,6 +36,15 @@ MAX_SQL_FORMAT_LENGTH = 1500 +class EventTagOptional(TypedDict, total=False): + query: str + + +class EventTag(EventTagOptional): + key: str + value: str + + def get_crash_files(events): event_ids = [x.event_id for x in events if x.platform == "native"] if event_ids: @@ -85,7 +96,7 @@ def get_tags_with_meta(event): tags_meta = prune_empty_keys({str(i): e.pop("_meta") for i, e in enumerate(tags)}) - return (tags, meta_with_chunks(tags, tags_meta)) + return (cast(list[EventTag], tags), meta_with_chunks(tags, tags_meta)) def get_entries(event: Event | GroupEvent, user: User, is_public: bool = False): @@ -122,6 +133,64 @@ def get_entries(event: Event | GroupEvent, user: User, is_public: bool = False): ) +class BaseEventSerializerResponse(TypedDict): + id: str + groupID: str | None + eventID: str + projectID: str + message: str | None + title: str + location: str | None + user: EventUserApiContext | None + tags: list[EventTag] + platform: str + dateReceived: datetime | None + contexts: dict[str, Any] | None + size: int | None + entries: list[Any] + dist: str | None + sdk: dict[str, str] + context: dict[str, Any] | None + packages: dict[str, Any] + type: str + metadata: Any + errors: list[Any] + occurrence: Any + _meta: dict[str, Any] + + +class ErrorEventFields(TypedDict, total=False): + crashFile: str | None + culprit: str | None + dateCreated: datetime + fingerprints: list[str] + groupingConfig: Any + + +class TransactionEventFields(TypedDict, total=False): + startTimestamp: datetime + endTimestamp: datetime + measurements: Any + breakdowns: Any + _metrics_summary: Any + + +class EventSerializerResponse( + BaseEventSerializerResponse, ErrorEventFields, TransactionEventFields +): + pass + + +class SqlFormatEventSerializerResponse(EventSerializerResponse): + release: GroupEventReleaseSerializerResponse | None + + +class IssueEventSerializerResponse(SqlFormatEventSerializerResponse): + userReport: UserReportSerializerResponse | None + sdkUpdates: list[dict[str, Any]] + resolvedWith: list[str] + + @register(GroupEvent) @register(Event) class EventSerializer(Serializer): @@ -151,7 +220,7 @@ def _get_attr_with_meta(self, event, attr, default=None): def 
_get_legacy_message_with_meta(self, event): meta = event.data.get("_meta") - message = get_path(event.data, "logentry", "formatted") + message: str | None = get_path(event.data, "logentry", "formatted") msg_meta = get_path(meta, "logentry", "formatted") if not message: @@ -217,7 +286,7 @@ def should_display_error(self, error): and ".frames." not in name ) - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> EventSerializerResponse: from sentry.api.serializers.rest_framework import convert_dict_key_case, snake_to_camel_case errors = [ @@ -233,18 +302,19 @@ def serialize(self, obj, attrs, user, **kwargs): (context, context_meta) = self._get_attr_with_meta(obj, "extra", {}) (packages, packages_meta) = self._get_attr_with_meta(obj, "modules", {}) - received = obj.data.get("received") - if received: + received_data = obj.data.get("received") + received: datetime | None = None + if received_data: # Sentry at one point attempted to record invalid types here. # Remove after June 2 2016 try: - received = datetime.fromtimestamp(received, timezone.utc) + received = datetime.fromtimestamp(received_data, timezone.utc) except TypeError: received = None occurrence = getattr(obj, "occurrence", None) - d = { + event_data: EventSerializerResponse = { "id": obj.event_id, "groupID": str(obj.group_id) if obj.group_id else None, "eventID": obj.event_id, @@ -286,16 +356,21 @@ def serialize(self, obj, attrs, user, **kwargs): } # Serialize attributes that are specific to different types of events. if obj.get_event_type() == "transaction": - d.update(self.__serialize_transaction_attrs(attrs, obj)) + return { + **event_data, + **self.__serialize_transaction_attrs(attrs, obj), + } else: - d.update(self.__serialize_error_attrs(attrs, obj)) - return d + return { + **event_data, + **self.__serialize_error_attrs(attrs, obj), + } - def __serialize_transaction_attrs(self, attrs, obj): + def __serialize_transaction_attrs(self, attrs, obj) -> TransactionEventFields: """ Add attributes that are only present on transaction events. 
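
A hedged sketch of the TypedDict-merging style this serializer now uses: a base response is combined with event-type-specific fields via `**` unpacking, so each branch returns one well-typed dict. All names below are invented for illustration.

```python
from typing import TypedDict


class BaseResponse(TypedDict):
    id: str
    type: str


class TransactionFields(TypedDict, total=False):
    startTimestamp: float
    endTimestamp: float


class TransactionResponse(BaseResponse, TransactionFields):
    pass


def serialize_transaction(event_id: str, start: float, end: float) -> TransactionResponse:
    base: BaseResponse = {"id": event_id, "type": "transaction"}
    extra: TransactionFields = {"startTimestamp": start, "endTimestamp": end}
    # Merging keeps both field sets in one response dict.
    return {**base, **extra}


print(serialize_transaction("abc123", 1.0, 2.5))
```
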
""" - transaction_attrs = { + transaction_attrs: TransactionEventFields = { "startTimestamp": obj.data.get("start_timestamp"), "endTimestamp": obj.data.get("timestamp"), "measurements": obj.data.get("measurements"), @@ -308,7 +383,7 @@ def __serialize_transaction_attrs(self, attrs, obj): return transaction_attrs - def __serialize_error_attrs(self, attrs, obj): + def __serialize_error_attrs(self, attrs, obj) -> ErrorEventFields: """ Add attributes that are present on error and default event types """ @@ -362,7 +437,7 @@ def _format_sql_query(self, message: str): return formatted def _format_breadcrumb_messages( - self, event_data: dict[str, Any], event: Event | GroupEvent, user: User + self, event_data: EventSerializerResponse, event: Event | GroupEvent, user: User ): try: breadcrumbs = next( @@ -386,7 +461,9 @@ def _format_breadcrumb_messages( sentry_sdk.capture_exception(exc) return event_data - def _get_release_info(self, user, event, include_full_release_data: bool): + def _get_release_info( + self, user, event, include_full_release_data: bool + ) -> GroupEventReleaseSerializerResponse | None: version = event.get_tag("sentry:release") if not version: return None @@ -403,7 +480,9 @@ def _get_release_info(self, user, event, include_full_release_data: bool): else: return serialize(release, user, GroupEventReleaseSerializer()) - def _format_db_spans(self, event_data: dict[str, Any], event: Event | GroupEvent, user: User): + def _format_db_spans( + self, event_data: EventSerializerResponse, event: Event | GroupEvent, user: User + ): try: spans = next( filter(lambda entry: entry["type"] == "spans", event_data.get("entries", ())), @@ -423,15 +502,17 @@ def _format_db_spans(self, event_data: dict[str, Any], event: Event | GroupEvent sentry_sdk.capture_exception(exc) return event_data - def serialize(self, obj, attrs, user, include_full_release_data=False): + def serialize( + self, obj, attrs, user, include_full_release_data=False + ) -> SqlFormatEventSerializerResponse: result = super().serialize(obj, attrs, user) - with sentry_sdk.start_span(op="serialize", description="Format SQL"): + with sentry_sdk.start_span(op="serialize", name="Format SQL"): result = self._format_breadcrumb_messages(result, obj, user) result = self._format_db_spans(result, obj, user) - result["release"] = self._get_release_info(user, obj, include_full_release_data) + release_info = self._get_release_info(user, obj, include_full_release_data) - return result + return {**result, "release": release_info} class IssueEventSerializer(SqlFormatEventSerializer): @@ -459,12 +540,17 @@ def _get_resolved_with(self, obj: Event) -> list[str]: return list(unique_resolution_methods) - def serialize(self, obj, attrs, user, include_full_release_data=False): + def serialize( + self, obj, attrs, user, include_full_release_data=False + ) -> IssueEventSerializerResponse: result = super().serialize(obj, attrs, user, include_full_release_data) - result["userReport"] = self._get_user_report(user, obj) - result["sdkUpdates"] = self._get_sdk_updates(obj) - result["resolvedWith"] = self._get_resolved_with(obj) - return result + + return { + **result, + "userReport": self._get_user_report(user, obj), + "sdkUpdates": self._get_sdk_updates(obj), + "resolvedWith": self._get_resolved_with(obj), + } class SharedEventSerializer(EventSerializer): @@ -483,6 +569,27 @@ def serialize(self, obj, attrs, user, **kwargs): return result +SimpleEventSerializerResponse = TypedDict( + "SimpleEventSerializerResponse", + { + "id": str, + "event.type": str, + 
"groupID": str | None, + "eventID": str, + "projectID": str, + "message": str, + "title": str, + "location": str | None, + "culprit": str, + "user": EventUserApiContext | None, + "tags": list[EventTag], + "platform": str, + "dateCreated": datetime, + "crashFile": str | None, + }, +) + + class SimpleEventSerializer(EventSerializer): """ Simple event serializer that renders a basic outline of an event without @@ -505,17 +612,19 @@ def get_attrs(self, item_list, user, **kwargs): } return {event: {"crash_file": serialized_files.get(event.event_id)} for event in item_list} - def serialize(self, obj, attrs, user, **kwargs): - tags = [{"key": key.split("sentry:", 1)[-1], "value": value} for key, value in obj.tags] + def serialize(self, obj: BaseEvent, attrs, user, **kwargs) -> SimpleEventSerializerResponse: + tags: list[EventTag] = [ + {"key": key.split("sentry:", 1)[-1], "value": value} for key, value in obj.tags + ] for tag in tags: query = convert_user_tag_to_query(tag["key"], tag["value"]) if query: tag["query"] = query map_device_class_tags(tags) - user = obj.get_minimal_user() + event_user = obj.get_minimal_user() - return { + response: SimpleEventSerializerResponse = { "id": str(obj.event_id), "event.type": str(obj.get_event_type()), "groupID": str(obj.group_id) if obj.group_id else None, @@ -527,7 +636,7 @@ def serialize(self, obj, attrs, user, **kwargs): "title": obj.title, "location": obj.location, "culprit": obj.culprit, - "user": user and user.get_api_context(), + "user": event_user and event_user.get_api_context(), "tags": tags, "platform": obj.platform, "dateCreated": obj.datetime, @@ -535,6 +644,8 @@ def serialize(self, obj, attrs, user, **kwargs): "crashFile": attrs["crash_file"], } + return response + class ExternalEventSerializer(EventSerializer): """ diff --git a/src/sentry/api/serializers/models/eventuser.py b/src/sentry/api/serializers/models/eventuser.py index 89328f031d77c..2fd7ee04dfb3d 100644 --- a/src/sentry/api/serializers/models/eventuser.py +++ b/src/sentry/api/serializers/models/eventuser.py @@ -1,11 +1,26 @@ +from typing import TypedDict + from sentry.api.serializers import Serializer, register from sentry.utils.avatar import get_gravatar_url from sentry.utils.eventuser import EventUser +class EventUserSerializerResponse(TypedDict): + id: str | None + tagValue: str + identifier: str + username: str + email: str + name: str + ipAddress: str + avatarUrl: str + hash: str + dateCreated: None + + @register(EventUser) class EventUserSerializer(Serializer): - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> EventUserSerializerResponse: return { "id": str(obj.id) if obj.id is not None else obj.id, "tagValue": obj.tag_value, diff --git a/src/sentry/api/serializers/models/group.py b/src/sentry/api/serializers/models/group.py index 924aafb1d71e5..e056c3113faf9 100644 --- a/src/sentry/api/serializers/models/group.py +++ b/src/sentry/api/serializers/models/group.py @@ -648,7 +648,7 @@ def _resolve_resolutions( @staticmethod def _resolve_external_issue_annotations(groups: Sequence[Group]) -> Mapping[int, Sequence[Any]]: - from sentry.models.platformexternalissue import PlatformExternalIssue + from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue # find the external issues for sentry apps and add them in return ( diff --git a/src/sentry/api/serializers/models/group_stream.py b/src/sentry/api/serializers/models/group_stream.py index a072fa1c2dad3..78eaf1e48262d 100644 --- 
a/src/sentry/api/serializers/models/group_stream.py +++ b/src/sentry/api/serializers/models/group_stream.py @@ -19,7 +19,6 @@ SeenStats, snuba_tsdb, ) -from sentry.api.serializers.models.platformexternalissue import PlatformExternalIssueSerializer from sentry.api.serializers.models.plugin import is_plugin_deprecated from sentry.constants import StatsPeriod from sentry.integrations.api.serializers.models.external_issue import ExternalIssueSerializer @@ -31,7 +30,10 @@ from sentry.models.groupinbox import get_inbox_details from sentry.models.grouplink import GroupLink from sentry.models.groupowner import get_owner_details -from sentry.models.platformexternalissue import PlatformExternalIssue +from sentry.sentry_apps.api.serializers.platform_external_issue import ( + PlatformExternalIssueSerializer, +) +from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue from sentry.snuba.dataset import Dataset from sentry.tsdb.base import TSDBModel from sentry.utils import metrics diff --git a/src/sentry/api/serializers/models/organization.py b/src/sentry/api/serializers/models/organization.py index 0b4d6c72caaf1..53b13775a81c5 100644 --- a/src/sentry/api/serializers/models/organization.py +++ b/src/sentry/api/serializers/models/organization.py @@ -48,9 +48,11 @@ SAFE_FIELDS_DEFAULT, SCRAPE_JAVASCRIPT_DEFAULT, SENSITIVE_FIELDS_DEFAULT, + TARGET_SAMPLE_RATE_DEFAULT, UPTIME_AUTODETECTION, ObjectStatus, ) +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH from sentry.dynamic_sampling.tasks.common import get_organization_volume from sentry.dynamic_sampling.tasks.helpers.sliding_window import get_sliding_window_org_sample_rate from sentry.killswitches import killswitch_matches_context @@ -101,7 +103,7 @@ class BaseOrganizationSerializer(serializers.Serializer): # 3. 
cannot end with a dash slug = SentrySerializerSlugField( org_slug=True, - max_length=50, + max_length=DEFAULT_SLUG_MAX_LENGTH, ) def validate_slug(self, value: str) -> str: @@ -265,7 +267,7 @@ def get_feature_set( ] feature_set = set() - with sentry_sdk.start_span(op="features.check", description="check batch features"): + with sentry_sdk.start_span(op="features.check", name="check batch features"): # Check features in batch using the entity handler batch_features = features.batch_has(org_features, actor=user, organization=obj) @@ -281,7 +283,7 @@ def get_feature_set( # This feature_name was found via `batch_has`, don't check again using `has` org_features.remove(feature_name) - with sentry_sdk.start_span(op="features.check", description="check individual features"): + with sentry_sdk.start_span(op="features.check", name="check individual features"): # Remaining features should not be checked via the entity handler for feature_name in org_features: if features.has(feature_name, obj, actor=user, skip_entity=True): @@ -420,7 +422,7 @@ def serialize( class _DetailedOrganizationSerializerResponseOptional(OrganizationSerializerResponse, total=False): - role: Any # TODO replace with enum/literal + role: Any # TODO: replace with enum/literal orgRole: str uptimeAutodetection: bool @@ -611,6 +613,11 @@ def serialize( # type: ignore[explicit-override, override] obj.get_option("sentry:uptime_autodetection", UPTIME_AUTODETECTION) ) + if features.has("organizations:dynamic-sampling-custom", obj, actor=user): + context["targetSampleRate"] = float( + obj.get_option("sentry:target_sample_rate", TARGET_SAMPLE_RATE_DEFAULT) + ) + trusted_relays_raw = obj.get_option("sentry:trusted-relays") or [] # serialize trusted relays info into their external form context["trustedRelays"] = [TrustedRelaySerializer(raw).data for raw in trusted_relays_raw] diff --git a/src/sentry/api/serializers/models/project.py b/src/sentry/api/serializers/models/project.py index 821d3ccf82c0d..3465fa28b34c9 100644 --- a/src/sentry/api/serializers/models/project.py +++ b/src/sentry/api/serializers/models/project.py @@ -275,7 +275,7 @@ class ProjectSerializerResponse(ProjectSerializerBaseResponse): isPublic: bool avatar: SerializedAvatarFields color: str - status: str # TODO enum/literal + status: str # TODO: enum/literal @register(Project) diff --git a/src/sentry/api/serializers/models/release.py b/src/sentry/api/serializers/models/release.py index 8b73b870a314b..4077b86c0fcfe 100644 --- a/src/sentry/api/serializers/models/release.py +++ b/src/sentry/api/serializers/models/release.py @@ -11,7 +11,11 @@ from sentry import release_health, tagstore from sentry.api.serializers import Serializer, register, serialize -from sentry.api.serializers.types import ReleaseSerializerResponse +from sentry.api.serializers.release_details_types import VersionInfo +from sentry.api.serializers.types import ( + GroupEventReleaseSerializerResponse, + ReleaseSerializerResponse, +) from sentry.models.commit import Commit from sentry.models.commitauthor import CommitAuthor from sentry.models.deploy import Deploy @@ -27,7 +31,7 @@ from sentry.utils.hashlib import md5_text -def expose_version_info(info): +def expose_version_info(info) -> VersionInfo | None: if info is None: return None version = {"raw": info["version_raw"]} @@ -616,7 +620,7 @@ def get_attrs(self, item_list, user, **kwargs): result[item] = p return result - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> GroupEventReleaseSerializerResponse: 
return { "id": obj.id, "commitCount": obj.commit_count, diff --git a/src/sentry/api/serializers/models/tagvalue.py b/src/sentry/api/serializers/models/tagvalue.py index 66b050ff4d71b..ff615926f58ee 100644 --- a/src/sentry/api/serializers/models/tagvalue.py +++ b/src/sentry/api/serializers/models/tagvalue.py @@ -1,3 +1,5 @@ +from typing import Any, cast + from sentry.api.serializers import Serializer from sentry.search.utils import convert_user_tag_to_query from sentry.utils.eventuser import EventUser @@ -20,8 +22,9 @@ def get_attrs(self, item_list, user, **kwargs): return result def serialize(self, obj, attrs, user, **kwargs): + result: dict[str, Any] = {} if isinstance(attrs["user"], EventUser): - result = attrs["user"].serialize() + result = cast(dict[str, Any], attrs["user"].serialize()) else: result = {"id": None} diff --git a/src/sentry/api/serializers/models/userreport.py b/src/sentry/api/serializers/models/userreport.py index 6ec62d5051a79..8b92841bef0fe 100644 --- a/src/sentry/api/serializers/models/userreport.py +++ b/src/sentry/api/serializers/models/userreport.py @@ -1,11 +1,36 @@ -from sentry import eventstore +from datetime import timedelta +from typing import Any, TypedDict + +from django.utils import timezone + +from sentry import eventstore, quotas from sentry.api.serializers import Serializer, register, serialize from sentry.eventstore.models import Event from sentry.models.group import Group from sentry.models.project import Project from sentry.models.userreport import UserReport from sentry.snuba.dataset import Dataset -from sentry.utils.eventuser import EventUser +from sentry.utils.eventuser import EventUser, SerializedEventUser + + +class UserReportEvent(TypedDict): + id: str + eventID: str + + +class UserReportSerializerResponse(TypedDict): + id: str + eventID: str + name: str | None + email: str | None + comments: str + dateCreated: str + user: SerializedEventUser | None + event: UserReportEvent + + +class UserReportWithGroupSerializerResponse(UserReportSerializerResponse): + issue: dict[str, Any] @register(UserReport) @@ -14,11 +39,13 @@ def get_attrs(self, item_list, user, **kwargs): attrs = {} project = Project.objects.get(id=item_list[0].project_id) + retention = quotas.backend.get_event_retention(organization=project.organization) events = eventstore.backend.get_events( filter=eventstore.Filter( event_ids=[item.event_id for item in item_list], project_ids=[project.id], + start=timezone.now() - timedelta(days=retention) if retention else None, ), referrer="UserReportSerializer.get_attrs", dataset=Dataset.Events, @@ -28,14 +55,16 @@ def get_attrs(self, item_list, user, **kwargs): events_dict: dict[str, Event] = {event.event_id: event for event in events} for item in item_list: attrs[item] = { - "event_user": EventUser.from_event(events_dict[item.event_id]) - if events_dict.get(item.event_id) - else {} + "event_user": ( + EventUser.from_event(events_dict[item.event_id]) + if events_dict.get(item.event_id) + else {} + ) } return attrs - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> UserReportSerializerResponse: # TODO(dcramer): add in various context from the event # context == user / http / extra interfaces @@ -86,7 +115,9 @@ def get_attrs(self, item_list, user, **kwargs): ) return attrs - def serialize(self, obj, attrs, user, **kwargs): + def serialize(self, obj, attrs, user, **kwargs) -> UserReportWithGroupSerializerResponse: context = super().serialize(obj, attrs, user) - context["issue"] = attrs["group"] - 
return context + return { + **context, + "issue": attrs["group"], + } diff --git a/src/sentry/api/serializers/release_details_types.py b/src/sentry/api/serializers/release_details_types.py index 7cacff2a0927c..b524d2671330a 100644 --- a/src/sentry/api/serializers/release_details_types.py +++ b/src/sentry/api/serializers/release_details_types.py @@ -41,7 +41,7 @@ class VersionInfoOptional(TypedDict, total=False): class VersionInfo(VersionInfoOptional): package: str | None - version: dict[str, str] + version: dict[str, Any] buildHash: str | None @@ -51,7 +51,7 @@ class LastDeployOptional(TypedDict, total=False): class LastDeploy(LastDeployOptional): - id: int + id: str environment: str dateFinished: str name: str diff --git a/src/sentry/api/serializers/rest_framework/__init__.py b/src/sentry/api/serializers/rest_framework/__init__.py index a53794edadd18..8125cd6632d14 100644 --- a/src/sentry/api/serializers/rest_framework/__init__.py +++ b/src/sentry/api/serializers/rest_framework/__init__.py @@ -11,6 +11,3 @@ from .project_key import * # noqa: F401,F403 from .release import * # noqa: F401,F403 from .rule import * # noqa: F401,F403 -from .sentry_app import * # noqa: F401,F403 -from .sentry_app_installation import * # noqa: F401,F403 -from .sentry_app_request import * # noqa: F401,F403 diff --git a/src/sentry/api/serializers/rest_framework/dashboard.py b/src/sentry/api/serializers/rest_framework/dashboard.py index 71f9d0140b763..67d87c831afa9 100644 --- a/src/sentry/api/serializers/rest_framework/dashboard.py +++ b/src/sentry/api/serializers/rest_framework/dashboard.py @@ -160,6 +160,8 @@ class DashboardWidgetQuerySerializer(CamelSnakeSerializer[Dashboard]): on_demand_extraction = DashboardWidgetQueryOnDemandSerializer(many=False, required=False) on_demand_extraction_disabled = serializers.BooleanField(required=False) + selected_aggregate = serializers.IntegerField(required=False, allow_null=True) + required_for_create = {"fields", "conditions"} validate_id = validate_id @@ -341,7 +343,8 @@ def validate(self, data): if ( ondemand_feature - and data.get("widget_type") == DashboardWidgetTypes.DISCOVER + and data.get("widget_type") + in [DashboardWidgetTypes.DISCOVER, DashboardWidgetTypes.TRANSACTION_LIKE] and not query.get("on_demand_extraction_disabled", False) ): if query.get("columns"): @@ -453,6 +456,12 @@ def validate(self, data): return data +class DashboardPermissionsSerializer(CamelSnakeSerializer[Dashboard]): + is_creator_only_editable = serializers.BooleanField( + help_text="Whether the dashboard is editable only by the creator.", + ) + + class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]): # Is a string because output serializers also make it a string. 
id = serializers.CharField(required=False, help_text="A dashboard's unique id.") @@ -491,6 +500,11 @@ class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]): help_text="Setting that lets you display saved time range for this dashboard in UTC.", ) validate_id = validate_id + permissions = DashboardPermissionsSerializer( + required=False, + allow_null=True, + help_text="Permissions that restrict users from editing dashboards", + ) def validate_projects(self, projects): from sentry.api.validators import validate_project_ids @@ -646,12 +660,16 @@ def create_widget(self, dashboard, widget_data, order): orderby=query.get("orderby", ""), order=i, is_hidden=query.get("is_hidden", False), + selected_aggregate=query.get("selected_aggregate"), ) ) DashboardWidgetQuery.objects.bulk_create(new_queries) - if widget.widget_type == DashboardWidgetTypes.DISCOVER: + if widget.widget_type in [ + DashboardWidgetTypes.DISCOVER, + DashboardWidgetTypes.TRANSACTION_LIKE, + ]: self._check_query_cardinality(new_queries) def _check_query_cardinality(self, new_queries: Sequence[DashboardWidgetQuery]): @@ -724,13 +742,17 @@ def update_widget_queries(self, widget, data): is_hidden=query_data.get("is_hidden", False), orderby=query_data.get("orderby", ""), order=next_order + i, + selected_aggregate=query_data.get("selected_aggregate"), ) ) else: raise serializers.ValidationError("You cannot use a query not owned by this widget") DashboardWidgetQuery.objects.bulk_create(new_queries) - if widget.widget_type == DashboardWidgetTypes.DISCOVER: + if widget.widget_type in [ + DashboardWidgetTypes.DISCOVER, + DashboardWidgetTypes.TRANSACTION_LIKE, + ]: self._check_query_cardinality(new_queries + update_queries) def update_widget_query(self, query, data, order): @@ -742,6 +764,7 @@ def update_widget_query(self, query, data, order): query.columns = data.get("columns", query.columns) query.field_aliases = data.get("field_aliases", query.field_aliases) query.is_hidden = data.get("is_hidden", query.is_hidden) + query.selected_aggregate = data.get("selected_aggregate", query.selected_aggregate) query.order = order query.save() diff --git a/src/sentry/api/serializers/types.py b/src/sentry/api/serializers/types.py index 460477b2a11b7..0d58ed915feb0 100644 --- a/src/sentry/api/serializers/types.py +++ b/src/sentry/api/serializers/types.py @@ -66,9 +66,26 @@ class ReleaseSerializerResponse(ReleaseSerializerResponseOptional): newGroups: int status: str shortVersion: str - versionInfo: VersionInfo + versionInfo: VersionInfo | None data: dict[str, Any] commitCount: int deployCount: int authors: list[Author] projects: list[Project] + + +class GroupEventReleaseSerializerResponse(TypedDict, total=False): + id: int + commitCount: int + data: dict[str, Any] + dateCreated: datetime + dateReleased: datetime | None + deployCount: int + ref: str | None + lastCommit: dict[str, Any] | None + lastDeploy: LastDeploy | None + status: str + url: str | None + userAgent: str | None + version: str | None + versionInfo: VersionInfo | None diff --git a/src/sentry/api/urls.py b/src/sentry/api/urls.py index b57570d5f0507..801ce25ee35cb 100644 --- a/src/sentry/api/urls.py +++ b/src/sentry/api/urls.py @@ -7,7 +7,6 @@ from sentry.api.endpoints.group_autofix_setup_check import GroupAutofixSetupCheck from sentry.api.endpoints.group_integration_details import GroupIntegrationDetailsEndpoint from sentry.api.endpoints.group_integrations import GroupIntegrationsEndpoint -from sentry.api.endpoints.issues.related_issues import RelatedIssuesEndpoint from 
sentry.api.endpoints.org_auth_token_details import OrgAuthTokenDetailsEndpoint from sentry.api.endpoints.org_auth_tokens import OrgAuthTokensEndpoint from sentry.api.endpoints.organization_events_anomalies import OrganizationEventsAnomaliesEndpoint @@ -56,6 +55,7 @@ from sentry.api.endpoints.relocations.recover import RelocationRecoverEndpoint from sentry.api.endpoints.relocations.retry import RelocationRetryEndpoint from sentry.api.endpoints.relocations.unpause import RelocationUnpauseEndpoint +from sentry.api.endpoints.secret_scanning.github import SecretScanningGitHubEndpoint from sentry.api.endpoints.seer_rpc import SeerRpcServiceEndpoint from sentry.api.endpoints.source_map_debug_blue_thunder_edition import ( SourceMapDebugBlueThunderEditionEndpoint, @@ -74,6 +74,11 @@ DiscoverSavedQueryDetailEndpoint, DiscoverSavedQueryVisitEndpoint, ) +from sentry.flags.endpoints.hooks import OrganizationFlagsHooksEndpoint +from sentry.flags.endpoints.logs import ( + OrganizationFlagLogDetailsEndpoint, + OrganizationFlagLogIndexEndpoint, +) from sentry.incidents.endpoints.organization_alert_rule_activations import ( OrganizationAlertRuleActivationsEndpoint, ) @@ -181,6 +186,8 @@ GroupParticipantsEndpoint, GroupSimilarIssuesEmbeddingsEndpoint, GroupSimilarIssuesEndpoint, + GroupTombstoneDetailsEndpoint, + GroupTombstoneEndpoint, OrganizationGroupIndexEndpoint, OrganizationGroupIndexStatsEndpoint, OrganizationGroupSearchViewsEndpoint, @@ -191,6 +198,7 @@ ProjectGroupIndexEndpoint, ProjectGroupStatsEndpoint, ProjectStacktraceLinkEndpoint, + RelatedIssuesEndpoint, SharedGroupDetailsEndpoint, ShortIdLookupEndpoint, SourceMapDebugEndpoint, @@ -242,10 +250,6 @@ from sentry.monitors.endpoints.project_processing_errors_index import ( ProjectProcessingErrorsIndexEndpoint, ) -from sentry.remote_config.endpoints import ( - ProjectConfigurationEndpoint, - ProjectConfigurationProxyEndpoint, -) from sentry.replays.endpoints.organization_replay_count import OrganizationReplayCountEndpoint from sentry.replays.endpoints.organization_replay_details import OrganizationReplayDetailsEndpoint from sentry.replays.endpoints.organization_replay_events_meta import ( @@ -255,9 +259,6 @@ from sentry.replays.endpoints.organization_replay_selector_index import ( OrganizationReplaySelectorIndexEndpoint, ) -from sentry.replays.endpoints.project_replay_accessibility_issues import ( - ProjectReplayAccessibilityIssuesEndpoint, -) from sentry.replays.endpoints.project_replay_clicks_index import ProjectReplayClicksIndexEndpoint from sentry.replays.endpoints.project_replay_details import ProjectReplayDetailsEndpoint from sentry.replays.endpoints.project_replay_recording_segment_details import ( @@ -308,7 +309,15 @@ ) from sentry.sentry_apps.api.endpoints.sentry_app_requests import SentryAppRequestsEndpoint from sentry.sentry_apps.api.endpoints.sentry_app_rotate_secret import SentryAppRotateSecretEndpoint +from sentry.sentry_apps.api.endpoints.sentry_app_stats_details import SentryAppStatsEndpoint from sentry.sentry_apps.api.endpoints.sentry_apps import SentryAppsEndpoint +from sentry.sentry_apps.api.endpoints.sentry_apps_stats import SentryAppsStatsEndpoint +from sentry.sentry_apps.api.endpoints.sentry_internal_app_token_details import ( + SentryInternalAppTokenDetailsEndpoint, +) +from sentry.sentry_apps.api.endpoints.sentry_internal_app_tokens import ( + SentryInternalAppTokensEndpoint, +) from sentry.uptime.endpoints.project_uptime_alert_details import ProjectUptimeAlertDetailsEndpoint from 
sentry.uptime.endpoints.project_uptime_alert_index import ProjectUptimeAlertIndexEndpoint from sentry.users.api.endpoints.authenticator_index import AuthenticatorIndexEndpoint @@ -392,17 +401,9 @@ from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint from .endpoints.group_tags import GroupTagsEndpoint -from .endpoints.group_tombstone import GroupTombstoneEndpoint -from .endpoints.group_tombstone_details import GroupTombstoneDetailsEndpoint from .endpoints.group_user_reports import GroupUserReportsEndpoint from .endpoints.grouping_configs import GroupingConfigsEndpoint from .endpoints.index import IndexEndpoint -from .endpoints.integrations.sentry_apps import ( - SentryAppsStatsEndpoint, - SentryAppStatsEndpoint, - SentryInternalAppTokenDetailsEndpoint, - SentryInternalAppTokensEndpoint, -) from .endpoints.internal import ( InternalBeaconEndpoint, InternalEnvironmentEndpoint, @@ -586,7 +587,6 @@ from .endpoints.project_create_sample import ProjectCreateSampleEndpoint from .endpoints.project_create_sample_transaction import ProjectCreateSampleTransactionEndpoint from .endpoints.project_details import ProjectDetailsEndpoint -from .endpoints.project_docs_platform import ProjectDocsPlatformEndpoint from .endpoints.project_environment_details import ProjectEnvironmentDetailsEndpoint from .endpoints.project_environments import ProjectEnvironmentsEndpoint from .endpoints.project_filter_details import ProjectFilterDetailsEndpoint @@ -714,7 +714,7 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: name=f"{name_prefix}-group-events", ), re_path( - r"^(?P[^\/]+)/events/(?P(?:latest|oldest|helpful|recommended|\d+|[A-Fa-f0-9-]{32,36}))/$", + r"^(?P[^\/]+)/events/(?P(?:latest|oldest|recommended|\d+|[A-Fa-f0-9-]{32,36}))/$", GroupEventDetailsEndpoint.as_view(), name=f"{name_prefix}-group-event-details", ), @@ -2031,6 +2031,23 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: OrganizationRelayUsage.as_view(), name="sentry-api-0-organization-relay-usage", ), + # Flags + re_path( + r"^(?P[^\/]+)/flags/logs/$", + OrganizationFlagLogIndexEndpoint.as_view(), + name="sentry-api-0-organization-flag-logs", + ), + re_path( + r"^(?P[^\/]+)/flags/logs/(?P\d+)/$", + OrganizationFlagLogDetailsEndpoint.as_view(), + name="sentry-api-0-organization-flag-log", + ), + re_path( + r"^(?P[^\/]+)/flags/hooks/provider/(?P[\w-]+)/$", + OrganizationFlagsHooksEndpoint.as_view(), + name="sentry-api-0-organization-flag-hooks", + ), + # Replays re_path( r"^(?P[^\/]+)/replays/$", OrganizationReplayIndexEndpoint.as_view(), @@ -2249,11 +2266,6 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: ProjectCreateSampleTransactionEndpoint.as_view(), name="sentry-api-0-project-create-sample-transaction", ), - re_path( - r"^(?P[^\/]+)/(?P[^\/]+)/docs/(?P[\w-]+)/$", - ProjectDocsPlatformEndpoint.as_view(), - name="sentry-api-0-project-docs-platform", - ), re_path( r"^(?P[^\/]+)/(?P[^\/]+)/environments/$", ProjectEnvironmentsEndpoint.as_view(), @@ -2422,11 +2434,6 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: r"^(?P[^\/]+)/(?P[^\/]+)/keys/(?P[^\/]+)/stats/$", ProjectKeyStatsEndpoint.as_view(), ), - re_path( - r"^(?P[^\/]+)/(?P[^\/]+)/configuration/$", - ProjectConfigurationEndpoint.as_view(), - name="sentry-api-0-project-key-configuration", - ), re_path( r"^(?P[^/]+)/(?P[^/]+)/members/$", ProjectMemberIndexEndpoint.as_view(), @@ -2532,11 +2539,6 @@ def 
create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: ProjectReplayViewedByEndpoint.as_view(), name="sentry-api-0-project-replay-viewed-by", ), - re_path( - r"^(?P[^/]+)/(?P[^\/]+)/replays/(?P[\w-]+)/accessibility-issues/$", - ProjectReplayAccessibilityIssuesEndpoint.as_view(), - name="sentry-api-0-project-replay-accessibility-issues", - ), re_path( r"^(?P[^/]+)/(?P[^\/]+)/replays/(?P[\w-]+)/clicks/$", ProjectReplayClicksIndexEndpoint.as_view(), @@ -3282,11 +3284,6 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: SetupWizard.as_view(), name="sentry-api-0-project-wizard", ), - re_path( - r"^remote-config/projects/(?P[^\/]+)/$", - ProjectConfigurationProxyEndpoint.as_view(), - name="sentry-api-0-project-remote-configuration", - ), # Internal re_path( r"^internal/", @@ -3302,6 +3299,12 @@ def create_group_urls(name_prefix: str) -> list[URLPattern | URLResolver]: RelocationPublicKeyEndpoint.as_view(), name="sentry-api-0-relocations-public-key", ), + # Secret Scanning + re_path( + r"^secret-scanning/github/$", + SecretScanningGitHubEndpoint.as_view(), + name="sentry-api-0-secret-scanning-github", + ), # Catch all re_path( r"^$", diff --git a/src/sentry/api/validators/__init__.py b/src/sentry/api/validators/__init__.py index 9836bf781fe22..455db4b7f387b 100644 --- a/src/sentry/api/validators/__init__.py +++ b/src/sentry/api/validators/__init__.py @@ -7,5 +7,4 @@ from .notifications import * # noqa: F401,F403 from .project import * # noqa: F401,F403 from .project_codeowners import * # noqa: F401,F403 -from .servicehook import * # noqa: F401,F403 from .userrole import * # noqa: F401,F403 diff --git a/src/sentry/apidocs/api_ownership_allowlist_dont_modify.py b/src/sentry/apidocs/api_ownership_allowlist_dont_modify.py index 94ba8c7342e7c..7f5ef84ac62f9 100644 --- a/src/sentry/apidocs/api_ownership_allowlist_dont_modify.py +++ b/src/sentry/apidocs/api_ownership_allowlist_dont_modify.py @@ -317,4 +317,5 @@ "/extensions/jira/uninstalled/", "/api/0/projects/{organization_id_or_slug}/{project_id_or_slug}/filters/", "/api/0/teams/{organization_id_or_slug}/{team_id_or_slug}/alerts-triggered/", + "/_warmup/", ] diff --git a/src/sentry/apidocs/api_publish_status_allowlist_dont_modify.py b/src/sentry/apidocs/api_publish_status_allowlist_dont_modify.py index 553132be5907b..7201ea8861bd7 100644 --- a/src/sentry/apidocs/api_publish_status_allowlist_dont_modify.py +++ b/src/sentry/apidocs/api_publish_status_allowlist_dont_modify.py @@ -15,11 +15,10 @@ "/api/0/relays/{relay_id}/": {"DELETE"}, "/api/0/{var}/{issue_id}/": {"DELETE", "GET", "PUT"}, "/api/0/{var}/{issue_id}/activities/": {"GET"}, - "/api/0/{var}/{issue_id}/events/": {"GET"}, "/api/0/{var}/{issue_id}/events/{event_id}/": {"GET"}, "/api/0/{var}/{issue_id}/{var}/": {"GET", "POST"}, "/api/0/{var}/{issue_id}/{var}/{note_id}/": {"DELETE", "PUT"}, - "/api/0/{var}/{issue_id}/hashes/": {"GET", "DELETE"}, + "/api/0/{var}/{issue_id}/hashes/": {"GET", "DELETE", "PUT"}, "/api/0/{var}/{issue_id}/reprocessing/": {"POST"}, "/api/0/{var}/{issue_id}/stats/": {"GET"}, "/api/0/{var}/{issue_id}/tags/": {"GET"}, @@ -83,6 +82,7 @@ "/api/0/organizations/{organization_id_or_slug}/{var}/{issue_id}/hashes/": { "GET", "DELETE", + "PUT", }, "/api/0/organizations/{organization_id_or_slug}/{var}/{issue_id}/reprocessing/": {"POST"}, "/api/0/organizations/{organization_id_or_slug}/{var}/{issue_id}/stats/": {"GET"}, diff --git a/src/sentry/apidocs/examples/dashboard_examples.py b/src/sentry/apidocs/examples/dashboard_examples.py index 
07d3d21c790ae..4ee6029151a05 100644 --- a/src/sentry/apidocs/examples/dashboard_examples.py +++ b/src/sentry/apidocs/examples/dashboard_examples.py @@ -56,6 +56,7 @@ } ], "isHidden": False, + "selectedAggregate": None, } ], "limit": None, @@ -66,6 +67,7 @@ "projects": [1], "filters": {}, "period": "7d", + "permissions": {"is_creator_only_editable": False}, } DASHBOARDS_OBJECT = [ diff --git a/src/sentry/apidocs/examples/environment_examples.py b/src/sentry/apidocs/examples/environment_examples.py index 485ffc527ec02..c13738180beb4 100644 --- a/src/sentry/apidocs/examples/environment_examples.py +++ b/src/sentry/apidocs/examples/environment_examples.py @@ -7,11 +7,11 @@ class EnvironmentExamples: "List an Organization's Environments", value=[ { - "id": 1, + "id": "1", "name": "Production", }, { - "id": 2, + "id": "2", "name": "Staging", }, ], @@ -19,3 +19,15 @@ class EnvironmentExamples: response_only=True, ) ] + + GET_PROJECT_ENVIRONMENTS = [ + OpenApiExample( + "List a Project's Environments", + value=[ + {"id": "1", "name": "Production", "isHidden": False}, + {"id": "2", "name": "Staging", "isHidden": True}, + ], + status_codes=["200"], + response_only=True, + ) + ] diff --git a/src/sentry/apidocs/examples/event_examples.py b/src/sentry/apidocs/examples/event_examples.py new file mode 100644 index 0000000000000..22797eafad053 --- /dev/null +++ b/src/sentry/apidocs/examples/event_examples.py @@ -0,0 +1,442 @@ +from datetime import datetime + +from drf_spectacular.utils import OpenApiExample + +from sentry.issues.endpoints.project_event_details import GroupEventDetailsResponse + +SIMPLE_EVENT = { + "eventID": "9fac2ceed9344f2bbfdd1fdacb0ed9b1", + "tags": [ + {"key": "browser", "value": "Chrome 60.0"}, + {"key": "device", "value": "Other"}, + {"key": "environment", "value": "production"}, + {"value": "fatal", "key": "level"}, + {"key": "os", "value": "Mac OS X 10.12.6"}, + {"value": "CPython 2.7.16", "key": "runtime"}, + {"key": "release", "value": "17642328ead24b51867165985996d04b29310337"}, + {"key": "server_name", "value": "web1.example.com"}, + ], + "dateCreated": "2020-09-11T17:46:36Z", + "user": None, + "message": "", + "title": "This is an example Python exception", + "id": "dfb1a2d057194e76a4186cc8a5271553", + "platform": "python", + "event.type": "error", + "groupID": "1889724436", + "crashFile": None, + "location": "example.py:123", + "culprit": "/books/new/", + "projectID": "49271", +} + +GROUP_EVENT: GroupEventDetailsResponse = { + "groupID": "1341191803", + "eventID": "9999aaaaca8b46d797c23c6077c6ff01", + "dist": None, + "userReport": None, + "previousEventID": None, + "message": "", + "title": "This is an example Python exception", + "id": "9999aaafcc8b46d797c23c6077c6ff01", + "size": 107762, + "errors": [ + { + "data": { + "column": 8, + "source": "https://s1.sentry-cdn.com/_static/bloopbloop/sentry/dist/app.js.map", + "row": 15, + }, + "message": "Invalid location in sourcemap", + "type": "js_invalid_sourcemap_location", + } + ], + "platform": "javascript", + "nextEventID": "99f9e199e9a74a14bfef6196ad741619", + "type": "error", + "metadata": { + "type": "ForbiddenError", + "value": "GET /organizations/hellboy-meowmeow/users/ 403", + }, + "tags": [ + {"value": "Chrome 83.0.4103", "key": "browser"}, + {"value": "Chrome", "key": "browser.name"}, + {"value": "prod", "key": "environment"}, + {"value": "yes", "key": "handled"}, + {"value": "error", "key": "level"}, + {"value": "generic", "key": "mechanism"}, + ], + "dateCreated": 
datetime.fromisoformat("2020-06-17T22:26:56.098086Z"), + "dateReceived": datetime.fromisoformat("2020-06-17T22:26:56.428721Z"), + "user": { + "username": None, + "name": "Hell Boy", + "ip_address": "192.168.1.1", + "email": "hell@boy.cat", + "data": {"isStaff": False}, + "id": "550747", + }, + "entries": [ + { + "type": "exception", + "data": { + "values": [ + { + "stacktrace": { + "frames": [ + { + "function": "ignoreOnError", + "errors": None, + "colNo": 23, + "vars": None, + "package": None, + "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", + "inApp": False, + "lineNo": 71, + "module": "usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers", + "filename": "/usr/src/getsentry/src/sentry/node_modules/@sentry/browser/esm/helpers.js", + "platform": None, + "instructionAddr": None, + "context": [ + [66, " }"], + [67, " // Attempt to invoke user-land function"], + [ + 68, + " // NOTE: If you are a Sentry user, and you are seeing this stack frame, it", + ], + [ + 69, + " // means the sentry.javascript SDK caught an error invoking your application code. This", + ], + [ + 70, + " // is expected behavior and NOT indicative of a bug with sentry.javascript.", + ], + [ + 71, + " return fn.apply(this, wrappedArguments);", + ], + [72, " // tslint:enable:no-unsafe-any"], + [73, " }"], + [74, " catch (ex) {"], + [75, " ignoreNextOnError();"], + [76, " withScope(function (scope) {"], + ], + "symbolAddr": None, + "trust": None, + "symbol": None, + }, + { + "function": "apply", + "errors": None, + "colNo": 24, + "vars": None, + "package": None, + "absPath": "webpack:////usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", + "inApp": False, + "lineNo": 74, + "module": "usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods", + "filename": "/usr/src/getsentry/src/sentry/node_modules/reflux-core/lib/PublisherMethods.js", + "platform": None, + "instructionAddr": None, + "context": [ + [69, " */"], + [70, " triggerAsync: function triggerAsync() {"], + [71, " var args = arguments,"], + [72, " me = this;"], + [73, " _.nextTick(function () {"], + [74, " me.trigger.apply(me, args);"], + [75, " });"], + [76, " },"], + [77, ""], + [78, " /**"], + [ + 79, + " * Wraps the trigger mechanism with a deferral function.", + ], + ], + "symbolAddr": None, + "trust": None, + "symbol": None, + }, + ], + "framesOmitted": None, + "registers": None, + "hasSystemFrames": True, + }, + "module": None, + "rawStacktrace": { + "frames": [ + { + "function": "a", + "errors": None, + "colNo": 88800, + "vars": None, + "package": None, + "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", + "inApp": False, + "lineNo": 81, + "module": None, + "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", + "platform": None, + "instructionAddr": None, + "context": [ + [76, "/*!"], + [77, " Copyright (c) 2018 Jed Watson."], + [78, " Licensed under the MIT License (MIT), see"], + [79, " http://jedwatson.github.io/react-select"], + [80, "*/"], + [ + 81, + "{snip} e,t)}));return e.handleEvent?e.handleEvent.apply(this,s):e.apply(this,s)}catch(e){throw c(),Object(o.m)((function(n){n.addEventProcessor((fu {snip}", + ], + [82, "/*!"], + [83, " * JavaScript Cookie v2.2.1"], + [84, " * https://github.com/js-cookie/js-cookie"], + [85, " *"], + [86, " * Copyright 2006, 2015 Klaus Hartl & Fagner Brack"], + ], + "symbolAddr": None, + "trust": None, + "symbol": None, + }, + { + 
"function": None, + "errors": None, + "colNo": 149484, + "vars": None, + "package": None, + "absPath": "https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", + "inApp": False, + "lineNo": 119, + "module": None, + "filename": "/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js", + "platform": None, + "instructionAddr": None, + "context": [ + [114, "/* @license"], + [115, "Papa Parse"], + [116, "v5.2.0"], + [117, "https://github.com/mholt/PapaParse"], + [118, "License: MIT"], + [ + 119, + "{snip} (){var e=arguments,t=this;r.nextTick((function(){t.trigger.apply(t,e)}))},deferWith:function(e){var t=this.trigger,n=this,r=function(){t.app {snip}", + ], + [120, "/**!"], + [ + 121, + " * @fileOverview Kickass library to create and place poppers near their reference elements.", + ], + [122, " * @version 1.16.1"], + [123, " * @license"], + [ + 124, + " * Copyright (c) 2016 Federico Zivolo and contributors", + ], + ], + "symbolAddr": None, + "trust": None, + "symbol": None, + }, + ], + "framesOmitted": None, + "registers": None, + "hasSystemFrames": True, + }, + "mechanism": {"type": "generic", "handled": True}, + "threadId": None, + "value": "GET /organizations/hellboy-meowmeow/users/ 403", + "type": "ForbiddenError", + } + ], + "excOmitted": None, + "hasSystemFrames": True, + }, + }, + { + "type": "breadcrumbs", + "data": { + "values": [ + { + "category": "tracing", + "level": "debug", + "event_id": None, + "timestamp": "2020-06-17T22:26:55.266586Z", + "data": None, + "message": "[Tracing] pushActivity: idleTransactionStarted#1", + "type": "debug", + }, + { + "category": "xhr", + "level": "info", + "event_id": None, + "timestamp": "2020-06-17T22:26:55.619446Z", + "data": { + "url": "/api/0/internal/health/", + "status_code": 200, + "method": "GET", + }, + "message": None, + "type": "http", + }, + { + "category": "sentry.transaction", + "level": "info", + "event_id": None, + "timestamp": "2020-06-17T22:26:55.945016Z", + "data": None, + "message": "7787a027f3fb46c985aaa2287b3f4d09", + "type": "default", + }, + ] + }, + }, + { + "type": "request", + "data": { + "fragment": None, + "cookies": [], + "inferredContentType": None, + "env": None, + "headers": [ + [ + "User-Agent", + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", + ] + ], + "url": "https://sentry.io/organizations/hellboy-meowmeow/issues/", + "query": [["project", "5236886"]], + "data": None, + "method": None, + }, + }, + ], + "packages": {}, + "sdk": {"version": "5.17.0", "name": "sentry.javascript.browser"}, + "_meta": { + "user": None, + "context": None, + "entries": {}, + "contexts": None, + "message": None, + "packages": None, + "tags": {}, + "sdk": None, + }, + "contexts": { + "ForbiddenError": { + "status": 403, + "statusText": "Forbidden", + "responseJSON": {"detail": "You do not have permission to perform this action."}, + "type": "default", + }, + "browser": {"version": "83.0.4103", "type": "browser", "name": "Chrome"}, + "os": {"version": "10", "type": "os", "name": "Windows"}, + "trace": { + "span_id": "83db1ad17e67dfe7", + "type": "trace", + "trace_id": "da6caabcd90e45fdb81f6655824a5f88", + "op": "navigation", + }, + "organization": {"type": "default", "id": "323938", "slug": "hellboy-meowmeow"}, + }, + "fingerprints": ["fbe908cc63d63ea9763fd84cb6bad177"], + "context": { + "resp": { + "status": 403, + "responseJSON": {"detail": "You do not have permission to perform this action."}, + "name": "ForbiddenError", + 
"statusText": "Forbidden", + "message": "GET /organizations/hellboy-meowmeow/users/ 403", + "stack": "Error\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480441\n at u (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51006)\n at Generator._invoke (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:50794)\n at Generator.A.forEach.e. [as next] (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:165:51429)\n at n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68684)\n at s (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68895)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68954\n at new Promise ()\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:16:68835\n at v (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480924)\n at m (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:480152)\n at t.fetchMemberList (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:902983)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:900527)\n at t.componentDidMount (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:15597)\n at Pc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:101023)\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Rc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:97371)\n at Oc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:87690)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45820\n at t.unstable_runWithPriority (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:3462)\n at Ko (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45529)\n at Zo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45765)\n at Jo (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:45700)\n at gc (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:84256)\n at Object.enqueueSetState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:181:50481)\n at t.M.setState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:173:1439)\n at t.onUpdate (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:543076)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at p.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at 
p.onInitializeUrlState (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/app.js:1:541711)\n at a.n (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149090)\n at a.emit (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:189:6550)\n at Function.trigger (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149379)\n at https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:119:149484\n at a (https://s1.sentry-cdn.com/_static/dde778f9f93a48e2b6e58ecb0c5eb8f2/sentry/dist/vendor.js:81:88800)", + } + }, + "release": { + "dateReleased": datetime.fromisoformat("2020-06-17T19:21:02.186004Z"), + "commitCount": 11, + "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", + "data": {}, + "lastDeploy": { + "name": "b65bc521378269d3eaefdc964f8ef56621414943 to prod", + "url": None, + "environment": "prod", + "dateStarted": None, + "dateFinished": "2020-06-17T19:20:55.641748Z", + "id": "6883490", + }, + "deployCount": 1, + "dateCreated": datetime.fromisoformat("2020-06-17T18:45:31.042157Z"), + "version": "b65bc521378269d3eaefdc964f8ef56621414943", + "lastCommit": { + "repository": { + "status": "active", + "integrationId": "2933", + "externalSlug": "getsentry/getsentry", + "name": "getsentry/getsentry", + "provider": {"id": "integrations:github", "name": "GitHub"}, + "url": "https://github.com/getsentry/getsentry", + "id": "2", + "dateCreated": "2016-10-10T21:36:45.373994Z", + }, + "releases": [ + { + "dateReleased": datetime.fromisoformat("2020-06-23T13:26:18.427090Z"), + "url": "https://freight.getsentry.net/deploys/getsentry/staging/2077/", + "dateCreated": "2020-06-23T13:22:50.420265Z", + "version": "f3783e5fe710758724f14267439fd46cc2bf5918", + "shortVersion": "f3783e5fe710758724f14267439fd46cc2bf5918", + "ref": "perf/source-maps-test", + }, + { + "dateReleased": datetime.fromisoformat("2020-06-17T19:21:02.186004Z"), + "url": "https://freight.getsentry.net/deploys/getsentry/production/8868/", + "dateCreated": datetime.fromisoformat("2020-06-17T18:45:31.042157Z"), + "version": "b65bc521378269d3eaefdc964f8ef56621414943", + "shortVersion": "b65bc521378269d3eaefdc964f8ef56621414943", + "ref": "master", + }, + ], + "dateCreated": datetime.fromisoformat("2020-06-17T18:43:37Z"), + "message": "feat(billing): Get a lot of money", + "id": "b65bc521378269d3eaefdc964f8ef56621414943", + }, + "ref": "master", + }, + "crashFile": None, + "location": "example.py:123", + "culprit": "/books/new/", + "groupingConfig": {"enhancements": "abc", "id": "2359823092345612392"}, + "occurrence": None, + "projectID": "5236886", + "resolvedWith": [], + "sdkUpdates": [], + "userReport": None, +} + + +class EventExamples: + PROJECT_EVENTS_SIMPLE = [ + OpenApiExample( + "Return a list of error events bound to a project", + value=[SIMPLE_EVENT], + response_only=True, + status_codes=["200"], + ) + ] + GROUP_EVENTS_SIMPLE = [ + OpenApiExample( + "Return a list of error events bound to an issue", + value=[SIMPLE_EVENT], + response_only=True, + status_codes=["200"], + ) + ] + GROUP_EVENT_DETAILS = [ + OpenApiExample( + "Return an issue event", + value=GROUP_EVENT, + response_only=True, + status_codes=["200"], + ) + ] diff --git a/src/sentry/apidocs/examples/integration_examples.py b/src/sentry/apidocs/examples/integration_examples.py index 9691785cae31e..44ec0f5052db4 100644 --- a/src/sentry/apidocs/examples/integration_examples.py +++ 
b/src/sentry/apidocs/examples/integration_examples.py @@ -55,3 +55,34 @@ class IntegrationExamples: response_only=True, ) ] + + EXTERNAL_USER_CREATE = [ + OpenApiExample( + "Create an external user", + value={ + "externalName": "@Billybob", + "provider": "github", + "userId": "1", + "integrationId": "1", + "id": "1", + }, + status_codes=["200", "201"], + response_only=True, + ) + ] + + EXTERNAL_TEAM_CREATE = [ + OpenApiExample( + "Create an external team", + value={ + "externalId": "asdf", + "externalName": "@team-foo", + "provider": "slack", + "integrationId": "1", + "id": "1", + "teamId": "2", + }, + status_codes=["200", "201"], + response_only=True, + ) + ] diff --git a/src/sentry/apidocs/examples/organization_examples.py b/src/sentry/apidocs/examples/organization_examples.py index 0542f224ee4b5..109ae5d434152 100644 --- a/src/sentry/apidocs/examples/organization_examples.py +++ b/src/sentry/apidocs/examples/organization_examples.py @@ -800,7 +800,7 @@ class OrganizationExamples: }, "deployCount": 1, "lastDeploy": { - "id": 53070941, + "id": "53070941", "environment": "canary-test-control", "dateStarted": None, "dateFinished": "2024-05-21T11:26:17.597793Z", @@ -893,21 +893,6 @@ class OrganizationExamples: ) ] - EXTERNAL_USER_CREATE = [ - OpenApiExample( - "Create an external user", - value={ - "id": 123, - "provider": "github", - "external_name": "@billy", - "integration_id": 123, - "user_id": 123, - }, - status_codes=["200", "201"], - response_only=True, - ) - ] - GET_HISTORICAL_ANOMALIES = [ OpenApiExample( "Identify anomalies in historical data", diff --git a/src/sentry/apidocs/hooks.py b/src/sentry/apidocs/hooks.py index 364ca5e144409..49e22c21e6767 100644 --- a/src/sentry/apidocs/hooks.py +++ b/src/sentry/apidocs/hooks.py @@ -224,6 +224,8 @@ def _validate_request_body( def custom_postprocessing_hook(result: Any, generator: Any, **kwargs: Any) -> Any: + _fix_issue_paths(result) + # Fetch schema component references schema_components = result["components"]["schemas"] @@ -281,3 +283,40 @@ def _check_tag(method_info: Mapping[str, Any], endpoint_name: str) -> None: def _check_description(json_body: Mapping[str, Any], err_str: str) -> None: if json_body.get("description") is None: raise SentryApiBuildError(err_str) + + +def _fix_issue_paths(result: Any) -> Any: + """ + The way we define `/issues/` paths causes some problems with drf-spectacular: + - The path may be defined twice, with `/organizations/{organization_id_or_slug}` prefix and without + - The `/issues/` part of the path is defined as `issues|groups` for compatibility reasons, + but we only want to use `issues` in the docs + + This function removes duplicate paths, removes the `issues|groups` path parameter and + replaces it with `issues` in the path.
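+
+    For example, `/api/0/organizations/{organization_id_or_slug}/{var}/{issue_id}/tags/`
+    becomes `/api/0/issues/{issue_id}/tags/`, and the `var` and
+    `organization_id_or_slug` path parameters are dropped from each of its methods.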
+ """ + items = list(result["paths"].items()) + + modified_paths = [] + + for path, endpoint in items: + if "{var}/{issue_id}" in path: + modified_paths.append(path) + + for path in modified_paths: + updated_path = path.replace("{var}/{issue_id}", "issues/{issue_id}") + if path.startswith("/api/0/organizations/{organization_id_or_slug}/"): + updated_path = updated_path.replace( + "/api/0/organizations/{organization_id_or_slug}/", "/api/0/" + ) + endpoint = result["paths"][path] + for method in endpoint.keys(): + endpoint[method]["parameters"] = [ + param + for param in endpoint[method]["parameters"] + if not ( + param["in"] == "path" and param["name"] in ("var", "organization_id_or_slug") + ) + ] + result["paths"][updated_path] = endpoint + del result["paths"][path] diff --git a/src/sentry/apidocs/parameters.py b/src/sentry/apidocs/parameters.py index 53ca248c2b8c2..8bcc38d34c9bf 100644 --- a/src/sentry/apidocs/parameters.py +++ b/src/sentry/apidocs/parameters.py @@ -95,7 +95,8 @@ class EnvironmentParams: location="query", required=False, type=str, - description="""The visibility of the environments to filter by. The options are: `all`, `hidden`, `visible`. Defaults to `visible`.""", + description="""The visibility of the environments to filter by. Defaults to `visible`.""", + enum=["all", "hidden", "visible"], ) @@ -179,6 +180,14 @@ class OrganizationParams: description="The ID of the external user object. This is returned when creating an external user.", ) + EXTERNAL_TEAM_ID = OpenApiParameter( + name="external_team_id", + location="path", + required=True, + type=int, + description="The ID of the external team object. This is returned when creating an external team.", + ) + class ReleaseParams: VERSION = OpenApiParameter( @@ -253,6 +262,23 @@ class SCIMParams: ) +class IssueParams: + ISSUES_OR_GROUPS = OpenApiParameter( + name="var", + location="path", + required=False, + type=str, + description="Issue URLs may be accessed with either `issues` or `groups`. 
This parameter will be removed when building the API docs.", + ) + ISSUE_ID = OpenApiParameter( + name="issue_id", + location="path", + required=True, + type=int, + description="The ID of the issue you'd like to query.", + ) + + class IssueAlertParams: ISSUE_RULE_ID = OpenApiParameter( name="rule_id", diff --git a/src/sentry/audit_log/events.py b/src/sentry/audit_log/events.py index f831b0754060a..6fdb3b3a6bebe 100644 --- a/src/sentry/audit_log/events.py +++ b/src/sentry/audit_log/events.py @@ -1,5 +1,6 @@ from __future__ import annotations +from datetime import datetime from typing import TYPE_CHECKING from sentry.audit_log.manager import AuditLogEvent @@ -351,11 +352,17 @@ def __init__(self): def render(self, audit_log_entry: AuditLogEntry): entry_data = audit_log_entry.data - access_start = entry_data.get("access_start", None) - access_end = entry_data.get("access_end", None) + access_start = entry_data.get("access_start") + access_end = entry_data.get("access_end") rendered_text = "waived data secrecy" if access_start is not None and access_end is not None: - rendered_text += f" from {access_start} to {access_end}" + start_dt = datetime.fromisoformat(access_start) + end_dt = datetime.fromisoformat(access_end) + + formatted_start = start_dt.strftime("%b %d, %Y %I:%M %p UTC") + formatted_end = end_dt.strftime("%b %d, %Y %I:%M %p UTC") + + rendered_text += f" from {formatted_start} to {formatted_end}" return rendered_text diff --git a/src/sentry/backup/comparators.py b/src/sentry/backup/comparators.py index 9155f6de851d3..445d5f3078108 100644 --- a/src/sentry/backup/comparators.py +++ b/src/sentry/backup/comparators.py @@ -865,13 +865,27 @@ def get_default_comparators() -> dict[str, list[JSONScrubbingComparator]]: ], "sentry.userrole": [DateUpdatedComparator("date_updated")], "sentry.userroleuser": [DateUpdatedComparator("date_updated")], + "workflow_engine.action": [DateUpdatedComparator("date_updated", "date_added")], + "workflow_engine.datacondition": [DateUpdatedComparator("date_updated", "date_added")], + "workflow_engine.dataconditiongroup": [ + DateUpdatedComparator("date_updated", "date_added") + ], + "workflow_engine.dataconditiongroupaction": [ + DateUpdatedComparator("date_updated", "date_added") + ], "workflow_engine.datasource": [DateUpdatedComparator("date_updated", "date_added")], "workflow_engine.datasourcedetector": [ DateUpdatedComparator("date_updated", "date_added") ], "workflow_engine.detector": [DateUpdatedComparator("date_updated", "date_added")], + "workflow_engine.detectorstate": [DateUpdatedComparator("date_updated", "date_added")], + "workflow_engine.detectorworkflow": [ + DateUpdatedComparator("date_updated", "date_added") ], "workflow_engine.workflow": [DateUpdatedComparator("date_updated", "date_added")], - "workflow_engine.workflowaction": [DateUpdatedComparator("date_updated", "date_added")], + "workflow_engine.workflowdataconditiongroup": [ + DateUpdatedComparator("date_updated", "date_added") + ], }, ) diff --git a/src/sentry/buffer/base.py b/src/sentry/buffer/base.py index 5cdb624d011e9..a9cde77448f9e 100644 --- a/src/sentry/buffer/base.py +++ b/src/sentry/buffer/base.py @@ -148,7 +148,6 @@ def process( extra: dict[str, Any] | None = None, signal_only: bool | None = None, ) -> None: - from sentry.event_manager import ScoreClause from sentry.models.group import Group created = False @@ -162,12 +161,6 @@ def process( # HACK(dcramer): this is gross, but we don't have a good hook to compute this property today # XXX(dcramer): remove once we can replace
'priority' with something reasonable via Snuba if model is Group: - if "last_seen" in update_kwargs and "times_seen" in update_kwargs: - update_kwargs["score"] = ScoreClause( - group=None, - times_seen=update_kwargs["times_seen"], - last_seen=update_kwargs["last_seen"], - ) # XXX: create_or_update doesn't fire `post_save` signals, and so this update never # ends up in the cache. This causes issues when handling issue alerts, and likely # elsewhere. Use `update` here since we're already special casing, and we know that diff --git a/src/sentry/buffer/redis.py b/src/sentry/buffer/redis.py index 3ae1a951421c5..5b0ffbd01aa25 100644 --- a/src/sentry/buffer/redis.py +++ b/src/sentry/buffer/redis.py @@ -34,7 +34,7 @@ # load everywhere _last_validation_log: float | None = None Pipeline = Any -# TODO type Pipeline instead of using Any here +# TODO: type Pipeline instead of using Any here def _get_model_key(model: type[models.Model]) -> str: @@ -123,7 +123,9 @@ def queue(self, model_key: str) -> str | None: """ Get the queue name for the given model_key. """ + metrics.incr(f"pendingbuffer-router.queue.{model_key}") if model_key in self.pending_buffer_router: + metrics.incr(f"pendingbuffer-router.queue-found.{model_key}") generate_queue = self.pending_buffer_router[model_key].generate_queue if generate_queue is not None: return generate_queue(model_key) @@ -158,6 +160,7 @@ def assign_queue(self, model: type[models.Model], generate_queue: ChooseQueueFun A queue can be assigned to a model by passing in the generate_queue function. """ key = _get_model_key(model=model) + metrics.incr(f"redisbuffer-router.assign_queue.{key}") self._routers[key] = generate_queue def create_pending_buffers_router(self, incr_batch_size: int) -> PendingBufferRouter: diff --git a/src/sentry/celery.py b/src/sentry/celery.py index d3d723a08a315..d3ce67dcaeeec 100644 --- a/src/sentry/celery.py +++ b/src/sentry/celery.py @@ -14,8 +14,8 @@ [ # basic tasks that must be passed models still "sentry.tasks.process_buffer.process_incr", - "sentry.tasks.process_resource_change_bound", - "sentry.tasks.sentry_apps.send_alert_event", + "sentry.sentry_apps.tasks.sentry_apps.process_resource_change_bound", + "sentry.sentry_apps.tasks.sentry_apps.send_alert_event", "sentry.tasks.unmerge", "src.sentry.notifications.utils.async_send_notification", # basic tasks that can already deal with primary keys passed diff --git a/src/sentry/charts/chartcuterie.py b/src/sentry/charts/chartcuterie.py index 956d01afd64c8..263ef4e7df96a 100644 --- a/src/sentry/charts/chartcuterie.py +++ b/src/sentry/charts/chartcuterie.py @@ -67,7 +67,7 @@ def generate_chart(self, style: ChartType, data: Any, size: ChartSize | None = N with sentry_sdk.start_span( op="charts.chartcuterie.generate_chart", - description=type(self).__name__, + name=type(self).__name__, ): # Using sentry json formatter to handle datetime objects @@ -90,7 +90,7 @@ def generate_chart(self, style: ChartType, data: Any, size: ChartSize | None = N with sentry_sdk.start_span( op="charts.chartcuterie.upload", - description=type(self).__name__, + name=type(self).__name__, ): storage = get_storage(self.storage_options) storage.save(file_name, BytesIO(resp.content)) diff --git a/src/sentry/conf/server.py b/src/sentry/conf/server.py index fd98edfc9b8a4..00df2f3c2f683 100644 --- a/src/sentry/conf/server.py +++ b/src/sentry/conf/server.py @@ -20,12 +20,13 @@ from sentry.conf.api_pagination_allowlist_do_not_modify import ( SENTRY_API_PAGINATION_ALLOWLIST_DO_NOT_MODIFY, ) +from sentry.conf.types.celery import 
SplitQueueSize, SplitQueueTaskRoute from sentry.conf.types.kafka_definition import ConsumerDefinition from sentry.conf.types.logging_config import LoggingConfig from sentry.conf.types.role_dict import RoleDict from sentry.conf.types.sdk_config import ServerSdkConfig from sentry.utils import json # NOQA (used in getsentry config) -from sentry.utils.celery import crontab_with_minute_jitter +from sentry.utils.celery import crontab_with_minute_jitter, make_split_task_queues from sentry.utils.types import Type, type_from_value @@ -345,6 +346,7 @@ def env( "sentry.middleware.locale.SentryLocaleMiddleware", "sentry.middleware.ratelimit.RatelimitMiddleware", "django.contrib.messages.middleware.MessageMiddleware", + "sentry.middleware.devtoolbar.DevToolbarAnalyticsMiddleware", ) ROOT_URLCONF = "sentry.conf.urls" @@ -392,12 +394,14 @@ def env( "sentry", "sentry.analytics", "sentry.incidents.apps.Config", + "sentry.deletions", "sentry.discover", "sentry.analytics.events", "sentry.nodestore", "sentry.users", "sentry.sentry_apps", "sentry.integrations", + "sentry.flags", "sentry.monitors", "sentry.uptime", "sentry.replays", @@ -750,6 +754,7 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "sentry.integrations.github.tasks.pr_comment", "sentry.integrations.jira.tasks", "sentry.integrations.opsgenie.tasks", + "sentry.sentry_apps.tasks", "sentry.snuba.tasks", "sentry.replays.tasks", "sentry.monitors.tasks.clock_pulse", @@ -819,6 +824,34 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: "sentry.integrations.tasks", ) +# tmp(michal): Default configuration for post_process* queues split +SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER: dict[str, Callable[[], str]] = {} + +# Enable split queue routing +CELERY_ROUTES = ("sentry.queue.routers.SplitQueueTaskRouter",) + +# Mapping from task names to split queues. This can be used when the +# task does not have to specify the queue and can rely on Celery to +# do the routing. +# Each route has a task name as key and a `SplitQueueTaskRoute` as value, +# which provides the default destination queue and the split queues +# configuration. The default queue is used when the rollout option is not +# active. +CELERY_SPLIT_QUEUE_TASK_ROUTES_REGION: Mapping[str, SplitQueueTaskRoute] = { + "sentry.tasks.store.save_event_transaction": { + "default_queue": "events.save_event_transaction", + "queues_config": { + "total": 3, + "in_use": 3, + }, + } +} +CELERY_SPLIT_TASK_QUEUES_REGION = make_split_task_queues(CELERY_SPLIT_QUEUE_TASK_ROUTES_REGION) + +# Mapping from queue name to split queues to be used by SplitQueueRouter. +# This is meant to be used in those cases where we have to specify the +# queue name when issuing a task. Example: post process.
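+# For example (queue name illustrative, assuming the same naming scheme as
+# documented in SplitQueueTaskRoute):
+#   CELERY_SPLIT_QUEUE_ROUTES = {
+#       "post_process_transactions": {"total": 3, "in_use": 2},
+#   }
+# would create post_process_transactions_1 through _3 at startup and have
+# the router send new messages to the first two only.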
+CELERY_SPLIT_QUEUE_ROUTES: Mapping[str, SplitQueueSize] = {} + default_exchange = Exchange("default", type="direct") control_exchange = default_exchange @@ -1239,16 +1272,19 @@ def SOCIAL_AUTH_DEFAULT_USERNAME() -> str: CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat-control") CELERYBEAT_SCHEDULE = CELERYBEAT_SCHEDULE_CONTROL CELERY_QUEUES = CELERY_QUEUES_CONTROL + CELERY_SPLIT_QUEUE_TASK_ROUTES: Mapping[str, SplitQueueTaskRoute] = {} elif SILO_MODE == "REGION": CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat-region") CELERYBEAT_SCHEDULE = CELERYBEAT_SCHEDULE_REGION - CELERY_QUEUES = CELERY_QUEUES_REGION + CELERY_QUEUES = CELERY_QUEUES_REGION + CELERY_SPLIT_TASK_QUEUES_REGION + CELERY_SPLIT_QUEUE_TASK_ROUTES = CELERY_SPLIT_QUEUE_TASK_ROUTES_REGION else: CELERYBEAT_SCHEDULE = {**CELERYBEAT_SCHEDULE_CONTROL, **CELERYBEAT_SCHEDULE_REGION} CELERYBEAT_SCHEDULE_FILENAME = os.path.join(tempfile.gettempdir(), "sentry-celerybeat") - CELERY_QUEUES = CELERY_QUEUES_REGION + CELERY_QUEUES_CONTROL + CELERY_QUEUES = CELERY_QUEUES_REGION + CELERY_QUEUES_CONTROL + CELERY_SPLIT_TASK_QUEUES_REGION + CELERY_SPLIT_QUEUE_TASK_ROUTES = CELERY_SPLIT_QUEUE_TASK_ROUTES_REGION for queue in CELERY_QUEUES: queue.durable = False @@ -1730,7 +1766,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_METRICS_INDEXER = "sentry.sentry_metrics.indexer.postgres.postgres_v2.PostgresIndexer" SENTRY_METRICS_INDEXER_OPTIONS: dict[str, Any] = {} SENTRY_METRICS_INDEXER_CACHE_TTL = 3600 * 2 -SENTRY_METRICS_INDEXER_TRANSACTIONS_SAMPLE_RATE = 0.1 +SENTRY_METRICS_INDEXER_TRANSACTIONS_SAMPLE_RATE = 0.1 # relative to SENTRY_BACKEND_APM_SAMPLING SENTRY_METRICS_INDEXER_SPANNER_OPTIONS: dict[str, Any] = {} @@ -2183,9 +2219,6 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: # This flag activates consuming GroupAttribute messages in the development environment SENTRY_USE_GROUP_ATTRIBUTES = True -# This flag activates replay analyzer service in the development environment -SENTRY_USE_REPLAY_ANALYZER_SERVICE = False - # This flag activates Spotlight Sidecar in the development environment SENTRY_USE_SPOTLIGHT = False @@ -2435,14 +2468,6 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: "only_if": settings.SENTRY_USE_PROFILING, } ), - "session-replay-analyzer": lambda settings, options: ( - { - "image": "ghcr.io/getsentry/session-replay-analyzer:latest", - "environment": {}, - "ports": {"3000/tcp": 3000}, - "only_if": settings.SENTRY_USE_REPLAY_ANALYZER_SERVICE, - } - ), "spotlight-sidecar": lambda settings, options: ( { "image": "ghcr.io/getsentry/spotlight:latest", @@ -2467,7 +2492,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_SELF_HOSTED_ERRORS_ONLY = False # only referenced in getsentry to provide the stable beacon version # updated with scripts/bump-version.sh -SELF_HOSTED_STABLE_VERSION = "24.9.0" +SELF_HOSTED_STABLE_VERSION = "24.10.0" # Whether we should look at X-Forwarded-For header or not # when checking REMOTE_ADDR ip addresses @@ -3115,6 +3140,7 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: ZERO_DOWNTIME_MIGRATIONS_RAISE_FOR_UNSAFE = True ZERO_DOWNTIME_MIGRATIONS_LOCK_TIMEOUT = None ZERO_DOWNTIME_MIGRATIONS_STATEMENT_TIMEOUT = None +ZERO_DOWNTIME_MIGRATIONS_LOCK_TIMEOUT_FORCE = False if int(PG_VERSION.split(".", maxsplit=1)[0]) < 12: # In v0.6 of django-pg-zero-downtime-migrations this settings is deprecated for PostreSQLv12+ @@ -3187,7 +3213,7 @@ def 
custom_parameter_sort(parameter: dict) -> tuple[str, int]: # We should not run access logging middleware on some endpoints as # it is very noisy, and these views are hit by internal services. -ACCESS_LOGS_EXCLUDE_PATHS = ("/api/0/internal/", "/api/0/relays/") +ACCESS_LOGS_EXCLUDE_PATHS = ("/api/0/internal/", "/api/0/relays/", "/_warmup/") VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON = True DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL = False @@ -3514,7 +3540,3 @@ def custom_parameter_sort(parameter: dict) -> tuple[str, int]: SENTRY_WEB_PORT = int(bind[1]) CELERYBEAT_SCHEDULE_FILENAME = f"celerybeat-schedule-{SILO_MODE}" - - -# tmp(michal): Default configuration for post_process* queueus split -SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER: dict[str, Callable[[], str]] = {} diff --git a/src/sentry/conf/types/celery.py b/src/sentry/conf/types/celery.py new file mode 100644 index 0000000000000..809afab5d7af5 --- /dev/null +++ b/src/sentry/conf/types/celery.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from typing import NotRequired, TypedDict + + +class SplitQueueSize(TypedDict): + # The total number of queues to create to split a single queue. + # This number triggers the creation of the queues themselves + # when the application starts. + total: int + # The number of queues to actually use. It has to be smaller than or + # equal to `total`. + # This is the number of queues the router uses when the split + # is enabled on this queue. + # This number exists in order to be able to safely increase or + # decrease the number of queues as the queues have to be created + # first, then we have to start consuming from them, and only then + # can we start producing. + in_use: int + + +class SplitQueueTaskRoute(TypedDict): + """ + This is used to provide the routes that task invocations are + routed to when the Celery router is used. + """ + + # This represents both the name of the default queue in use when + # the router is not deployed and the prefix for all split queue + # names for this task. + # + # Example: `my_queue` becomes `my_queue_1`, `my_queue_2` if there are + # two split queues. + default_queue: str + + # Configures the number of queues to create and to use. + # + # This can be omitted if we do not want to set up multiple queues in + # an environment. In order to use the SplitQueue router, the queue + # name has to be removed from the task definition, which means we + # must go through the router in all cases. Thus the router has + # to provide a default queue even if we do not want a split in an + # environment.
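+    #
+    # For instance, the save_event_transaction route in
+    # src/sentry/conf/server.py sets {"total": 3, "in_use": 3}, splitting
+    # events.save_event_transaction into events.save_event_transaction_1
+    # through events.save_event_transaction_3, all three in use.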
+ queues_config: NotRequired[SplitQueueSize] diff --git a/src/sentry/conf/urls.py b/src/sentry/conf/urls.py index 09bc7fe52a756..62f54146cd013 100644 --- a/src/sentry/conf/urls.py +++ b/src/sentry/conf/urls.py @@ -1,31 +1,11 @@ from __future__ import annotations -from django.urls import URLPattern, URLResolver, re_path - -from sentry.web.frontend import csrf_failure from sentry.web.frontend.error_404 import Error404View from sentry.web.frontend.error_500 import Error500View from sentry.web.urls import urlpatterns as web_urlpatterns +# XXX: remove after getsentry does not reference these handler404 = Error404View.as_view() handler500 = Error500View.as_view() -urlpatterns: list[URLResolver | URLPattern] = [ - re_path( - r"^500/", - handler500, - name="error-500", - ), - re_path( - r"^404/", - handler404, - name="error-404", - ), - re_path( - r"^403-csrf-failure/", - csrf_failure.view, - name="error-403-csrf-failure", - ), -] - -urlpatterns += web_urlpatterns +urlpatterns = web_urlpatterns diff --git a/src/sentry/constants.py b/src/sentry/constants.py index 9e76eb1690772..41bee5a4a9224 100644 --- a/src/sentry/constants.py +++ b/src/sentry/constants.py @@ -710,6 +710,7 @@ class InsightModules(Enum): METRICS_ACTIVATE_LAST_FOR_GAUGES_DEFAULT = False DATA_CONSENT_DEFAULT = False UPTIME_AUTODETECTION = True +TARGET_SAMPLE_RATE_DEFAULT = 1.0 # `sentry:events_member_admin` - controls whether the 'member' role gets the event:admin scope EVENTS_MEMBER_ADMIN_DEFAULT = True @@ -781,6 +782,7 @@ class InsightModules(Enum): "*/ready", "*/readyz", "*/ping", + "*/up", ] diff --git a/src/sentry/consumers/__init__.py b/src/sentry/consumers/__init__.py index 975da68126ea5..77231e1b1621c 100644 --- a/src/sentry/consumers/__init__.py +++ b/src/sentry/consumers/__init__.py @@ -162,6 +162,19 @@ def ingest_events_options() -> list[click.Option]: return options +def ingest_transactions_options() -> list[click.Option]: + options = ingest_events_options() + options.append( + click.Option( + ["--no-celery-mode", "no_celery_mode"], + default=False, + is_flag=True, + help="Save event directly in consumer without celery", + ) + ) + return options + + _METRICS_INDEXER_OPTIONS = [ click.Option(["--input-block-size"], type=int, default=None), click.Option(["--output-block-size"], type=int, default=None), @@ -312,20 +325,8 @@ def ingest_events_options() -> list[click.Option]: }, "ingest-transactions": { "topic": Topic.INGEST_TRANSACTIONS, - "strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory", - "click_options": ingest_events_options(), - "static_args": { - "consumer_type": ConsumerType.Transactions, - }, - "dlq_topic": Topic.INGEST_TRANSACTIONS_DLQ, - }, - "ingest-transactions-inc847": { - "topic": Topic.INGEST_TRANSACTIONS, - "strategy_factory": "sentry.ingest.consumer.factory.IngestStrategyFactory", - "click_options": ingest_events_options(), - "static_args": { - "consumer_type": ConsumerType.Transactions, - }, + "strategy_factory": "sentry.ingest.consumer.factory.IngestTransactionsStrategyFactory", + "click_options": ingest_transactions_options(), "dlq_topic": Topic.INGEST_TRANSACTIONS_DLQ, }, "ingest-metrics": { diff --git a/src/sentry/coreapi.py b/src/sentry/coreapi.py index ce544b3fe4e06..4e1f58c91d0a7 100644 --- a/src/sentry/coreapi.py +++ b/src/sentry/coreapi.py @@ -1,12 +1,6 @@ from __future__ import annotations import logging -from time import time - -from sentry.attachments import attachment_cache -from sentry.eventstore.processing import event_processing_store -from 
sentry.ingest.consumer.processors import CACHE_TIMEOUT -from sentry.tasks.store import preprocess_event, preprocess_event_from_reprocessing # TODO: We should make the API a class, and UDP/HTTP just inherit from it # This will make it so we can more easily control logging with various @@ -35,33 +29,3 @@ class APIUnauthorized(APIError): class APIForbidden(APIError): http_status = 403 - - -def insert_data_to_database_legacy( - data, start_time=None, from_reprocessing=False, attachments=None -): - """ - Yet another "fast path" to ingest an event without making it go - through Relay. Please consider using functions from the ingest consumer - instead, or, if you're within tests, to use `TestCase.store_event`. - """ - - # XXX(markus): Delete this function and merge with ingest consumer logic. - - if start_time is None: - start_time = time() - - # we might be passed some subclasses of dict that fail dumping - if not isinstance(data, dict): - data = dict(data.items()) - - cache_key = event_processing_store.store(data) - - # Attachments will be empty or None if the "event-attachments" feature - # is turned off. For native crash reports it will still contain the - # crash dump (e.g. minidump) so we can load it during processing. - if attachments is not None: - attachment_cache.set(cache_key, attachments, cache_timeout=CACHE_TIMEOUT) - - task = from_reprocessing and preprocess_event_from_reprocessing or preprocess_event - task.delay(cache_key=cache_key, start_time=start_time, event_id=data["event_id"]) diff --git a/src/sentry/data_secrecy/api/waive_data_secrecy.py b/src/sentry/data_secrecy/api/waive_data_secrecy.py index 8af1d9bab7e48..5f59ba863423c 100644 --- a/src/sentry/data_secrecy/api/waive_data_secrecy.py +++ b/src/sentry/data_secrecy/api/waive_data_secrecy.py @@ -1,3 +1,4 @@ +import logging from collections.abc import Mapping from typing import Any @@ -20,6 +21,8 @@ from sentry.data_secrecy.models import DataSecrecyWaiver from sentry.models.organization import Organization +logger = logging.getLogger("sentry.data_secrecy") + class WaiveDataSecrecyPermission(OrganizationPermission): scope_map = { @@ -119,25 +122,34 @@ def put(self, request: Request, organization: Organization): serialize(ds, request.user, DataSecrecyWaiverSerializer()), status=status.HTTP_200_OK ) - def delete(self, request: Request, organization): + def delete(self, request: Request, organization: Organization): """ Reinstates data secrecy for an organization. 
""" try: - ds = get_object_or_404(DataSecrecyWaiver, organization=organization) - ds.delete() - - self.create_audit_entry( - request=request, - organization=organization, - event=audit_log.get_event_id("DATA_SECRECY_REINSTATED"), + logger.info("Reinstating data secrecy for organization %s", organization.id) + ds = DataSecrecyWaiver.objects.get(organization=organization) + logger.info( + "Data secrecy waiver found for organization %s", + organization.id, + extra={"ds": ds.id}, ) - return Response( - {"detail": "Data secrecy has been reinstated."}, - status=status.HTTP_204_NO_CONTENT, - ) - except Http404: + except DataSecrecyWaiver.DoesNotExist: + logger.info("No data secrecy waiver found for organization %s", organization.id) return Response( {"detail": "No data secrecy waiver found for this organization."}, status=status.HTTP_404_NOT_FOUND, ) + + ds.delete() + logger.info("Data secrecy waiver deleted for organization %s", organization.id) + + self.create_audit_entry( + request=request, + organization=organization, + event=audit_log.get_event_id("DATA_SECRECY_REINSTATED"), + ) + return Response( + {"detail": "Data secrecy has been reinstated."}, + status=status.HTTP_204_NO_CONTENT, + ) diff --git a/src/sentry/db/models/fields/node.py b/src/sentry/db/models/fields/node.py index 7e3844319f67e..c58cad00fbb32 100644 --- a/src/sentry/db/models/fields/node.py +++ b/src/sentry/db/models/fields/node.py @@ -192,7 +192,7 @@ def to_python(self, value): try: value = pickle.loads(decompress(value)) except Exception as e: - # TODO this is a bit dangerous as a failure to read/decode the + # TODO: this is a bit dangerous as a failure to read/decode the # node_id will end up with this record being replaced with an # empty value under a new key, potentially orphaning an # original value in nodestore. OTOH if we can't decode the info diff --git a/src/sentry/db/models/fields/slug.py b/src/sentry/db/models/fields/slug.py index fa435e4a93066..ebb57ea2efbbc 100644 --- a/src/sentry/db/models/fields/slug.py +++ b/src/sentry/db/models/fields/slug.py @@ -3,6 +3,8 @@ from sentry.slug.validators import no_numeric_validator, org_slug_validator +DEFAULT_SLUG_MAX_LENGTH = 50 + class SentrySlugField(SlugField): default_validators = [*SlugField.default_validators, no_numeric_validator] diff --git a/src/sentry/db/postgres/schema.py b/src/sentry/db/postgres/schema.py index ad5efc10188a9..65e500578163c 100644 --- a/src/sentry/db/postgres/schema.py +++ b/src/sentry/db/postgres/schema.py @@ -1,10 +1,17 @@ +from contextlib import contextmanager + +from django.conf import settings +from django.db.backends.ddl_references import Statement from django.db.backends.postgresql.schema import ( DatabaseSchemaEditor as PostgresDatabaseSchemaEditor, ) from django.db.models import Field from django.db.models.base import ModelBase from django_zero_downtime_migrations.backends.postgres.schema import ( + DUMMY_SQL, DatabaseSchemaEditorMixin, + MultiStatementSQL, + PGLock, Unsafe, UnsafeOperationException, ) @@ -69,6 +76,12 @@ class SafePostgresDatabaseSchemaEditor(DatabaseSchemaEditorMixin, PostgresDataba PostgresDatabaseSchemaEditor.alter_db_tablespace ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.LOCK_TIMEOUT_FORCE = getattr( + settings, "ZERO_DOWNTIME_MIGRATIONS_LOCK_TIMEOUT_FORCE", False + ) + def alter_db_table(self, model, old_db_table, new_db_table): """ This didn't work correctly in django_zero_downtime_migrations, so implementing here. 
This @@ -85,7 +98,7 @@ def delete_model(self, model): """ raise UnsafeOperationException( f"Deleting the {model.__name__} model is unsafe.\n" - "More info here: https://develop.sentry.dev/database-migrations/#tables" + "More info here: https://develop.sentry.dev/database-migrations/#deleting-tables" ) def remove_field(self, model, field): @@ -94,9 +107,74 @@ def remove_field(self, model, field): """ raise UnsafeOperationException( f"Removing the {model.__name__}.{field.name} field is unsafe.\n" - "More info here: https://develop.sentry.dev/database-migrations/#columns" + "More info here: https://develop.sentry.dev/database-migrations/#deleting-columns" ) + def execute(self, sql, params=()): + if sql is DUMMY_SQL: + return + statements = [] + if isinstance(sql, MultiStatementSQL): + statements.extend(sql) + elif isinstance(sql, Statement) and isinstance(sql.template, MultiStatementSQL): + statements.extend(Statement(s, **sql.parts) for s in sql.template) + else: + statements.append(sql) + for statement in statements: + if isinstance(statement, PGLock): + use_timeouts = statement.use_timeouts + disable_statement_timeout = statement.disable_statement_timeout + statement = statement.sql + elif isinstance(statement, Statement) and isinstance(statement.template, PGLock): + use_timeouts = statement.template.use_timeouts + disable_statement_timeout = statement.template.disable_statement_timeout + statement = Statement(statement.template.sql, **statement.parts) + else: + use_timeouts = False + disable_statement_timeout = False + + if use_timeouts: + with self._set_operation_timeout(self.STATEMENT_TIMEOUT, self.LOCK_TIMEOUT): + PostgresDatabaseSchemaEditor.execute(self, statement, params) + elif self.LOCK_TIMEOUT_FORCE: + with self._set_operation_timeout(lock_timeout=self.LOCK_TIMEOUT): + PostgresDatabaseSchemaEditor.execute(self, statement, params) + elif disable_statement_timeout and self.FLEXIBLE_STATEMENT_TIMEOUT: + with self._set_operation_timeout(self.ZERO_TIMEOUT): + PostgresDatabaseSchemaEditor.execute(self, statement, params) + else: + PostgresDatabaseSchemaEditor.execute(self, statement, params) + + @contextmanager + def _set_operation_timeout(self, statement_timeout=None, lock_timeout=None): + if self.collect_sql: + previous_statement_timeout = self.ZERO_TIMEOUT + previous_lock_timeout = self.ZERO_TIMEOUT + else: + with self.connection.cursor() as cursor: + cursor.execute(self.sql_get_statement_timeout) + (previous_statement_timeout,) = cursor.fetchone() + cursor.execute(self.sql_get_lock_timeout) + (previous_lock_timeout,) = cursor.fetchone() + if statement_timeout is not None: + PostgresDatabaseSchemaEditor.execute( + self, self.sql_set_statement_timeout % {"statement_timeout": statement_timeout} + ) + if lock_timeout is not None: + PostgresDatabaseSchemaEditor.execute( + self, self.sql_set_lock_timeout % {"lock_timeout": lock_timeout} + ) + yield + if statement_timeout is not None: + PostgresDatabaseSchemaEditor.execute( + self, + self.sql_set_statement_timeout % {"statement_timeout": previous_statement_timeout}, + ) + if lock_timeout is not None: + PostgresDatabaseSchemaEditor.execute( + self, self.sql_set_lock_timeout % {"lock_timeout": previous_lock_timeout} + ) + class DatabaseSchemaEditorProxy: """ diff --git a/src/sentry/deletions/__init__.py b/src/sentry/deletions/__init__.py index e5e7e0a7ec59f..59e13f34e9a13 100644 --- a/src/sentry/deletions/__init__.py +++ b/src/sentry/deletions/__init__.py @@ -200,13 +200,6 @@ def get_manager() -> DeletionTaskManager: return _default_manager 
-def __getattr__(name: str) -> Any:
-    # Shim for getsentry
-    if name == "default_manager":
-        return get_manager()
-    raise AttributeError(f"module {__name__} has no attribute {name}")
-
-
 def get(
     task: type[BaseDeletionTask[Any]] | None = None,
     **kwargs: Any,
 ) ->
diff --git a/src/sentry/deletions/base.py b/src/sentry/deletions/base.py
index 856d5ff2f5f49..3655e95ee8bdf 100644
--- a/src/sentry/deletions/base.py
+++ b/src/sentry/deletions/base.py
@@ -178,7 +178,7 @@ def mark_deletion_in_progress(self, instance_list: Sequence[ModelT]) -> None:
 
 
 class ModelDeletionTask(BaseDeletionTask[ModelT]):
-    DEFAULT_QUERY_LIMIT = None
+    DEFAULT_QUERY_LIMIT: int | None = None
     manager_name = "objects"
 
     def __init__(
diff --git a/src/sentry/deletions/defaults/group.py b/src/sentry/deletions/defaults/group.py
index c0da32c0bb6cc..41df5aa42270a 100644
--- a/src/sentry/deletions/defaults/group.py
+++ b/src/sentry/deletions/defaults/group.py
@@ -2,14 +2,20 @@
 import os
 from collections import defaultdict
-from collections.abc import Sequence
+from collections.abc import Mapping, Sequence
 from typing import Any
 
-from sentry import eventstore, eventstream, models, nodestore
+from sentry_sdk import set_tag
+from snuba_sdk import DeleteQuery, Request
+
+from sentry import eventstore, eventstream, features, models, nodestore
 from sentry.eventstore.models import Event
+from sentry.issues.grouptype import GroupCategory
 from sentry.models.group import Group, GroupStatus
 from sentry.models.rulefirehistory import RuleFireHistory
+from sentry.snuba.dataset import Dataset
 from sentry.tasks.delete_seer_grouping_records import call_delete_seer_grouping_records_by_hash
+from sentry.utils.snuba import bulk_snuba_queries
 
 from ..base import BaseDeletionTask, BaseRelation, ModelDeletionTask, ModelRelation
 from ..manager import DeletionTaskManager
@@ -48,22 +54,35 @@
 )
 
 
-class EventDataDeletionTask(BaseDeletionTask[Group]):
+class EventsBaseDeletionTask(BaseDeletionTask[Group]):
     """
-    Deletes nodestore data, EventAttachment and UserReports for group
+    Base class for deleting the events associated with groups and their related models.
     """
 
     # Number of events fetched from eventstore per chunk() call.
     DEFAULT_CHUNK_SIZE = 10000
+    referrer = "deletions.group"
+    dataset: Dataset
 
     def __init__(
         self, manager: DeletionTaskManager, groups: Sequence[Group], **kwargs: Any
     ) -> None:
         self.groups = groups
+        # Use self.last_event to keep track of the last event processed in the chunk method.
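+        # (Sketch of the resulting contract, with an illustrative driver loop:
+        #     task = ErrorEventsDeletionTask(manager, groups=[group])
+        #     while task.chunk():
+        #         pass  # each pass handles up to DEFAULT_CHUNK_SIZE events
+        # `last_event` advances the cursor between passes.)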
self.last_event: Event | None = None + self.set_group_and_project_ids() super().__init__(manager, **kwargs) - def chunk(self) -> bool: + def set_group_and_project_ids(self) -> None: + group_ids = [] + self.project_groups = defaultdict(list) + for group in self.groups: + self.project_groups[group.project_id].append(group.id) + group_ids.append(group.id) + self.group_ids = group_ids + self.project_ids = list(self.project_groups.keys()) + + def get_unfetched_events(self) -> list[Event]: conditions = [] if self.last_event is not None: conditions.extend( @@ -76,49 +95,124 @@ def chunk(self) -> bool: ] ) - group_ids = [] - project_groups = defaultdict(list) - for group in self.groups: - project_groups[group.project_id].append(group.id) - group_ids.append(group.id) - project_ids = list(project_groups.keys()) - events = eventstore.backend.get_unfetched_events( filter=eventstore.Filter( - conditions=conditions, project_ids=project_ids, group_ids=group_ids + conditions=conditions, project_ids=self.project_ids, group_ids=self.group_ids ), limit=self.DEFAULT_CHUNK_SIZE, - referrer="deletions.group", + referrer=self.referrer, orderby=["-timestamp", "-event_id"], - tenant_ids=( - {"organization_id": self.groups[0].project.organization_id} if self.groups else None - ), + tenant_ids=self.tenant_ids, + dataset=self.dataset, ) - if not events: - # Remove all group events now that their node data has been removed. - for project_id, group_ids in project_groups.items(): - eventstream_state = eventstream.backend.start_delete_groups(project_id, group_ids) - eventstream.backend.end_delete_groups(eventstream_state) - return False + return events + + @property + def tenant_ids(self) -> Mapping[str, Any]: + result = {"referrer": self.referrer} + if self.groups: + result["organization_id"] = self.groups[0].project.organization_id + return result + - self.last_event = events[-1] +class ErrorEventsDeletionTask(EventsBaseDeletionTask): + """ + Deletes nodestore data, EventAttachment and UserReports for requested groups. + + This class uses the old Snuba deletion method. + """ + + dataset = Dataset.Events + + def chunk(self) -> bool: + """This method is called to delete chunks of data. It returns a boolean to say + if the deletion has completed and if it needs to be called again.""" + events = self.get_unfetched_events() + if events: + self.delete_events_from_nodestore(events) + self.delete_dangling_attachments_and_user_reports(events) + # This value will be used in the next call to chunk + self.last_event = events[-1] + # As long as it returns True the task will keep iterating + return True + else: + # Now that all events have been deleted from the eventstore, we can delete the events from snuba + self.delete_events_from_snuba() + return False + def delete_events_from_nodestore(self, events: Sequence[Event]) -> None: # Remove from nodestore node_ids = [Event.generate_node_id(event.project_id, event.event_id) for event in events] nodestore.backend.delete_multi(node_ids) + def delete_dangling_attachments_and_user_reports(self, events: Sequence[Event]) -> None: # Remove EventAttachment and UserReport *again* as those may not have a # group ID, therefore there may be dangling ones after "regular" model # deletion. 
event_ids = [event.event_id for event in events] models.EventAttachment.objects.filter( - event_id__in=event_ids, project_id__in=project_ids + event_id__in=event_ids, project_id__in=self.project_ids ).delete() models.UserReport.objects.filter( - event_id__in=event_ids, project_id__in=project_ids + event_id__in=event_ids, project_id__in=self.project_ids ).delete() - return True + def delete_events_from_snuba(self) -> None: + # Remove all group events now that their node data has been removed. + for project_id, group_ids in self.project_groups.items(): + eventstream_state = eventstream.backend.start_delete_groups(project_id, group_ids) + eventstream.backend.end_delete_groups(eventstream_state) + + +class IssuePlatformEventsDeletionTask(EventsBaseDeletionTask): + """ + This class helps delete Issue Platform events which use the new Clickhouse light deletes. + """ + + dataset = Dataset.IssuePlatform + + def chunk(self) -> bool: + """This method is called to delete chunks of data. It returns a boolean to say + if the deletion has completed and if it needs to be called again.""" + events = self.get_unfetched_events() + if events: + # Ideally, in some cases, we should also delete the associated event from the Nodestore. + # In the occurrence_consumer [1] we sometimes create a new event but it's hard in post-ingestion to distinguish between + # a created event and an existing one. + # https://github.com/getsentry/sentry/blob/a86b9b672709bc9c4558cffb2c825965b8cee0d1/src/sentry/issues/occurrence_consumer.py#L324-L339 + self.delete_events_from_nodestore(events) + # This value will be used in the next call to chunk + self.last_event = events[-1] + # As long as it returns True the task will keep iterating + return True + else: + # Now that all events have been deleted from the eventstore, we can delete the occurrences from Snuba + self.delete_events_from_snuba() + return False + + def delete_events_from_nodestore(self, events: Sequence[Event]) -> None: + # We delete by the occurrence_id instead of the event_id + node_ids = [ + Event.generate_node_id(event.project_id, event._snuba_data["occurrence_id"]) + for event in events + ] + nodestore.backend.delete_multi(node_ids) + + def delete_events_from_snuba(self) -> None: + requests = [] + for project_id, group_ids in self.project_groups.items(): + query = DeleteQuery( + self.dataset.value, + column_conditions={"project_id": [project_id], "group_id": list(group_ids)}, + ) + request = Request( + dataset=self.dataset.value, + app_id=self.referrer, + query=query, + tenant_ids=self.tenant_ids, + ) + requests.append(request) + bulk_snuba_queries(requests) class GroupDeletionTask(ModelDeletionTask[Group]): @@ -131,30 +225,59 @@ def delete_bulk(self, instance_list: Sequence[Group]) -> bool: Group deletion operates as a quasi-bulk operation so that we don't flood snuba replacements with deletions per group. """ - self.mark_deletion_in_progress(instance_list) + if not instance_list: + return True - group_ids = [group.id for group in instance_list] + self.mark_deletion_in_progress(instance_list) + error_group_ids = [ + group.id for group in instance_list if group.issue_category == GroupCategory.ERROR + ] # Tell seer to delete grouping records with these group hashes - call_delete_seer_grouping_records_by_hash(group_ids) + call_delete_seer_grouping_records_by_hash(error_group_ids) + + self._delete_children(instance_list) + + # Remove group objects with children removed. 
+        self.delete_instance_bulk(instance_list)
+
+        return False
+
+    def _delete_children(self, instance_list: Sequence[Group]) -> None:
+        group_ids = [group.id for group in instance_list]
         # Remove child relations for all groups first.
         child_relations: list[BaseRelation] = []
         for model in _GROUP_RELATED_MODELS:
             child_relations.append(ModelRelation(model, {"group_id__in": group_ids}))
 
+        org = instance_list[0].project.organization
+        issue_platform_deletion_allowed = features.has(
+            "organizations:issue-platform-deletion", org, actor=None
+        )
+        error_groups, issue_platform_groups = separate_by_group_category(instance_list)
+
         # If this isn't a retention cleanup also remove event data.
         if not os.environ.get("_SENTRY_CLEANUP"):
-            child_relations.append(
-                BaseRelation(params={"groups": instance_list}, task=EventDataDeletionTask)
-            )
-
-        self.delete_children(child_relations)
+            if not issue_platform_deletion_allowed:
+                params = {"groups": instance_list}
+                child_relations.append(BaseRelation(params=params, task=ErrorEventsDeletionTask))
+            else:
+                if error_groups:
+                    params = {"groups": error_groups}
+                    child_relations.append(
+                        BaseRelation(params=params, task=ErrorEventsDeletionTask)
+                    )
 
-        # Remove group objects with children removed.
-        self.delete_instance_bulk(instance_list)
+                if issue_platform_groups:
+                    # This helps create custom Sentry alerts;
+                    # remove when #proj-snuba-lightweight_delets is done
+                    set_tag("issue_platform_deletion", True)
+                    params = {"groups": issue_platform_groups}
+                    child_relations.append(
+                        BaseRelation(params=params, task=IssuePlatformEventsDeletionTask)
+                    )
 
-        return False
+        self.delete_children(child_relations)
 
     def delete_instance(self, instance: Group) -> None:
         from sentry import similarity
@@ -168,3 +291,15 @@ def mark_deletion_in_progress(self, instance_list: Sequence[Group]) -> None:
         Group.objects.filter(id__in=[i.id for i in instance_list]).exclude(
             status=GroupStatus.DELETION_IN_PROGRESS
         ).update(status=GroupStatus.DELETION_IN_PROGRESS, substatus=None)
+
+
+def separate_by_group_category(instance_list: Sequence[Group]) -> tuple[list[Group], list[Group]]:
+    error_groups = []
+    issue_platform_groups = []
+    for group in instance_list:
+        if group.issue_category == GroupCategory.ERROR:
+            error_groups.append(group)
+        else:
+            issue_platform_groups.append(group)
+    return error_groups, issue_platform_groups
diff --git a/src/sentry/deletions/defaults/platform_external_issue.py b/src/sentry/deletions/defaults/platform_external_issue.py
index ac8ecc3132829..d17e208dfb2db 100644
--- a/src/sentry/deletions/defaults/platform_external_issue.py
+++ b/src/sentry/deletions/defaults/platform_external_issue.py
@@ -1,7 +1,7 @@
 from collections.abc import Sequence
 
 from sentry.deletions.base import ModelDeletionTask
-from sentry.models.platformexternalissue import PlatformExternalIssue
+from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue
 
 
 class PlatformExternalIssueDeletionTask(ModelDeletionTask[PlatformExternalIssue]):
diff --git a/src/sentry/deletions/defaults/repository.py b/src/sentry/deletions/defaults/repository.py
index 960befb3b93d0..41d2adedc1600 100644
--- a/src/sentry/deletions/defaults/repository.py
+++ b/src/sentry/deletions/defaults/repository.py
@@ -29,7 +29,7 @@ def get_child_relations(self, instance: Repository) -> list[BaseRelation]:
         return _get_repository_child_relations(instance)
 
     def delete_instance(self, instance: Repository) -> None:
-        # TODO child_relations should also send
pending_delete so we # don't have to do this here. pending_delete.send(sender=type(instance), instance=instance, actor=self.get_actor()) diff --git a/src/sentry/deletions/manager.py b/src/sentry/deletions/manager.py index 7f4e3615fbbc4..c24a9cc98c901 100644 --- a/src/sentry/deletions/manager.py +++ b/src/sentry/deletions/manager.py @@ -1,8 +1,11 @@ +from __future__ import annotations + from collections.abc import MutableMapping -from typing import Any +from typing import TYPE_CHECKING, Any -from sentry.db.models.base import Model -from sentry.deletions.base import BaseDeletionTask +if TYPE_CHECKING: + from sentry.db.models.base import Model + from sentry.deletions.base import BaseDeletionTask __all__ = ["DeletionTaskManager"] diff --git a/src/sentry/deletions/models/__init__.py b/src/sentry/deletions/models/__init__.py new file mode 100644 index 0000000000000..6390b946de4da --- /dev/null +++ b/src/sentry/deletions/models/__init__.py @@ -0,0 +1,3 @@ +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion + +__all__ = ("RegionScheduledDeletion",) diff --git a/src/sentry/models/scheduledeletion.py b/src/sentry/deletions/models/scheduleddeletion.py similarity index 99% rename from src/sentry/models/scheduledeletion.py rename to src/sentry/deletions/models/scheduleddeletion.py index 318ba96a29491..ca2d460da00bf 100644 --- a/src/sentry/models/scheduledeletion.py +++ b/src/sentry/deletions/models/scheduleddeletion.py @@ -104,7 +104,7 @@ def schedule( return record @classmethod - def cancel(cls, instance: Model): + def cancel(cls, instance: Model) -> None: model_name = type(instance).__name__ try: deletion = cls.objects.get( diff --git a/src/sentry/deletions/tasks/scheduled.py b/src/sentry/deletions/tasks/scheduled.py index e0ae8daa4f6f1..2f8cfab6a3f1a 100644 --- a/src/sentry/deletions/tasks/scheduled.py +++ b/src/sentry/deletions/tasks/scheduled.py @@ -8,12 +8,12 @@ from django.db import router, transaction from django.utils import timezone -from sentry.exceptions import DeleteAborted -from sentry.models.scheduledeletion import ( +from sentry.deletions.models.scheduleddeletion import ( BaseScheduledDeletion, RegionScheduledDeletion, ScheduledDeletion, ) +from sentry.exceptions import DeleteAborted from sentry.signals import pending_delete from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task, retry diff --git a/src/sentry/discover/dashboard_widget_split.py b/src/sentry/discover/dashboard_widget_split.py index 15433740edac4..a5f4a61751566 100644 --- a/src/sentry/discover/dashboard_widget_split.py +++ b/src/sentry/discover/dashboard_widget_split.py @@ -5,7 +5,9 @@ from snuba_sdk.query_visitors import InvalidQueryError from sentry import features +from sentry.api.serializers.rest_framework.dashboard import is_aggregate from sentry.constants import ObjectStatus +from sentry.discover.arithmetic import ArithmeticParseError from sentry.discover.dataset_split import ( SplitDataset, _dataset_split_decision_inferred_from_query, @@ -81,6 +83,30 @@ def _get_and_save_split_decision_for_dashboard_widget( projects = dashboard.projects.all() or Project.objects.filter( organization_id=dashboard.organization.id, status=ObjectStatus.ACTIVE ) + + # Handle cases where the organization has no projects at all. + # No projects means a downstream check will fail and we can default + # to the errors dataset. 
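+    # (Widgets defaulted this way are recorded with DatasetSourcesTypes.FORCED,
+    # while split decisions inferred from query results further down are
+    # recorded with DatasetSourcesTypes.SPLIT_VERSION_2.)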
+ if not projects.exists(): + if not dry_run: + sentry_sdk.set_context( + "dashboard", + { + "dashboard_id": dashboard.id, + "widget_id": widget.id, + "org_slug": dashboard.organization.slug, + }, + ) + sentry_sdk.capture_message( + "No projects found in organization for dashboard, defaulting to errors dataset" + ) + _save_split_decision_for_widget( + widget, + DashboardWidgetTypes.ERROR_EVENTS, + DatasetSourcesTypes.FORCED, + ) + return DashboardWidgetTypes.ERROR_EVENTS, False + snuba_dataclass = _get_snuba_dataclass_for_dashboard_widget(widget, list(projects)) selected_columns = _get_field_list(widget_query.fields or []) @@ -137,7 +163,7 @@ def _get_and_save_split_decision_for_dashboard_widget( _save_split_decision_for_widget( widget, widget_dataset, - DatasetSourcesTypes.INFERRED, + DatasetSourcesTypes.SPLIT_VERSION_2, ) return widget_dataset, False @@ -146,7 +172,7 @@ def _get_and_save_split_decision_for_dashboard_widget( and not equations ): try: - metrics_query( + metrics_query_result = metrics_query( selected_columns, query, snuba_dataclass, @@ -155,27 +181,42 @@ def _get_and_save_split_decision_for_dashboard_widget( offset=None, limit=1, referrer="tasks.performance.split_discover_dataset", + transform_alias_to_input_format=True, ) - if dry_run: - logger.info( - "Split decision for %s: %s (inferred from running metrics query)", - widget.id, - DashboardWidgetTypes.TRANSACTION_LIKE, - ) - else: - _save_split_decision_for_widget( - widget, - DashboardWidgetTypes.TRANSACTION_LIKE, - DatasetSourcesTypes.INFERRED, + has_metrics_data = ( + metrics_query_result.get("data") + # No results were returned at all + and len(metrics_query_result["data"]) > 0 + and any( + metrics_query_result["data"][0][column] > 0 + for column in selected_columns + if is_aggregate(column) ) + ) + if has_metrics_data: + if dry_run: + logger.info( + "Split decision for %s: %s (inferred from running metrics query)", + widget.id, + DashboardWidgetTypes.TRANSACTION_LIKE, + ) + else: + _save_split_decision_for_widget( + widget, + DashboardWidgetTypes.TRANSACTION_LIKE, + DatasetSourcesTypes.SPLIT_VERSION_2, + ) - return DashboardWidgetTypes.TRANSACTION_LIKE, True + return DashboardWidgetTypes.TRANSACTION_LIKE, True except ( IncompatibleMetricsQuery, snuba.QueryIllegalTypeOfArgument, snuba.UnqualifiedQueryError, InvalidQueryError, + snuba.QueryExecutionError, + snuba.SnubaError, + ArithmeticParseError, ): pass @@ -187,7 +228,14 @@ def _get_and_save_split_decision_for_dashboard_widget( ) ) has_errors = len(error_results["data"]) > 0 - except (snuba.QueryIllegalTypeOfArgument, snuba.UnqualifiedQueryError, InvalidQueryError): + except ( + snuba.QueryIllegalTypeOfArgument, + snuba.UnqualifiedQueryError, + InvalidQueryError, + snuba.QueryExecutionError, + snuba.SnubaError, + ArithmeticParseError, + ): pass if has_errors: @@ -201,7 +249,7 @@ def _get_and_save_split_decision_for_dashboard_widget( _save_split_decision_for_widget( widget, DashboardWidgetTypes.ERROR_EVENTS, - DatasetSourcesTypes.INFERRED, + DatasetSourcesTypes.SPLIT_VERSION_2, ) return DashboardWidgetTypes.ERROR_EVENTS, True @@ -213,7 +261,14 @@ def _get_and_save_split_decision_for_dashboard_widget( ) ) has_transactions = len(transaction_results["data"]) > 0 - except (snuba.QueryIllegalTypeOfArgument, snuba.UnqualifiedQueryError, InvalidQueryError): + except ( + snuba.QueryIllegalTypeOfArgument, + snuba.UnqualifiedQueryError, + InvalidQueryError, + snuba.QueryExecutionError, + snuba.SnubaError, + ArithmeticParseError, + ): pass if has_transactions: @@ -227,7 +282,7 
@@ def _get_and_save_split_decision_for_dashboard_widget( _save_split_decision_for_widget( widget, DashboardWidgetTypes.TRANSACTION_LIKE, - DatasetSourcesTypes.INFERRED, + DatasetSourcesTypes.SPLIT_VERSION_2, ) return DashboardWidgetTypes.TRANSACTION_LIKE, True diff --git a/src/sentry/discover/dataset_split.py b/src/sentry/discover/dataset_split.py index 78157d2a2b835..dbd9a8508e92f 100644 --- a/src/sentry/discover/dataset_split.py +++ b/src/sentry/discover/dataset_split.py @@ -19,7 +19,7 @@ from sentry.api.utils import get_date_range_from_stats_period from sentry.constants import ObjectStatus -from sentry.discover.arithmetic import is_equation, strip_equation +from sentry.discover.arithmetic import ArithmeticParseError, is_equation, strip_equation from sentry.discover.models import DatasetSourcesTypes, DiscoverSavedQuery, DiscoverSavedQueryTypes from sentry.exceptions import InvalidParams, InvalidSearchQuery from sentry.models.environment import Environment @@ -47,6 +47,22 @@ class SplitDataset(Enum): SplitDataset.Transactions: DiscoverSavedQueryTypes.TRANSACTION_LIKE, } +TRANSACTION_ONLY_AGGREGATES = [ + "failure_rate", + "failure_count", + "apdex", + "count_miserable", + "user_misery", + "count_web_vitals", + "percentile", + "p50", + "p75", + "p90", + "p95", + "p99", + "p100", +] + TRANSACTION_ONLY_FIELDS = [ "duration", "transaction_op", @@ -77,6 +93,8 @@ class SplitDataset(Enum): "span_op_breakdowns[ops.ui]", ] +ERROR_ONLY_AGGREGATES = ["last_seen"] + ERROR_ONLY_FIELDS = [ "location", "exception_stacks.type", @@ -137,6 +155,21 @@ def _check_function_parameter_matches_dataset( return False +def _check_function_alias_matches_dataset( + function: Function | CurriedFunction, + dataset: Dataset, +) -> bool: + aggregate_aliases = ( + TRANSACTION_ONLY_AGGREGATES if dataset == Dataset.Transactions else ERROR_ONLY_AGGREGATES + ) + + for alias in aggregate_aliases: + if function.alias.startswith(alias): + return True + + return False + + def _check_aliased_expression_matches_dataset( aliased_exp: AliasedExpression, dataset: Dataset, @@ -216,8 +249,11 @@ def _check_selected_columns_match_dataset( return True elif isinstance(select_col, Function) or isinstance(select_col, CurriedFunction): + # The parameter check is a stronger check if applicable, so we should keep that first if _check_function_parameter_matches_dataset(select_col, dataset): return True + if _check_function_alias_matches_dataset(select_col, dataset): + return True return False @@ -355,6 +391,29 @@ def _get_and_save_split_decision_for_query( projects = saved_query.projects.all() or Project.objects.filter( organization_id=saved_query.organization.id, status=ObjectStatus.ACTIVE ) + + # Handle cases where the organization has no projects at all. + # No projects means a downstream check will fail and we can default + # to the errors dataset. 
+ if not projects.exists(): + if not dry_run: + sentry_sdk.set_context( + "query", + { + "saved_query_id": saved_query.id, + "org_slug": saved_query.organization.slug, + }, + ) + sentry_sdk.capture_message( + "No projects found in organization for saved query, defaulting to errors dataset" + ) + _save_split_decision_for_query( + saved_query, + DiscoverSavedQueryTypes.ERROR_EVENTS, + DatasetSourcesTypes.FORCED, + ) + return DiscoverSavedQueryTypes.ERROR_EVENTS, False + snuba_dataclass = _get_snuba_dataclass_for_saved_query(saved_query, list(projects)) selected_columns = _get_field_list(saved_query.query.get("fields", [])) equations = _get_equation_list(saved_query.query.get("fields", [])) @@ -422,7 +481,14 @@ def _get_and_save_split_decision_for_query( ) ) has_errors = len(error_results["data"]) > 0 - except (snuba.QueryIllegalTypeOfArgument, snuba.UnqualifiedQueryError, InvalidQueryError): + except ( + snuba.QueryIllegalTypeOfArgument, + snuba.UnqualifiedQueryError, + InvalidQueryError, + snuba.QueryExecutionError, + snuba.SnubaError, + ArithmeticParseError, + ): pass if has_errors: @@ -448,7 +514,14 @@ def _get_and_save_split_decision_for_query( ) ) has_transactions = len(transaction_results["data"]) > 0 - except (snuba.QueryIllegalTypeOfArgument, snuba.UnqualifiedQueryError, InvalidQueryError): + except ( + snuba.QueryIllegalTypeOfArgument, + snuba.UnqualifiedQueryError, + InvalidQueryError, + snuba.QueryExecutionError, + snuba.SnubaError, + ArithmeticParseError, + ): pass if has_transactions: diff --git a/src/sentry/discover/endpoints/bases.py b/src/sentry/discover/endpoints/bases.py index 9eccf57ba9c99..9d067e9e0ee8d 100644 --- a/src/sentry/discover/endpoints/bases.py +++ b/src/sentry/discover/endpoints/bases.py @@ -17,8 +17,23 @@ def has_object_permission(self, request, view, obj): return super().has_object_permission(request, view, obj) if isinstance(obj, DiscoverSavedQuery): - for project in obj.projects.all(): - if not request.access.has_project_access(project): - return False + # 1. Saved Query contains certain projects + if obj.projects.exists(): + return request.access.has_projects_access(obj.projects.all()) + # 2. Saved Query covers all projects or all my projects + + # allow when Open Membership + if obj.organization.flags.allow_joinleave: + return True + + # allow for Managers and Owners + if request.access.has_scope("org:write"): + return True + + # allow for creator + if request.user.id == obj.created_by_id: + return True + + return False return True diff --git a/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py b/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py index a42a7fcb38814..fb6d62c29c0af 100644 --- a/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py +++ b/src/sentry/dynamic_sampling/rules/biases/custom_rule_bias.py @@ -15,7 +15,7 @@ class CustomRuleBias(Bias): """ - Boosts at 100% sample rate all the traces that have a replay_id. + Boosts to 100% sample rate all the traces matching an active custom rule. 
""" def generate_rules(self, project: Project, base_sample_rate: float) -> list[PolymorphicRule]: diff --git a/src/sentry/event_manager.py b/src/sentry/event_manager.py index 8158b9c23dc25..86f4a882ba8ba 100644 --- a/src/sentry/event_manager.py +++ b/src/sentry/event_manager.py @@ -53,33 +53,25 @@ GroupingConfig, get_grouping_config_dict_for_project, ) -from sentry.grouping.ingest.config import ( - is_in_transition, - project_uses_optimized_grouping, - update_grouping_config_if_needed, -) +from sentry.grouping.ingest.config import is_in_transition, update_grouping_config_if_needed from sentry.grouping.ingest.hashing import ( - find_existing_grouphash, - get_hash_values, + find_grouphash_with_group, get_or_create_grouphashes, maybe_run_background_grouping, maybe_run_secondary_grouping, run_primary_grouping, ) -from sentry.grouping.ingest.metrics import ( - record_calculation_metric_with_result, - record_hash_calculation_metrics, - record_new_group_metrics, -) +from sentry.grouping.ingest.metrics import record_hash_calculation_metrics, record_new_group_metrics from sentry.grouping.ingest.seer import maybe_check_seer_for_matching_grouphash from sentry.grouping.ingest.utils import ( add_group_id_to_grouphashes, - check_for_category_mismatch, check_for_group_creation_load_shed, + is_non_error_type_group, ) +from sentry.grouping.variants import BaseVariant from sentry.ingest.inbound_filters import FilterStatKeys from sentry.integrations.tasks.kick_off_status_syncs import kick_off_status_syncs -from sentry.issues.grouptype import ErrorGroupType, GroupCategory +from sentry.issues.grouptype import ErrorGroupType from sentry.issues.issue_occurrence import IssueOccurrence from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka from sentry.killswitches import killswitch_matches_context @@ -139,7 +131,6 @@ from sentry.utils.safe import get_path, safe_execute, setdefault_path, trim from sentry.utils.sdk import set_measurement from sentry.utils.tag_normalization import normalized_sdk_tag_from_event -from sentry.utils.types import NonNone if TYPE_CHECKING: from sentry.eventstore.models import BaseEvent, Event @@ -328,7 +319,9 @@ def __init__(self, group=None, last_seen=None, times_seen=None, *args, **kwargs) def __int__(self): # Calculate the score manually when coercing to an int. # This is used within create_or_update and friends - return self.group.get_score() if self.group else 0 + + # XXX: Since removing the 'score' column from 'Group', this now always returns 0. 
+ return 0 def as_sql( self, @@ -517,12 +510,10 @@ def save( return jobs[0]["event"] else: project = job["event"].project - job["optimized_grouping"] = project_uses_optimized_grouping(project) job["in_grouping_transition"] = is_in_transition(project) metric_tags = { "platform": job["event"].platform or "unknown", "sdk": normalized_sdk_tag_from_event(job["event"].data), - "using_transition_optimization": job["optimized_grouping"], "in_transition": job["in_grouping_transition"], } # This metric allows differentiating from all calls to the `event_manager.save` metric @@ -887,26 +878,6 @@ def _materialize_metadata_many(jobs: Sequence[Job]) -> None: job["culprit"] = data["culprit"] -# TODO: This is only called in `_save_aggregate`, so when that goes, so can this (it's been -# supplanted by `_get_group_processing_kwargs` below) -def _get_group_creation_kwargs(job: Job | PerformanceJob) -> dict[str, Any]: - kwargs = { - "platform": job["platform"], - "message": job["event"].search_message, - "logger": job["logger_name"], - "level": LOG_LEVELS_MAP.get(job["level"]), - "last_seen": job["event"].datetime, - "first_seen": job["event"].datetime, - "active_at": job["event"].datetime, - "culprit": job["culprit"], - } - - if job["release"]: - kwargs["first_release"] = job["release"] - - return kwargs - - def _get_group_processing_kwargs(job: Job) -> dict[str, Any]: """ Pull together all the metadata used when creating a group or updating a group's metadata based @@ -1322,272 +1293,7 @@ def get_culprit(data: Mapping[str, Any]) -> str: @sentry_sdk.tracing.trace -def assign_event_to_group(event: Event, job: Job, metric_tags: MutableTags) -> GroupInfo | None: - if job["optimized_grouping"]: - group_info = _save_aggregate_new( - event=event, - job=job, - metric_tags=metric_tags, - ) - else: - group_info = _save_aggregate( - event=event, - job=job, - release=job["release"], - received_timestamp=job["received_timestamp"], - metric_tags=metric_tags, - ) - - if group_info: - event.group = group_info.group - job["groups"] = [group_info] - - return group_info - - -def _save_aggregate( - event: Event, - job: Job, - release: Release | None, - received_timestamp: int | float, - metric_tags: MutableTags, -) -> GroupInfo | None: - project = event.project - - primary_hashes, secondary_hashes = get_hash_values(project, job, metric_tags) - hashes = primary_hashes + secondary_hashes - has_secondary_hashes = len(secondary_hashes) > 0 - - # Now that we've used the current and possibly secondary grouping config(s) to calculate the - # hashes, we're free to perform a config update if permitted. Future events will use the new - # config, but will also be grandfathered into the current config for a month, so as not to - # erroneously create new groups. - update_grouping_config_if_needed(project, "ingest") - - _materialize_metadata_many([job]) - metadata = dict(job["event_metadata"]) - - group_creation_kwargs = _get_group_creation_kwargs(job) - - grouphashes = get_or_create_grouphashes(project, hashes) - - existing_grouphash = find_existing_grouphash(grouphashes) - - # In principle the group gets the same metadata as the event, so common - # attributes can be defined in eventtypes. 
- # - # Additionally the `last_received` key is set for group metadata, later in - # _save_aggregate - group_creation_kwargs["data"] = materialize_metadata( - event.data, - get_event_type(event.data), - metadata, - ) - group_creation_kwargs["data"]["last_received"] = received_timestamp - - if existing_grouphash is None: - if killswitch_matches_context( - "store.load-shed-group-creation-projects", - { - "project_id": project.id, - "platform": event.platform, - }, - ): - raise HashDiscarded("Load shedding group creation", reason="load_shed") - - with ( - sentry_sdk.start_span(op="event_manager.create_group_transaction") as span, - metrics.timer("event_manager.create_group_transaction") as metric_tags, - transaction.atomic(router.db_for_write(GroupHash)), - ): - # These values will get overridden with whatever happens inside the lock if we do manage - # to acquire it, so it should only end up with `wait-for-lock` if we don't - # - # TODO: If we're using this `outome` value for anything more than a count in DD (in - # other words, if we care about duration), we should probably update it so that when an - # event does have to wait, we record whether during its wait the event which got the - # lock first - # a) created a new group without consulting Seer, - # b) created a new group because Seer didn't find a close enough match, or - # c) used an existing group found by Seer - # because which of those things happened will have an effect on how long the event had to wait. - span.set_tag("outcome", "wait_for_lock") - metric_tags["outcome"] = "wait_for_lock" - - grouphash_ids = [h.id for h in grouphashes] - - # If we're in this branch, we checked our grouphashes and didn't find one with a group - # attached. We thus want to either ask seer for a nearest neighbor group (and create a - # new group if one isn't found) or just create a new group without consulting seer, but - # either way we need to guard against another event with the same hash coming in before - # we're done here and also thinking it needs to talk to seer and/or create a new group. - # To prevent this, we're using double-checked locking - # (https://en.wikipedia.org/wiki/Double-checked_locking). - - # First, try to lock the relevant rows in the `GroupHash` table. If another (identically - # hashed) event is already in the process of talking to seer and/or creating a group and - # has grabbed the lock before us, we'll block here until it's done. If not, we've now - # got the lock and other identically-hashed events will have to wait for us. - all_grouphashes = list( - GroupHash.objects.filter(id__in=grouphash_ids).select_for_update() - ) - - grouphashes = [gh for gh in all_grouphashes if gh.hash in hashes] - - # Now check again to see if any of our grouphashes have a group. If we got the lock, the - # result won't have changed and we still won't find anything. If we didn't get it, we'll - # have blocked until whichever identically-hashed event *did* get the lock has either - # created a new group for our hashes or assigned them to a neighboring group suggessted - # by seer. If that happens, we'll skip this whole branch and jump down to the same one - # we would have landed in had we found a group to begin with. - existing_grouphash = find_existing_grouphash(grouphashes) - - # If we still haven't found a matching grouphash, we're now safe to go ahead and talk to - # seer and/or create the group. 
- if existing_grouphash is None: - seer_matched_grouphash = maybe_check_seer_for_matching_grouphash(event) - seer_matched_group = ( - Group.objects.filter(id=seer_matched_grouphash.group_id).first() - if seer_matched_grouphash - else None - ) - - group = seer_matched_group or _create_group(project, event, **group_creation_kwargs) - - new_hashes = list(grouphashes) - - GroupHash.objects.filter(id__in=[h.id for h in new_hashes]).exclude( - state=GroupHash.State.LOCKED_IN_MIGRATION - ).update(group=group) - - is_new = not seer_matched_group - is_regression = ( - False - if is_new - else _process_existing_aggregate( - # If `seer_matched_group` were `None`, `is_new` would be true and we - # wouldn't be here - group=NonNone(seer_matched_group), - event=event, - incoming_group_values=group_creation_kwargs, - release=release, - ) - ) - - span.set_tag("outcome", "new_group" if is_new else "seer_match") - metric_tags["outcome"] = "new_group" if is_new else "seer_match" - record_calculation_metric_with_result( - project=project, - has_secondary_hashes=has_secondary_hashes, - result="no_match", - ) - - if is_new: - metrics.incr( - "group.created", - skip_internal=True, - tags={ - "platform": event.platform or "unknown", - "sdk": normalized_sdk_tag_from_event(event.data), - }, - ) - - # This only applies to events with stacktraces, and we only do this for new - # groups, because we assume that if Seer puts an event in an existing group, it - # and the existing group have the same frame mix - frame_mix = event.get_event_metadata().get("in_app_frame_mix") - if frame_mix: - metrics.incr( - "grouping.in_app_frame_mix", - sample_rate=1.0, - tags={ - "platform": event.platform or "unknown", - "sdk": normalized_sdk_tag_from_event(event.data), - "frame_mix": frame_mix, - }, - ) - - return GroupInfo(group, is_new, is_regression) - - # If we land here, it's because either: - # - # a) There's an existing group with one of our hashes and we found it the first time we looked. - # - # b) We didn't find a group the first time we looked, but another identically-hashed event beat - # us to the lock and while we were waiting either created a new group or assigned our hashes to - # a neighboring group suggested by seer - such that when we finally got the lock and looked - # again, this time there was a group to find. - - group = Group.objects.get(id=existing_grouphash.group_id) - if group.issue_category != GroupCategory.ERROR: - logger.info( - "event_manager.category_mismatch", - extra={ - "issue_category": group.issue_category, - "event_type": "error", - }, - ) - return None - - is_new = False - - new_hashes = [h for h in grouphashes if h.group_id is None] - - primary_hash_values = set(primary_hashes) - new_hash_values = {gh.hash for gh in new_hashes} - all_primary_hashes_are_new = primary_hash_values.issubset(new_hash_values) - record_calculation_metric_with_result( - project=project, - has_secondary_hashes=has_secondary_hashes, - # If at least one primary hash value isn't new, then we'll definitely have found it, since - # we check all of the primary hashes before any secondary ones. If the primary hash values - # *are* all new, then we must have gotten here by finding a secondary hash (or we'd be in - # the group-creation/seer-consultation branch). - result="found_primary" if not all_primary_hashes_are_new else "found_secondary", - ) - - if new_hashes: - # There may still be secondary hashes that we did not use to find an - # existing group. 
A classic example is when grouping makes changes to - # the app-hash (changes to in_app logic), but the system hash stays - # stable and is used to find an existing group. Associate any new - # hashes with the group such that event saving continues to be - # resilient against grouping algorithm changes. - # - # There is a race condition here where two processes could "steal" - # hashes from each other. In practice this should not be user-visible - # as group creation is synchronized. Meaning the only way hashes could - # jump between groups is if there were two processes that: - # - # 1) have BOTH found an existing group - # (otherwise at least one of them would be in the group creation - # codepath which has transaction isolation/acquires row locks) - # 2) AND are looking at the same set, or an overlapping set of hashes - # (otherwise they would not operate on the same rows) - # 3) yet somehow also sort their event into two different groups each - # (otherwise the update would not change anything) - # - # We think this is a very unlikely situation. A previous version of - # _save_aggregate had races around group creation which made this race - # more user visible. For more context, see 84c6f75a and d0e22787, as - # well as GH-5085. - GroupHash.objects.filter(id__in=[h.id for h in new_hashes]).exclude( - state=GroupHash.State.LOCKED_IN_MIGRATION - ).update(group=group) - - is_regression = _process_existing_aggregate( - group=group, - event=event, - incoming_group_values=group_creation_kwargs, - release=release, - ) - - return GroupInfo(group, is_new, is_regression) - - -# TODO: None of the seer logic has been added to this version yet, so you can't simultaneously use -# optimized transitions and seer -def _save_aggregate_new( +def assign_event_to_group( event: Event, job: Job, metric_tags: MutableTags, @@ -1602,7 +1308,8 @@ def _save_aggregate_new( if primary.existing_grouphash: group_info = handle_existing_grouphash(job, primary.existing_grouphash, primary.grouphashes) result = "found_primary" - # If we haven't, try again using the secondary config + # If we haven't, try again using the secondary config. (If there is no secondary config, or + # we're out of the transition period, we'll get back the empty `NULL_GROUPHASH_INFO`.) 
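+    # (Overall resolution order: primary grouphashes, then secondary
+    # grouphashes, then a Seer match, and only then a brand-new group;
+    # each step runs only if the previous one came up empty.)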
 else:
        secondary = get_hashes_and_grouphashes(job, maybe_run_secondary_grouping, metric_tags)
        all_grouphashes = primary.grouphashes + secondary.grouphashes
@@ -1614,7 +1321,9 @@ def _save_aggregate_new(
             result = "found_secondary"
         # If we still haven't found a group, ask Seer for a match (if enabled for the project)
         else:
-            seer_matched_grouphash = maybe_check_seer_for_matching_grouphash(event)
+            seer_matched_grouphash = maybe_check_seer_for_matching_grouphash(
+                event, primary.variants, all_grouphashes
+            )
 
             if seer_matched_grouphash:
                 group_info = handle_existing_grouphash(job, seer_matched_grouphash, all_grouphashes)
@@ -1631,14 +1340,7 @@ def _save_aggregate_new(
     maybe_run_background_grouping(project, job)
 
     record_hash_calculation_metrics(
-        primary.config, primary.hashes, secondary.config, secondary.hashes
-    )
-    # TODO: Once the legacy `_save_aggregate` goes away, the logic inside of
-    # `record_calculation_metric_with_result` can be pulled into `record_hash_calculation_metrics`
-    record_calculation_metric_with_result(
-        project=project,
-        has_secondary_hashes=len(secondary.hashes) > 0,
-        result=result,
+        project, primary.config, primary.hashes, secondary.config, secondary.hashes, result
     )
 
     # Now that we've used the current and possibly secondary grouping config(s) to calculate the
@@ -1647,6 +1349,13 @@
     # erroneously create new groups.
     update_grouping_config_if_needed(project, "ingest")
 
+    # The only way there won't be group info is if we matched to a performance, cron, replay, or
+    # other non-error-type group because of a hash collision - exceedingly unlikely, and not
+    # something we've ever observed, but theoretically possible.
+    if group_info:
+        event.group = group_info.group
+        job["groups"] = [group_info]
+
     return group_info
 
@@ -1654,7 +1363,7 @@ def get_hashes_and_grouphashes(
     job: Job,
     hash_calculation_function: Callable[
         [Project, Job, MutableTags],
-        tuple[GroupingConfig, list[str]],
+        tuple[GroupingConfig, list[str], dict[str, BaseVariant]],
     ],
     metric_tags: MutableTags,
 ) -> GroupHashInfo:
@@ -1669,14 +1378,14 @@ def get_hashes_and_grouphashes(
     project = job["event"].project
 
     # These will come back as Nones if the calculation decides it doesn't need to run
-    grouping_config, hashes = hash_calculation_function(project, job, metric_tags)
+    grouping_config, hashes, variants = hash_calculation_function(project, job, metric_tags)
 
     if hashes:
-        grouphashes = get_or_create_grouphashes(project, hashes)
+        grouphashes = get_or_create_grouphashes(project, hashes, grouping_config["id"])
 
-        existing_grouphash = find_existing_grouphash(grouphashes)
+        existing_grouphash = find_grouphash_with_group(grouphashes)
 
-        return GroupHashInfo(grouping_config, hashes, grouphashes, existing_grouphash)
+        return GroupHashInfo(grouping_config, variants, hashes, grouphashes, existing_grouphash)
     else:
         return NULL_GROUPHASH_INFO
 
@@ -1706,12 +1415,16 @@ def handle_existing_grouphash(
     # (otherwise the update would not change anything)
     #
     # We think this is a very unlikely situation. A previous version of
-    # _save_aggregate had races around group creation which made this race
+    # this function had races around group creation which made this race
     # more user visible. For more context, see 84c6f75a and d0e22787, as
     # well as GH-5085.
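+    # A reduced sketch of that double-checked locking, as performed in
+    # `create_group_with_grouphashes` below (illustrative only; `grouphash_ids`
+    # is a hypothetical list of GroupHash primary keys):
+    #
+    #     with transaction.atomic(router.db_for_write(GroupHash)):
+    #         # Lock the rows, waiting out any concurrent holder...
+    #         locked = list(
+    #             GroupHash.objects.filter(id__in=grouphash_ids).select_for_update()
+    #         )
+    #         # ...then check again before creating anything new.
+    #         if find_grouphash_with_group(locked) is None:
+    #             ...  # safe to consult Seer / create the group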
group = Group.objects.get(id=existing_grouphash.group_id) - if check_for_category_mismatch(group): + # As far as we know this has never happened, but in theory at least, the error event hashing + # algorithm and other event hashing algorithms could come up with the same hash value in the + # same project and our hash could have matched to a non-error group. Just to be safe, we make + # sure that's not the case before proceeding. + if is_non_error_type_group(group): return None # There may still be hashes that we did not use to find an existing @@ -1777,7 +1490,7 @@ def create_group_with_grouphashes(job: Job, grouphashes: list[GroupHash]) -> Gro # condition scenario above, we'll have been blocked long enough for the other event to # have created the group and updated our grouphashes with a group id, which means this # time, we'll find something. - existing_grouphash = find_existing_grouphash(grouphashes) + existing_grouphash = find_grouphash_with_group(grouphashes) # If we still haven't found a matching grouphash, we're now safe to go ahead and create # the group. @@ -1806,16 +1519,6 @@ def _create_group( first_release: Release | None = None, **group_creation_kwargs: Any, ) -> Group: - # Temporary log to debug events seeming to disappear after being sent to Seer - if event.data.get("seer_similarity"): - logger.info( - "seer.similarity.pre_create_group", - extra={ - "event_id": event.event_id, - "hash": event.get_primary_hash(), - "project": project.id, - }, - ) short_id = _get_next_short_id(project) @@ -1891,18 +1594,6 @@ def _create_group( logger.exception("Error after unsticking project counter") raise - # Temporary log to debug events seeming to disappear after being sent to Seer - if event.data.get("seer_similarity"): - logger.info( - "seer.similarity.post_create_group", - extra={ - "event_id": event.event_id, - "hash": event.get_primary_hash(), - "project": project.id, - "group_id": group.id, - }, - ) - return group diff --git a/src/sentry/eventstore/models.py b/src/sentry/eventstore/models.py index 7f6c049de8bc1..6014a7f2ff088 100644 --- a/src/sentry/eventstore/models.py +++ b/src/sentry/eventstore/models.py @@ -18,7 +18,7 @@ from sentry import eventtypes from sentry.db.models import NodeData -from sentry.grouping.variants import BaseVariant, KeyedVariants +from sentry.grouping.variants import BaseVariant from sentry.interfaces.base import Interface, get_interfaces from sentry.issues.grouptype import GroupCategory from sentry.issues.issue_occurrence import IssueOccurrence @@ -332,6 +332,29 @@ def get_grouping_config(self) -> GroupingConfig: return get_grouping_config_dict_for_event_data(self.data, self.project) + def get_hashes_and_variants( + self, config: StrategyConfiguration | None = None + ) -> tuple[list[str], dict[str, BaseVariant]]: + """ + Return the event's hash values, calculated using the given config, along with the + `variants` data used in grouping. 
+        """
+
+        variants = self.get_grouping_variants(config)
+        # Sort the variants so that the system variant (if any) is always last, in order to resolve
+        # ambiguities when choosing primary_hash for Snuba
+        sorted_variants = sorted(
+            variants.items(),
+            key=lambda name_and_variant: 1 if name_and_variant[0] == "system" else 0,
+        )
+        # Get each variant's hash value, in sorted order, filtering out Nones and duplicates
+        # (a plain set would throw the sorted order away)
+        candidate_hashes = (variant.get_hash() for _, variant in sorted_variants)
+        hashes = list(dict.fromkeys(filter(None, candidate_hashes)))
+
+        # Write to event before returning
+        self.data["hashes"] = hashes
+
+        return (hashes, variants)
+
    def get_hashes(self, force_config: StrategyConfiguration | None = None) -> list[str]:
        """
        Returns the calculated hashes for the event. This uses the stored
@@ -353,37 +376,7 @@ def get_hashes(self, force_config: StrategyConfiguration | None = None) -> list[
        return hashes

        # Create fresh hashes
-        from sentry.grouping.api import sort_grouping_variants
-
-        variants = self.get_grouping_variants(force_config)
-        hashes = [
-            hash_
-            for _, hash_ in self._hashes_from_sorted_grouping_variants(
-                sort_grouping_variants(variants)
-            )
-        ]
-
-        # Write to event before returning
-        self.data["hashes"] = hashes
-        return hashes
-
-    @staticmethod
-    def _hashes_from_sorted_grouping_variants(
-        variants: KeyedVariants,
-    ) -> list[tuple[str, str]]:
-        """Create hashes from variants and filter out duplicates and None values"""
-
-        filtered_hashes = []
-        seen_hashes = set()
-        for name, variant in variants:
-            hash_ = variant.get_hash()
-            if hash_ is None or hash_ in seen_hashes:
-                continue
-
-            seen_hashes.add(hash_)
-            filtered_hashes.append((name, hash_))
-
-        return filtered_hashes
+        return self.get_hashes_and_variants(force_config)[0]

    def normalize_stacktraces_for_grouping(self, grouping_config: StrategyConfiguration) -> None:
        """Normalize stacktraces and clear memoized interfaces
@@ -609,7 +602,7 @@ def group_id(self) -> int | None:
    def group_id(self, value: int | None) -> None:
        self._group_id = value

-    # TODO We need a better way to cache these properties. functools
+    # TODO: We need a better way to cache these properties. functools
    # doesn't quite do the trick as there is a reference bug with unsaved
    # models. But the current _group_cache thing is also clunky because these
    # properties need to be stripped out in __getstate__.
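The sorting in `get_hashes_and_variants` above only pays off downstream, when the first hash in the list is preferred as `primary_hash`. Here is a minimal, self-contained sketch of that ordering contract; `order_variant_hashes` is a hypothetical helper written for illustration and is not part of this diff:

```python
# Toy model of the variant-ordering contract: the "system" variant sorts
# last, so a custom/app variant's hash wins as the primary hash downstream.
# (Hypothetical helper for illustration; not Sentry code.)


def order_variant_hashes(variant_hashes: dict[str, str | None]) -> list[str]:
    # Stable sort: non-"system" variants keep their relative order and
    # all sort ahead of the "system" variant.
    sorted_items = sorted(
        variant_hashes.items(),
        key=lambda name_and_hash: 1 if name_and_hash[0] == "system" else 0,
    )
    # Drop Nones and duplicates while preserving the sorted order
    hashes: list[str] = []
    for _, hash_value in sorted_items:
        if hash_value is not None and hash_value not in hashes:
            hashes.append(hash_value)
    return hashes


# The app variant's hash lands first in the list, so it - rather than the
# system variant's hash - would be chosen as primary_hash.
assert order_variant_hashes(
    {"system": "c0ffee", "app": "f00d42", "custom-fingerprint": None}
) == ["f00d42", "c0ffee"]
```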
diff --git a/src/sentry/eventstream/base.py b/src/sentry/eventstream/base.py index dc7a7dc411fb0..ac505a9a7a215 100644 --- a/src/sentry/eventstream/base.py +++ b/src/sentry/eventstream/base.py @@ -6,9 +6,8 @@ from enum import Enum from typing import TYPE_CHECKING, Any, Optional, TypedDict, cast -from django.conf import settings - from sentry.issues.issue_occurrence import IssueOccurrence +from sentry.queue.routers import SplitQueueRouter from sentry.tasks.post_process import post_process_group from sentry.utils.cache import cache_key_for_event from sentry.utils.services import Service @@ -65,6 +64,9 @@ class EventStream(Service): "_get_event_type", ) + def __init__(self, **options: Any) -> None: + self.__celery_router = SplitQueueRouter() + def _dispatch_post_process_group_task( self, event_id: str, @@ -108,9 +110,7 @@ def _get_queue_for_post_process(self, event: Event | GroupEvent) -> str: else: default_queue = "post_process_errors" - return settings.SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER.get( - default_queue, lambda: default_queue - )() + return self.__celery_router.route_for_queue(default_queue) def _get_occurrence_data(self, event: Event | GroupEvent) -> MutableMapping[str, Any]: occurrence = cast(Optional[IssueOccurrence], getattr(event, "occurrence", None)) diff --git a/src/sentry/eventstream/kafka/backend.py b/src/sentry/eventstream/kafka/backend.py index 8dc599d10a457..f1dd4b5269f42 100644 --- a/src/sentry/eventstream/kafka/backend.py +++ b/src/sentry/eventstream/kafka/backend.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import time from collections.abc import Mapping, MutableMapping, Sequence from datetime import datetime from typing import TYPE_CHECKING, Any @@ -25,10 +26,12 @@ class KafkaEventStream(SnubaProtocolEventStream): def __init__(self, **options: Any) -> None: + super().__init__(**options) self.topic = Topic.EVENTS self.transactions_topic = Topic.TRANSACTIONS self.issue_platform_topic = Topic.EVENTSTREAM_GENERIC self.__producers: MutableMapping[Topic, Producer] = {} + self.error_last_logged_time: int | None = None def get_transactions_topic(self, project_id: int) -> Topic: return self.transactions_topic @@ -42,8 +45,11 @@ def get_producer(self, topic: Topic) -> Producer: return self.__producers[topic] def delivery_callback(self, error: KafkaError | None, message: KafkaMessage) -> None: + now = int(time.time()) if error is not None: - logger.warning("Could not publish message (error: %s): %r", error, message) + if self.error_last_logged_time is None or now > self.error_last_logged_time + 60: + self.error_last_logged_time = now + logger.error("Could not publish message (error: %s): %r", error, message) def _get_headers_for_insert( self, diff --git a/src/sentry/features/handler.py b/src/sentry/features/handler.py index 846626c158a94..4239e49506a10 100644 --- a/src/sentry/features/handler.py +++ b/src/sentry/features/handler.py @@ -1,9 +1,7 @@ from __future__ import annotations -__all__ = ["FeatureHandler", "BatchFeatureHandler"] - import abc -from collections.abc import Mapping, MutableSet, Sequence +from collections.abc import Sequence from typing import TYPE_CHECKING if TYPE_CHECKING: @@ -17,6 +15,9 @@ from sentry.users.services.user import RpcUser +__all__ = ["FeatureHandler", "BatchFeatureHandler"] + + class FeatureHandler: """ Base class for defining custom logic for feature decisions. @@ -28,7 +29,7 @@ class FeatureHandler: as we don't programatically release features in self-hosted. 
""" - features: MutableSet[str] = set() + features: set[str] = set() def __call__(self, feature: Feature, actor: User) -> bool | None: if feature.name not in self.features: @@ -45,7 +46,7 @@ def has( ) -> bool | None: raise NotImplementedError - def has_for_batch(self, batch: FeatureCheckBatch) -> Mapping[Project, bool | None]: + def has_for_batch(self, batch: FeatureCheckBatch) -> dict[Project, bool | None]: # If not overridden, iterate over objects in the batch individually. return { obj: self.has(feature, batch.actor) @@ -60,7 +61,7 @@ def batch_has( projects: Sequence[Project] | None = None, organization: Organization | None = None, batch: bool = True, - ) -> Mapping[str, Mapping[str, bool | None]] | None: + ) -> dict[str, dict[str, bool | None]] | None: raise NotImplementedError @@ -80,13 +81,21 @@ class BatchFeatureHandler(FeatureHandler): @abc.abstractmethod def _check_for_batch( - self, feature_name: str, entity: Organization | User, actor: User + self, + feature_name: str, + entity: Organization | User | None, + actor: User | RpcUser | AnonymousUser | None, ) -> bool | None: raise NotImplementedError - def has(self, feature: Feature, actor: User, skip_entity: bool | None = False) -> bool | None: + def has( + self, + feature: Feature, + actor: User | RpcUser | AnonymousUser | None, + skip_entity: bool | None = False, + ) -> bool | None: return self._check_for_batch(feature.name, feature.get_subject(), actor) - def has_for_batch(self, batch: FeatureCheckBatch) -> Mapping[Project, bool | None]: + def has_for_batch(self, batch: FeatureCheckBatch) -> dict[Project, bool | None]: flag = self._check_for_batch(batch.feature_name, batch.subject, batch.actor) return {obj: flag for obj in batch.objects} diff --git a/src/sentry/features/manager.py b/src/sentry/features/manager.py index 4e045f4d8eaa9..98c3eb8b72d58 100644 --- a/src/sentry/features/manager.py +++ b/src/sentry/features/manager.py @@ -6,7 +6,7 @@ import abc from collections import defaultdict -from collections.abc import Iterable, Mapping, MutableMapping, MutableSet, Sequence +from collections.abc import Iterable, Sequence from typing import TYPE_CHECKING, Any import sentry_sdk @@ -44,7 +44,7 @@ class RegisteredFeatureManager: """ def __init__(self) -> None: - self._handler_registry: MutableMapping[str, list[FeatureHandler]] = defaultdict(list) + self._handler_registry: dict[str, list[FeatureHandler]] = defaultdict(list) def add_handler(self, handler: FeatureHandler) -> None: """ @@ -78,7 +78,7 @@ def has_for_batch( organization: Organization, objects: Sequence[Project], actor: User | None = None, - ) -> Mapping[Project, bool]: + ) -> dict[Project, bool | None]: """ Determine if a feature is enabled for a batch of objects. 
@@ -100,7 +100,7 @@ def has_for_batch( >>> FeatureManager.has_for_batch('projects:feature', organization, [project1, project2], actor=request.user) """ - result = dict() + result: dict[Project, bool | None] = {} remaining = set(objects) handlers = self._handler_registry[name] @@ -111,7 +111,7 @@ def has_for_batch( with sentry_sdk.start_span( op="feature.has_for_batch.handler", - description=f"{type(handler).__name__} ({name})", + name=f"{type(handler).__name__} ({name})", ) as span: batch_size = len(remaining) span.set_data("Batch Size", batch_size) @@ -143,17 +143,17 @@ def has_for_batch( class FeatureManager(RegisteredFeatureManager): def __init__(self) -> None: super().__init__() - self._feature_registry: MutableMapping[str, type[Feature]] = {} + self._feature_registry: dict[str, type[Feature]] = {} # Deprecated: Remove entity_features once flagr has been removed. - self.entity_features: MutableSet[str] = set() - self.exposed_features: MutableSet[str] = set() - self.option_features: MutableSet[str] = set() - self.flagpole_features: MutableSet[str] = set() + self.entity_features: set[str] = set() + self.exposed_features: set[str] = set() + self.option_features: set[str] = set() + self.flagpole_features: set[str] = set() self._entity_handler: FeatureHandler | None = None def all( self, feature_type: type[Feature] = Feature, api_expose_only: bool = False - ) -> Mapping[str, type[Feature]]: + ) -> dict[str, type[Feature]]: """ Get a mapping of feature name -> feature class, optionally specific to a particular feature type. @@ -328,7 +328,7 @@ def batch_has( actor: User | RpcUser | AnonymousUser | None = None, projects: Sequence[Project] | None = None, organization: Organization | None = None, - ) -> Mapping[str, Mapping[str, bool | None]] | None: + ) -> dict[str, dict[str, bool | None]] | None: """ Determine if multiple features are enabled. Unhandled flags will not be in the results if they cannot be handled. @@ -346,7 +346,7 @@ def batch_has( # Fall back to default handler if no entity handler available. project_features = [name for name in feature_names if name.startswith("projects:")] if projects and project_features: - results: MutableMapping[str, Mapping[str, bool]] = {} + results: dict[str, dict[str, bool | None]] = {} for project in projects: proj_results = results[f"project:{project.id}"] = {} for feature_name in project_features: @@ -357,7 +357,7 @@ def batch_has( org_features = filter(lambda name: name.startswith("organizations:"), feature_names) if organization and org_features: - org_results = {} + org_results: dict[str, bool | None] = {} for feature_name in org_features: org_results[feature_name] = self.has( feature_name, organization, actor=actor @@ -370,7 +370,7 @@ def batch_has( feature_names, ) if unscoped_features: - unscoped_results = {} + unscoped_results: dict[str, bool | None] = {} for feature_name in unscoped_features: unscoped_results[feature_name] = self.has(feature_name, actor=actor) return {"unscoped": unscoped_results} @@ -417,7 +417,7 @@ def __init__( self.objects = objects self.actor = actor - def get_feature_objects(self) -> Mapping[Project, Feature]: + def get_feature_objects(self) -> dict[Project, Feature]: """ Iterate over individual Feature objects. 
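The batch-checking machinery above follows one contract: a handler computes a single decision per batch (with `None` meaning "no opinion, defer to the next handler") and fans that decision out to every object. A small self-contained sketch, assuming a hypothetical `StaticFlagHandler` in place of the real Flagpole- or options-backed handlers:

```python
# Toy model of the BatchFeatureHandler contract; StaticFlagHandler is a
# hypothetical stand-in, not one of Sentry's real handler classes.


class StaticFlagHandler:
    def __init__(self, enabled_features: set[str]) -> None:
        self.enabled_features = enabled_features

    def _check_for_batch(self, feature_name: str) -> bool | None:
        # None means "no opinion": the manager keeps those objects in its
        # `remaining` set and asks the next registered handler.
        return True if feature_name in self.enabled_features else None

    def has_for_batch(self, feature_name: str, objects: list[str]) -> dict[str, bool | None]:
        # One decision for the whole batch, fanned out to every object,
        # mirroring BatchFeatureHandler.has_for_batch in the diff above.
        decision = self._check_for_batch(feature_name)
        return {obj: decision for obj in objects}


handler = StaticFlagHandler({"organizations:advanced-search"})
assert handler.has_for_batch("organizations:advanced-search", ["proj-a", "proj-b"]) == {
    "proj-a": True,
    "proj-b": True,
}
assert handler.has_for_batch("organizations:unknown-flag", ["proj-a"]) == {"proj-a": None}
```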
@@ -429,5 +429,5 @@ def get_feature_objects(self) -> Mapping[Project, Feature]: return {obj: cls(self.feature_name, obj) for obj in self.objects} @property - def subject(self) -> Organization | User: + def subject(self) -> Organization | User | None: return self.organization or self.actor diff --git a/src/sentry/features/permanent.py b/src/sentry/features/permanent.py index de5e42547a4bb..9158b2dfa3cc2 100644 --- a/src/sentry/features/permanent.py +++ b/src/sentry/features/permanent.py @@ -22,6 +22,8 @@ def register_permanent_features(manager: FeatureManager): permanent_organization_features = { # Enable advanced search features, like negation and wildcard matching. "organizations:advanced-search": True, + # Enable anomaly detection alerts + "organizations:anomaly-detection-alerts": False, # Enable multiple Apple app-store-connect sources per project. "organizations:app-store-connect-multiple": False, # Enable change alerts for an org @@ -76,6 +78,8 @@ def register_permanent_features(manager: FeatureManager): "organizations:integrations-stacktrace-link": True, # Allow orgs to automatically create Tickets in Issue Alerts "organizations:integrations-ticket-rules": True, + # Enable metric alert charts in email/slack + "organizations:metric-alert-chartcuterie": False, # Enable Performance view "organizations:performance-view": True, # Enable profiling view diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py index 5c96964c8b84c..c387b5ca24bd1 100644 --- a/src/sentry/features/temporary.py +++ b/src/sentry/features/temporary.py @@ -44,7 +44,7 @@ def register_temporary_features(manager: FeatureManager): # Enables activated alert rules manager.add("organizations:activated-alert-rules", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable AI Issue Summary feture on the Issue Details page. + # Enable AI Issue Summary feature on the Issue Details page. manager.add("organizations:ai-summary", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables alert creation on indexed events in UI (use for PoC/testing only) manager.add("organizations:alert-allow-indexed", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) @@ -53,10 +53,10 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:alert-filters", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enables the migration of alerts (checked in a migration script). 
manager.add("organizations:alerts-migration-enabled", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable anomaly detection alerts - manager.add("organizations:anomaly-detection-alerts", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable anomaly detection alerts - manager.add("organizations:fake-anomaly-detection", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enables EAP alerts + manager.add("organizations:alerts-eap", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable anomaly detection feature for rollout + manager.add("organizations:anomaly-detection-rollout", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable anomaly detection charts manager.add("organizations:anomaly-detection-alerts-charts", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable anr frame analysis @@ -82,24 +82,28 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:continuous-profiling-stats", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enable the continuous profiling compatible redesign manager.add("organizations:continuous-profiling-compat", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Disables legacy cron ingest endpoints - manager.add("organizations:crons-disable-ingest-endpoints", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) - # Disables legacy cron ingest endpoints - manager.add("organizations:crons-write-user-feedback", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) # Delightful Developer Metrics (DDM): # Enables experimental WIP custom metrics related features manager.add("organizations:custom-metrics-experimental", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enables Info alert for custom metrics and alerts widgets removal + manager.add("organizations:custom-metrics-alerts-widgets-removal-info", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable daily summary manager.add("organizations:daily-summary", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) + # Enable events analytics platform data in dashboards + manager.add("organizations:dashboards-eap", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables import/export functionality for dashboards manager.add("organizations:dashboards-import", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable metrics enhanced performance in dashboards manager.add("organizations:dashboards-mep", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable metrics enhanced performance for AM2+ customers as they transition from AM2 to AM3 + manager.add("organizations:dashboards-metrics-transition", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:dashboards-span-metrics", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) # Enable releases overlay on dashboard chart widgets manager.add("organizations:dashboards-releases-on-charts", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable equations for Big Number widgets manager.add("organizations:dashboards-bignumber-equations", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable access protected editing of dashboards + 
manager.add("organizations:dashboards-edit-access", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable the dev toolbar PoC code for employees # Data Secrecy manager.add("organizations:data-secrecy", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) @@ -117,8 +121,12 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:discover", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enable the org recalibration manager.add("organizations:ds-org-recalibration", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) + # Enable custom dynamic sampling rates + manager.add("organizations:dynamic-sampling-custom", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables data secrecy mode manager.add("organizations:enterprise-data-secrecy", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) + # Enable issue platform deletion + manager.add("organizations:issue-platform-deletion", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable archive/escalating issue workflow features in v2 manager.add("organizations:escalating-issues-v2", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable emiting escalating data to the metrics backend @@ -129,12 +137,8 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:feature-flag-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable disabling gitlab integrations when broken is detected manager.add("organizations:gitlab-disable-on-broken", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable only calculating a secondary hash when needed - manager.add("organizations:grouping-suppress-unnecessary-secondary-hash", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Allow creating `GroupHashMetadata` records manager.add("organizations:grouphash-metadata-creation", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Allows an org to have a larger set of project ownership rules per project - manager.add("organizations:higher-ownership-limit", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable increased issue_owners rate limit for auto-assignment manager.add("organizations:increased-issue-owners-rate-limit", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Starfish: extract metrics from the spans @@ -156,6 +160,10 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:issue-details-always-show-trace", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables the UI for Autofix in issue details manager.add("organizations:issue-details-autofix-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable Issue Platform deletion + manager.add("organizations:issue-platform-deletion", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable Issue Platform deletion UI + manager.add("organizations:issue-platform-deletion-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables a toggle for entering the new issue details UI manager.add("organizations:issue-details-new-experience-toggle", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables access to the streamlined issue details UI @@ -175,17 +183,17 @@ def 
register_temporary_features(manager: FeatureManager): manager.add("organizations:issue-search-snuba", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable the new issue stream search bar UI manager.add("organizations:issue-stream-search-query-builder", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable issue stream table layout changes + manager.add("organizations:issue-stream-table-layout", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:large-debug-files", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) - # Enabled latest adopted release filter for issue alerts - manager.add("organizations:latest-adopted-release-filter", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable members to invite teammates to organizations manager.add("organizations:members-invite-teammates", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:mep-rollout-flag", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:mep-use-default-tags", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable messaging integration onboarding when setting up alerts manager.add("organizations:messaging-integration-onboarding", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable metric alert charts in email/slack - manager.add("organizations:metric-alert-chartcuterie", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) + # Enable messaging-integration onboarding when creating a new project + manager.add("organizations:messaging-integration-onboarding-project-creation", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable threshold period in metric alert rule builder manager.add("organizations:metric-alert-threshold-period", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables the search bar for metrics samples list @@ -203,9 +211,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:more-slow-alerts", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) manager.add("organizations:navigation-sidebar-v2", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:new-page-filter", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=True, api_expose=True) - manager.add("organizations:new-weekly-report", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Display warning banner for every event issue alerts - manager.add("organizations:noisy-alert-warning", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Notify all project members when fallthrough is disabled, instead of just the auto-assignee manager.add("organizations:notification-all-recipients", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Drop obsoleted status changes in occurence consumer @@ -222,6 +227,8 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:on-demand-metrics-query-spec-version-two", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Display metrics components with a new design manager.add("organizations:metrics-new-inputs", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Display new Source map uploads view in settings + 
manager.add('organizations:new-source-map-uploads-view', OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Display on demand metrics related UI elements manager.add("organizations:on-demand-metrics-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Display on demand metrics related UI elements, for dashboards and widgets. The other flag is for alerts. @@ -230,6 +237,10 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:onboarding", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enable the SDK selection feature in the onboarding manager.add("organizations:onboarding-sdk-selection", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) + # Enable large ownership rule file size limit + manager.add("organizations:ownership-size-limit-large", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enable xlarge ownership rule file size limit + manager.add("organizations:ownership-size-limit-xlarge", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable views for anomaly detection manager.add("organizations:performance-anomaly-detection-ui", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable mobile performance score calculation for transactions in relay @@ -315,8 +326,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:performance-use-metrics", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enable showing INP web vital in default views manager.add("organizations:performance-vitals-inp", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable the GA features for priority alerts - manager.add("organizations:priority-ga-features", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, default=True, api_expose=True) # Enable profiling manager.add("organizations:profiling", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=True) # Enabled for those orgs who participated in the profiling Beta program @@ -336,8 +345,8 @@ def register_temporary_features(manager: FeatureManager): # Limit project events endpoint to only query back a certain number of days manager.add("organizations:project-event-date-limit", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) manager.add("organizations:project-templates", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) - # Enable react-router 6 in the UI - manager.add("organizations:react-router-6", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enable the new quick start guide + manager.add("organizations:quick-start-updates", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable the new Related Events feature manager.add("organizations:related-events", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable related issues feature @@ -378,10 +387,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:search-query-builder-project-details", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:search-query-builder-alerts", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:search-query-builder-performance", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Enable the Replay Details > Accessibility 
tab - manager.add("organizations:session-replay-a11y-tab", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable the accessibility issues endpoint - manager.add("organizations:session-replay-accessibility-issues", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enable combined envelope Kafka items in Relay manager.add("organizations:session-replay-combined-envelope-items", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable canvas recording @@ -423,6 +428,8 @@ def register_temporary_features(manager: FeatureManager): # Add regression chart as image to slack message manager.add("organizations:slack-endpoint-regression-image", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) manager.add("organizations:slack-function-regression-image", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) + # Enable linking to Slack alerts from multiple teams to a single channel + manager.add("organizations:slack-multiple-team-single-channel-linking", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) manager.add("organizations:stacktrace-processing-caching", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enable SAML2 Single-logout manager.add("organizations:sso-saml2-slo", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=False) @@ -492,8 +499,6 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:transaction-name-sanitization", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Enables creation and full updating of uptime monitors via the api manager.add("organizations:uptime-api-create-update", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) - # Displys the "Uptime Monitor" option in the alert creation wizard - manager.add("organizations:uptime-display-wizard-create", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enables automatic hostname detection in uptime manager.add("organizations:uptime-automatic-hostname-detection", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # Enables automatic subscription creation in uptime @@ -505,8 +510,6 @@ def register_temporary_features(manager: FeatureManager): # Enables uptime related settings for projects and orgs manager.add('organizations:uptime-settings', OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) manager.add("organizations:use-metrics-layer", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) - # Enable User Feedback v2 ingest - manager.add("organizations:user-feedback-ingest", OrganizationFeature, FeatureHandlerStrategy.INTERNAL, api_expose=False) # Use ReplayClipPreview inside the User Feedback Details panel manager.add("organizations:user-feedback-replay-clip", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enable User Feedback spam auto filtering feature ingest @@ -529,8 +532,10 @@ def register_temporary_features(manager: FeatureManager): manager.add("organizations:widget-viewer-modal-minimap", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True) # Enabled unresolved issue webhook for organization manager.add("organizations:webhooks-unresolved", OrganizationFeature, FeatureHandlerStrategy.OPTIONS, api_expose=True) - # Enable new feature parsing code for Jira integrations - manager.add("organizations:new-jira-transformers", OrganizationFeature, 
FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Enable EventUniqueUserFrequencyConditionWithConditions special alert condition + manager.add("organizations:event-unique-user-frequency-condition-with-conditions", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) + # Use spans instead of transactions for dynamic sampling calculations. This will become the new default. + manager.add("organizations:dynamic-sampling-spans", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False) # NOTE: Don't add features down here! Add them to their specific group and sort # them alphabetically! The order features are registered is not important. diff --git a/src/sentry/feedback/usecases/create_feedback.py b/src/sentry/feedback/usecases/create_feedback.py index 57c9aae5e4a03..f19ba419d7c8c 100644 --- a/src/sentry/feedback/usecases/create_feedback.py +++ b/src/sentry/feedback/usecases/create_feedback.py @@ -8,7 +8,7 @@ import jsonschema -from sentry import features +from sentry import features, options from sentry.constants import DataCategory from sentry.eventstore.models import Event, GroupEvent from sentry.feedback.usecases.spam_detection import is_spam @@ -232,7 +232,7 @@ def create_feedback_issue(event, project_id: int, source: FeedbackCreationSource is_message_spam = is_spam(event["contexts"]["feedback"]["message"]) except Exception: # until we have LLM error types ironed out, just catch all exceptions - logger.exception("Error checking if message is spam") + logger.exception("Error checking if message is spam", extra={"project_id": project_id}) metrics.incr( "feedback.create_feedback_issue.spam_detection", tags={ @@ -353,6 +353,9 @@ def shim_to_feedback( User feedbacks are an event type, so we try and grab as much from the legacy user report and event to create the new feedback. """ + if is_in_feedback_denylist(project.organization): + return + try: feedback_event: dict[str, Any] = { "contexts": { @@ -399,3 +402,7 @@ def auto_ignore_spam_feedbacks(project, issue_fingerprint): new_substatus=GroupSubStatus.FOREVER, ), ) + + +def is_in_feedback_denylist(organization): + return organization.slug in options.get("feedback.organizations.slug-denylist") diff --git a/src/sentry/filestore/gcs.py b/src/sentry/filestore/gcs.py index 36be72b03e840..c02c38b055c27 100644 --- a/src/sentry/filestore/gcs.py +++ b/src/sentry/filestore/gcs.py @@ -23,11 +23,11 @@ from sentry.net.http import TimeoutAdapter from sentry.utils import metrics -from sentry.utils.retries import ConditionalRetryPolicy, exponential_delay +from sentry.utils.retries import ConditionalRetryPolicy, sigmoid_delay # how many times do we want to try if stuff goes wrong GCS_RETRIES = 5 -REPLAY_GCS_RETRIES = GCS_RETRIES + 2 +REPLAY_GCS_RETRIES = 125 # Which errors are eligible for retry. @@ -405,6 +405,7 @@ def should_retry(attempt: int, e: Exception) -> bool: """Retry gateway timeout exceptions up to the limit.""" return attempt <= REPLAY_GCS_RETRIES and isinstance(e, GCS_RETRYABLE_ERRORS) - # Retry cadence: 0.025, 0.05, 0.1, 0.2, 0.4, 0.8, 1.6, 3.2 => ~6.5 seconds - policy = ConditionalRetryPolicy(should_retry, exponential_delay(0.05)) + # Retry cadence: After a brief period of fast retries the function will retry once + # per second for two minutes. 
+    policy = ConditionalRetryPolicy(should_retry, sigmoid_delay())
    policy(callable)
diff --git a/src/sentry/flags/README.md b/src/sentry/flags/README.md
new file mode 100644
index 0000000000000..6a4cab09d9f61
--- /dev/null
+++ b/src/sentry/flags/README.md
@@ -0,0 +1 @@
+flag log
diff --git a/src/sentry/api/validators/sentry_apps/__init__.py b/src/sentry/flags/__init__.py
similarity index 100%
rename from src/sentry/api/validators/sentry_apps/__init__.py
rename to src/sentry/flags/__init__.py
diff --git a/src/sentry/flags/docs/api.md b/src/sentry/flags/docs/api.md
new file mode 100644
index 0000000000000..6a42c59fe9ed9
--- /dev/null
+++ b/src/sentry/flags/docs/api.md
@@ -0,0 +1,114 @@
+# Flags API
+
+Host: https://sentry.io/api/0
+
+**Authors.**
+
+@cmanallen
+
+**How to read this document.**
+
+This document is structured by resource with each resource having actions that can be performed against it. Every action that either accepts a request or returns a response WILL document the full interchange format. Clients may opt to restrict response data or provide a subset of the request data.
+
+## Flag Logs [/organizations//flags/logs/]
+
+- Parameters
+  - flag (optional, string) - The flag name to filter the result by. Can be specified multiple times.
+  - start (optional, string) - ISO 8601 format (`YYYY-MM-DDTHH:mm:ss.sssZ`)
+  - end (optional, string) - ISO 8601 format. Required if `start` is set.
+  - statsPeriod (optional, string) - A positive integer suffixed with a unit type.
+  - cursor (optional, string)
+  - per_page (optional, number)
+    Default: 10
+  - offset (optional, number)
+    Default: 0
+
+### Browse Flag Logs [GET]
+
+Retrieve a collection of flag logs.
+
+**Attributes**
+
+| Column          | Type   | Description                                                            |
+| --------------- | ------ | ---------------------------------------------------------------------- |
+| action          | string | Enum of `created`, `updated`, or `deleted`.                            |
+| created_at      | string | ISO-8601 timestamp of when the flag was changed.                       |
+| created_by      | string | The user responsible for the change.                                   |
+| created_by_type | string | Enum of `email`, `id`, or `name`.                                      |
+| flag            | string | The name of the flag changed.                                          |
+| id              | number | A unique identifier for the log entry. Maps to flag_log_id in the URI. |
+| tags            | object | A collection of provider-specified scoping metadata.                   |
+
+- Response 200
+
+  ```json
+  {
+    "data": [
+      {
+        "action": "created",
+        "created_at": "2024-01-01T05:12:33",
+        "created_by": "2552",
+        "created_by_type": "id",
+        "flag": "my-flag-name",
+        "id": 1,
+        "tags": {
+          "environment": "production"
+        }
+      }
+    ]
+  }
+  ```
+
+## Flag Log [/organizations//flags/logs//]
+
+### Fetch Flag Log [GET]
+
+Retrieve a single flag log instance.
+
+- Response 200
+
+  ```json
+  {
+    "data": {
+      "action": "updated",
+      "created_at": "2024-11-19T19:12:55",
+      "created_by": "user@site.com",
+      "created_by_type": "email",
+      "flag": "new-flag-name",
+      "id": 1,
+      "tags": {
+        "environment": "development"
+      }
+    }
+  }
+  ```
+
+## Webhooks [/webhooks/flags/organization//provider//]
+
+### Create Flag Log [POST]
+
+The shape of the request object varies by provider. The `` URI parameter informs the server of the shape of the request and it is on the server to handle the provider. The following providers are supported: Unleash, Split, and LaunchDarkly.
+
+**Flag Pole Example:**
+
+Flag pole is Sentry owned. It matches our audit-log resource because it is designed for that purpose.
+ +- Request (application/json) + + ```json + { + "data": [ + { + "action": "updated", + "created_at": "2024-11-19T19:12:55", + "created_by": "colton.allen@sentry.io", + "flag": "flag-name", + "tags": { + "commit_sha": "1f33a107d7cd060ab9c98e11c9e5a62dc1347861" + } + } + ] + } + ``` + +- Response 201 diff --git a/src/sentry/remote_config/__init__.py b/src/sentry/flags/endpoints/__init__.py similarity index 100% rename from src/sentry/remote_config/__init__.py rename to src/sentry/flags/endpoints/__init__.py diff --git a/src/sentry/flags/endpoints/hooks.py b/src/sentry/flags/endpoints/hooks.py new file mode 100644 index 0000000000000..0f71378062dbe --- /dev/null +++ b/src/sentry/flags/endpoints/hooks.py @@ -0,0 +1,74 @@ +from rest_framework.request import Request +from rest_framework.response import Response + +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.authentication import OrgAuthTokenAuthentication +from sentry.api.base import Endpoint, region_silo_endpoint +from sentry.api.bases.organization import OrganizationPermission +from sentry.api.exceptions import ResourceDoesNotExist +from sentry.flags.providers import ( + DeserializationError, + InvalidProvider, + handle_provider_event, + write, +) +from sentry.models.organization import Organization +from sentry.utils.sdk import bind_organization_context + +"""HTTP endpoint. + +This endpoint accepts only organization authorization tokens. I've made the conscious +decision to exclude all other forms of authentication. We don't want users accidentally +writing logs or leaked DSNs generating invalid log entries. An organization token is +secret and reasonably restricted and so makes sense for this use case where we have +inter-provider communication. + +This endpoint allows writes if any write-level "org" permission was provided. 
+""" + + +class OrganizationFlagHookPermission(OrganizationPermission): + scope_map = { + "POST": ["org:ci"], + } + + +@region_silo_endpoint +class OrganizationFlagsHooksEndpoint(Endpoint): + authentication_classes = (OrgAuthTokenAuthentication,) + owner = ApiOwner.REPLAY + permission_classes = (OrganizationFlagHookPermission,) + publish_status = { + "POST": ApiPublishStatus.PRIVATE, + } + + def convert_args( + self, + request: Request, + organization_id_or_slug: int | str, + *args, + **kwargs, + ): + try: + if isinstance(organization_id_or_slug, int): + organization = Organization.objects.get_from_cache(id=organization_id_or_slug) + else: + organization = Organization.objects.get_from_cache(slug=organization_id_or_slug) + except Organization.DoesNotExist: + raise ResourceDoesNotExist + + self.check_object_permissions(request, organization) + bind_organization_context(organization) + + kwargs["organization"] = organization + return args, kwargs + + def post(self, request: Request, organization: Organization, provider: str) -> Response: + try: + write(handle_provider_event(provider, request.data, organization.id)) + return Response(status=200) + except InvalidProvider: + raise ResourceDoesNotExist + except DeserializationError as exc: + return Response(exc.errors, status=400) diff --git a/src/sentry/flags/endpoints/logs.py b/src/sentry/flags/endpoints/logs.py new file mode 100644 index 0000000000000..45dfeea606ec2 --- /dev/null +++ b/src/sentry/flags/endpoints/logs.py @@ -0,0 +1,95 @@ +from datetime import datetime +from typing import Any, TypedDict + +from rest_framework.exceptions import ParseError +from rest_framework.request import Request +from rest_framework.response import Response + +# from sentry import features +from sentry.api.api_owners import ApiOwner +from sentry.api.api_publish_status import ApiPublishStatus +from sentry.api.base import region_silo_endpoint +from sentry.api.bases.organization import OrganizationEndpoint +from sentry.api.exceptions import ResourceDoesNotExist +from sentry.api.paginator import OffsetPaginator +from sentry.api.serializers import Serializer, register, serialize +from sentry.api.utils import get_date_range_from_params +from sentry.flags.models import ActionEnum, CreatedByTypeEnum, FlagAuditLogModel +from sentry.models.organization import Organization + + +class FlagAuditLogModelSerializerResponse(TypedDict): + id: int + action: str + created_at: datetime + created_by: str + created_by_type: str + flag: str + tags: dict[str, Any] + + +@register(FlagAuditLogModel) +class FlagAuditLogModelSerializer(Serializer): + def serialize(self, obj, attrs, user, **kwargs) -> FlagAuditLogModelSerializerResponse: + return { + "id": obj.id, + "action": ActionEnum.to_string(obj.action), + "created_at": obj.created_at.isoformat(), + "created_by": obj.created_by, + "created_by_type": CreatedByTypeEnum.to_string(obj.created_by_type), + "flag": obj.flag, + "tags": obj.tags, + } + + +@region_silo_endpoint +class OrganizationFlagLogIndexEndpoint(OrganizationEndpoint): + owner = ApiOwner.FLAG + publish_status = {"GET": ApiPublishStatus.PRIVATE} + + def get(self, request: Request, organization: Organization) -> Response: + # if not features.has("organizations:feature-flag-ui", organization, actor=request.user): + # raise ResourceDoesNotExist + + start, end = get_date_range_from_params(request.GET) + if start is None or end is None: + raise ParseError(detail="Invalid date range") + + queryset = FlagAuditLogModel.objects.filter( + created_at__gte=start, + created_at__lt=end, 
+ organization_id=organization.id, + ) + + flags = request.GET.getlist("flag") + if flags: + queryset = queryset.filter(flag__in=flags) + + return self.paginate( + request=request, + queryset=queryset, + on_results=lambda x: { + "data": serialize(x, request.user, FlagAuditLogModelSerializer()) + }, + paginator_cls=OffsetPaginator, + ) + + +@region_silo_endpoint +class OrganizationFlagLogDetailsEndpoint(OrganizationEndpoint): + owner = ApiOwner.FLAG + publish_status = {"GET": ApiPublishStatus.PRIVATE} + + def get(self, request: Request, organization: Organization, flag_log_id: int) -> Response: + # if not features.has("organizations:feature-flag-ui", organization, actor=request.user): + # raise ResourceDoesNotExist + + try: + model = FlagAuditLogModel.objects.filter( + id=flag_log_id, + organization_id=organization.id, + ).get() + except FlagAuditLogModel.DoesNotExist: + raise ResourceDoesNotExist + + return self.respond({"data": serialize(model, request.user, FlagAuditLogModelSerializer())}) diff --git a/src/sentry/flags/migrations/0001_add_flag_audit_log.py b/src/sentry/flags/migrations/0001_add_flag_audit_log.py new file mode 100644 index 0000000000000..58e9e223174c8 --- /dev/null +++ b/src/sentry/flags/migrations/0001_add_flag_audit_log.py @@ -0,0 +1,58 @@ +# Generated by Django 5.1.1 on 2024-09-25 15:31 + +import django.utils.timezone +from django.db import migrations, models + +import sentry.db.models.fields.bounded +import sentry.db.models.fields.hybrid_cloud_foreign_key +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="FlagAuditLogModel", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("action", models.PositiveSmallIntegerField()), + ("created_at", models.DateTimeField(default=django.utils.timezone.now)), + ("created_by", models.CharField(max_length=100)), + ("created_by_type", models.PositiveSmallIntegerField()), + ("flag", models.CharField(max_length=100)), + ( + "organization_id", + sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey( + "sentry.Organization", db_index=True, on_delete="CASCADE" + ), + ), + ("tags", models.JSONField()), + ], + options={ + "db_table": "flags_audit_log", + "indexes": [models.Index(fields=["flag"], name="flags_audit_flag_455822_idx")], + }, + ), + ] diff --git a/tests/sentry/api/validators/__init__.py b/src/sentry/flags/migrations/__init__.py similarity index 100% rename from tests/sentry/api/validators/__init__.py rename to src/sentry/flags/migrations/__init__.py diff --git a/src/sentry/flags/models.py b/src/sentry/flags/models.py new file mode 100644 index 0000000000000..80852d7cc89e5 --- /dev/null +++ b/src/sentry/flags/models.py @@ -0,0 +1,85 @@ +from enum import Enum + +from django.db import models +from django.utils import timezone + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import Model, region_silo_model, sane_repr +from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey + + +class ActionEnum(Enum): + CREATED = 0 + DELETED = 1 + UPDATED = 2 + + @classmethod + def to_string(cls, integer): + if integer == 0: + return "created" + if integer == 1: + return "deleted" + if integer == 2: + return "updated" + raise ValueError + + +ACTION_MAP = { + "created": ActionEnum.CREATED.value, + "deleted": ActionEnum.DELETED.value, + "updated": ActionEnum.UPDATED.value, +} + + +class CreatedByTypeEnum(Enum): + EMAIL = 0 + ID = 1 + NAME = 2 + + @classmethod + def to_string(cls, integer): + if integer == 0: + return "email" + if integer == 1: + return "id" + if integer == 2: + return "name" + raise ValueError + + +CREATED_BY_TYPE_MAP = { + "email": CreatedByTypeEnum.EMAIL.value, + "id": CreatedByTypeEnum.ID.value, + "name": CreatedByTypeEnum.NAME.value, +} + + +@region_silo_model +class FlagAuditLogModel(Model): + __relocation_scope__ = RelocationScope.Excluded + + ACTION_TYPES = ( + (ActionEnum.CREATED, "created"), + (ActionEnum.UPDATED, "updated"), + (ActionEnum.DELETED, "deleted"), + ) + CREATED_BY_TYPE_TYPES = ( + (CreatedByTypeEnum.EMAIL, "email"), + (CreatedByTypeEnum.NAME, "name"), + (CreatedByTypeEnum.ID, "id"), + ) + + action = models.PositiveSmallIntegerField(choices=ACTION_TYPES) + created_at = models.DateTimeField(default=timezone.now) + created_by = models.CharField(max_length=100) + created_by_type = models.PositiveSmallIntegerField(choices=CREATED_BY_TYPE_TYPES) + flag = models.CharField(max_length=100) + organization_id = HybridCloudForeignKey("sentry.Organization", null=False, on_delete="CASCADE") + tags = models.JSONField() + + class Meta: + app_label = "flags" + db_table = "flags_audit_log" + indexes = (models.Index(fields=("flag",)),) + + __repr__ = sane_repr("organization_id", "flag") diff --git a/src/sentry/flags/providers.py b/src/sentry/flags/providers.py new file mode 100644 
index 0000000000000..32a96b105038a
--- /dev/null
+++ b/src/sentry/flags/providers.py
@@ -0,0 +1,89 @@
+import datetime
+from typing import Any, TypedDict
+
+from sentry.flags.models import ACTION_MAP, CREATED_BY_TYPE_MAP, FlagAuditLogModel
+from sentry.silo.base import SiloLimit
+
+
+def write(rows: list["FlagAuditLogRow"]) -> None:
+    try:
+        FlagAuditLogModel.objects.bulk_create(FlagAuditLogModel(**row) for row in rows)
+    except SiloLimit.AvailabilityError:
+        pass
+
+
+"""Provider definitions.
+
+Provider definitions are pure functions. They accept data and return data. Providers do not
+initiate any IO operations. Instead they return commands in the form of the return type or
+an exception. These commands inform the caller (the endpoint definition) what IO must be
+emitted to satisfy the request. This is done primarily to improve testability and test
+performance but secondarily to allow easy extension of the endpoint without knowledge of
+the underlying systems.
+"""
+
+
+class FlagAuditLogRow(TypedDict):
+    """A complete flag audit log row instance."""
+
+    action: int
+    created_at: datetime.datetime
+    created_by: str
+    created_by_type: int
+    flag: str
+    organization_id: int
+    tags: dict[str, Any]
+
+
+class DeserializationError(Exception):
+    """The request body could not be deserialized."""
+
+    def __init__(self, errors):
+        self.errors = errors
+
+
+class InvalidProvider(Exception):
+    """An unsupported provider type was specified."""
+
+    ...
+
+
+def handle_provider_event(
+    provider: str,
+    request_data: dict[str, Any],
+    organization_id: int,
+) -> list[FlagAuditLogRow]:
+    raise InvalidProvider(provider)
+
+
+"""Internal flag-pole provider.
+
+Allows us to skip the HTTP endpoint.
+"""
+
+
+class FlagAuditLogItem(TypedDict):
+    """A simplified type which is easier to work with than the row definition."""
+
+    action: str
+    flag: str
+    created_at: datetime.datetime
+    created_by: str
+    tags: dict[str, str]
+
+
+def handle_flag_pole_event_internal(items: list[FlagAuditLogItem], organization_id: int) -> None:
+    write(
+        [
+            {
+                "action": ACTION_MAP[item["action"]],
+                "created_at": item["created_at"],
+                "created_by": item["created_by"],
+                "created_by_type": CREATED_BY_TYPE_MAP["name"],
+                "flag": item["flag"],
+                "organization_id": organization_id,
+                "tags": item["tags"],
+            }
+            for item in items
+        ]
+    )
diff --git a/src/sentry/grouping/api.py b/src/sentry/grouping/api.py
index a7cab08a0b657..19982875b99f3 100644
--- a/src/sentry/grouping/api.py
+++ b/src/sentry/grouping/api.py
@@ -26,7 +26,6 @@
    ComponentVariant,
    CustomFingerprintVariant,
    FallbackVariant,
-    KeyedVariants,
    SaltedComponentVariant,
)
from sentry.models.grouphash import GroupHash
@@ -43,13 +42,14 @@
@dataclass
class GroupHashInfo:
    config: GroupingConfig
+    variants: dict[str, BaseVariant]
    hashes: list[str]
    grouphashes: list[GroupHash]
    existing_grouphash: GroupHash | None

NULL_GROUPING_CONFIG: GroupingConfig = {"id": "", "enhancements": ""}
-NULL_GROUPHASH_INFO = GroupHashInfo(NULL_GROUPING_CONFIG, [], [], None)
+NULL_GROUPHASH_INFO = GroupHashInfo(NULL_GROUPING_CONFIG, {}, [], [], None)

class GroupingConfigNotFound(LookupError):
@@ -234,7 +234,15 @@ def get_fingerprinting_config_for_project(

def apply_server_fingerprinting(event, config, allow_custom_title=True):
-    client_fingerprint = event.get("fingerprint")
+    fingerprint_info = {}
+
+    client_fingerprint = event.get("fingerprint", [])
+    client_fingerprint_is_default = len(client_fingerprint) == 1 and is_default_fingerprint_var(
+        client_fingerprint[0]
+    )
+    if client_fingerprint and
not client_fingerprint_is_default: + fingerprint_info["client_fingerprint"] = client_fingerprint + rv = config.get_fingerprint_values_for_event(event) if rv is not None: rule, new_fingerprint, attributes = rv @@ -247,13 +255,10 @@ def apply_server_fingerprinting(event, config, allow_custom_title=True): # Persist the rule that matched with the fingerprint in the event # dictionary for later debugging. - event["_fingerprint_info"] = { - "client_fingerprint": client_fingerprint, - "matched_rule": rule.to_json(), - } + fingerprint_info["matched_rule"] = rule.to_json() - if rule.is_builtin: - event["_fingerprint_info"]["is_builtin"] = True + if fingerprint_info: + event["_fingerprint_info"] = fingerprint_info def _get_calculated_grouping_variants_for_event( @@ -345,7 +350,7 @@ def get_grouping_variants_for_event( rv[key] = ComponentVariant(component, context.config) fingerprint = resolve_fingerprint_values(fingerprint, event.data) - if fingerprint_info and fingerprint_info.get("is_builtin", False): + if (fingerprint_info or {}).get("matched_rule", {}).get("is_builtin") is True: rv["built-in-fingerprint"] = BuiltInFingerprintVariant(fingerprint, fingerprint_info) else: rv["custom-fingerprint"] = CustomFingerprintVariant(fingerprint, fingerprint_info) @@ -370,18 +375,3 @@ def get_grouping_variants_for_event( rv["fallback"] = FallbackVariant() return rv - - -def sort_grouping_variants(variants: dict[str, BaseVariant]) -> KeyedVariants: - """Sort a sequence of variants into flat variants""" - - flat_variants = [] - - for name, variant in variants.items(): - flat_variants.append((name, variant)) - - # Sort system variant to the back of the list to resolve ambiguities when - # choosing primary_hash for Snuba - flat_variants.sort(key=lambda name_and_variant: 1 if name_and_variant[0] == "system" else 0) - - return flat_variants diff --git a/src/sentry/grouping/component.py b/src/sentry/grouping/component.py index 61c89ba39c33e..4269c74be4dcf 100644 --- a/src/sentry/grouping/component.py +++ b/src/sentry/grouping/component.py @@ -19,6 +19,7 @@ "violation": "violation", "uri": "URL", "message": "message", + "template": "template", } diff --git a/src/sentry/grouping/fingerprinting/__init__.py b/src/sentry/grouping/fingerprinting/__init__.py index 36ebefdfa2e51..8523ffa587cf6 100644 --- a/src/sentry/grouping/fingerprinting/__init__.py +++ b/src/sentry/grouping/fingerprinting/__init__.py @@ -245,7 +245,7 @@ def get_fingerprint_values_for_event(self, event: dict[str, object]) -> None | o def _from_config_structure( cls, data: dict[str, Any], bases: Sequence[str] | None = None ) -> Self: - version = data["version"] + version = data.get("version", VERSION) if version != VERSION: raise ValueError("Unknown version") return cls( diff --git a/src/sentry/grouping/ingest/config.py b/src/sentry/grouping/ingest/config.py index fc7a4741151c5..c620c8f559e63 100644 --- a/src/sentry/grouping/ingest/config.py +++ b/src/sentry/grouping/ingest/config.py @@ -8,7 +8,7 @@ from django.conf import settings from django.core.cache import cache -from sentry import features, options +from sentry import options from sentry.grouping.strategies.configurations import CONFIGURATIONS from sentry.locks import locks from sentry.models.project import Project @@ -23,11 +23,6 @@ CONFIGS_TO_DEPRECATE = () -# Used by getsentry script. 
Remove it once the script has been updated to call update_grouping_config_if_needed -def update_grouping_config_if_permitted(project: Project) -> None: - update_grouping_config_if_needed(project, "script") - - def update_grouping_config_if_needed(project: Project, source: str) -> None: current_config = project.get_option("sentry:grouping_config") new_config = DEFAULT_GROUPING_CONFIG @@ -53,8 +48,8 @@ def update_grouping_config_if_needed(project: Project, source: str) -> None: from sentry import audit_log from sentry.utils.audit import create_system_audit_entry - # This is when we will stop calculating both old hashes (which we do in an effort to - # preserve group continuity). + # This is when we will stop calculating the old hash in cases where we don't find the new + # hash (which we do in an effort to preserve group continuity). expiry = int(time.time()) + settings.SENTRY_GROUPING_UPDATE_MIGRATION_PHASE changes: dict[str, str | int] = {"sentry:grouping_config": new_config} @@ -88,18 +83,3 @@ def is_in_transition(project: Project) -> bool: secondary_grouping_expiry = project.get_option("sentry:secondary_grouping_expiry") return bool(secondary_grouping_config) and (secondary_grouping_expiry or 0) >= time.time() - - -def project_uses_optimized_grouping(project: Project) -> bool: - if options.get("grouping.config_transition.killswitch_enabled"): - return False - - return ( - features.has( - "organizations:grouping-suppress-unnecessary-secondary-hash", - project.organization, - ) - or (is_in_transition(project)) - # TODO: Yes, this is everyone - this check will soon be removed entirely - or project.id % 5 < 5 # 100% of all non-transition projects - ) diff --git a/src/sentry/grouping/ingest/hashing.py b/src/sentry/grouping/ingest/hashing.py index 681493d5151ed..3c7f977e9af89 100644 --- a/src/sentry/grouping/ingest/hashing.py +++ b/src/sentry/grouping/ingest/hashing.py @@ -14,7 +14,6 @@ NULL_GROUPING_CONFIG, BackgroundGroupingConfigLoader, GroupingConfig, - GroupingConfigNotFound, SecondaryGroupingConfigLoader, apply_server_fingerprinting, get_fingerprinting_config_for_project, @@ -22,7 +21,7 @@ load_grouping_config, ) from sentry.grouping.ingest.config import is_in_transition -from sentry.grouping.ingest.metrics import record_hash_calculation_metrics +from sentry.grouping.variants import BaseVariant from sentry.models.grouphash import GroupHash from sentry.models.grouphashmetadata import GroupHashMetadata from sentry.models.project import Project @@ -39,10 +38,10 @@ def _calculate_event_grouping( project: Project, event: Event, grouping_config: GroupingConfig -) -> list[str]: +) -> tuple[list[str], dict[str, BaseVariant]]: """ - Main entrypoint for modifying/enhancing and grouping an event, writes - hashes back into event payload. + Calculate hashes for the event using the given grouping config, add them to the event data, and + return them, along with the variants data upon which they're based. """ metric_tags: MutableTags = { "grouping_config": grouping_config["id"], @@ -61,7 +60,7 @@ def _calculate_event_grouping( # The active grouping config was put into the event in the # normalize step before. We now also make sure that the # fingerprint was set to `'{{ default }}' just in case someone - # removed it from the payload. The call to get_hashes will then + # removed it from the payload. The call to `get_hashes_and_variants` will then # look at `grouping_config` to pick the right parameters. 
event.data["fingerprint"] = event.data.data.get("fingerprint") or ["{{ default }}"] apply_server_fingerprinting( @@ -71,18 +70,9 @@ ) with metrics.timer("event_manager.event.get_hashes", tags=metric_tags): - # TODO: It's not clear we can even hit `GroupingConfigNotFound` here - this is leftover - # from a time before we started separately retrieving the grouping config and passing it - # directly to `get_hashes`. Now that we do that, a bogus config will get replaced by the - # default long before we get here. Should we consolidate bogus config handling into the - # code actually getting the config? - try: - hashes = event.get_hashes(loaded_grouping_config) - except GroupingConfigNotFound: - event.data["grouping_config"] = get_grouping_config_dict_for_project(project) - hashes = event.get_hashes() + hashes, variants = event.get_hashes_and_variants(loaded_grouping_config) - return hashes + return (hashes, variants) def maybe_run_background_grouping(project: Project, job: Job) -> None: @@ -111,12 +101,12 @@ def _calculate_background_grouping( "sdk": normalized_sdk_tag_from_event(event.data), } with metrics.timer("event_manager.background_grouping", tags=metric_tags): - return _calculate_event_grouping(project, event, config) + return _calculate_event_grouping(project, event, config)[0] def maybe_run_secondary_grouping( project: Project, job: Job, metric_tags: MutableTags -) -> tuple[GroupingConfig, list[str]]: +) -> tuple[GroupingConfig, list[str], dict[str, BaseVariant]]: """ If the project is in a grouping config transition phase, calculate a set of secondary hashes for the job's event. @@ -130,27 +120,29 @@ def maybe_run_secondary_grouping( secondary_grouping_config = SecondaryGroupingConfigLoader().get_config_dict(project) secondary_hashes = _calculate_secondary_hashes(project, job, secondary_grouping_config) - return (secondary_grouping_config, secondary_hashes) + # Return an empty variants dictionary because we need the signature of this function to match + # that of `run_primary_grouping` (so we have to return something), but we don't ever actually + # need the variant information + return (secondary_grouping_config, secondary_hashes, {}) def _calculate_secondary_hashes( project: Project, job: Job, secondary_grouping_config: GroupingConfig ) -> list[str]: - """Calculate secondary hash for event using a fallback grouping config for a period of time. - This happens when we upgrade all projects that have not opted-out to automatic upgrades plus - when the customer changes the grouping config. - This causes extra load in save_event processing. """ - secondary_hashes = [] + Calculate hashes based on an older grouping config, so that unknown hashes calculated by the + current config can be matched to an existing group if there is one.
+ """ + secondary_hashes: list[str] = [] try: with sentry_sdk.start_span( op="event_manager", - description="event_manager.save.secondary_calculate_event_grouping", + name="event_manager.save.secondary_calculate_event_grouping", ): # create a copy since `_calculate_event_grouping` modifies the event to add all sorts - # of grouping info and we don't want the backup grouping data in there + # of grouping info and we don't want the secondary grouping data in there event_copy = copy.deepcopy(job["event"]) - secondary_hashes = _calculate_event_grouping( + secondary_hashes, _ = _calculate_event_grouping( project, event_copy, secondary_grouping_config ) except Exception as err: @@ -161,9 +153,9 @@ def _calculate_secondary_hashes( def run_primary_grouping( project: Project, job: Job, metric_tags: MutableTags -) -> tuple[GroupingConfig, list[str]]: +) -> tuple[GroupingConfig, list[str], dict[str, BaseVariant]]: """ - Get the primary grouping config and primary hashes for the event. + Get the primary grouping config, primary hashes, and variants for the event. """ with metrics.timer("event_manager.load_grouping_config"): grouping_config = get_grouping_config_dict_for_project(project) @@ -172,29 +164,33 @@ def run_primary_grouping( with ( sentry_sdk.start_span( op="event_manager", - description="event_manager.save.calculate_event_grouping", + name="event_manager.save.calculate_event_grouping", ), metrics.timer("event_manager.calculate_event_grouping", tags=metric_tags), ): - hashes = _calculate_primary_hashes(project, job, grouping_config) + hashes, variants = _calculate_primary_hashes_and_variants(project, job, grouping_config) - return (grouping_config, hashes) + return (grouping_config, hashes, variants) -def _calculate_primary_hashes( +def _calculate_primary_hashes_and_variants( project: Project, job: Job, grouping_config: GroupingConfig -) -> list[str]: +) -> tuple[list[str], dict[str, BaseVariant]]: """ - Get the primary hash for the event. + Get the primary hash and variants for the event. This is pulled out into a separate function mostly in order to make testing easier. """ return _calculate_event_grouping(project, job["event"], grouping_config) -def find_existing_grouphash( +def find_grouphash_with_group( grouphashes: Sequence[GroupHash], ) -> GroupHash | None: + """ + Search in the list of given `GroupHash` records for one which has a group assigned to it, and + return the first one found. (Assumes grouphashes have already been sorted in priority order.) + """ for group_hash in grouphashes: if group_hash.group_id is not None: return group_hash @@ -212,47 +208,38 @@ def find_existing_grouphash( return None -def get_hash_values( - project: Project, - job: Job, - metric_tags: MutableTags, -) -> tuple[list[str], list[str]]: - # Background grouping is a way for us to get performance metrics for a new - # config without having it actually affect on how events are grouped. It runs - # either before or after the main grouping logic, depending on the option value. 
- maybe_run_background_grouping(project, job) - - secondary_grouping_config, secondary_hashes = maybe_run_secondary_grouping( - project, job, metric_tags - ) - - primary_grouping_config, primary_hashes = run_primary_grouping(project, job, metric_tags) - - record_hash_calculation_metrics( - primary_grouping_config, - primary_hashes, - secondary_grouping_config, - secondary_hashes, - ) - - return (primary_hashes, secondary_hashes) - +def get_or_create_grouphashes( + project: Project, hashes: Sequence[str], grouping_config: str +) -> list[GroupHash]: + is_secondary = grouping_config != project.get_option("sentry:grouping_config") + grouphashes: list[GroupHash] = [] -def get_or_create_grouphashes(project: Project, hashes: Sequence[str]) -> list[GroupHash]: - grouphashes = [] + # The only utility of secondary hashes is to link new primary hashes to an existing group. + # Secondary hashes which are also new are therefore of no value, so there's no need to store or + # annotate them and we can bail now. + if is_secondary and not GroupHash.objects.filter(project=project, hash__in=hashes).exists(): + return grouphashes for hash_value in hashes: grouphash, created = GroupHash.objects.get_or_create(project=project, hash=hash_value) # TODO: Do we want to expand this to backfill metadata for existing grouphashes? If we do, # we'll have to override the metadata creation date for them. - if ( - created - and options.get("grouping.grouphash_metadata.ingestion_writes_enabled") - and features.has("organizations:grouphash-metadata-creation", project.organization) + if options.get("grouping.grouphash_metadata.ingestion_writes_enabled") and features.has( + "organizations:grouphash-metadata-creation", project.organization ): - # For now, this just creates a record with a creation timestamp - GroupHashMetadata.objects.create(grouphash=grouphash) + if created: + GroupHashMetadata.objects.create( + grouphash=grouphash, + latest_grouping_config=grouping_config, + ) + elif ( + grouphash.metadata and grouphash.metadata.latest_grouping_config != grouping_config + ): + # Keep track of the most recent config which computed this hash, so that once a + # config is deprecated, we can clear out the GroupHash records which are no longer + # being produced + grouphash.metadata.update(latest_grouping_config=grouping_config) grouphashes.append(grouphash) diff --git a/src/sentry/grouping/ingest/metrics.py b/src/sentry/grouping/ingest/metrics.py index deabbb9184980..4f76adf9340fa 100644 --- a/src/sentry/grouping/ingest/metrics.py +++ b/src/sentry/grouping/ingest/metrics.py @@ -6,7 +6,7 @@ from sentry import options from sentry.grouping.api import GroupingConfig -from sentry.grouping.ingest.config import is_in_transition, project_uses_optimized_grouping +from sentry.grouping.ingest.config import is_in_transition from sentry.models.project import Project from sentry.utils import metrics from sentry.utils.tag_normalization import normalized_sdk_tag_from_event @@ -20,15 +20,19 @@ def record_hash_calculation_metrics( + project: Project, primary_config: GroupingConfig, primary_hashes: list[str], secondary_config: GroupingConfig, secondary_hashes: list[str], + existing_hash_search_result: str, ) -> None: has_secondary_hashes = len(secondary_hashes) > 0 + # In cases where we've computed both primary and secondary hashes, track how often the config + # change has changed the resulting hashes if has_secondary_hashes: - tags = { + hash_comparison_tags = { "primary_config": primary_config["id"], "secondary_config": secondary_config["id"], } 
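[Note: the hunks above and below rework `record_hash_calculation_metrics` so that each primary/secondary hash comparison is tagged as "no change", "partial change", or "full change". A minimal sketch of that classification, reconstructed from the surrounding hunks; `classify_hash_change` is an illustrative name, not part of this diff, and how `current_values`/`secondary_values` are derived is outside the hunks shown here.]

def classify_hash_change(current_values: list[str], secondary_values: list[str]) -> str:
    # Identical hash lists: the config change didn't affect this event's grouping
    if current_values == secondary_values:
        return "no change"
    # Any overlap: at least one hash survived the config change
    if set(current_values) & set(secondary_values):
        return "partial change"
    # No overlap: every hash changed
    return "full change"
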
@@ -37,47 +41,36 @@ def record_hash_calculation_metrics( hashes_match = current_values == secondary_values if hashes_match: - tags["result"] = "no change" + hash_comparison_tags["result"] = "no change" else: shared_hashes = set(current_values) & set(secondary_values) if len(shared_hashes) > 0: - tags["result"] = "partial change" + hash_comparison_tags["result"] = "partial change" else: - tags["result"] = "full change" + hash_comparison_tags["result"] = "full change" metrics.incr( "grouping.hash_comparison", sample_rate=options.get("grouping.config_transition.metrics_sample_rate"), - tags=tags, + tags=hash_comparison_tags, ) - -# TODO: Once the legacy `_save_aggregate` goes away, this logic can be pulled into -# `record_hash_calculation_metrics`. Right now it's split up because we don't know the value for -# `result` at the time the legacy `_save_aggregate` (indirectly) calls `record_hash_calculation_metrics` -def record_calculation_metric_with_result( - project: Project, - has_secondary_hashes: bool, - result: str, -) -> None: - # Track the total number of grouping calculations done overall, so we can divide by the # count to get an average number of calculations per event - tags = { + num_calculations_tags = { "in_transition": str(is_in_transition(project)), - "using_transition_optimization": str(project_uses_optimized_grouping(project)), - "result": result, + "result": existing_hash_search_result, } metrics.incr( "grouping.event_hashes_calculated", sample_rate=options.get("grouping.config_transition.metrics_sample_rate"), - tags=tags, + tags=num_calculations_tags, ) metrics.incr( "grouping.total_calculations", amount=2 if has_secondary_hashes else 1, sample_rate=options.get("grouping.config_transition.metrics_sample_rate"), - tags=tags, + tags=num_calculations_tags, ) diff --git a/src/sentry/grouping/ingest/seer.py b/src/sentry/grouping/ingest/seer.py index b03e93f34545b..7887d893fece7 100644 --- a/src/sentry/grouping/ingest/seer.py +++ b/src/sentry/grouping/ingest/seer.py @@ -10,6 +10,7 @@ from sentry.conf.server import SEER_SIMILARITY_MODEL_VERSION from sentry.eventstore.models import Event from sentry.grouping.grouping_info import get_grouping_info_from_variants +from sentry.grouping.variants import BaseVariant from sentry.models.grouphash import GroupHash from sentry.models.project import Project from sentry.seer.similarity.similar_issues import get_similarity_data_from_seer @@ -27,7 +28,7 @@ logger = logging.getLogger("sentry.events.grouping") -def should_call_seer_for_grouping(event: Event) -> bool: +def should_call_seer_for_grouping(event: Event, variants: dict[str, BaseVariant]) -> bool: """ Use event content, feature flags, rate limits, killswitches, seer health, etc. to determine whether a call to Seer should be made. @@ -42,7 +43,7 @@ def should_call_seer_for_grouping(event: Event) -> bool: return False if ( - _has_customized_fingerprint(event) + _has_customized_fingerprint(event, variants) or killswitch_enabled(project.id, event) or _circuit_breaker_broken(event, project) # **Do not add any new checks after this.** The rate limit check MUST remain the last of all @@ -79,7 +80,7 @@ def _project_has_similarity_grouping_enabled(project: Project) -> bool: # combined with some other value). To the extent to which we're then using this function to decide # whether or not to call Seer, this means that the calculations giving rise to the default part of # the value never involve Seer input. In the long run, we probably want to change that. 
-def _has_customized_fingerprint(event: Event) -> bool: +def _has_customized_fingerprint(event: Event, variants: dict[str, BaseVariant]) -> bool: fingerprint = event.data.get("fingerprint", []) if "{{ default }}" in fingerprint: @@ -97,7 +98,6 @@ def _has_customized_fingerprint(event: Event) -> bool: return True # Fully customized fingerprint (from either us or the user) - variants = event.get_grouping_variants() fingerprint_variant = variants.get("custom-fingerprint") or variants.get("built-in-fingerprint") if fingerprint_variant: @@ -178,6 +178,7 @@ def _circuit_breaker_broken(event: Event, project: Project) -> bool: def get_seer_similar_issues( event: Event, + variants: dict[str, BaseVariant], num_neighbors: int = 1, ) -> tuple[dict[str, Any], GroupHash | None]: """ @@ -186,9 +187,7 @@ def get_seer_similar_issues( should go in (if any), or None if no neighbor was near enough. """ event_hash = event.get_primary_hash() - stacktrace_string = get_stacktrace_string( - get_grouping_info_from_variants(event.get_grouping_variants()) - ) + stacktrace_string = get_stacktrace_string(get_grouping_info_from_variants(variants)) exception_type = get_path(event.data, "exception", "values", -1, "type") request_data: SimilarIssuesEmbeddingsRequest = { @@ -231,25 +230,60 @@ return (similar_issues_metadata, parent_grouphash) -def maybe_check_seer_for_matching_grouphash(event: Event) -> GroupHash | None: +def maybe_check_seer_for_matching_grouphash( + event: Event, variants: dict[str, BaseVariant], all_grouphashes: list[GroupHash] +) -> GroupHash | None: seer_matched_grouphash = None - if should_call_seer_for_grouping(event): + if should_call_seer_for_grouping(event, variants): metrics.incr( "grouping.similarity.did_call_seer", sample_rate=options.get("seer.similarity.metrics_sample_rate"), tags={"call_made": True, "blocker": "none"}, ) + try: # If no matching group is found in Seer, we'll still get back result # metadata, but `seer_matched_grouphash` will be None - seer_response_data, seer_matched_grouphash = get_seer_similar_issues(event) - event.data["seer_similarity"] = seer_response_data - - # Insurance - in theory we shouldn't ever land here - except Exception as e: + seer_response_data, seer_matched_grouphash = get_seer_similar_issues(event, variants) + except Exception as e: # Insurance - in theory we shouldn't ever land here sentry_sdk.capture_exception( e, tags={"event": event.event_id, "project": event.project.id} ) + return None + + # Find the GroupHash corresponding to the hash value sent to Seer + # + # TODO: There shouldn't actually be more than one hash in `all_grouphashes`, but + # a) there's a bug in our precedence logic which leads to both in-app and system stacktrace + # hashes being marked as contributing and making it through to this point, and + # b) because of how we used to compute secondary and primary hashes, we keep secondary + # hashes even when we don't need them. + # Once those two problems are fixed, there will only be one hash passed to this function + # and we won't have to do this search to find the right one to update.
+ primary_hash = event.get_primary_hash() + grouphash_sent = list( + filter(lambda grouphash: grouphash.hash == primary_hash, all_grouphashes) + )[0] + + # Update the relevant GroupHash with Seer results + gh_metadata = grouphash_sent.metadata + if gh_metadata: + gh_metadata.update( + # Technically the time of the metadata record creation and the time of the Seer + # request will be some milliseconds apart, but a) the difference isn't meaningful + # for us, and b) forcing them to be the same (rather than just close) lets us use + # their equality as a signal that the Seer call happened during ingest rather than + # during a backfill, without having to store that information separately. + seer_date_sent=gh_metadata.date_added, + seer_event_sent=event.event_id, + seer_model=seer_response_data["similarity_model_version"], + seer_matched_grouphash=seer_matched_grouphash, + seer_match_distance=( + seer_response_data["results"][0]["stacktrace_distance"] + if seer_matched_grouphash + else None + ), + ) return seer_matched_grouphash diff --git a/src/sentry/grouping/ingest/utils.py b/src/sentry/grouping/ingest/utils.py index f18049ca8ce46..5e7c3261650f7 100644 --- a/src/sentry/grouping/ingest/utils.py +++ b/src/sentry/grouping/ingest/utils.py @@ -48,7 +48,7 @@ def check_for_group_creation_load_shed(project: Project, event: Event) -> None: raise HashDiscarded("Load shedding group creation", reason="load_shed") -def check_for_category_mismatch(group: Group) -> bool: +def is_non_error_type_group(group: Group) -> bool: """ Make sure an error event hasn't hashed to a value assigned to a non-error-type group """ diff --git a/src/sentry/grouping/variants.py b/src/sentry/grouping/variants.py index 261d3ba8fdf56..402735122b97a 100644 --- a/src/sentry/grouping/variants.py +++ b/src/sentry/grouping/variants.py @@ -58,7 +58,7 @@ def get_hash(self) -> str | None: class FallbackVariant(BaseVariant): - id = "fallback" + type = "fallback" contributes = True def get_hash(self) -> str | None: diff --git a/src/sentry/hybridcloud/outbox/base.py b/src/sentry/hybridcloud/outbox/base.py index addc0f8f859ee..abc4b3144bf9c 100644 --- a/src/sentry/hybridcloud/outbox/base.py +++ b/src/sentry/hybridcloud/outbox/base.py @@ -2,7 +2,7 @@ import contextlib import logging -from collections.abc import Collection, Generator, Iterable, Mapping, Sequence +from collections.abc import Collection, Generator, Iterable, Mapping from typing import TYPE_CHECKING, Any, Protocol, TypeVar from django.db import connections, router, transaction @@ -113,7 +113,7 @@ def bulk_create(self, objs: Iterable[_RM], *args: Any, **kwds: Any) -> list[_RM] return super().bulk_create(tuple_of_objs, *args, **kwds) def bulk_update( - self, objs: Iterable[_RM], fields: Sequence[str], *args: Any, **kwds: Any + self, objs: Iterable[_RM], fields: Iterable[str], *args: Any, **kwds: Any ) -> Any: from sentry.hybridcloud.models.outbox import outbox_context @@ -297,7 +297,7 @@ def bulk_create(self, objs: Iterable[_CM], *args: Any, **kwds: Any) -> list[_CM] return super().bulk_create(tuple_of_objs, *args, **kwds) def bulk_update( - self, objs: Iterable[_CM], fields: Sequence[str], *args: Any, **kwds: Any + self, objs: Iterable[_CM], fields: Iterable[str], *args: Any, **kwds: Any ) -> Any: from sentry.hybridcloud.models.outbox import outbox_context diff --git a/src/sentry/hybridcloud/rpc/pagination.py b/src/sentry/hybridcloud/rpc/pagination.py index 5674ef6356053..9579b950a7c57 100644 --- a/src/sentry/hybridcloud/rpc/pagination.py +++ 
b/src/sentry/hybridcloud/rpc/pagination.py @@ -46,7 +46,7 @@ def do_hybrid_cloud_pagination( cursor = get_cursor(self.encoded_cursor, cursor_cls) with sentry_sdk.start_span( op="hybrid_cloud.paginate.get_result", - description=description, + name=description, ) as span: annotate_span_with_pagination_args(span, self.per_page) paginator = get_paginator( diff --git a/src/sentry/hybridcloud/rpc/service.py b/src/sentry/hybridcloud/rpc/service.py index 3b85e7f332c28..d630c0ca9862e 100644 --- a/src/sentry/hybridcloud/rpc/service.py +++ b/src/sentry/hybridcloud/rpc/service.py @@ -586,7 +586,7 @@ def _open_request_context(self) -> Generator[None]: timer = metrics.timer("hybrid_cloud.dispatch_rpc.duration", tags=self._metrics_tags()) span = sentry_sdk.start_span( op="hybrid_cloud.dispatch_rpc", - description=f"rpc to {self.service_name}.{self.method_name}", + name=f"rpc to {self.service_name}.{self.method_name}", ) with span, timer: yield diff --git a/src/sentry/identity/bitbucket/provider.py b/src/sentry/identity/bitbucket/provider.py index 608c410fa8d67..ff01318f8c701 100644 --- a/src/sentry/identity/bitbucket/provider.py +++ b/src/sentry/identity/bitbucket/provider.py @@ -18,10 +18,21 @@ def get_pipeline_views(self): class BitbucketLoginView(PipelineView): def dispatch(self, request: Request, pipeline) -> HttpResponse: - jwt = request.GET.get("jwt") - if jwt is None: - return self.redirect( - "https://bitbucket.org/site/addons/authorize?descriptor_uri=%s" - % (absolute_uri("/extensions/bitbucket/descriptor/"),) - ) - return pipeline.next_step() + from sentry.integrations.base import IntegrationDomain + from sentry.integrations.utils.metrics import ( + IntegrationPipelineViewEvent, + IntegrationPipelineViewType, + ) + + with IntegrationPipelineViewEvent( + IntegrationPipelineViewType.IDENTITY_LINK, + IntegrationDomain.SOURCE_CODE_MANAGEMENT, + pipeline.provider.key, + ).capture(): + jwt = request.GET.get("jwt") + if jwt is None: + return self.redirect( + "https://bitbucket.org/site/addons/authorize?descriptor_uri=%s" + % (absolute_uri("/extensions/bitbucket/descriptor/"),) + ) + return pipeline.next_step() diff --git a/src/sentry/identity/pipeline.py b/src/sentry/identity/pipeline.py index c4c577ed32295..ae651bba12ac4 100644 --- a/src/sentry/identity/pipeline.py +++ b/src/sentry/identity/pipeline.py @@ -5,7 +5,12 @@ from django.urls import reverse from django.utils.translation import gettext_lazy as _ -from sentry import features +from sentry import features, options +from sentry.integrations.base import IntegrationDomain +from sentry.integrations.utils.metrics import ( + IntegrationPipelineViewEvent, + IntegrationPipelineViewType, +) from sentry.models.organization import Organization from sentry.organizations.services.organization.model import RpcOrganization from sentry.pipeline import Pipeline, PipelineProvider @@ -46,41 +51,49 @@ def get_provider(self, provider_key: str, **kwargs) -> PipelineProvider: "organizations:migrate-azure-devops-integration", organization ): provider_key = "vsts_new" + # TODO(iamrajjoshi): Delete this after Azure DevOps migration is complete + if provider_key == "vsts_login" and options.get("vsts.social-auth-migration"): + provider_key = "vsts_login_new" return super().get_provider(provider_key) def finish_pipeline(self): - # NOTE: only reached in the case of linking a new identity - # via Social Auth pipelines - identity = self.provider.build_identity(self.state.data) - - Identity.objects.link_identity( - user=self.request.user, - idp=self.provider_model, - 
external_id=identity["id"], - should_reattach=False, - defaults={ - "scopes": identity.get("scopes", []), - "data": identity.get("data", {}), - }, - ) - - messages.add_message( - self.request, - messages.SUCCESS, - IDENTITY_LINKED.format(identity_provider=self.provider.name), - ) - metrics.incr( - "identity_provider_pipeline.finish_pipeline", - tags={ - "provider": self.provider.key, - }, - skip_internal=False, - ) - - self.state.clear() - - # TODO(epurkhiser): When we have more identities and have built out an - # identity management page that supports these new identities (not - # social-auth ones), redirect to the identities page. - return HttpResponseRedirect(reverse("sentry-account-settings")) + with IntegrationPipelineViewEvent( + IntegrationPipelineViewType.IDENTITY_LINK, + IntegrationDomain.IDENTITY, + self.provider.key, + ).capture(): + # NOTE: only reached in the case of linking a new identity + # via Social Auth pipelines + identity = self.provider.build_identity(self.state.data) + + Identity.objects.link_identity( + user=self.request.user, + idp=self.provider_model, + external_id=identity["id"], + should_reattach=False, + defaults={ + "scopes": identity.get("scopes", []), + "data": identity.get("data", {}), + }, + ) + + messages.add_message( + self.request, + messages.SUCCESS, + IDENTITY_LINKED.format(identity_provider=self.provider.name), + ) + metrics.incr( + "identity_provider_pipeline.finish_pipeline", + tags={ + "provider": self.provider.key, + }, + skip_internal=False, + ) + + self.state.clear() + + # TODO(epurkhiser): When we have more identities and have built out an + # identity management page that supports these new identities (not + # social-auth ones), redirect to the identities page. + return HttpResponseRedirect(reverse("sentry-account-settings")) diff --git a/src/sentry/incidents/action_handlers.py b/src/sentry/incidents/action_handlers.py index 30b6260a3a5a1..190ffdaebea09 100644 --- a/src/sentry/incidents/action_handlers.py +++ b/src/sentry/incidents/action_handlers.py @@ -370,7 +370,7 @@ def generate_incident_trigger_email_context( threshold: None | str | float = None if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC: threshold_prefix_string = alert_rule.detection_type.title() - threshold = f"({alert_rule.sensitivity} sensitivity)" + threshold = f"({alert_rule.sensitivity} responsiveness)" alert_link_params["type"] = "anomaly_detection" else: threshold_prefix_string = ">" if show_greater_than_string else "<" diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_anomalies.py b/src/sentry/incidents/endpoints/organization_alert_rule_anomalies.py index 57f754ba10273..f3d48c9d779ae 100644 --- a/src/sentry/incidents/endpoints/organization_alert_rule_anomalies.py +++ b/src/sentry/incidents/endpoints/organization_alert_rule_anomalies.py @@ -52,7 +52,9 @@ def get(self, request: Request, organization: Organization, alert_rule: AlertRul """ Return a list of anomalies for a metric alert rule. 
""" - if not features.has("organizations:anomaly-detection-alerts", organization): + if not features.has( + "organizations:anomaly-detection-alerts", organization + ) and not features.has("organizations:anomaly-detection-rollout", organization): raise ResourceDoesNotExist("Your organization does not have access to this feature.") # NOTE: this will break if we ever do more than one project per alert rule diff --git a/src/sentry/incidents/endpoints/organization_alert_rule_index.py b/src/sentry/incidents/endpoints/organization_alert_rule_index.py index 91178a4eeed08..e7bae6fd35570 100644 --- a/src/sentry/incidents/endpoints/organization_alert_rule_index.py +++ b/src/sentry/incidents/endpoints/organization_alert_rule_index.py @@ -54,6 +54,7 @@ from sentry.sentry_apps.services.app import app_service from sentry.signals import alert_rule_created from sentry.snuba.dataset import Dataset +from sentry.snuba.models import SnubaQuery from sentry.uptime.models import ( ProjectUptimeSubscription, ProjectUptimeSubscriptionMode, @@ -121,6 +122,13 @@ def create_metric_alert( if not serializer.is_valid(): raise ValidationError(serializer.errors) + # if there are no triggers, then the serializer will raise an error + for trigger in data["triggers"]: + if not trigger.get("actions", []): + raise ValidationError( + "Each trigger must have an associated action for this alert to fire." + ) + trigger_sentry_app_action_creators_for_incidents(serializer.validated_data) if get_slack_actions_with_async_lookups(organization, request.user, request.data): # need to kick off an async job for Slack @@ -152,9 +160,16 @@ def create_metric_alert( is_api_token=request.auth is not None, duplicate_rule=duplicate_rule, wizard_v3=wizard_v3, + query_type=self.get_query_type_description(data.get("queryType", None)), ) return Response(serialize(alert_rule, request.user), status=status.HTTP_201_CREATED) + def get_query_type_description(self, value): + try: + return SnubaQuery.Type(value).name + except ValueError: + return "Unknown" + @region_silo_endpoint class OrganizationCombinedRuleIndexEndpoint(OrganizationEndpoint): @@ -217,9 +232,6 @@ def get(self, request: Request, organization) -> Response: ), ) - if not features.has("organizations:uptime-rule-api", organization): - uptime_rules = ProjectUptimeSubscription.objects.none() - if not features.has("organizations:performance-view", organization): # Filter to only error alert rules alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value) diff --git a/src/sentry/incidents/grouptype.py b/src/sentry/incidents/grouptype.py new file mode 100644 index 0000000000000..7a7d0dc6900d5 --- /dev/null +++ b/src/sentry/incidents/grouptype.py @@ -0,0 +1,32 @@ +from dataclasses import dataclass + +from sentry.incidents.utils.types import QuerySubscriptionUpdate +from sentry.issues.grouptype import GroupCategory, GroupType +from sentry.ratelimits.sliding_windows import Quota +from sentry.types.group import PriorityLevel +from sentry.workflow_engine.models import DataPacket +from sentry.workflow_engine.models.detector import DetectorEvaluationResult, DetectorHandler + + +# TODO: This will be a stateful detector when we build that abstraction +class MetricAlertDetectorHandler(DetectorHandler[QuerySubscriptionUpdate]): + def evaluate( + self, data_packet: DataPacket[QuerySubscriptionUpdate] + ) -> list[DetectorEvaluationResult]: + # TODO: Implement + return [] + + +# Example GroupType and detector handler for metric alerts. 
We don't create these issues yet, but we'll use something +# like these when we're sending issues as alerts +@dataclass(frozen=True) +class MetricAlertFire(GroupType): + type_id = 8001 + slug = "metric_alert_fire" + description = "Metric alert fired" + category = GroupCategory.METRIC_ALERT.value + creation_quota = Quota(3600, 60, 100) + default_priority = PriorityLevel.HIGH + enable_auto_resolve = False + enable_escalation_detection = False + detector_handler = MetricAlertDetectorHandler diff --git a/src/sentry/incidents/logic.py b/src/sentry/incidents/logic.py index f09b8fbdb1145..35b68b4342243 100644 --- a/src/sentry/incidents/logic.py +++ b/src/sentry/incidents/logic.py @@ -15,15 +15,14 @@ from django.db.models.signals import post_save from django.forms import ValidationError from django.utils import timezone as django_timezone -from parsimonious.exceptions import ParseError from snuba_sdk import Column, Condition, Limit, Op -from urllib3.exceptions import MaxRetryError, TimeoutError from sentry import analytics, audit_log, features, quotas from sentry.api.exceptions import ResourceDoesNotExist from sentry.auth.access import SystemAccess from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS, ObjectStatus from sentry.db.models import Model +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.incidents import tasks from sentry.incidents.models.alert_rule import ( AlertRule, @@ -62,7 +61,6 @@ from sentry.models.notificationaction import ActionService, ActionTarget from sentry.models.organization import Organization from sentry.models.project import Project -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.relay.config.metric_extraction import on_demand_metrics_feature_flags from sentry.search.events.builder.base import BaseQueryBuilder from sentry.search.events.constants import ( @@ -71,7 +69,7 @@ ) from sentry.search.events.fields import is_function, resolve_field from sentry.seer.anomaly_detection.delete_rule import delete_rule_in_seer -from sentry.seer.anomaly_detection.store_data import send_historical_data_to_seer +from sentry.seer.anomaly_detection.store_data import send_new_rule_data, update_rule_data from sentry.sentry_apps.services.app import RpcSentryAppInstallation, app_service from sentry.shared_integrations.exceptions import ( ApiTimeoutError, @@ -481,6 +479,7 @@ class AlertRuleNameAlreadyUsedError(Exception): Dataset.Transactions: SnubaQuery.Type.PERFORMANCE, Dataset.PerformanceMetrics: SnubaQuery.Type.PERFORMANCE, Dataset.Metrics: SnubaQuery.Type.CRASH_RATE, + Dataset.EventsAnalyticsPlatform: SnubaQuery.Type.PERFORMANCE, } @@ -566,22 +565,28 @@ def create_alert_rule( :return: The created `AlertRule` """ + has_anomaly_detection = features.has( + "organizations:anomaly-detection-alerts", organization + ) and features.has("organizations:anomaly-detection-rollout", organization) + + if detection_type == AlertRuleDetectionType.DYNAMIC.value and not has_anomaly_detection: + raise ResourceDoesNotExist("Your organization does not have access to this feature.") + if monitor_type == AlertRuleMonitorTypeInt.ACTIVATED and not activation_condition: raise ValidationError("Activation condition required for activated alert rule") - if detection_type == AlertRuleDetectionType.DYNAMIC: - resolution = time_window - else: - resolution = get_alert_resolution(time_window, organization) if detection_type == AlertRuleDetectionType.DYNAMIC: + resolution = time_window # NOTE: we hardcode seasonality for EA seasonality = 
AlertRuleSeasonality.AUTO - if not (sensitivity): + if not sensitivity: raise ValidationError("Dynamic alerts require a sensitivity level") if time_window not in DYNAMIC_TIME_WINDOWS: raise ValidationError(INVALID_TIME_WINDOW) + if "is:unresolved" in query: + raise ValidationError("Dynamic alerts do not support 'is:unresolved' queries") else: - # NOTE: we hardcode seasonality for EA + resolution = get_alert_resolution(time_window, organization) seasonality = None if sensitivity: raise ValidationError("Sensitivity is not a valid field for this alert type") @@ -652,31 +657,8 @@ def create_alert_rule( AlertRuleExcludedProjects.objects.bulk_create(exclusions) if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC.value: - if not features.has("organizations:anomaly-detection-alerts", organization): - alert_rule.delete() - raise ResourceDoesNotExist( - "Your organization does not have access to this feature." - ) - - try: - # NOTE: if adding a new metric alert type, take care to check that it's handled here - rule_status = send_historical_data_to_seer( - alert_rule=alert_rule, project=projects[0] - ) - if rule_status == AlertRuleStatus.NOT_ENOUGH_DATA: - # if we don't have at least seven days worth of data, then the dynamic alert won't fire - alert_rule.update(status=AlertRuleStatus.NOT_ENOUGH_DATA.value) - except (TimeoutError, MaxRetryError): - alert_rule.delete() - raise TimeoutError("Failed to send data to Seer - cannot create alert rule.") - except ParseError: - alert_rule.delete() - raise ParseError("Failed to parse Seer store data response") - except (ValidationError, Exception): - alert_rule.delete() - raise - else: - metrics.incr("anomaly_detection_alert.created") + # NOTE: if adding a new metric alert type, take care to check that it's handled here + send_new_rule_data(alert_rule, projects[0], snuba_query) if user: create_audit_entry_from_user( @@ -932,35 +914,17 @@ def update_alert_rule( updated_fields["team_id"] = alert_rule.team_id if detection_type == AlertRuleDetectionType.DYNAMIC: - if not features.has("organizations:anomaly-detection-alerts", organization): + if not features.has( + "organizations:anomaly-detection-alerts", organization + ) and not features.has("organizations:anomaly-detection-rollout", organization): raise ResourceDoesNotExist( "Your organization does not have access to this feature." ) - - if updated_fields.get("detection_type") == AlertRuleDetectionType.DYNAMIC and ( - alert_rule.detection_type != AlertRuleDetectionType.DYNAMIC or query or aggregate - ): - for k, v in updated_fields.items(): - setattr(alert_rule, k, v) - - try: - # NOTE: if adding a new metric alert type, take care to check that it's handled here - rule_status = send_historical_data_to_seer( - alert_rule=alert_rule, - project=projects[0] if projects else alert_rule.projects.get(), - ) - if rule_status == AlertRuleStatus.NOT_ENOUGH_DATA: - # if we don't have at least seven days worth of data, then the dynamic alert won't fire - alert_rule.update(status=AlertRuleStatus.NOT_ENOUGH_DATA.value) - except (TimeoutError, MaxRetryError): - raise TimeoutError("Failed to send data to Seer - cannot update alert rule.") - except ParseError: - raise ParseError( - "Failed to parse Seer store data response - cannot update alert rule." 
- ) - except (ValidationError, Exception): - # If there's no historical data available—something went wrong when querying snuba - raise ValidationError("Failed to send data to Seer - cannot update alert rule.") + if query and "is:unresolved" in query: + raise ValidationError("Dynamic alerts do not support 'is:unresolved' queries") + # NOTE: if adding a new metric alert type, take care to check that it's handled here + project = projects[0] if projects else alert_rule.projects.get() + update_rule_data(alert_rule, project, snuba_query, updated_fields, updated_query_fields) else: # if this was a dynamic rule, delete the data in Seer if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC: @@ -994,7 +958,15 @@ def update_alert_rule( "time_window", timedelta(seconds=snuba_query.time_window) ) updated_query_fields.setdefault("event_types", None) - updated_query_fields.setdefault("resolution", timedelta(seconds=snuba_query.resolution)) + if ( + detection_type == AlertRuleDetectionType.DYNAMIC + and alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC + ): + updated_query_fields.setdefault("resolution", snuba_query.resolution) + else: + updated_query_fields.setdefault( + "resolution", timedelta(seconds=snuba_query.resolution) + ) update_snuba_query(snuba_query, environment=environment, **updated_query_fields) existing_subs: Iterable[QuerySubscription] = () @@ -1136,6 +1108,18 @@ def delete_alert_rule( incidents = Incident.objects.filter(alert_rule=alert_rule) if incidents.exists(): + # if this was a dynamic rule, delete the data in Seer + if alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC: + success = delete_rule_in_seer( + alert_rule=alert_rule, + ) + if not success: + logger.error( + "Call to delete rule data in Seer failed", + extra={ + "rule_id": alert_rule.id, + }, + ) AlertRuleActivity.objects.create( alert_rule=alert_rule, user_id=user.id if user else None, @@ -1658,7 +1642,9 @@ def _get_alert_rule_trigger_action_slack_channel_id( except StopIteration: integration = None else: - integration = integration_service.get_integration(integration_id=integration_id) + integration = integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) if integration is None: raise InvalidTriggerActionError("Slack workspace is a required field.") @@ -1689,7 +1675,9 @@ def _get_alert_rule_trigger_action_slack_channel_id( def _get_alert_rule_trigger_action_discord_channel_id(name: str, integration_id: int) -> str | None: from sentry.integrations.discord.utils.channel import validate_channel_id - integration = integration_service.get_integration(integration_id=integration_id) + integration = integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) if integration is None: raise InvalidTriggerActionError("Discord integration not found.") try: @@ -1858,6 +1846,22 @@ def get_opsgenie_teams(organization_id: int, integration_id: int) -> list[tuple[ "measurements.score.total", ], } +EAP_COLUMNS = [ + "span.duration", + "span.self_time", +] +EAP_FUNCTIONS = [ + "count", + "avg", + "p50", + "p75", + "p90", + "p95", + "p99", + "p100", + "max", + "min", +] def get_column_from_aggregate(aggregate: str, allow_mri: bool) -> str | None: @@ -1870,6 +1874,11 @@ def get_column_from_aggregate(aggregate: str, allow_mri: bool) -> str | None: or match.group("function") in METRICS_LAYER_UNSUPPORTED_TRANSACTION_METRICS_FUNCTIONS ): return None if match.group("columns") == "" else match.group("columns") + + # Skip additional 
validation for EAP queries. They don't exist in the old logic. + if match and match.group("function") in EAP_FUNCTIONS and match.group("columns") in EAP_COLUMNS: + return match.group("columns") + if allow_mri: mri_column = _get_column_from_aggregate_with_mri(aggregate) # Only if the column was allowed, we return it, otherwise we fallback to the old logic. @@ -1902,7 +1911,9 @@ def _get_column_from_aggregate_with_mri(aggregate: str) -> str | None: return columns -def check_aggregate_column_support(aggregate: str, allow_mri: bool = False) -> bool: +def check_aggregate_column_support( + aggregate: str, allow_mri: bool = False, allow_eap: bool = False +) -> bool: # TODO(ddm): remove `allow_mri` once the experimental feature flag is removed. column = get_column_from_aggregate(aggregate, allow_mri) match = is_function(aggregate) @@ -1917,6 +1928,7 @@ def check_aggregate_column_support(aggregate: str, allow_mri: bool = False) -> b isinstance(function, str) and column in INSIGHTS_FUNCTION_VALID_ARGS_MAP.get(function, []) ) + or (column in EAP_COLUMNS and allow_eap) ) diff --git a/src/sentry/incidents/serializers/__init__.py b/src/sentry/incidents/serializers/__init__.py index 061c29461acd3..58a4bd86171ef 100644 --- a/src/sentry/incidents/serializers/__init__.py +++ b/src/sentry/incidents/serializers/__init__.py @@ -26,7 +26,11 @@ } QUERY_TYPE_VALID_DATASETS = { SnubaQuery.Type.ERROR: {Dataset.Events}, - SnubaQuery.Type.PERFORMANCE: {Dataset.Transactions, Dataset.PerformanceMetrics}, + SnubaQuery.Type.PERFORMANCE: { + Dataset.Transactions, + Dataset.PerformanceMetrics, + Dataset.EventsAnalyticsPlatform, + }, SnubaQuery.Type.CRASH_RATE: {Dataset.Metrics}, } diff --git a/src/sentry/incidents/serializers/alert_rule.py b/src/sentry/incidents/serializers/alert_rule.py index f68911f9f0851..756d74ef08c1d 100644 --- a/src/sentry/incidents/serializers/alert_rule.py +++ b/src/sentry/incidents/serializers/alert_rule.py @@ -165,11 +165,17 @@ def validate_aggregate(self, aggregate): self.context["organization"], actor=self.context.get("user", None), ) + allow_eap = features.has( + "organizations:alerts-eap", + self.context["organization"], + actor=self.context.get("user", None), + ) try: if not check_aggregate_column_support( aggregate, allow_mri=allow_mri, + allow_eap=allow_eap, ): raise serializers.ValidationError( "Invalid Metric: We do not currently support this field." 
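[Note: with the EAP additions above, an alert aggregate is accepted only when the `organizations:alerts-eap` feature is enabled and both the parsed function and its column argument appear in the new allowlists. A minimal sketch of that check; `is_supported_eap_aggregate` is an illustrative name, and the parsing step is simplified away (the real path goes through `is_function` and `get_column_from_aggregate`).]

EAP_COLUMNS = ["span.duration", "span.self_time"]
EAP_FUNCTIONS = ["count", "avg", "p50", "p75", "p90", "p95", "p99", "p100", "max", "min"]

def is_supported_eap_aggregate(function: str, column: str) -> bool:
    # Both the function and its column argument must be explicitly allowlisted
    return function in EAP_FUNCTIONS and column in EAP_COLUMNS

# e.g. is_supported_eap_aggregate("p95", "span.duration") -> True
#      is_supported_eap_aggregate("p95", "transaction.duration") -> False
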
diff --git a/src/sentry/incidents/subscription_processor.py b/src/sentry/incidents/subscription_processor.py index ce77e69f07bb8..26d97f6b821e1 100644 --- a/src/sentry/incidents/subscription_processor.py +++ b/src/sentry/incidents/subscription_processor.py @@ -12,11 +12,8 @@ from django.utils import timezone from sentry_redis_tools.retrying_cluster import RetryingRedisCluster from snuba_sdk import Column, Condition, Limit, Op -from urllib3.exceptions import MaxRetryError, TimeoutError from sentry import features -from sentry.conf.server import SEER_ANOMALY_DETECTION_ENDPOINT_URL -from sentry.constants import CRASH_RATE_ALERT_AGGREGATE_ALIAS, CRASH_RATE_ALERT_SESSION_COUNT_ALIAS from sentry.incidents.logic import ( CRITICAL_TRIGGER_LABEL, WARNING_TRIGGER_LABEL, @@ -47,29 +44,18 @@ from sentry.incidents.tasks import handle_trigger_action from sentry.incidents.utils.types import QuerySubscriptionUpdate from sentry.models.project import Project -from sentry.net.http import connection_from_url -from sentry.seer.anomaly_detection.types import ( - AlertInSeer, - AnomalyDetectionConfig, - AnomalyType, - DetectAnomaliesRequest, - DetectAnomaliesResponse, - TimeSeriesPoint, -) -from sentry.seer.anomaly_detection.utils import translate_direction -from sentry.seer.signed_seer_api import make_signed_seer_api_request +from sentry.seer.anomaly_detection.get_anomaly_data import get_anomaly_data_from_seer +from sentry.seer.anomaly_detection.utils import anomaly_has_confidence, has_anomaly from sentry.snuba.dataset import Dataset from sentry.snuba.entity_subscription import ( ENTITY_TIME_COLUMNS, - BaseCrashRateMetricsEntitySubscription, get_entity_key_from_query_builder, get_entity_subscription_from_snuba_query, ) from sentry.snuba.models import QuerySubscription from sentry.snuba.subscriptions import delete_snuba_subscription -from sentry.utils import json, metrics, redis +from sentry.utils import metrics, redis from sentry.utils.dates import to_datetime -from sentry.utils.json import JSONDecodeError logger = logging.getLogger(__name__) REDIS_TTL = int(timedelta(days=7).total_seconds()) @@ -106,11 +92,6 @@ class SubscriptionProcessor: AlertRuleThresholdType.BELOW: (operator.lt, operator.gt), } - seer_anomaly_detection_connection_pool = connection_from_url( - settings.SEER_ANOMALY_DETECTION_URL, - timeout=settings.SEER_ANOMALY_DETECTION_TIMEOUT, - ) - def __init__(self, subscription: QuerySubscription) -> None: self.subscription = subscription try: @@ -282,132 +263,8 @@ def get_comparison_aggregation_value( result: float = (aggregation_value / comparison_aggregate) * 100 return result - def get_crash_rate_alert_aggregation_value( - self, subscription_update: QuerySubscriptionUpdate - ) -> float | None: - """ - Handles validation and extraction of Crash Rate Alerts subscription updates values. - The subscription update looks like - { - '_crash_rate_alert_aggregate': 0.5, - '_total_count': 34 - } - - `_crash_rate_alert_aggregate` represents sessions_crashed/sessions or - users_crashed/users, and so we need to subtract that number from 1 and then multiply by - 100 to get the crash free percentage - - `_total_count` represents the total sessions or user counts. This is used when - CRASH_RATE_ALERT_MINIMUM_THRESHOLD is set in the sense that if the minimum threshold is - greater than the session count, then the update is dropped. 
If the minimum threshold is - not set then the total sessions count is just ignored - """ - aggregation_value = subscription_update["values"]["data"][0][ - CRASH_RATE_ALERT_AGGREGATE_ALIAS - ] - if aggregation_value is None: - self.reset_trigger_counts() - metrics.incr("incidents.alert_rules.ignore_update_no_session_data") - return None - - try: - total_count = subscription_update["values"]["data"][0][ - CRASH_RATE_ALERT_SESSION_COUNT_ALIAS - ] - if CRASH_RATE_ALERT_MINIMUM_THRESHOLD is not None: - min_threshold = int(CRASH_RATE_ALERT_MINIMUM_THRESHOLD) - if total_count < min_threshold: - self.reset_trigger_counts() - metrics.incr( - "incidents.alert_rules.ignore_update_count_lower_than_min_threshold" - ) - return None - except KeyError: - # If for whatever reason total session count was not sent in the update, - # ignore the minimum threshold comparison and continue along with processing the - # update. However, this should not happen. - logger.exception( - "Received an update for a crash rate alert subscription, but no total " - "sessions count was sent" - ) - # The subscription aggregation for crash rate alerts uses the Discover percentage - # function, which would technically return a ratio of sessions_crashed/sessions and - # so we need to calculate the crash free percentage out of that returned value - aggregation_value_result: int = round((1 - aggregation_value) * 100, 3) - return aggregation_value_result - def get_crash_rate_alert_metrics_aggregation_value( self, subscription_update: QuerySubscriptionUpdate - ) -> float | None: - """ - Handle both update formats. - Once all subscriptions have been updated to v2, - we can remove v1 and replace this function with current v2. - """ - rows = subscription_update["values"]["data"] - if BaseCrashRateMetricsEntitySubscription.is_crash_rate_format_v2(rows): - version = "v2" - result = self._get_crash_rate_alert_metrics_aggregation_value_v2(subscription_update) - else: - version = "v1" - result = self._get_crash_rate_alert_metrics_aggregation_value_v1(subscription_update) - - metrics.incr( - "incidents.alert_rules.get_crash_rate_alert_metrics_aggregation_value", - tags={"format": version}, - sample_rate=1.0, - ) - return result - - def _get_crash_rate_alert_metrics_aggregation_value_v1( - self, subscription_update: QuerySubscriptionUpdate - ) -> float | None: - """ - Handles validation and extraction of Crash Rate Alerts subscription updates values over - metrics dataset. - The subscription update looks like - [ - {'project_id': 8, 'tags[5]': 6, 'value': 2.0}, - {'project_id': 8, 'tags[5]': 13,'value': 1.0} - ] - where each entry represents a session status and the count of that specific session status. - As an example, `tags[5]` represents string `session.status`, while `tags[5]: 6` could - mean something like there are 2 sessions of status `crashed`. Likewise the other entry - represents the number of sessions started. In this method, we need to reverse match these - strings to end up with something that looks like - {"init": 2, "crashed": 4} - - `init` represents sessions or users sessions that were started, hence to get the crash - free percentage, we would need to divide number of crashed sessions by that number, - and subtract that value from 1. This is also used when CRASH_RATE_ALERT_MINIMUM_THRESHOLD is - set in the sense that if the minimum threshold is greater than the session count, - then the update is dropped. 
If the minimum threshold is not set then the total sessions - count is just ignored - - `crashed` represents the total sessions or user counts that crashed. - """ - ( - total_session_count, - crash_count, - ) = BaseCrashRateMetricsEntitySubscription.translate_sessions_tag_keys_and_values( - data=subscription_update["values"]["data"], - org_id=self.subscription.project.organization.id, - ) - - if total_session_count == 0: - self.reset_trigger_counts() - metrics.incr("incidents.alert_rules.ignore_update_no_session_data") - return None - - if CRASH_RATE_ALERT_MINIMUM_THRESHOLD is not None: - min_threshold = int(CRASH_RATE_ALERT_MINIMUM_THRESHOLD) - if total_session_count < min_threshold: - self.reset_trigger_counts() - metrics.incr("incidents.alert_rules.ignore_update_count_lower_than_min_threshold") - return None - - aggregation_value = round((1 - crash_count / total_session_count) * 100, 3) - - return aggregation_value - - def _get_crash_rate_alert_metrics_aggregation_value_v2( - self, subscription_update: QuerySubscriptionUpdate ) -> float | None: """ Handles validation and extraction of Crash Rate Alerts subscription updates values over @@ -425,8 +282,8 @@ def _get_crash_rate_alert_metrics_aggregation_value_v2( - `crashed` represents the total sessions or user counts that crashed. """ row = subscription_update["values"]["data"][0] - total_session_count = row["count"] - crash_count = row["crashed"] + total_session_count = row.get("count", 0) + crash_count = row.get("crashed", 0) if total_session_count == 0: self.reset_trigger_counts() @@ -530,20 +387,34 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: }, ) - self.has_anomaly_detection = features.has( + has_anomaly_detection = features.has( "organizations:anomaly-detection-alerts", self.subscription.project.organization - ) - has_fake_anomalies = features.has( - "organizations:fake-anomaly-detection", self.subscription.project.organization + ) and features.has( + "organizations:anomaly-detection-rollout", self.subscription.project.organization ) potential_anomalies = None if ( - self.has_anomaly_detection + has_anomaly_detection and self.alert_rule.detection_type == AlertRuleDetectionType.DYNAMIC ): - potential_anomalies = self.get_anomaly_data_from_seer(aggregation_value) + potential_anomalies = get_anomaly_data_from_seer( + alert_rule=self.alert_rule, + subscription=self.subscription, + last_update=self.last_update.timestamp(), + aggregation_value=aggregation_value, + ) if potential_anomalies is None: + logger.info( + "No potential anomalies found", + extra={ + "subscription_id": self.subscription.id, + "dataset": self.alert_rule.snuba_query.dataset, + "organization_id": self.subscription.project.organization.id, + "project_id": self.subscription.project_id, + "alert_rule_id": self.alert_rule.id, + }, + ) return [] # Trigger callbacks for any AlertRules that may need to know about the subscription update @@ -576,7 +447,7 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: for potential_anomaly in potential_anomalies: # check to see if we have enough data for the dynamic alert rule now if self.alert_rule.status == AlertRuleStatus.NOT_ENOUGH_DATA.value: - if self.anomaly_has_confidence(potential_anomaly): + if anomaly_has_confidence(potential_anomaly): # NOTE: this means "enabled," and it's the default alert rule status. 
# TODO: change these status labels to be less confusing self.alert_rule.status = AlertRuleStatus.PENDING.value @@ -585,8 +456,8 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: # we don't need to check if the alert should fire if the alert can't fire yet continue - if self.has_anomaly( - potential_anomaly, trigger.label, has_fake_anomalies + if has_anomaly( + potential_anomaly, trigger.label ) and not self.check_trigger_matches_status(trigger, TriggerStatus.ACTIVE): metrics.incr( "incidents.alert_rules.threshold.alert", @@ -601,9 +472,7 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: self.trigger_alert_counts[trigger.id] = 0 if ( - not self.has_anomaly( - potential_anomaly, trigger.label, has_fake_anomalies - ) + not has_anomaly(potential_anomaly, trigger.label) and self.active_incident and self.check_trigger_matches_status(trigger, TriggerStatus.ACTIVE) ): @@ -670,130 +539,6 @@ def process_update(self, subscription_update: QuerySubscriptionUpdate) -> None: # before the next one then we might alert twice. self.update_alert_rule_stats() - def has_anomaly(self, anomaly: TimeSeriesPoint, label: str, has_fake_anomalies: bool) -> bool: - """ - Helper function to determine whether we care about an anomaly based on the - anomaly type and trigger type. - """ - if has_fake_anomalies: - return True - - anomaly_type = anomaly.get("anomaly", {}).get("anomaly_type") - - if anomaly_type == AnomalyType.HIGH_CONFIDENCE.value or ( - label == WARNING_TRIGGER_LABEL and anomaly_type == AnomalyType.LOW_CONFIDENCE.value - ): - return True - return False - - def anomaly_has_confidence(self, anomaly: TimeSeriesPoint) -> bool: - """ - Helper function to determine whether we have the 7+ days of data necessary - to detect anomalies/send alerts for dynamic alert rules. 
- """ - anomaly_type = anomaly.get("anomaly", {}).get("anomaly_type") - return anomaly_type != AnomalyType.NO_DATA.value - - def get_anomaly_data_from_seer( - self, aggregation_value: float | None - ) -> list[TimeSeriesPoint] | None: - anomaly_detection_config = AnomalyDetectionConfig( - time_period=int(self.alert_rule.snuba_query.time_window / 60), - sensitivity=self.alert_rule.sensitivity, - direction=translate_direction(self.alert_rule.threshold_type), - expected_seasonality=self.alert_rule.seasonality, - ) - context = AlertInSeer( - id=self.alert_rule.id, - cur_window=TimeSeriesPoint( - timestamp=self.last_update.timestamp(), value=aggregation_value - ), - ) - detect_anomalies_request = DetectAnomaliesRequest( - organization_id=self.subscription.project.organization.id, - project_id=self.subscription.project_id, - config=anomaly_detection_config, - context=context, - ) - extra_data = { - "subscription_id": self.subscription.id, - "dataset": self.subscription.snuba_query.dataset, - "organization_id": self.subscription.project.organization.id, - "project_id": self.subscription.project_id, - "alert_rule_id": self.alert_rule.id, - } - try: - response = make_signed_seer_api_request( - self.seer_anomaly_detection_connection_pool, - SEER_ANOMALY_DETECTION_ENDPOINT_URL, - json.dumps(detect_anomalies_request).encode("utf-8"), - ) - except (TimeoutError, MaxRetryError): - logger.warning( - "Timeout error when hitting anomaly detection endpoint", extra=extra_data - ) - return None - - if response.status > 400: - logger.error( - "Error when hitting Seer detect anomalies endpoint", - extra={ - "response_data": response.data, - **extra_data, - }, - ) - return None - try: - decoded_data = response.data.decode("utf-8") - except AttributeError: - logger.exception( - "Failed to parse Seer anomaly detection response", - extra={ - "ad_config": anomaly_detection_config, - "context": context, - "response_data": response.data, - "response_code": response.status, - }, - ) - return None - - try: - results: DetectAnomaliesResponse = json.loads(decoded_data) - except JSONDecodeError: - logger.exception( - "Failed to parse Seer anomaly detection response", - extra={ - "ad_config": anomaly_detection_config, - "context": context, - "response_data": decoded_data, - "response_code": response.status, - }, - ) - return None - - if not results.get("success"): - logger.error( - "Error when hitting Seer detect anomalies endpoint", - extra={ - "error_message": results.get("message", ""), - **extra_data, - }, - ) - return None - - ts = results.get("timeseries") - if not ts: - logger.warning( - "Seer anomaly detection response returned no potential anomalies", - extra={ - "ad_config": anomaly_detection_config, - "context": context, - "response_data": results.get("message"), - }, - ) - return None - return ts - def calculate_event_date_from_update_date(self, update_date: datetime) -> datetime: """ Calculates the date that an event actually happened based on the date that we diff --git a/src/sentry/incidents/tasks.py b/src/sentry/incidents/tasks.py index e9e96cf97780e..5fe389e68082b 100644 --- a/src/sentry/incidents/tasks.py +++ b/src/sentry/incidents/tasks.py @@ -31,6 +31,7 @@ from sentry.snuba.models import QuerySubscription from sentry.snuba.query_subscriptions.consumer import register_subscriber from sentry.tasks.base import instrumented_task +from sentry.users.models.user import User from sentry.users.services.user import RpcUser from sentry.users.services.user.service import user_service from sentry.utils import metrics 
@@ -84,7 +85,7 @@ def send_subscriber_notifications(activity_id: int) -> None: def generate_incident_activity_email( - activity: IncidentActivity, user: RpcUser, activity_user: RpcUser | None = None + activity: IncidentActivity, user: RpcUser | User, activity_user: RpcUser | User | None = None ) -> MessageBuilder: incident = activity.incident return MessageBuilder( diff --git a/src/sentry/ingest/consumer/factory.py b/src/sentry/ingest/consumer/factory.py index 65c0436624368..9691f022592e4 100644 --- a/src/sentry/ingest/consumer/factory.py +++ b/src/sentry/ingest/consumer/factory.py @@ -159,3 +159,58 @@ def shutdown(self) -> None: self._pool.close() if self._attachments_pool: self._attachments_pool.close() + + +class IngestTransactionsStrategyFactory(ProcessingStrategyFactory[KafkaPayload]): + """ + Processes transactions in either celery or no-celery mode. + Transactions are either dispatched to `save_transaction_event` or stored directly in the + consumer depending on the mode. + """ + + def __init__( + self, + reprocess_only_stuck_events: bool, + stop_at_timestamp: int | None, + num_processes: int, + max_batch_size: int, + max_batch_time: int, + input_block_size: int | None, + output_block_size: int | None, + no_celery_mode: bool = False, + ): + self.consumer_type = ConsumerType.Transactions + self.reprocess_only_stuck_events = reprocess_only_stuck_events + self.stop_at_timestamp = stop_at_timestamp + + self.multi_process = None + self._pool = MultiprocessingPool(num_processes) + + if num_processes > 1: + self.multi_process = MultiProcessConfig( + num_processes, max_batch_size, max_batch_time, input_block_size, output_block_size + ) + + self.health_checker = HealthChecker("ingest") + self.no_celery_mode = no_celery_mode + + def create_with_partitions( + self, + commit: Commit, + partitions: Mapping[Partition, int], + ) -> ProcessingStrategy[KafkaPayload]: + mp = self.multi_process + + final_step = CommitOffsets(commit) + + event_function = partial( + process_simple_event_message, + consumer_type=self.consumer_type, + reprocess_only_stuck_events=self.reprocess_only_stuck_events, + no_celery_mode=self.no_celery_mode, + ) + next_step = maybe_multiprocess_step(mp, event_function, final_step, self._pool) + return create_backpressure_step(health_checker=self.health_checker, next_step=next_step) + + def shutdown(self) -> None: + self._pool.close() diff --git a/src/sentry/ingest/consumer/processors.py b/src/sentry/ingest/consumer/processors.py index 3f067f0d8dbfb..7ea9fbf72ce03 100644 --- a/src/sentry/ingest/consumer/processors.py +++ b/src/sentry/ingest/consumer/processors.py @@ -11,9 +11,9 @@ from sentry import eventstore, features from sentry.attachments import CachedAttachment, attachment_cache -from sentry.event_manager import save_attachment +from sentry.event_manager import EventManager, save_attachment from sentry.eventstore.processing import event_processing_store -from sentry.feedback.usecases.create_feedback import FeedbackCreationSource +from sentry.feedback.usecases.create_feedback import FeedbackCreationSource, is_in_feedback_denylist from sentry.ingest.userreport import Conflict, save_userreport from sentry.killswitches import killswitch_matches_context from sentry.models.project import Project @@ -23,6 +23,7 @@ from sentry.utils import metrics from sentry.utils.cache import cache_key_for_event from sentry.utils.dates import to_datetime +from sentry.utils.sdk import set_current_event_project from sentry.utils.snuba import RateLimitExceeded logger = logging.getLogger(__name__) @@ 
-53,10 +54,36 @@ def inner(*args, **kwargs): return wrapper +def process_transaction_no_celery( + data: MutableMapping[str, Any], project_id: int, start_time: float +) -> None: + + set_current_event_project(project_id) + + manager = EventManager(data) + # event.project.organization is populated after this statement. + manager.save( + project_id, + assume_normalized=True, + start_time=start_time, + ) + # Put the updated event back into the cache so that post_process + # has the most recent data. + data = manager.get_data() + if not isinstance(data, dict): + data = dict(data.items()) + + with sentry_sdk.start_span(op="event_processing_store.store"): + event_processing_store.store(data) + + @trace_func(name="ingest_consumer.process_event") @metrics.wraps("ingest_consumer.process_event") def process_event( - message: IngestMessage, project: Project, reprocess_only_stuck_events: bool = False + message: IngestMessage, + project: Project, + reprocess_only_stuck_events: bool = False, + no_celery_mode: bool = False, ) -> None: """ Perform some initial filtering and deserialize the message payload. @@ -88,37 +115,44 @@ def process_event( # This code has been ripped from the old python store endpoint. We're # keeping it around because it does provide some protection against # reprocessing good events if a single consumer is in a restart loop. - deduplication_key = f"ev:{project_id}:{event_id}" - - try: - cached_value = cache.get(deduplication_key) - except Exception as exc: - raise Retriable(exc) + with sentry_sdk.start_span(op="deduplication_check"): + deduplication_key = f"ev:{project_id}:{event_id}" - if cached_value is not None: - logger.warning( - "pre-process-forwarder detected a duplicated event" " with id:%s for project:%s.", - event_id, - project_id, - ) - return # message already processed do not reprocess + try: + cached_value = cache.get(deduplication_key) + except Exception as exc: + raise Retriable(exc) + + if cached_value is not None: + logger.warning( + "pre-process-forwarder detected a duplicated event" " with id:%s for project:%s.", + event_id, + project_id, + ) + return # message already processed do not reprocess - if killswitch_matches_context( - "store.load-shed-pipeline-projects", - { - "project_id": project_id, - "event_id": event_id, - "has_attachments": bool(attachments), - }, + with sentry_sdk.start_span( + op="killswitch_matches_context", name="store.load-shed-pipeline-projects" ): - # This killswitch is for the worst of scenarios and should probably not - # cause additional load on our logging infrastructure - return + if killswitch_matches_context( + "store.load-shed-pipeline-projects", + { + "project_id": project_id, + "event_id": event_id, + "has_attachments": bool(attachments), + }, + ): + # This killswitch is for the worst of scenarios and should probably not + # cause additional load on our logging infrastructure + return # Parse the JSON payload. This is required to compute the cache key and # call process_event. The payload will be put into Kafka raw, to avoid # serializing it again. 
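Editor's note: the deduplication check above pairs with a `cache.set` near the end of `process_event`, so the key only exists once an event has been fully handled. A small sketch of that contract; the helper names are hypothetical, the key format is the one in the diff.

```python
# Hypothetical helpers illustrating the deduplication contract used above.
# The key is written only after successful processing, so a consumer crash
# before that point leaves the event eligible for reprocessing.
def already_processed(cache, project_id: int, event_id: str) -> bool:
    return cache.get(f"ev:{project_id}:{event_id}") is not None

def mark_processed(cache, project_id: int, event_id: str, timeout: int) -> None:
    cache.set(f"ev:{project_id}:{event_id}", "", timeout)  # CACHE_TIMEOUT: one hour here
```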
- data = orjson.loads(payload) + with sentry_sdk.start_span(op="orjson.loads"): + data = orjson.loads(payload) + + sentry_sdk.set_extra("event_type", data.get("type")) if project_id == settings.SENTRY_PROJECT: metrics.incr( @@ -126,17 +160,20 @@ def process_event( tags={"event_type": data.get("type") or "null"}, ) - if killswitch_matches_context( - "store.load-shed-parsed-pipeline-projects", - { - "organization_id": project.organization_id, - "project_id": project.id, - "event_type": data.get("type") or "null", - "has_attachments": bool(attachments), - "event_id": event_id, - }, + with sentry_sdk.start_span( + op="killswitch_matches_context", name="store.load-shed-parsed-pipeline-projects" ): - return + if killswitch_matches_context( + "store.load-shed-parsed-pipeline-projects", + { + "organization_id": project.organization_id, + "project_id": project.id, + "event_type": data.get("type") or "null", + "has_attachments": bool(attachments), + "event_id": event_id, + }, + ): + return # Raise the retriable exception and skip DLQ if anything below this point fails as it may be caused by # intermittent network issue @@ -144,11 +181,19 @@ def process_event( # If we only want to reprocess "stuck" events, we check if this event is already in the # `processing_store`. We only continue here if the event *is* present, as that will eventually # process and consume the event from the `processing_store`, whereby getting it "unstuck". - if reprocess_only_stuck_events and not event_processing_store.exists(data): - return - - with metrics.timer("ingest_consumer._store_event"): - cache_key = event_processing_store.store(data) + if reprocess_only_stuck_events: + with sentry_sdk.start_span(op="event_processing_store.exists"): + if not event_processing_store.exists(data): + return + + # The no_celery_mode version of the transactions consumer skips one trip to rc-processing + # Otherwise, we have to store the event in processing store here for the save_event task to + # fetch later + if no_celery_mode and not attachments: + cache_key = None + else: + with metrics.timer("ingest_consumer._store_event"): + cache_key = event_processing_store.store(data) try: # Records rc-processing usage broken down by @@ -172,28 +217,38 @@ def process_event( CachedAttachment(type=attachment.pop("attachment_type"), **attachment) for attachment in attachments ] - + assert cache_key is not None attachment_cache.set( cache_key, attachments=attachment_objects, timeout=CACHE_TIMEOUT ) if data.get("type") == "transaction": - # No need for preprocess/process for transactions thus submit - # directly transaction specific save_event task. - save_event_transaction.delay( - cache_key=cache_key, - data=None, - start_time=start_time, - event_id=event_id, - project_id=project_id, - ) + if no_celery_mode: + with sentry_sdk.start_span(op="ingest_consumer.process_transaction_no_celery"): + transaction = sentry_sdk.get_current_scope().transaction + + if transaction is not None: + transaction.set_tag("no_celery_mode", True) + + process_transaction_no_celery(data, project_id, start_time) + else: + assert cache_key is not None + # No need for preprocess/process for transactions thus submit + # directly transaction specific save_event task. 
+ save_event_transaction.delay( + cache_key=cache_key, + data=None, + start_time=start_time, + event_id=event_id, + project_id=project_id, + ) try: collect_span_metrics(project, data) except Exception: pass elif data.get("type") == "feedback": - if features.has("organizations:user-feedback-ingest", project.organization, actor=None): + if not is_in_feedback_denylist(project.organization): save_event_feedback.delay( cache_key=None, # no need to cache as volume is low data=data, @@ -201,6 +256,8 @@ def process_event( event_id=event_id, project_id=project_id, ) + else: + metrics.incr("feedback.ingest.filtered", tags={"reason": "org.denylist"}) else: # Preprocess this event, which spawns either process_event or # save_event. Pass data explicitly to avoid fetching it again from the @@ -216,10 +273,14 @@ def process_event( ) # remember for an 1 hour that we saved this event (deduplication protection) - cache.set(deduplication_key, "", CACHE_TIMEOUT) + with sentry_sdk.start_span(op="cache.set"): + cache.set(deduplication_key, "", CACHE_TIMEOUT) # emit event_accepted once everything is done - event_accepted.send_robust(ip=remote_addr, data=data, project=project, sender=process_event) + with sentry_sdk.start_span(op="event_accepted.send_robust"): + event_accepted.send_robust( + ip=remote_addr, data=data, project=project, sender=process_event + ) except Exception as exc: if isinstance(exc, KeyError): # ex: missing event_id in message["payload"] raise diff --git a/src/sentry/ingest/consumer/simple_event.py b/src/sentry/ingest/consumer/simple_event.py index fb594f11a14b3..99fa07a4b52ae 100644 --- a/src/sentry/ingest/consumer/simple_event.py +++ b/src/sentry/ingest/consumer/simple_event.py @@ -14,7 +14,10 @@ def process_simple_event_message( - raw_message: Message[KafkaPayload], consumer_type: str, reprocess_only_stuck_events: bool + raw_message: Message[KafkaPayload], + consumer_type: str, + reprocess_only_stuck_events: bool, + no_celery_mode: bool = False, ) -> None: """ Processes a single Kafka Message containing a "simple" Event payload. @@ -28,6 +31,8 @@ def process_simple_event_message( - Store the JSON payload in the event processing store, and pass it on to `preprocess_event`, which will schedule a followup task such as `symbolicate_event` or `process_event`. + + No celery mode only applies to the transactions consumer. 
""" raw_payload = raw_message.payload.value @@ -54,7 +59,7 @@ def process_simple_event_message( logger.exception("Project for ingested event does not exist: %s", project_id) return - return process_event(message, project, reprocess_only_stuck_events) + return process_event(message, project, reprocess_only_stuck_events, no_celery_mode) except Exception as exc: # If the retriable exception was raised, we should not DLQ diff --git a/src/sentry/ingest/userreport.py b/src/sentry/ingest/userreport.py index 71b777fc04c85..904f66418b89d 100644 --- a/src/sentry/ingest/userreport.py +++ b/src/sentry/ingest/userreport.py @@ -6,10 +6,11 @@ from django.db import IntegrityError, router from django.utils import timezone -from sentry import eventstore, features, options +from sentry import eventstore, options from sentry.eventstore.models import Event, GroupEvent from sentry.feedback.usecases.create_feedback import ( UNREAL_FEEDBACK_UNATTENDED_MESSAGE, + is_in_feedback_denylist, shim_to_feedback, ) from sentry.models.userreport import UserReport @@ -32,7 +33,8 @@ def save_userreport( start_time=None, ): with metrics.timer("sentry.ingest.userreport.save_userreport"): - if is_org_in_denylist(project.organization): + if is_in_feedback_denylist(project.organization): + metrics.incr("user_report.create_user_report.filtered", tags={"reason": "org.denylist"}) return if should_filter_user_report(report["comments"]): return @@ -97,24 +99,19 @@ def save_userreport( user_feedback_received.send(project=project, sender=save_userreport) - has_feedback_ingest = features.has( - "organizations:user-feedback-ingest", project.organization, actor=None - ) logger.info( "ingest.user_report", extra={ "project_id": project.id, "event_id": report["event_id"], "has_event": bool(event), - "has_feedback_ingest": has_feedback_ingest, }, ) metrics.incr( "user_report.create_user_report.saved", - tags={"has_event": bool(event), "has_feedback_ingest": has_feedback_ingest}, + tags={"has_event": bool(event)}, ) - - if has_feedback_ingest and event: + if event: logger.info( "ingest.user_report.shim_to_feedback", extra={"project_id": project.id, "event_id": report["event_id"]}, @@ -150,10 +147,3 @@ def should_filter_user_report(comments: str): return True return False - - -def is_org_in_denylist(organization): - if organization.slug in options.get("feedback.organizations.slug-denylist"): - metrics.incr("user_report.create_user_report.filtered", tags={"reason": "org.denylist"}) - return True - return False diff --git a/src/sentry/integrations/api/bases/external_actor.py b/src/sentry/integrations/api/bases/external_actor.py index ca1c47f4ba11f..99cd4c8eb65d6 100644 --- a/src/sentry/integrations/api/bases/external_actor.py +++ b/src/sentry/integrations/api/bases/external_actor.py @@ -3,6 +3,7 @@ from django.db import IntegrityError from django.http import Http404 +from drf_spectacular.utils import extend_schema_serializer from rest_framework import serializers from rest_framework.exceptions import PermissionDenied from rest_framework.request import Request @@ -54,7 +55,7 @@ class ExternalActorSerializerBase(CamelSnakeModelSerializer): required=False, allow_null=True, help_text="The associated user ID for provider." ) external_name = serializers.CharField( - required=True, help_text="The associated username for the provider." + required=True, help_text="The associated name for the provider." 
) provider = serializers.ChoiceField( choices=get_provider_choices(AVAILABLE_PROVIDERS), @@ -155,6 +156,7 @@ class Meta: fields = ["user_id", "external_id", "external_name", "provider", "integration_id", "id"] +@extend_schema_serializer(exclude_fields=["team_id"]) class ExternalTeamSerializer(ExternalActorSerializerBase): _actor_key = "team_id" diff --git a/src/sentry/integrations/api/endpoints/external_team_details.py b/src/sentry/integrations/api/endpoints/external_team_details.py index 3838359061e00..4a195157a1541 100644 --- a/src/sentry/integrations/api/endpoints/external_team_details.py +++ b/src/sentry/integrations/api/endpoints/external_team_details.py @@ -1,6 +1,7 @@ import logging from typing import Any +from drf_spectacular.utils import extend_schema from rest_framework import status from rest_framework.request import Request from rest_framework.response import Response @@ -10,10 +11,14 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.team import TeamEndpoint from sentry.api.serializers import serialize +from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN, RESPONSE_NO_CONTENT +from sentry.apidocs.examples.integration_examples import IntegrationExamples +from sentry.apidocs.parameters import GlobalParams, OrganizationParams from sentry.integrations.api.bases.external_actor import ( ExternalActorEndpointMixin, ExternalTeamSerializer, ) +from sentry.integrations.api.serializers.models.external_actor import ExternalActorSerializer from sentry.integrations.models.external_actor import ExternalActor from sentry.models.team import Team @@ -21,10 +26,11 @@ @region_silo_endpoint +@extend_schema(tags=["Integrations"]) class ExternalTeamDetailsEndpoint(TeamEndpoint, ExternalActorEndpointMixin): publish_status = { - "DELETE": ApiPublishStatus.UNKNOWN, - "PUT": ApiPublishStatus.UNKNOWN, + "DELETE": ApiPublishStatus.PUBLIC, + "PUT": ApiPublishStatus.PUBLIC, } owner = ApiOwner.ENTERPRISE @@ -45,19 +51,24 @@ def convert_args( ) return args, kwargs + @extend_schema( + operation_id="Update an External Team", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + GlobalParams.TEAM_ID_OR_SLUG, + OrganizationParams.EXTERNAL_TEAM_ID, + ], + request=ExternalTeamSerializer, + responses={ + 200: ExternalActorSerializer, + 400: RESPONSE_BAD_REQUEST, + 403: RESPONSE_FORBIDDEN, + }, + examples=IntegrationExamples.EXTERNAL_TEAM_CREATE, + ) def put(self, request: Request, team: Team, external_team: ExternalActor) -> Response: """ - Update an External Team - ````````````` - - :pparam string organization_id_or_slug: the id or slug of the organization the - team belongs to. - :pparam string team_id_or_slug: the id or slug of the team to get. - :pparam string external_team_id: id of external_team object - :param string external_id: the associated user ID for this provider - :param string external_name: the Github/Gitlab team name. - :param string provider: enum("github","gitlab") - :auth: required + Update a team in an external provider that is currently linked to a Sentry team. 
""" self.assert_has_feature(request, team.organization) @@ -76,9 +87,23 @@ def put(self, request: Request, team: Team, external_team: ExternalActor) -> Res return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + @extend_schema( + operation_id="Delete an External Team", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + GlobalParams.TEAM_ID_OR_SLUG, + OrganizationParams.EXTERNAL_TEAM_ID, + ], + request=None, + responses={ + 204: RESPONSE_NO_CONTENT, + 400: RESPONSE_BAD_REQUEST, + 403: RESPONSE_FORBIDDEN, + }, + ) def delete(self, request: Request, team: Team, external_team: ExternalActor) -> Response: """ - Delete an External Team + Delete the link between a team from an external provider and a Sentry team. """ external_team.delete() return Response(status=status.HTTP_204_NO_CONTENT) diff --git a/src/sentry/integrations/api/endpoints/external_team_index.py b/src/sentry/integrations/api/endpoints/external_team_index.py index 6796e11de89f9..1beab567c8316 100644 --- a/src/sentry/integrations/api/endpoints/external_team_index.py +++ b/src/sentry/integrations/api/endpoints/external_team_index.py @@ -1,5 +1,6 @@ import logging +from drf_spectacular.utils import extend_schema from rest_framework import status from rest_framework.request import Request from rest_framework.response import Response @@ -9,35 +10,42 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.team import TeamEndpoint from sentry.api.serializers import serialize +from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN +from sentry.apidocs.examples.integration_examples import IntegrationExamples +from sentry.apidocs.parameters import GlobalParams from sentry.integrations.api.bases.external_actor import ( ExternalActorEndpointMixin, ExternalTeamSerializer, ) +from sentry.integrations.api.serializers.models.external_actor import ExternalActorSerializer from sentry.models.team import Team logger = logging.getLogger(__name__) @region_silo_endpoint +@extend_schema(tags=["Integrations"]) class ExternalTeamEndpoint(TeamEndpoint, ExternalActorEndpointMixin): publish_status = { - "POST": ApiPublishStatus.UNKNOWN, + "POST": ApiPublishStatus.PUBLIC, } owner = ApiOwner.ENTERPRISE + @extend_schema( + operation_id="Create an External Team", + parameters=[GlobalParams.ORG_ID_OR_SLUG, GlobalParams.TEAM_ID_OR_SLUG], + request=ExternalTeamSerializer, + responses={ + 200: ExternalActorSerializer, + 201: ExternalActorSerializer, + 400: RESPONSE_BAD_REQUEST, + 403: RESPONSE_FORBIDDEN, + }, + examples=IntegrationExamples.EXTERNAL_TEAM_CREATE, + ) def post(self, request: Request, team: Team) -> Response: """ - Create an External Team - ````````````` - - :pparam string organization_id_or_slug: the id or slug of the organization the - team belongs to. - :pparam string team_id_or_slug: the team_id_or_slug of the team to get. - :param required string provider: enum("github", "gitlab") - :param required string external_name: the associated Github/Gitlab team name. - :param optional string integration_id: the id of the integration if it exists. - :param string external_id: the associated user ID for this provider - :auth: required + Link a team from an external provider to a Sentry team. 
""" self.assert_has_feature(request, team.organization) diff --git a/src/sentry/integrations/api/endpoints/external_user_details.py b/src/sentry/integrations/api/endpoints/external_user_details.py index 3c96ff9b22d40..a1e5294fc3fb4 100644 --- a/src/sentry/integrations/api/endpoints/external_user_details.py +++ b/src/sentry/integrations/api/endpoints/external_user_details.py @@ -14,12 +14,13 @@ from sentry.api.bases.organization import OrganizationEndpoint from sentry.api.serializers import serialize from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN, RESPONSE_NO_CONTENT -from sentry.apidocs.examples.organization_examples import OrganizationExamples +from sentry.apidocs.examples.integration_examples import IntegrationExamples from sentry.apidocs.parameters import GlobalParams, OrganizationParams from sentry.integrations.api.bases.external_actor import ( ExternalActorEndpointMixin, ExternalUserSerializer, ) +from sentry.integrations.api.serializers.models.external_actor import ExternalActorSerializer from sentry.integrations.models.external_actor import ExternalActor from sentry.models.organization import Organization @@ -27,7 +28,7 @@ @region_silo_endpoint -@extend_schema(tags=["Organizations"]) +@extend_schema(tags=["Integrations"]) class ExternalUserDetailsEndpoint(OrganizationEndpoint, ExternalActorEndpointMixin): publish_status = { "DELETE": ApiPublishStatus.PUBLIC, @@ -54,11 +55,11 @@ def convert_args( parameters=[GlobalParams.ORG_ID_OR_SLUG, OrganizationParams.EXTERNAL_USER_ID], request=ExternalUserSerializer, responses={ - 200: ExternalUserSerializer, + 200: ExternalActorSerializer, 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, }, - examples=OrganizationExamples.EXTERNAL_USER_CREATE, + examples=IntegrationExamples.EXTERNAL_USER_CREATE, ) def put( self, request: Request, organization: Organization, external_user: ExternalActor @@ -92,7 +93,6 @@ def put( 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, }, - examples=OrganizationExamples.EXTERNAL_USER_CREATE, ) def delete( self, request: Request, organization: Organization, external_user: ExternalActor diff --git a/src/sentry/integrations/api/endpoints/external_user_index.py b/src/sentry/integrations/api/endpoints/external_user_index.py index 875ab4b4e5de1..2cd13471abb37 100644 --- a/src/sentry/integrations/api/endpoints/external_user_index.py +++ b/src/sentry/integrations/api/endpoints/external_user_index.py @@ -10,20 +10,21 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases import OrganizationEndpoint from sentry.api.serializers import serialize -from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN, RESPONSE_SUCCESS -from sentry.apidocs.examples.organization_examples import OrganizationExamples +from sentry.apidocs.constants import RESPONSE_BAD_REQUEST, RESPONSE_FORBIDDEN +from sentry.apidocs.examples.integration_examples import IntegrationExamples from sentry.apidocs.parameters import GlobalParams from sentry.integrations.api.bases.external_actor import ( ExternalActorEndpointMixin, ExternalUserSerializer, ) +from sentry.integrations.api.serializers.models.external_actor import ExternalActorSerializer from sentry.models.organization import Organization logger = logging.getLogger(__name__) @region_silo_endpoint -@extend_schema(tags=["Organizations"]) +@extend_schema(tags=["Integrations"]) class ExternalUserEndpoint(OrganizationEndpoint, ExternalActorEndpointMixin): publish_status = { "POST": ApiPublishStatus.PUBLIC, @@ -35,16 +36,16 @@ class 
ExternalUserEndpoint(OrganizationEndpoint, ExternalActorEndpointMixin): parameters=[GlobalParams.ORG_ID_OR_SLUG], request=ExternalUserSerializer, responses={ - 200: RESPONSE_SUCCESS, - 201: ExternalUserSerializer, + 200: ExternalActorSerializer, + 201: ExternalActorSerializer, 400: RESPONSE_BAD_REQUEST, 403: RESPONSE_FORBIDDEN, }, - examples=OrganizationExamples.EXTERNAL_USER_CREATE, + examples=IntegrationExamples.EXTERNAL_USER_CREATE, ) def post(self, request: Request, organization: Organization) -> Response: """ - Links a user from an external provider to a Sentry user. + Link a user from an external provider to a Sentry user. """ self.assert_has_feature(request, organization) diff --git a/src/sentry/integrations/api/endpoints/organization_code_mapping_codeowners.py b/src/sentry/integrations/api/endpoints/organization_code_mapping_codeowners.py index 9e2090c9951a9..4edd397b751f2 100644 --- a/src/sentry/integrations/api/endpoints/organization_code_mapping_codeowners.py +++ b/src/sentry/integrations/api/endpoints/organization_code_mapping_codeowners.py @@ -8,6 +8,7 @@ from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint from sentry.api.bases.organization import OrganizationEndpoint, OrganizationIntegrationsPermission +from sentry.constants import ObjectStatus from sentry.integrations.models.repository_project_path_config import RepositoryProjectPathConfig from sentry.integrations.services.integration import integration_service from sentry.integrations.source_code_management.repository import RepositoryIntegration @@ -18,7 +19,9 @@ def get_codeowner_contents(config): if not config.organization_integration_id: raise NotFound(detail="No associated integration") - integration = integration_service.get_integration(integration_id=config.integration_id) + integration = integration_service.get_integration( + integration_id=config.integration_id, status=ObjectStatus.ACTIVE + ) if not integration: return None install = integration.get_installation(organization_id=config.project.organization_id) diff --git a/src/sentry/integrations/api/endpoints/organization_integration_details.py b/src/sentry/integrations/api/endpoints/organization_integration_details.py index 1b7ea1294f93d..830316dd51285 100644 --- a/src/sentry/integrations/api/endpoints/organization_integration_details.py +++ b/src/sentry/integrations/api/endpoints/organization_integration_details.py @@ -16,12 +16,12 @@ from sentry.api.base import control_silo_endpoint from sentry.api.serializers import serialize from sentry.constants import ObjectStatus +from sentry.deletions.models.scheduleddeletion import ScheduledDeletion from sentry.integrations.api.bases.organization_integrations import ( OrganizationIntegrationBaseEndpoint, ) from sentry.integrations.api.serializers.models.integration import OrganizationIntegrationSerializer from sentry.integrations.models.organization_integration import OrganizationIntegration -from sentry.models.scheduledeletion import ScheduledDeletion from sentry.organizations.services.organization import RpcUserOrganizationContext from sentry.shared_integrations.exceptions import ApiError, IntegrationError from sentry.utils.audit import create_audit_entry diff --git a/src/sentry/integrations/api/endpoints/organization_integrations_index.py b/src/sentry/integrations/api/endpoints/organization_integrations_index.py index 93979867c2a71..588994d2045d3 100644 --- a/src/sentry/integrations/api/endpoints/organization_integrations_index.py +++ 
b/src/sentry/integrations/api/endpoints/organization_integrations_index.py @@ -21,6 +21,7 @@ OrganizationIntegrationBaseEndpoint, ) from sentry.integrations.api.serializers.models.integration import OrganizationIntegrationResponse +from sentry.integrations.base import INTEGRATION_TYPE_TO_PROVIDER, IntegrationDomain from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration from sentry.organizations.services.organization.model import ( @@ -93,6 +94,7 @@ def get( if provider_key is None: provider_key = request.GET.get("provider_key", "") include_config_raw = request.GET.get("includeConfig") + integration_type = request.GET.get("integrationType") # Include the configurations by default if includeConfig is not present. # TODO(mgaeta): HACK. We need a consistent way to get booleans from query parameters. @@ -109,6 +111,16 @@ def get( if provider_key: queryset = queryset.filter(integration__provider=provider_key.lower()) + if integration_type: + try: + integration_domain = IntegrationDomain(integration_type) + except ValueError: + return Response({"detail": "Invalid integration type"}, status=400) + provider_slugs = [ + provider for provider in INTEGRATION_TYPE_TO_PROVIDER.get(integration_domain, []) + ] + queryset = queryset.filter(integration__provider__in=provider_slugs) + def on_results(results: Sequence[OrganizationIntegration]) -> Sequence[Mapping[str, Any]]: if feature_filters: results = filter_by_features(results, feature_filters) diff --git a/src/sentry/integrations/api/endpoints/organization_repository_details.py b/src/sentry/integrations/api/endpoints/organization_repository_details.py index 00a6690be4bd4..2258ad33c3181 100644 --- a/src/sentry/integrations/api/endpoints/organization_repository_details.py +++ b/src/sentry/integrations/api/endpoints/organization_repository_details.py @@ -15,11 +15,11 @@ from sentry.api.fields.empty_integer import EmptyIntegerField from sentry.api.serializers import serialize from sentry.constants import ObjectStatus +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.hybridcloud.rpc import coerce_id_from from sentry.integrations.services.integration import integration_service from sentry.models.commit import Commit from sentry.models.repository import Repository -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.tasks.repository import repository_cascade_delete_on_hide @@ -74,7 +74,9 @@ def put(self, request: Request, organization, repo_id) -> Response: raise NotImplementedError if result.get("integrationId"): integration = integration_service.get_integration( - integration_id=result["integrationId"], organization_id=coerce_id_from(organization) + integration_id=result["integrationId"], + organization_id=coerce_id_from(organization), + status=ObjectStatus.ACTIVE, ) if integration is None: return Response({"detail": "Invalid integration id"}, status=400) diff --git a/src/sentry/integrations/api/serializers/rest_framework/doc_integration.py b/src/sentry/integrations/api/serializers/rest_framework/doc_integration.py index f4ea801fedad4..0483ebd7299a1 100644 --- a/src/sentry/integrations/api/serializers/rest_framework/doc_integration.py +++ b/src/sentry/integrations/api/serializers/rest_framework/doc_integration.py @@ -8,7 +8,6 @@ from sentry.api.fields.avatar import AvatarField from sentry.api.helpers.slugs import sentry_slugify -from sentry.api.serializers.rest_framework.sentry_app import URLField 
from sentry.api.validators.doc_integration import validate_metadata_schema from sentry.integrations.models.doc_integration import DocIntegration from sentry.integrations.models.integration_feature import ( @@ -16,6 +15,7 @@ IntegrationFeature, IntegrationTypes, ) +from sentry.sentry_apps.api.parsers.sentry_app import URLField class MetadataField(serializers.JSONField): diff --git a/src/sentry/integrations/base.py b/src/sentry/integrations/base.py index abd91547629fa..1a37db0858aab 100644 --- a/src/sentry/integrations/base.py +++ b/src/sentry/integrations/base.py @@ -4,7 +4,7 @@ import logging import sys from collections.abc import Mapping, MutableMapping, Sequence -from enum import Enum +from enum import Enum, StrEnum from functools import cached_property from typing import TYPE_CHECKING, Any, NamedTuple, NoReturn @@ -36,6 +36,7 @@ from sentry.shared_integrations.exceptions import ( ApiError, ApiHostError, + ApiInvalidRequestError, ApiUnauthorized, IntegrationError, IntegrationFormError, @@ -125,6 +126,58 @@ class IntegrationFeatures(Enum): DEPLOYMENT = "deployment" +# Integration Types +class IntegrationDomain(StrEnum): + MESSAGING = "messaging" + PROJECT_MANAGEMENT = "project_management" + SOURCE_CODE_MANAGEMENT = "source_code_management" + ON_CALL_SCHEDULING = "on_call_scheduling" + IDENTITY = "identity" # for identity pipelines + + +class IntegrationProviderSlug(StrEnum): + SLACK = "slack" + DISCORD = "discord" + MSTeams = "msteams" + JIRA = "jira" + JIRA_SERVER = "jira_server" + AZURE_DEVOPS = "vsts" + GITHUB = "github" + GITHUB_ENTERPRISE = "github_enterprise" + GITLAB = "gitlab" + BITBUCKET = "bitbucket" + PAGERDUTY = "pagerduty" + OPSGENIE = "opsgenie" + + +INTEGRATION_TYPE_TO_PROVIDER = { + IntegrationDomain.MESSAGING: [ + IntegrationProviderSlug.SLACK, + IntegrationProviderSlug.DISCORD, + IntegrationProviderSlug.MSTeams, + ], + IntegrationDomain.PROJECT_MANAGEMENT: [ + IntegrationProviderSlug.JIRA, + IntegrationProviderSlug.JIRA_SERVER, + IntegrationProviderSlug.GITHUB, + IntegrationProviderSlug.GITHUB_ENTERPRISE, + IntegrationProviderSlug.GITLAB, + IntegrationProviderSlug.AZURE_DEVOPS, + ], + IntegrationDomain.SOURCE_CODE_MANAGEMENT: [ + IntegrationProviderSlug.GITHUB, + IntegrationProviderSlug.GITHUB_ENTERPRISE, + IntegrationProviderSlug.GITLAB, + IntegrationProviderSlug.BITBUCKET, + IntegrationProviderSlug.AZURE_DEVOPS, + ], + IntegrationDomain.ON_CALL_SCHEDULING: [ + IntegrationProviderSlug.PAGERDUTY, + IntegrationProviderSlug.OPSGENIE, + ], +} + + class IntegrationProvider(PipelineProvider, abc.ABC): """ An integration provider describes a third party that can be registered within Sentry. @@ -373,7 +426,7 @@ def get_client(self) -> Any: """ raise NotImplementedError - def get_keyring_client(self, keyid: str) -> Any: + def get_keyring_client(self, keyid: int | str) -> Any: """ Return an API client with a scoped key based on the key_name. 
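Editor's note: the `IntegrationDomain`/`IntegrationProviderSlug` mapping added above is what backs the new `integrationType` query parameter in `organization_integrations_index.py` earlier in this diff. A sketch of the lookup, assuming those enum definitions are in scope; because `IntegrationProviderSlug` is a `StrEnum`, its members compare equal to their plain slug strings and can be passed straight into the ORM filter.

```python
# Sketch of the lookup behind the new integrationType query parameter.
def providers_for(integration_type: str) -> list[str]:
    # IntegrationDomain(...) raises ValueError for unknown types;
    # the endpoint maps that to a 400 "Invalid integration type".
    domain = IntegrationDomain(integration_type)
    return list(INTEGRATION_TYPE_TO_PROVIDER.get(domain, []))

assert providers_for("on_call_scheduling") == ["pagerduty", "opsgenie"]
# queryset = queryset.filter(integration__provider__in=providers_for(integration_type))
```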
@@ -431,7 +484,7 @@ def raise_error(self, exc: Exception, identity: Identity | None = None) -> NoRet raise InvalidIdentity(self.message_from_error(exc), identity=identity).with_traceback( sys.exc_info()[2] ) - elif isinstance(exc, ApiError): + elif isinstance(exc, ApiInvalidRequestError): if exc.json: error_fields = self.error_fields_from_json(exc.json) if error_fields is not None: diff --git a/src/sentry/integrations/bitbucket/client.py b/src/sentry/integrations/bitbucket/client.py index 2b5682abaa84e..897e309ab7b30 100644 --- a/src/sentry/integrations/bitbucket/client.py +++ b/src/sentry/integrations/bitbucket/client.py @@ -11,7 +11,7 @@ from sentry.integrations.client import ApiClient from sentry.integrations.services.integration.model import RpcIntegration from sentry.integrations.source_code_management.repository import RepositoryClient -from sentry.integrations.utils import get_query_hash +from sentry.integrations.utils.atlassian_connect import get_query_hash from sentry.models.repository import Repository from sentry.shared_integrations.client.base import BaseApiResponseX from sentry.utils import jwt diff --git a/src/sentry/integrations/bitbucket/integration.py b/src/sentry/integrations/bitbucket/integration.py index 1e12015d59f81..f3d05641111f0 100644 --- a/src/sentry/integrations/bitbucket/integration.py +++ b/src/sentry/integrations/bitbucket/integration.py @@ -10,6 +10,7 @@ from sentry.identity.pipeline import IdentityProviderPipeline from sentry.integrations.base import ( FeatureDescription, + IntegrationDomain, IntegrationFeatures, IntegrationMetadata, IntegrationProvider, @@ -18,7 +19,14 @@ from sentry.integrations.services.repository import RpcRepository, repository_service from sentry.integrations.source_code_management.repository import RepositoryIntegration from sentry.integrations.tasks.migrate_repo import migrate_repo -from sentry.integrations.utils import AtlassianConnectValidationError, get_integration_from_request +from sentry.integrations.utils.atlassian_connect import ( + AtlassianConnectValidationError, + get_integration_from_request, +) +from sentry.integrations.utils.metrics import ( + IntegrationPipelineViewEvent, + IntegrationPipelineViewType, +) from sentry.models.repository import Repository from sentry.organizations.services.organization import RpcOrganizationSummary from sentry.pipeline import NestedPipelineView, PipelineView @@ -251,9 +259,18 @@ def setup(self): class VerifyInstallation(PipelineView): def dispatch(self, request: Request, pipeline) -> Response: - try: - integration = get_integration_from_request(request, BitbucketIntegrationProvider.key) - except AtlassianConnectValidationError: - return pipeline.error("Unable to verify installation.") - pipeline.bind_state("external_id", integration.external_id) - return pipeline.next_step() + with IntegrationPipelineViewEvent( + IntegrationPipelineViewType.VERIFY_INSTALLATION, + IntegrationDomain.SOURCE_CODE_MANAGEMENT, + BitbucketIntegrationProvider.key, + ).capture() as lifecycle: + try: + integration = get_integration_from_request( + request, BitbucketIntegrationProvider.key + ) + except AtlassianConnectValidationError as e: + lifecycle.record_failure({"failure_reason": str(e)}) + return pipeline.error("Unable to verify installation.") + + pipeline.bind_state("external_id", integration.external_id) + return pipeline.next_step() diff --git a/src/sentry/integrations/bitbucket/uninstalled.py b/src/sentry/integrations/bitbucket/uninstalled.py index 2c336f64c026d..6b660b68d9df4 100644 --- 
a/src/sentry/integrations/bitbucket/uninstalled.py +++ b/src/sentry/integrations/bitbucket/uninstalled.py @@ -9,7 +9,10 @@ from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service from sentry.integrations.services.repository import repository_service -from sentry.integrations.utils import AtlassianConnectValidationError, get_integration_from_jwt +from sentry.integrations.utils.atlassian_connect import ( + AtlassianConnectValidationError, + get_integration_from_jwt, +) @control_silo_endpoint diff --git a/src/sentry/integrations/bitbucket_server/integration.py b/src/sentry/integrations/bitbucket_server/integration.py index 01ab5b7ecadd6..04d5b7e5ea06b 100644 --- a/src/sentry/integrations/bitbucket_server/integration.py +++ b/src/sentry/integrations/bitbucket_server/integration.py @@ -1,6 +1,5 @@ from __future__ import annotations -import logging from typing import Any from urllib.parse import urlparse @@ -16,6 +15,7 @@ from sentry.integrations.base import ( FeatureDescription, + IntegrationDomain, IntegrationFeatureNotImplementedError, IntegrationFeatures, IntegrationMetadata, @@ -26,6 +26,10 @@ from sentry.integrations.services.repository.model import RpcRepository from sentry.integrations.source_code_management.repository import RepositoryIntegration from sentry.integrations.tasks.migrate_repo import migrate_repo +from sentry.integrations.utils.metrics import ( + IntegrationPipelineViewEvent, + IntegrationPipelineViewType, +) from sentry.models.repository import Repository from sentry.organizations.services.organization import RpcOrganizationSummary from sentry.pipeline import PipelineView @@ -36,8 +40,6 @@ from .client import BitbucketServerClient, BitbucketServerSetupClient from .repository import BitbucketServerRepositoryProvider -logger = logging.getLogger("sentry.integrations.bitbucket_server") - DESCRIPTION = """ Connect your Sentry organization to Bitbucket Server, enabling the following features: """ @@ -164,37 +166,38 @@ class OAuthLoginView(PipelineView): @method_decorator(csrf_exempt) def dispatch(self, request: Request, pipeline) -> HttpResponse: - if "oauth_token" in request.GET: - return pipeline.next_step() - - config = pipeline.fetch_state("installation_data") - client = BitbucketServerSetupClient( - config.get("url"), - config.get("consumer_key"), - config.get("private_key"), - config.get("verify_ssl"), - ) + with IntegrationPipelineViewEvent( + IntegrationPipelineViewType.OAUTH_LOGIN, + IntegrationDomain.SOURCE_CODE_MANAGEMENT, + BitbucketServerIntegrationProvider.key, + ).capture() as lifecycle: + if "oauth_token" in request.GET: + return pipeline.next_step() - try: - request_token = client.get_request_token() - except ApiError as error: - logger.info( - "identity.bitbucket-server.request-token", - extra={"url": config.get("url"), "error": error}, + config = pipeline.fetch_state("installation_data") + client = BitbucketServerSetupClient( + config.get("url"), + config.get("consumer_key"), + config.get("private_key"), + config.get("verify_ssl"), ) - return pipeline.error(f"Could not fetch a request token from Bitbucket. 
{error}") - pipeline.bind_state("request_token", request_token) - if not request_token.get("oauth_token"): - logger.info( - "identity.bitbucket-server.oauth-token", - extra={"url": config.get("url")}, - ) - return pipeline.error("Missing oauth_token") + try: + request_token = client.get_request_token() + except ApiError as error: + lifecycle.record_failure({"failure_reason": str(error), "url": config.get("url")}) + return pipeline.error(f"Could not fetch a request token from Bitbucket. {error}") - authorize_url = client.get_authorize_url(request_token) + pipeline.bind_state("request_token", request_token) + if not request_token.get("oauth_token"): + lifecycle.record_failure( + {"failure_reason": "missing oauth_token", "url": config.get("url")} + ) + return pipeline.error("Missing oauth_token") - return self.redirect(authorize_url) + authorize_url = client.get_authorize_url(request_token) + + return self.redirect(authorize_url) class OAuthCallbackView(PipelineView): @@ -205,25 +208,32 @@ class OAuthCallbackView(PipelineView): @method_decorator(csrf_exempt) def dispatch(self, request: Request, pipeline) -> HttpResponse: - config = pipeline.fetch_state("installation_data") - client = BitbucketServerSetupClient( - config.get("url"), - config.get("consumer_key"), - config.get("private_key"), - config.get("verify_ssl"), - ) - - try: - access_token = client.get_access_token( - pipeline.fetch_state("request_token"), request.GET["oauth_token"] + with IntegrationPipelineViewEvent( + IntegrationPipelineViewType.OAUTH_CALLBACK, + IntegrationDomain.SOURCE_CODE_MANAGEMENT, + BitbucketServerIntegrationProvider.key, + ).capture() as lifecycle: + config = pipeline.fetch_state("installation_data") + client = BitbucketServerSetupClient( + config.get("url"), + config.get("consumer_key"), + config.get("private_key"), + config.get("verify_ssl"), ) - pipeline.bind_state("access_token", access_token) + try: + access_token = client.get_access_token( + pipeline.fetch_state("request_token"), request.GET["oauth_token"] + ) + + pipeline.bind_state("access_token", access_token) - return pipeline.next_step() - except ApiError as error: - logger.info("identity.bitbucket-server.access-token", extra={"error": error}) - return pipeline.error(f"Could not fetch an access token from Bitbucket. {str(error)}") + return pipeline.next_step() + except ApiError as error: + lifecycle.record_failure({"failure_reason": str(error)}) + return pipeline.error( + f"Could not fetch an access token from Bitbucket. 
{str(error)}" + ) class BitbucketServerIntegration(RepositoryIntegration): diff --git a/src/sentry/integrations/discord/actions/issue_alert/form.py b/src/sentry/integrations/discord/actions/issue_alert/form.py index 1fa0644e4995d..62aafe5d877f3 100644 --- a/src/sentry/integrations/discord/actions/issue_alert/form.py +++ b/src/sentry/integrations/discord/actions/issue_alert/form.py @@ -6,6 +6,7 @@ from django.core.exceptions import ValidationError from django.forms.fields import ChoiceField +from sentry.constants import ObjectStatus from sentry.integrations.discord.utils.channel import validate_channel_id from sentry.integrations.discord.utils.channel_from_url import get_channel_id_from_url from sentry.integrations.services.integration import integration_service @@ -36,7 +37,9 @@ def clean(self) -> dict[str, object] | None: cleaned_data: dict[str, object] = super().clean() or {} channel_id = cleaned_data.get("channel_id") server = cleaned_data.get("server") - integration = integration_service.get_integration(integration_id=server) + integration = integration_service.get_integration( + integration_id=server, status=ObjectStatus.ACTIVE + ) if not server or not integration: raise forms.ValidationError( diff --git a/src/sentry/integrations/discord/integration.py b/src/sentry/integrations/discord/integration.py index f44ef2404a09f..e99a619c2350b 100644 --- a/src/sentry/integrations/discord/integration.py +++ b/src/sentry/integrations/discord/integration.py @@ -148,6 +148,12 @@ def get_pipeline_views(self) -> Sequence[PipelineView]: def build_integration(self, state: Mapping[str, object]) -> Mapping[str, object]: guild_id = str(state.get("guild_id")) + + if not guild_id.isdigit(): + raise IntegrationError( + "Invalid guild ID. The Discord guild ID must be entirely numeric." 
+ ) + try: guild_name = self.client.get_guild_name(guild_id=guild_id) except (ApiError, AttributeError): diff --git a/src/sentry/integrations/discord/requests/base.py b/src/sentry/integrations/discord/requests/base.py index 930d030407332..f22c438e98e90 100644 --- a/src/sentry/integrations/discord/requests/base.py +++ b/src/sentry/integrations/discord/requests/base.py @@ -9,6 +9,7 @@ from rest_framework.request import Request from sentry import options +from sentry.constants import ObjectStatus from sentry.identity.services.identity import RpcIdentityProvider from sentry.identity.services.identity.model import RpcIdentity from sentry.identity.services.identity.service import identity_service @@ -224,7 +225,7 @@ def get_identity_str(self) -> str | None: def validate_integration(self) -> None: if not self._integration: self._integration = integration_service.get_integration( - provider="discord", external_id=self.guild_id + provider="discord", external_id=self.guild_id, status=ObjectStatus.ACTIVE ) self._info("discord.validate.integration") diff --git a/src/sentry/integrations/discord/webhooks/command.py b/src/sentry/integrations/discord/webhooks/command.py index 5f41165998710..471b7bdcbead1 100644 --- a/src/sentry/integrations/discord/webhooks/command.py +++ b/src/sentry/integrations/discord/webhooks/command.py @@ -1,10 +1,22 @@ +from collections.abc import Callable, Iterable +from dataclasses import dataclass + from rest_framework.response import Response +from sentry.integrations.discord.requests.base import DiscordRequest +from sentry.integrations.discord.spec import DiscordMessagingSpec +from sentry.integrations.discord.utils import logger from sentry.integrations.discord.views.link_identity import build_linking_url from sentry.integrations.discord.views.unlink_identity import build_unlinking_url from sentry.integrations.discord.webhooks.handler import DiscordInteractionHandler - -from ..utils import logger +from sentry.integrations.messaging import commands +from sentry.integrations.messaging.commands import ( + CommandInput, + CommandNotMatchedError, + MessagingIntegrationCommand, + MessagingIntegrationCommandDispatcher, +) +from sentry.integrations.messaging.spec import MessagingIntegrationSpec LINK_USER_MESSAGE = "[Click here]({url}) to link your Discord account to your Sentry account." ALREADY_LINKED_MESSAGE = "You are already linked to the Sentry account with email: `{email}`." @@ -22,12 +34,6 @@ """ -class DiscordCommandNames: - LINK = "link" - UNLINK = "unlink" - HELP = "help" - - class DiscordCommandHandler(DiscordInteractionHandler): """ Handles logic for Discord Command interactions. 
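Editor's note: the hunk that follows replaces this handler's if/elif chain with the shared messaging-command dispatcher. A self-contained sketch of that pattern, simplified from the `sentry.integrations.messaging.commands` types the diff imports (real commands are `MessagingIntegrationCommand` objects, not bare strings).

```python
# Simplified sketch of the dispatcher pattern adopted below: handlers are
# exposed as (command, callable) pairs, and dispatch walks them in order.
from collections.abc import Callable, Iterable

class CommandNotMatchedError(Exception):
    pass

class HelpDispatcher:
    @property
    def command_handlers(self) -> Iterable[tuple[str, Callable[[str], str]]]:
        yield "help", lambda _: "available commands: help, link, unlink"
        yield "link", lambda _: "click here to link your account"

    def dispatch(self, command_name: str) -> str:
        for name, handler in self.command_handlers:
            if name == command_name:
                return handler(command_name)
        raise CommandNotMatchedError(command_name)

dispatcher = HelpDispatcher()
print(dispatcher.dispatch("link"))  # unknown commands raise; the caller falls back to help
```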
@@ -37,25 +43,39 @@ class DiscordCommandHandler(DiscordInteractionHandler): def handle(self) -> Response: command_name = self.request.get_command_name() - logging_data = self.request.logging_data + cmd_input = CommandInput(command_name) + dispatcher = DiscordCommandDispatcher(self.request) + try: + message = dispatcher.dispatch(cmd_input) + except CommandNotMatchedError: + logger.warning( + "discord.interaction.command.unknown", + extra={"command": command_name, **self.request.logging_data}, + ) + message = dispatcher.help(cmd_input) - if command_name == DiscordCommandNames.LINK: - return self.link_user() - elif command_name == DiscordCommandNames.UNLINK: - return self.unlink_user() - elif command_name == DiscordCommandNames.HELP: - return self.help() + return self.send_message(message) - logger.warning( - "discord.interaction.command.unknown", extra={"command": command_name, **logging_data} - ) - return self.help() - def link_user(self) -> Response: +@dataclass(frozen=True) +class DiscordCommandDispatcher(MessagingIntegrationCommandDispatcher[str]): + request: DiscordRequest + + @property + def integration_spec(self) -> MessagingIntegrationSpec: + return DiscordMessagingSpec() + + @property + def command_handlers( + self, + ) -> Iterable[tuple[MessagingIntegrationCommand, Callable[[CommandInput], str]]]: + yield commands.HELP, self.help + yield commands.LINK_IDENTITY, self.link_user + yield commands.UNLINK_IDENTITY, self.unlink_user + + def link_user(self, _: CommandInput) -> str: if self.request.has_identity(): - return self.send_message( - ALREADY_LINKED_MESSAGE.format(email=self.request.get_identity_str()) - ) + return ALREADY_LINKED_MESSAGE.format(email=self.request.get_identity_str()) if not self.request.integration or not self.request.user_id: logger.warning( @@ -65,18 +85,18 @@ def link_user(self) -> Response: "hasUserId": self.request.user_id, }, ) - return self.send_message(MISSING_DATA_MESSAGE) + return MISSING_DATA_MESSAGE link_url = build_linking_url( integration=self.request.integration, discord_id=self.request.user_id, ) - return self.send_message(LINK_USER_MESSAGE.format(url=link_url)) + return LINK_USER_MESSAGE.format(url=link_url) - def unlink_user(self) -> Response: + def unlink_user(self, _: CommandInput) -> str: if not self.request.has_identity(): - return self.send_message(NOT_LINKED_MESSAGE) + return NOT_LINKED_MESSAGE # if self.request.has_identity() then these must not be None assert self.request.integration is not None @@ -87,7 +107,7 @@ def unlink_user(self) -> Response: discord_id=self.request.user_id, ) - return self.send_message(UNLINK_USER_MESSAGE.format(url=unlink_url)) + return UNLINK_USER_MESSAGE.format(url=unlink_url) - def help(self) -> Response: - return self.send_message(HELP_MESSAGE) + def help(self, _: CommandInput) -> str: + return HELP_MESSAGE diff --git a/src/sentry/integrations/discord/webhooks/message_component.py b/src/sentry/integrations/discord/webhooks/message_component.py index de92003824861..39d58be7d61ae 100644 --- a/src/sentry/integrations/discord/webhooks/message_component.py +++ b/src/sentry/integrations/discord/webhooks/message_component.py @@ -18,7 +18,12 @@ ) from sentry.integrations.discord.message_builder.base.flags import DiscordMessageFlags from sentry.integrations.discord.requests.base import DiscordRequest +from sentry.integrations.discord.spec import DiscordMessagingSpec from sentry.integrations.discord.webhooks.handler import DiscordInteractionHandler +from sentry.integrations.messaging.metrics import ( + 
MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.models.activity import ActivityIntegration from sentry.models.group import Group from sentry.models.grouphistory import STATUS_TO_STRING_LOOKUP, GroupHistoryStatus @@ -85,36 +90,51 @@ def handle(self) -> Response: ) return self.send_message(NOT_IN_ORG) + def record_event(interaction_type: MessagingInteractionType) -> MessagingInteractionEvent: + return MessagingInteractionEvent( + interaction_type, + DiscordMessagingSpec(), + user=self.user, + organization=(self.group.organization if self.group else None), + ) + if self.custom_id.startswith(CustomIds.ASSIGN_DIALOG): logger.info("discord.interaction.component.assign_dialog", extra={**logging_data}) - return self.assign_dialog() + with record_event(MessagingInteractionType.ASSIGN_DIALOG).capture(): + return self.assign_dialog() elif self.custom_id.startswith(CustomIds.ASSIGN): logger.info( "discord.interaction.component.assign", extra={**logging_data, "assign_to": self.request.get_selected_options()[0]}, ) - return self.assign() + with record_event(MessagingInteractionType.ASSIGN).capture(): + return self.assign() elif self.custom_id.startswith(CustomIds.RESOLVE_DIALOG): logger.info("discord.interaction.component.resolve_dialog", extra={**logging_data}) - return self.resolve_dialog() + with record_event(MessagingInteractionType.RESOLVE_DIALOG).capture(): + return self.resolve_dialog() elif self.custom_id.startswith(CustomIds.RESOLVE): logger.info("discord.interaction.component.resolve", extra={**logging_data}) - return self.resolve() + with record_event(MessagingInteractionType.RESOLVE).capture(): + return self.resolve() elif self.custom_id.startswith(CustomIds.UNRESOLVE): logger.info("discord.interaction.component.unresolve", extra={**logging_data}) - return self.unresolve() + with record_event(MessagingInteractionType.UNRESOLVE).capture(): + return self.unresolve() elif self.custom_id.startswith(CustomIds.MARK_ONGOING): logger.info("discord.interaction.component.mark_ongoing", extra={**logging_data}) - return self.unresolve(from_mark_ongoing=True) + with record_event(MessagingInteractionType.MARK_ONGOING).capture(): + return self.unresolve(from_mark_ongoing=True) elif self.custom_id.startswith(CustomIds.ARCHIVE): logger.info("discord.interaction.component.archive", extra={**logging_data}) - return self.archive() + with record_event(MessagingInteractionType.ARCHIVE).capture(): + return self.archive() logger.warning("discord.interaction.component.unknown_custom_id", extra={**logging_data}) return self.send_message(INVALID_GROUP_ID) diff --git a/src/sentry/integrations/github/client.py b/src/sentry/integrations/github/client.py index 29a8a55b0027e..98c189d50bbbf 100644 --- a/src/sentry/integrations/github/client.py +++ b/src/sentry/integrations/github/client.py @@ -554,7 +554,7 @@ def get_with_pagination( with sentry_sdk.start_span( op=f"{self.integration_type}.http.pagination", - description=f"{self.integration_type}.http_response.pagination.{self.name}", + name=f"{self.integration_type}.http_response.pagination.{self.name}", ): output = [] diff --git a/src/sentry/integrations/github/integration.py b/src/sentry/integrations/github/integration.py index f1f79cfb102eb..8da637d0cde63 100644 --- a/src/sentry/integrations/github/integration.py +++ b/src/sentry/integrations/github/integration.py @@ -3,6 +3,7 @@ import logging import re from collections.abc import Mapping, Sequence +from enum import StrEnum from typing import Any from urllib.parse import parse_qsl @@ -18,6 +19,7 
@@ from sentry.identity.github import GitHubIdentityProvider, get_user_info from sentry.integrations.base import ( FeatureDescription, + IntegrationDomain, IntegrationFeatures, IntegrationMetadata, IntegrationProvider, @@ -31,6 +33,10 @@ from sentry.integrations.source_code_management.repository import RepositoryIntegration from sentry.integrations.tasks.migrate_repo import migrate_repo from sentry.integrations.utils.code_mapping import RepoTree +from sentry.integrations.utils.metrics import ( + IntegrationPipelineViewEvent, + IntegrationPipelineViewType, +) from sentry.models.repository import Repository from sentry.organizations.absolute_url import generate_organization_url from sentry.organizations.services.organization import RpcOrganizationSummary, organization_service @@ -399,57 +405,93 @@ def setup(self) -> None: ) -class OAuthLoginView(PipelineView): - def dispatch(self, request: Request, pipeline) -> HttpResponseBase: - self.determine_active_organization(request) +class GitHubInstallationError(StrEnum): + INVALID_STATE = "Invalid state" + MISSING_TOKEN = "Missing access token" + MISSING_LOGIN = "Missing login info" + PENDING_DELETION = "GitHub installation pending deletion." + INSTALLATION_EXISTS = "Github installed on another Sentry organization." + USER_MISMATCH = "Authenticated user is not the same as who installed the app." + MISSING_INTEGRATION = "Integration does not exist." - ghip = GitHubIdentityProvider() - github_client_id = ghip.get_oauth_client_id() - github_client_secret = ghip.get_oauth_client_secret() - installation_id = request.GET.get("installation_id") - if installation_id: - pipeline.bind_state("installation_id", installation_id) +def record_event(event: IntegrationPipelineViewType): + return IntegrationPipelineViewEvent( + event, IntegrationDomain.SOURCE_CODE_MANAGEMENT, GitHubIntegrationProvider.key + ) - if not request.GET.get("state"): - state = pipeline.signature - redirect_uri = absolute_uri( - reverse("sentry-extension-setup", kwargs={"provider_id": "github"}) - ) - return self.redirect( - f"{ghip.get_oauth_authorize_url()}?client_id={github_client_id}&state={state}&redirect_uri={redirect_uri}" +class OAuthLoginView(PipelineView): + def dispatch(self, request: Request, pipeline) -> HttpResponseBase: + with record_event(IntegrationPipelineViewType.OAUTH_LOGIN).capture() as lifecycle: + self.determine_active_organization(request) + lifecycle.add_extra( + "organization_id", + self.active_organization.organization.id if self.active_organization else None, ) - # At this point, we are past the GitHub "authorize" step - if request.GET.get("state") != pipeline.signature: - return error(request, self.active_organization, error_short="Invalid state") - - # similar to OAuth2CallbackView.get_token_params - data = { - "code": request.GET.get("code"), - "client_id": github_client_id, - "client_secret": github_client_secret, - } - - # similar to OAuth2CallbackView.exchange_token - req = safe_urlopen(url=ghip.get_oauth_access_token_url(), data=data) - - try: - body = safe_urlread(req).decode("utf-8") - payload = dict(parse_qsl(body)) - except Exception: - payload = {} - - if "access_token" not in payload: - return error(request, self.active_organization, error_short="Missing access token") - - authenticated_user_info = get_user_info(payload["access_token"]) - if "login" not in authenticated_user_info: - return error(request, self.active_organization, error_short="Missing login info") + ghip = GitHubIdentityProvider() + github_client_id = ghip.get_oauth_client_id() + 
github_client_secret = ghip.get_oauth_client_secret() + + installation_id = request.GET.get("installation_id") + if installation_id: + pipeline.bind_state("installation_id", installation_id) + + if not request.GET.get("state"): + state = pipeline.signature + + redirect_uri = absolute_uri( + reverse("sentry-extension-setup", kwargs={"provider_id": "github"}) + ) + return self.redirect( + f"{ghip.get_oauth_authorize_url()}?client_id={github_client_id}&state={state}&redirect_uri={redirect_uri}" + ) + + # At this point, we are past the GitHub "authorize" step + if request.GET.get("state") != pipeline.signature: + lifecycle.record_failure({"failure_reason": GitHubInstallationError.INVALID_STATE}) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.INVALID_STATE, + ) + + # similar to OAuth2CallbackView.get_token_params + data = { + "code": request.GET.get("code"), + "client_id": github_client_id, + "client_secret": github_client_secret, + } - pipeline.bind_state("github_authenticated_user", authenticated_user_info["login"]) - return pipeline.next_step() + # similar to OAuth2CallbackView.exchange_token + req = safe_urlopen(url=ghip.get_oauth_access_token_url(), data=data) + + try: + body = safe_urlread(req).decode("utf-8") + payload = dict(parse_qsl(body)) + except Exception: + payload = {} + + if "access_token" not in payload: + lifecycle.record_failure({"failure_reason": GitHubInstallationError.MISSING_TOKEN}) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.MISSING_TOKEN, + ) + + authenticated_user_info = get_user_info(payload["access_token"]) + if "login" not in authenticated_user_info: + lifecycle.record_failure({"failure_reason": GitHubInstallationError.MISSING_LOGIN}) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.MISSING_LOGIN, + ) + + pipeline.bind_state("github_authenticated_user", authenticated_user_info["login"]) + return pipeline.next_step() class GitHubInstallation(PipelineView): @@ -458,67 +500,82 @@ def get_app_url(self) -> str: return f"https://github.com/apps/{slugify(name)}" def dispatch(self, request: Request, pipeline: Pipeline) -> HttpResponseBase: - installation_id = request.GET.get( - "installation_id", pipeline.fetch_state("installation_id") - ) - if installation_id is None: - return self.redirect(self.get_app_url()) - - pipeline.bind_state("installation_id", installation_id) - self.determine_active_organization(request) - - integration_pending_deletion_exists = False - if self.active_organization: - # We want to wait until the scheduled deletions finish or else the - # post install to migrate repos do not work. 
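# A toy sketch of the lifecycle contract the rewritten pipeline views above
# rely on: capture() yields an object that collects extras, and a clean exit
# counts as success unless record_failure() was called first. The names below
# (ToyLifecycle, capture) are illustrative stand-ins, not the real
# sentry.integrations.utils.metrics API.
from contextlib import contextmanager

class ToyLifecycle:
    def __init__(self) -> None:
        self.extras: dict[str, object] = {}
        self.failed = False

    def add_extra(self, key: str, value: object) -> None:
        self.extras[key] = value

    def record_failure(self, extra: dict[str, object] | None = None) -> None:
        self.failed = True
        self.extras.update(extra or {})

@contextmanager
def capture():
    lifecycle = ToyLifecycle()
    yield lifecycle
    # A metric would be emitted here; success is assumed on clean exit.
    print("failure" if lifecycle.failed else "success", lifecycle.extras)

with capture() as lifecycle:
    lifecycle.add_extra("organization_id", 42)
    lifecycle.record_failure({"failure_reason": "Invalid state"})
# prints: failure {'organization_id': 42, 'failure_reason': 'Invalid state'}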
- integration_pending_deletion_exists = OrganizationIntegration.objects.filter( - integration__provider=GitHubIntegrationProvider.key, - organization_id=self.active_organization.organization.id, - status=ObjectStatus.PENDING_DELETION, - ).exists() - - if integration_pending_deletion_exists: - return error( - request, - self.active_organization, - error_short="GitHub installation pending deletion.", - error_long=ERR_INTEGRATION_PENDING_DELETION, + with record_event(IntegrationPipelineViewType.GITHUB_INSTALLATION).capture() as lifecycle: + installation_id = request.GET.get( + "installation_id", pipeline.fetch_state("installation_id") ) + if installation_id is None: + return self.redirect(self.get_app_url()) - try: - # We want to limit GitHub integrations to 1 organization - installations_exist = OrganizationIntegration.objects.filter( - integration=Integration.objects.get(external_id=installation_id) - ).exists() - - except Integration.DoesNotExist: - return pipeline.next_step() - - if installations_exist: - return error( - request, - self.active_organization, - error_short="Github installed on another Sentry organization.", - error_long=ERR_INTEGRATION_EXISTS_ON_ANOTHER_ORG, + pipeline.bind_state("installation_id", installation_id) + self.determine_active_organization(request) + lifecycle.add_extra( + "organization_id", + self.active_organization.organization.id if self.active_organization else None, ) - # OrganizationIntegration does not exist, but Integration does exist. - try: - integration = Integration.objects.get( - external_id=installation_id, status=ObjectStatus.ACTIVE - ) - except Integration.DoesNotExist: - return error(request, self.active_organization) - - # Check that the authenticated GitHub user is the same as who installed the app. - if ( - pipeline.fetch_state("github_authenticated_user") - != integration.metadata["sender"]["login"] - ): - return error( - request, - self.active_organization, - error_short="Authenticated user is not the same as who installed the app", - ) + integration_pending_deletion_exists = False + if self.active_organization: + # We want to wait until the scheduled deletions finish; otherwise the + # post-install repo migration does not work. + integration_pending_deletion_exists = OrganizationIntegration.objects.filter( + integration__provider=GitHubIntegrationProvider.key, + organization_id=self.active_organization.organization.id, + status=ObjectStatus.PENDING_DELETION, + ).exists() + + if integration_pending_deletion_exists: + lifecycle.record_failure( + {"failure_reason": GitHubInstallationError.PENDING_DELETION} + ) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.PENDING_DELETION, + error_long=ERR_INTEGRATION_PENDING_DELETION, + ) + + try: + # We want to limit GitHub integrations to 1 organization + installations_exist = OrganizationIntegration.objects.filter( + integration=Integration.objects.get(external_id=installation_id) + ).exists() + + except Integration.DoesNotExist: + return pipeline.next_step() + + if installations_exist: + lifecycle.record_failure( + {"failure_reason": GitHubInstallationError.INSTALLATION_EXISTS} + ) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.INSTALLATION_EXISTS, + error_long=ERR_INTEGRATION_EXISTS_ON_ANOTHER_ORG, + ) + + # OrganizationIntegration does not exist, but Integration does exist.
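# A minimal sketch of the StrEnum property the error handling above relies on:
# members are real strings, so GitHubInstallationError values can be passed
# directly as error_short and as metric extras without .value. InstallError is
# a hypothetical stand-in, assuming Python 3.11+ StrEnum semantics.
from enum import StrEnum

class InstallError(StrEnum):
    PENDING_DELETION = "GitHub installation pending deletion."

assert InstallError.PENDING_DELETION == "GitHub installation pending deletion."
assert f"{InstallError.PENDING_DELETION}" == "GitHub installation pending deletion."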
+ try: + integration = Integration.objects.get( + external_id=installation_id, status=ObjectStatus.ACTIVE + ) + except Integration.DoesNotExist: + lifecycle.record_failure( + {"failure_reason": GitHubInstallationError.MISSING_INTEGRATION} + ) + return error(request, self.active_organization) + + # Check that the authenticated GitHub user is the same as who installed the app. + if ( + pipeline.fetch_state("github_authenticated_user") + != integration.metadata["sender"]["login"] + ): + lifecycle.record_failure({"failure_reason": GitHubInstallationError.USER_MISMATCH}) + return error( + request, + self.active_organization, + error_short=GitHubInstallationError.USER_MISMATCH, + ) - return pipeline.next_step() + return pipeline.next_step() diff --git a/src/sentry/integrations/github/tasks/pr_comment.py b/src/sentry/integrations/github/tasks/pr_comment.py index 3d9c1b91c8290..93536c32b9e96 100644 --- a/src/sentry/integrations/github/tasks/pr_comment.py +++ b/src/sentry/integrations/github/tasks/pr_comment.py @@ -9,6 +9,7 @@ from snuba_sdk import Column, Condition, Direction, Entity, Function, Op, OrderBy, Query from snuba_sdk import Request as SnubaRequest +from sentry import features from sentry.constants import ObjectStatus from sentry.integrations.github.constants import ISSUE_LOCKED_ERROR_MESSAGE, RATE_LIMITED_MESSAGE from sentry.integrations.github.tasks.utils import PullRequestIssue @@ -215,6 +216,22 @@ def github_comment_workflow(pullrequest_id: int, project_id: int): top_24_issues = issue_list[:24] # 24 is the P99 for issues-per-PR + enabled_copilot = features.has("projects:ai-autofix", project) or features.has( + "organizations:autofix", organization + ) + github_copilot_actions = ( + [ + { + "name": f"Root cause #{i + 1}", + "type": "copilot-chat", + "prompt": f"@sentry root cause issue {str(issue_id)} with PR URL https://github.com/{repo.name}/pull/{str(pr_key)}", + } + for i, issue_id in enumerate(top_24_issues[:3]) + ] + if enabled_copilot + else None + ) + try: installation.create_or_update_comment( repo=repo, @@ -223,6 +240,7 @@ def github_comment_workflow(pullrequest_id: int, project_id: int): pullrequest_id=pullrequest_id, issue_list=top_24_issues, metrics_base=MERGED_PR_METRICS_BASE, + github_copilot_actions=github_copilot_actions, ) except ApiError as e: cache.delete(cache_key) diff --git a/src/sentry/integrations/github_enterprise/webhook.py b/src/sentry/integrations/github_enterprise/webhook.py index 33b700d0aadad..706544f1d9272 100644 --- a/src/sentry/integrations/github_enterprise/webhook.py +++ b/src/sentry/integrations/github_enterprise/webhook.py @@ -17,6 +17,7 @@ from sentry import options from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus +from sentry.constants import ObjectStatus from sentry.integrations.github.webhook import ( InstallationEventWebhook, PullRequestEventWebhook, @@ -79,6 +80,7 @@ def get_installation_metadata(event, host): integration = integration_service.get_integration( external_id=external_id, provider="github_enterprise", + status=ObjectStatus.ACTIVE, ) if integration is None: metrics.incr("integrations.github_enterprise.does_not_exist") diff --git a/src/sentry/integrations/gitlab/client.py b/src/sentry/integrations/gitlab/client.py index f56d0ce31fdc7..3b720b16353f6 100644 --- a/src/sentry/integrations/gitlab/client.py +++ b/src/sentry/integrations/gitlab/client.py @@ -9,6 +9,7 @@ from requests import PreparedRequest from sentry.identity.services.identity.model import RpcIdentity +from 
sentry.integrations.base import IntegrationFeatureNotImplementedError from sentry.integrations.gitlab.blame import fetch_file_blames from sentry.integrations.gitlab.utils import GitLabApiClientPath from sentry.integrations.source_code_management.commit_context import ( @@ -309,6 +310,9 @@ def get_commit(self, project_id, sha): """ return self.get_cached(GitLabApiClientPath.commit.format(project=project_id, sha=sha)) + def get_merge_commit_sha_from_commit(self, repo: str, sha: str) -> str | None: + raise IntegrationFeatureNotImplementedError + def compare_commits(self, project_id, start_sha, end_sha): """Compare commits between two SHAs diff --git a/src/sentry/integrations/gitlab/webhooks.py b/src/sentry/integrations/gitlab/webhooks.py index 063cbe02fa008..ad3a507153d76 100644 --- a/src/sentry/integrations/gitlab/webhooks.py +++ b/src/sentry/integrations/gitlab/webhooks.py @@ -168,7 +168,7 @@ def __call__( authors = {} - # TODO gitlab only sends a max of 20 commits. If a push contains + # TODO: gitlab only sends a max of 20 commits. If a push contains # more commits they provide a total count and require additional API # requests to fetch the commit details for commit in event.get("commits", []): diff --git a/src/sentry/integrations/jira/client.py b/src/sentry/integrations/jira/client.py index 1fd6cfa83f1d4..8bf1d6db7e488 100644 --- a/src/sentry/integrations/jira/client.py +++ b/src/sentry/integrations/jira/client.py @@ -8,7 +8,7 @@ from sentry.integrations.client import ApiClient from sentry.integrations.services.integration.model import RpcIntegration -from sentry.integrations.utils import get_query_hash +from sentry.integrations.utils.atlassian_connect import get_query_hash from sentry.shared_integrations.exceptions import ApiError from sentry.utils import jwt from sentry.utils.http import absolute_uri @@ -25,6 +25,7 @@ class JiraCloudClient(ApiClient): COMMENTS_URL = "/rest/api/2/issue/%s/comment" COMMENT_URL = "/rest/api/2/issue/%s/comment/%s" STATUS_URL = "/rest/api/2/status" + STATUS_SEARCH_URL = "/rest/api/2/statuses/search" CREATE_URL = "/rest/api/2/issue" ISSUE_URL = "/rest/api/2/issue/%s" META_URL = "/rest/api/2/issue/createmeta" @@ -224,3 +225,6 @@ def get_field_autocomplete(self, name, value): return self.get_cached( self.AUTOCOMPLETE_URL, params={"fieldName": jql_name, "fieldValue": value} ) + + def get_project_statuses(self, project_id: str) -> dict[str, Any]: + return dict(self.get_cached(self.STATUS_SEARCH_URL, params={"projectId": project_id})) diff --git a/src/sentry/integrations/jira/integration.py b/src/sentry/integrations/jira/integration.py index 599e85abaf2ec..1e054814013e5 100644 --- a/src/sentry/integrations/jira/integration.py +++ b/src/sentry/integrations/jira/integration.py @@ -32,6 +32,7 @@ from sentry.shared_integrations.exceptions import ( ApiError, ApiHostError, + ApiRateLimitedError, ApiUnauthorized, IntegrationError, IntegrationFormError, @@ -41,6 +42,7 @@ from sentry.users.services.user.service import user_service from sentry.utils.strings import truncatechars +from ...api.exceptions import ResourceDoesNotExist from .client import JiraCloudClient from .models.create_issue_metadata import JIRA_CUSTOM_FIELD_TYPES from .utils import build_user_choice @@ -106,10 +108,17 @@ aspects={"externalInstall": external_install}, ) +# Some Jira errors for invalid field values don't actually provide the field +# ID in an easily mappable way, so we have to manually map known error types +# here to make it explicit to the user what failed. 
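# A small sketch, under the assumption that Jira's "errorMessages" payload is a
# flat list of strings, of how a matcher table like CUSTOM_ERROR_MESSAGE_MATCHERS
# folds unkeyed messages into per-field errors; the sample message is made up.
import re

MATCHERS = [(re.compile(r"Team with id '.*' not found.$"), "Team Field")]

def map_error_messages(messages: list[str]) -> dict[str, list[str]]:
    mapped: dict[str, list[str]] = {}
    for message in messages:
        for pattern, field in MATCHERS:
            if pattern.match(message):
                mapped.setdefault(field, []).append(message)
    return mapped

print(map_error_messages(["Team with id '123' not found."]))
# {'Team Field': ["Team with id '123' not found."]}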
+CUSTOM_ERROR_MESSAGE_MATCHERS = [(re.compile("Team with id '.*' not found.$"), "Team Field")] + # Hide linked issues fields because we don't have the necessary UI for fully specifying # a valid link (e.g. "is blocked by ISSUE-1"). HIDDEN_ISSUE_FIELDS = ["issuelinks"] +JIRA_PROJECT_SIZE_LOGGING_THRESHOLD = 5 + class JiraIntegration(IssueSyncIntegration): comment_key = "sync_comments" @@ -124,7 +133,7 @@ class JiraIntegration(IssueSyncIntegration): def use_email_scope(cls): return settings.JIRA_USE_EMAIL_SCOPE - def get_organization_config(self): + def get_organization_config(self) -> dict[str, Any]: configuration = [ { "name": self.outbound_status_key, @@ -140,8 +149,7 @@ def get_organization_config(self): "items": [], # Populated with projects }, "mappedSelectors": { - "on_resolve": {"choices": [], "placeholder": _("Select a status")}, - "on_unresolve": {"choices": [], "placeholder": _("Select a status")}, + # Populated on a per-project basis below }, "columnLabels": { "on_resolve": _("When resolved"), @@ -149,6 +157,7 @@ def get_organization_config(self): }, "mappedColumnLabel": _("Jira Project"), "formatMessageValue": False, + "perItemMapping": True, }, { "name": self.outbound_assignee_key, @@ -206,13 +215,59 @@ def get_organization_config(self): client = self.get_client() - try: - statuses = [(c["id"], c["name"]) for c in client.get_valid_statuses()] - configuration[0]["mappedSelectors"]["on_resolve"]["choices"] = statuses - configuration[0]["mappedSelectors"]["on_unresolve"]["choices"] = statuses + logging_context: dict[str, Any] = {} + + if not self.org_integration: + raise ResourceDoesNotExist() + + logging_context["org_integration_id"] = self.org_integration.id + logging_context["integration_id"] = self.org_integration.integration_id + try: projects = [{"value": p["id"], "label": p["name"]} for p in client.get_projects_list()] configuration[0]["addDropdown"]["items"] = projects + + # We need to monitor if we're getting a large volume of requests + # with a significant number of projects. Issuing 5 requests or more + # per configuration load is something we may need to address via + # a bulk query. + + # Jira's API supports querying all available statuses, along with + # their project and workflow usages, but this is paginated and may + # have many of the same query concerns depending on how many + # statuses are defined within the Jira organization. + + logging_context["num_projects"] = len(projects) + if len(projects) > JIRA_PROJECT_SIZE_LOGGING_THRESHOLD: + logger.info( + "excessive_project_status_requests", + extra={ + **logging_context, + }, + ) + # Each project can have a different set of statuses assignable for + # issues, so we need to create per-project mappings. 
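+            # For reference, the loop below produces a structure shaped
+            # roughly like this (project IDs and statuses are hypothetical):
+            #   configuration[0]["mappedSelectors"] == {
+            #       "10001": {
+            #           "on_resolve": {"choices": [("1", "To Do"), ("3", "Done")], "placeholder": "Select a status"},
+            #           "on_unresolve": {"choices": [("1", "To Do"), ("3", "Done")], "placeholder": "Select a status"},
+            #       },
+            #   }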
+ for proj in projects: + project_id = proj["value"] + project_statuses = client.get_project_statuses(project_id).get("values") + if not project_statuses: + continue + + statuses_for_project = [(c["id"], c["name"]) for c in project_statuses] + + configuration[0]["mappedSelectors"][project_id] = { + "on_resolve": { + "choices": statuses_for_project, + "placeholder": _("Select a status"), + }, + "on_unresolve": { + "choices": statuses_for_project, + "placeholder": _("Select a status"), + }, + } + except ApiRateLimitedError: + logger.warning("config_query_rate_limited", extra={**logging_context}) + raise except ApiError: configuration[0]["disabled"] = True configuration[0]["disabledReason"] = _( @@ -485,10 +540,28 @@ def error_message_from_json(self, data): def error_fields_from_json(self, data): errors = data.get("errors") - if not errors: + error_messages = data.get("errorMessages") + + if not errors and not error_messages: + return None + + error_data = {} + if error_messages: + # These may or may not contain field specific errors, so we manually + # map them + for message in error_messages: + for error_regex, key in CUSTOM_ERROR_MESSAGE_MATCHERS: + if error_regex.match(message): + error_data[key] = [message] + + if errors: + for key, error in data.get("errors").items(): + error_data[key] = [error] + + if not error_data: return None - return {key: [error] for key, error in data.get("errors").items()} + return error_data def search_url(self, org_slug): """ @@ -515,7 +588,12 @@ def build_dynamic_field(self, field_meta, group=None): elif ( # Assignee and reporter fields field_meta.get("autoCompleteUrl") - and (schema.get("items") == "user" or schema["type"] == "user") + and ( + schema.get("items") == "user" + or schema["type"] == "user" + or schema["type"] == "team" + or schema.get("items") == "team" + ) # Sprint and "Epic Link" fields or schema.get("custom") in (JIRA_CUSTOM_FIELD_TYPES["sprint"], JIRA_CUSTOM_FIELD_TYPES["epic"]) @@ -795,100 +873,6 @@ def get_create_issue_config(self, group: Group | None, user: RpcUser, **kwargs): return fields - def _old_clean_and_transform_issue_data( - self, data: dict[str, Any], issue_type_meta: dict[str, Any] - ) -> dict[str, Any]: - """ - Get the (cached) "createmeta" from Jira to use as a "schema". Clean up - the Jira issue by removing all fields that aren't enumerated by this - schema. Send this cleaned data to Jira. Finally, make another API call - to Jira to make sure the issue was created and return basic issue details. 
- - :param data: JiraCreateTicketAction object - :return: simple object with basic Jira issue details - """ - client = self.get_client() - cleaned_data = {} - user_id_field = client.user_id_field() - - fs = issue_type_meta["fields"] - for field in fs.keys(): - f = fs[field] - if field == "description": - cleaned_data[field] = data[field] - continue - elif field == "summary": - cleaned_data["summary"] = data["title"] - continue - elif field == "labels" and "labels" in data: - labels = [label.strip() for label in data["labels"].split(",") if label.strip()] - cleaned_data["labels"] = labels - continue - if field in data.keys(): - v = data.get(field) - if not v: - continue - - schema = f.get("schema") - if schema: - if schema.get("type") == "string" and not schema.get("custom"): - cleaned_data[field] = v - continue - if schema["type"] == "user" or schema.get("items") == "user": - if schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("multiuserpicker"): - # custom multi-picker - v = [{user_id_field: user_id} for user_id in v] - else: - v = {user_id_field: v} - elif schema["type"] == "issuelink": # used by Parent field - v = {"key": v} - elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["epic"]: - v = v - elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["team"]: - v = v - elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["sprint"]: - try: - v = int(v) - except ValueError: - raise IntegrationError(f"Invalid sprint ({v}) specified") - elif schema["type"] == "array" and schema.get("items") == "option": - v = [{"value": vx} for vx in v] - elif schema["type"] == "array" and schema.get("items") == "string": - v = [v] - elif schema["type"] == "array" and schema.get("items") != "string": - v = [{"id": vx} for vx in v] - elif schema["type"] == "option": - v = {"value": v} - elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("textarea"): - v = v - elif ( - schema["type"] == "number" - or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["tempo_account"] - ): - try: - if "." in v: - v = float(v) - else: - v = int(v) - except ValueError: - pass - elif ( - schema.get("type") != "string" - or (schema.get("items") and schema.get("items") != "string") - or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("select") - ): - v = {"id": v} - cleaned_data[field] = v - - if not (isinstance(cleaned_data["issuetype"], dict) and "id" in cleaned_data["issuetype"]): - # something fishy is going on with this field, working on some Jira - # instances, and some not. - # testing against 5.1.5 and 5.1.4 does not convert (perhaps is no longer included - # in the projectmeta API call, and would normally be converted in the - # above clean method.) 
- cleaned_data["issuetype"] = {"id": cleaned_data["issuetype"]} - return cleaned_data - def _clean_and_transform_issue_data( self, issue_metadata: JiraIssueTypeMetadata, data: dict[str, Any] ) -> Any: @@ -914,12 +898,9 @@ def create_issue(self, data, **kwargs): raise IntegrationError("Could not fetch issue create configuration from Jira.") issue_type_meta = self.get_issue_type_meta(data["issuetype"], meta) - if features.has("organizations:new-jira-transformers", organization=self.organization): - cleaned_data = self._clean_and_transform_issue_data( - JiraIssueTypeMetadata.from_dict(issue_type_meta), data - ) - else: - cleaned_data = self._old_clean_and_transform_issue_data(data, issue_type_meta) + cleaned_data = self._clean_and_transform_issue_data( + JiraIssueTypeMetadata.from_dict(issue_type_meta), data + ) try: response = client.create_issue(cleaned_data) diff --git a/src/sentry/integrations/jira/models/create_issue_metadata.py b/src/sentry/integrations/jira/models/create_issue_metadata.py index 2817c998c4952..feb5d529da842 100644 --- a/src/sentry/integrations/jira/models/create_issue_metadata.py +++ b/src/sentry/integrations/jira/models/create_issue_metadata.py @@ -30,6 +30,9 @@ class JiraSchemaTypes(str, Enum): team = "team" number = "number" json = "json" + version = "version" + component = "component" + priority = "priority" any = "any" diff --git a/src/sentry/integrations/jira/utils/api.py b/src/sentry/integrations/jira/utils/api.py index 99d4d8626db28..cdc0f2b37e6a5 100644 --- a/src/sentry/integrations/jira/utils/api.py +++ b/src/sentry/integrations/jira/utils/api.py @@ -9,7 +9,7 @@ from sentry.integrations.services.integration import integration_service from sentry.integrations.services.integration.model import RpcIntegration -from sentry.integrations.utils import sync_group_assignee_inbound +from sentry.integrations.utils.sync import sync_group_assignee_inbound from sentry.shared_integrations.exceptions import ApiError from ...mixins.issues import IssueSyncIntegration diff --git a/src/sentry/integrations/jira/utils/create_issue_schema_transformers.py b/src/sentry/integrations/jira/utils/create_issue_schema_transformers.py index 9db9d6fd3a62d..7269e1b8774d8 100644 --- a/src/sentry/integrations/jira/utils/create_issue_schema_transformers.py +++ b/src/sentry/integrations/jira/utils/create_issue_schema_transformers.py @@ -44,6 +44,9 @@ def get_type_transformer_mappings(user_id_field: str) -> TransformerType: JiraSchemaTypes.issue_link.value: lambda x: {"key": x}, JiraSchemaTypes.project.value: id_obj_transformer, JiraSchemaTypes.number.value: parse_number_field, + JiraSchemaTypes.priority.value: id_obj_transformer, + JiraSchemaTypes.version.value: id_obj_transformer, + JiraSchemaTypes.component: id_obj_transformer, } return transformers @@ -51,11 +54,6 @@ def get_type_transformer_mappings(user_id_field: str) -> TransformerType: def get_custom_field_transformer_mappings() -> TransformerType: transformers = { - # TODO(Gabe): `select` type fields are broken in the UI, fix this. - # JIRA_CUSTOM_FIELD_TYPES["select"]: identity_transformer, - # TODO(Gabe): `epic` type fields don't currently appear in the issue - # link dialog. Re-enable this if needed after testing. 
- # JIRA_CUSTOM_FIELD_TYPES["epic"]: identity_transformer, JIRA_CUSTOM_FIELD_TYPES["tempo_account"]: parse_number_field, JIRA_CUSTOM_FIELD_TYPES["sprint"]: parse_number_field, JIRA_CUSTOM_FIELD_TYPES["rank"]: id_obj_transformer, @@ -100,10 +98,10 @@ def transform_fields( for field in jira_fields: field_data = data.get(field.key) - # We don't have a mapping for this field, so it's probably extraneous. - # TODO(Gabe): Explore raising a sentry issue for unmapped fields in - # order for us to properly filter them out. - if field_data is None: + # Skip any values that indicate no value should be provided. + # We have some older alert templates with "" values, which will raise + # if we don't skip them. + if field_data is None or field_data == "": continue field_transformer = get_transformer_for_field( @@ -134,7 +132,7 @@ def transform_fields( except JiraSchemaParseError as e: raise IntegrationFormError(field_errors={field.name: str(e)}) from e - if transformed_value: + if transformed_value is not None: transformed_data[field.key] = transformed_value return transformed_data diff --git a/src/sentry/integrations/jira/views/sentry_installation.py b/src/sentry/integrations/jira/views/sentry_installation.py index 653ae21dfd591..04b17b6d9fa99 100644 --- a/src/sentry/integrations/jira/views/sentry_installation.py +++ b/src/sentry/integrations/jira/views/sentry_installation.py @@ -3,7 +3,10 @@ from rest_framework.request import Request from rest_framework.response import Response -from sentry.integrations.utils import AtlassianConnectValidationError, get_integration_from_request +from sentry.integrations.utils.atlassian_connect import ( + AtlassianConnectValidationError, + get_integration_from_request, +) from sentry.utils.assets import get_asset_url from sentry.utils.http import absolute_uri from sentry.utils.signing import sign diff --git a/src/sentry/integrations/jira/views/sentry_issue_details.py b/src/sentry/integrations/jira/views/sentry_issue_details.py index dae799b6ba2ee..fb4174b0be631 100644 --- a/src/sentry/integrations/jira/views/sentry_issue_details.py +++ b/src/sentry/integrations/jira/views/sentry_issue_details.py @@ -17,7 +17,10 @@ from sentry.api.serializers.models.group_stream import StreamGroupSerializer from sentry.integrations.models.external_issue import ExternalIssue from sentry.integrations.services.integration import integration_service -from sentry.integrations.utils import AtlassianConnectValidationError, get_integration_from_request +from sentry.integrations.utils.atlassian_connect import ( + AtlassianConnectValidationError, + get_integration_from_request, +) from sentry.models.group import Group from sentry.models.organization import Organization from sentry.shared_integrations.exceptions import ApiError diff --git a/src/sentry/integrations/jira/webhooks/installed.py b/src/sentry/integrations/jira/webhooks/installed.py index 48737929102ee..421ed574fd93c 100644 --- a/src/sentry/integrations/jira/webhooks/installed.py +++ b/src/sentry/integrations/jira/webhooks/installed.py @@ -8,7 +8,7 @@ from sentry.api.base import control_silo_endpoint from sentry.integrations.jira.tasks import sync_metadata from sentry.integrations.pipeline import ensure_integration -from sentry.integrations.utils import authenticate_asymmetric_jwt, verify_claims +from sentry.integrations.utils.atlassian_connect import authenticate_asymmetric_jwt, verify_claims from sentry.utils import jwt from ..integration import JiraIntegrationProvider diff --git a/src/sentry/integrations/jira/webhooks/issue_updated.py 
b/src/sentry/integrations/jira/webhooks/issue_updated.py index f2cd4d9b382ad..01b3202da3a78 100644 --- a/src/sentry/integrations/jira/webhooks/issue_updated.py +++ b/src/sentry/integrations/jira/webhooks/issue_updated.py @@ -13,7 +13,7 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint -from sentry.integrations.utils import get_integration_from_jwt +from sentry.integrations.utils.atlassian_connect import get_integration_from_jwt from sentry.integrations.utils.scope import bind_org_context_from_integration from sentry.shared_integrations.exceptions import ApiError diff --git a/src/sentry/integrations/jira/webhooks/uninstalled.py b/src/sentry/integrations/jira/webhooks/uninstalled.py index 84fc0512d1fa4..f8ac4fd41c406 100644 --- a/src/sentry/integrations/jira/webhooks/uninstalled.py +++ b/src/sentry/integrations/jira/webhooks/uninstalled.py @@ -7,7 +7,7 @@ from sentry.api.base import control_silo_endpoint from sentry.constants import ObjectStatus from sentry.integrations.models.integration import Integration -from sentry.integrations.utils import get_integration_from_jwt +from sentry.integrations.utils.atlassian_connect import get_integration_from_jwt from sentry.integrations.utils.scope import bind_org_context_from_integration from .base import JiraWebhookBase diff --git a/src/sentry/integrations/jira_server/utils/api.py b/src/sentry/integrations/jira_server/utils/api.py index 8415d9e36371d..c7c6870f1f16a 100644 --- a/src/sentry/integrations/jira_server/utils/api.py +++ b/src/sentry/integrations/jira_server/utils/api.py @@ -6,7 +6,7 @@ from sentry.integrations.services.integration.model import RpcIntegration from sentry.integrations.services.integration.service import integration_service -from sentry.integrations.utils import sync_group_assignee_inbound +from sentry.integrations.utils.sync import sync_group_assignee_inbound if TYPE_CHECKING: from sentry.integrations.models.integration import Integration diff --git a/src/sentry/integrations/messaging/commands.py b/src/sentry/integrations/messaging/commands.py new file mode 100644 index 0000000000000..32968a56e8ef2 --- /dev/null +++ b/src/sentry/integrations/messaging/commands.py @@ -0,0 +1,151 @@ +import itertools +from abc import ABC, abstractmethod +from collections.abc import Callable, Iterable +from dataclasses import dataclass +from typing import Generic, TypeVar + +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) +from sentry.integrations.messaging.spec import MessagingIntegrationSpec + + +@dataclass(frozen=True, eq=True) +class CommandInput: + cmd_value: str + arg_values: tuple[str, ...] 
= () + + def get_all_tokens(self) -> Iterable[str]: + yield self.cmd_value + yield from self.arg_values + + def adjust(self, slug: "CommandSlug") -> "CommandInput": + """Remove the args that are part of a slug.""" + token_count = len(slug.tokens) - 1 + slug_part = [self.cmd_value] + list(self.arg_values)[:token_count] + remaining_args = self.arg_values[token_count:] + return CommandInput(" ".join(slug_part), remaining_args) + + +class CommandNotMatchedError(Exception): + def __init__(self, message: str, unmatched_input: CommandInput) -> None: + super().__init__(message) + self.unmatched_input = unmatched_input + + +class CommandSlug: + def __init__(self, text: str) -> None: + self.tokens = tuple(token.casefold() for token in text.strip().split()) + + def does_match(self, cmd_input: CommandInput) -> bool: + if not self.tokens: + return cmd_input.cmd_value == "" and not cmd_input.arg_values + cmd_prefix = itertools.islice(cmd_input.get_all_tokens(), 0, len(self.tokens)) + cmd_tokens = tuple(token.casefold() for token in cmd_prefix) + return self.tokens == cmd_tokens + + def __repr__(self): + joined_tokens = " ".join(self.tokens) + return f"{type(self).__name__}({joined_tokens!r})" + + +class MessagingIntegrationCommand: + def __init__( + self, + interaction_type: MessagingInteractionType, + command_text: str, + aliases: Iterable[str] = (), + ) -> None: + super().__init__() + self.interaction_type = interaction_type + self.command_slug = CommandSlug(command_text) + self.aliases = frozenset(CommandSlug(alias) for alias in aliases) + + @property + def name(self) -> str: + return self.interaction_type.value + + @staticmethod + def _to_tokens(text: str) -> tuple[str, ...]: + return tuple(token.casefold() for token in text.strip().split()) + + def get_all_command_slugs(self) -> Iterable[CommandSlug]: + yield self.command_slug + yield from self.aliases + + +MESSAGING_INTEGRATION_COMMANDS = ( + HELP := MessagingIntegrationCommand( + MessagingInteractionType.HELP, + "help", + aliases=("", "support", "docs"), + ), + LINK_IDENTITY := MessagingIntegrationCommand( + MessagingInteractionType.LINK_IDENTITY, + "link", + ), + UNLINK_IDENTITY := MessagingIntegrationCommand( + MessagingInteractionType.UNLINK_IDENTITY, + "unlink", + ), + LINK_TEAM := MessagingIntegrationCommand( + MessagingInteractionType.LINK_TEAM, + "link team", + ), + UNLINK_TEAM := MessagingIntegrationCommand( + MessagingInteractionType.UNLINK_TEAM, + "unlink team", + ), +) + +R = TypeVar("R") # response + + +class MessagingIntegrationCommandDispatcher(Generic[R], ABC): + """The set of commands handled by one messaging integration.""" + + @property + @abstractmethod + def integration_spec(self) -> MessagingIntegrationSpec: + raise NotImplementedError + + @property + @abstractmethod + def command_handlers( + self, + ) -> Iterable[tuple[MessagingIntegrationCommand, Callable[[CommandInput], R]]]: + raise NotImplementedError + + def get_event(self, command: MessagingIntegrationCommand) -> MessagingInteractionEvent: + return MessagingInteractionEvent( + interaction_type=command.interaction_type, spec=self.integration_spec + ) + + def dispatch(self, cmd_input: CommandInput) -> R: + @dataclass(frozen=True) + class CandidateHandler: + command: MessagingIntegrationCommand + slug: CommandSlug + callback: Callable[[CommandInput], R] + + def parsing_order(self) -> int: + # Sort by descending length of arg tokens. If one slug is a prefix of + # another (e.g., "link" and "link team"), we must check for the longer + # one first. 
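+                # Concretely (hypothetical input): CommandInput("link", ("team",))
+                # matches both CommandSlug("link team") and CommandSlug("link");
+                # sorting by descending token count tries "link team" first, so
+                # the two-word command wins rather than "link" swallowing "team"
+                # as an argument.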
+ return -len(self.slug.tokens) + + candidate_handlers = [ + CandidateHandler(command, slug, callback) + for (command, callback) in self.command_handlers + for slug in command.get_all_command_slugs() + ] + candidate_handlers.sort(key=CandidateHandler.parsing_order) + + for handler in candidate_handlers: + if handler.slug.does_match(cmd_input): + arg_input = cmd_input.adjust(handler.slug) + with self.get_event(handler.command).capture(assume_success=False): + return handler.callback(arg_input) + + raise CommandNotMatchedError(f"{cmd_input=!r}", cmd_input) diff --git a/src/sentry/integrations/messaging/linkage.py b/src/sentry/integrations/messaging/linkage.py index a41db6a8dd75b..672a6c0a07748 100644 --- a/src/sentry/integrations/messaging/linkage.py +++ b/src/sentry/integrations/messaging/linkage.py @@ -14,6 +14,7 @@ from sentry import analytics, features from sentry.api.helpers.teams import is_team_admin +from sentry.constants import ObjectStatus from sentry.identity.services.identity import identity_service from sentry.integrations.messaging.spec import MessagingIntegrationSpec from sentry.integrations.models.external_actor import ExternalActor @@ -360,7 +361,9 @@ def handle(self, request: HttpRequest, signed_params: str) -> HttpResponseBase: slack_id: str = params["slack_id"] organization_id: str | None = params.get("organization_id") - integration = integration_service.get_integration(integration_id=integration_id) + integration = integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) if integration is None: logger.info( "integration.not_found", diff --git a/src/sentry/integrations/messaging/metrics.py b/src/sentry/integrations/messaging/metrics.py new file mode 100644 index 0000000000000..00c6057166829 --- /dev/null +++ b/src/sentry/integrations/messaging/metrics.py @@ -0,0 +1,72 @@ +from collections.abc import Mapping +from dataclasses import dataclass +from enum import Enum +from typing import Any + +from sentry.integrations.base import IntegrationDomain +from sentry.integrations.messaging.spec import MessagingIntegrationSpec +from sentry.integrations.utils.metrics import EventLifecycleMetric, EventLifecycleOutcome +from sentry.models.organization import Organization +from sentry.organizations.services.organization import RpcOrganization +from sentry.users.models import User +from sentry.users.services.user import RpcUser + + +class MessagingInteractionType(Enum): + """A way in which a user can interact with Sentry through a messaging app.""" + + # Direct interactions with the user + HELP = "HELP" + LINK_IDENTITY = "LINK_IDENTITY" + UNLINK_IDENTITY = "UNLINK_IDENTITY" + LINK_TEAM = "LINK_TEAM" + UNLINK_TEAM = "UNLINK_TEAM" + + # Interactions on Issues + STATUS = "STATUS" + ARCHIVE_DIALOG = "ARCHIVE_DIALOG" + ARCHIVE = "ARCHIVE" + ASSIGN_DIALOG = "ASSIGN_DIALOG" + ASSIGN = "ASSIGN" + UNASSIGN = "UNASSIGN" + RESOLVE_DIALOG = "RESOLVE_DIALOG" + RESOLVE = "RESOLVE" + UNRESOLVE = "UNRESOLVE" + IGNORE = "IGNORE" + MARK_ONGOING = "MARK_ONGOING" + + # Automatic behaviors + UNFURL_ISSUES = "UNFURL_ISSUES" + UNFURL_METRIC_ALERTS = "UNFURL_METRIC_ALERTS" + UNFURL_DISCOVER = "UNFURL_DISCOVER" + + GET_PARENT_NOTIFICATION = "GET_PARENT_NOTIFICATION" + + def __str__(self) -> str: + return self.value.lower() + + +@dataclass +class MessagingInteractionEvent(EventLifecycleMetric): + """An instance to be recorded of a user interacting through a messaging app.""" + + interaction_type: MessagingInteractionType + spec: MessagingIntegrationSpec + + # Optional
attributes to populate extras + user: User | RpcUser | None = None + organization: Organization | RpcOrganization | None = None + + def get_key(self, outcome: EventLifecycleOutcome) -> str: + return self.get_standard_key( + domain=IntegrationDomain.MESSAGING, + integration_name=self.spec.provider_slug, + interaction_type=str(self.interaction_type), + outcome=outcome, + ) + + def get_extras(self) -> Mapping[str, Any]: + return { + "user_id": (self.user.id if self.user else None), + "organization_id": (self.organization.id if self.organization else None), + } diff --git a/src/sentry/integrations/metric_alerts.py b/src/sentry/integrations/metric_alerts.py index 31e61ac367e4a..fb544a6bd9b95 100644 --- a/src/sentry/integrations/metric_alerts.py +++ b/src/sentry/integrations/metric_alerts.py @@ -26,7 +26,6 @@ "percentage(sessions_crashed, sessions)": "% sessions crash free rate", "percentage(users_crashed, users)": "% users crash free rate", } -LOGO_URL = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) # These should be the same as the options in the frontend # COMPARISON_DELTA_OPTIONS TEXT_COMPARISON_DELTA = { @@ -39,6 +38,10 @@ } +def logo_url() -> str: + return absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png")) + + def get_metric_count_from_incident(incident: Incident) -> str: """Returns the current or last count of an incident aggregate.""" incident_trigger = ( @@ -115,7 +118,9 @@ def incident_attachment_info( metric_value = get_metric_count_from_incident(incident) text = get_incident_status_text(alert_rule, metric_value) - if features.has("organizations:anomaly-detection-alerts", incident.organization): + if features.has( + "organizations:anomaly-detection-alerts", incident.organization + ) and features.has("organizations:anomaly-detection-rollout", incident.organization): text += f"\nThreshold: {alert_rule.detection_type.title()}" title = f"{status}: {alert_rule.name}" @@ -142,7 +147,7 @@ def incident_attachment_info( return { "title": title, "text": text, - "logo_url": LOGO_URL, + "logo_url": logo_url(), "status": status, "ts": incident.date_started, "title_link": title_link, @@ -211,7 +216,9 @@ def metric_alert_attachment_info( if metric_value is not None and status != INCIDENT_STATUS[IncidentStatus.CLOSED]: text = get_incident_status_text(alert_rule, metric_value) - if features.has("organizations:anomaly-detection-alerts", alert_rule.organization): + if features.has( + "organizations:anomaly-detection-alerts", alert_rule.organization + ) and features.has("organizations:anomaly-detection-rollout", alert_rule.organization): text += f"\nThreshold: {alert_rule.detection_type.title()}" date_started = None @@ -228,7 +235,7 @@ def metric_alert_attachment_info( return { "title": title, "text": text, - "logo_url": LOGO_URL, + "logo_url": logo_url(), "status": status, "date_started": date_started, "last_triggered_date": last_triggered_date, diff --git a/src/sentry/integrations/middleware/hybrid_cloud/parser.py b/src/sentry/integrations/middleware/hybrid_cloud/parser.py index c83c8d051aa8e..b1740f1e250b8 100644 --- a/src/sentry/integrations/middleware/hybrid_cloud/parser.py +++ b/src/sentry/integrations/middleware/hybrid_cloud/parser.py @@ -239,7 +239,9 @@ def get_response_from_webhookpayload_for_integration( regions=regions, identifier=integration.id, integration_id=integration.id ) - def get_mailbox_identifier(self, integration: RpcIntegration, data: Mapping[str, Any]) -> str: + def get_mailbox_identifier( + self, integration: RpcIntegration | 
Integration, data: Mapping[str, Any] + ) -> str: """ Used by integrations with higher hook volumes to create smaller mailboxes that can be delivered in parallel. Requires the integration to implement diff --git a/src/sentry/integrations/mixins/issues.py b/src/sentry/integrations/mixins/issues.py index 9a77dd4d89f9e..add7ee42679a2 100644 --- a/src/sentry/integrations/mixins/issues.py +++ b/src/sentry/integrations/mixins/issues.py @@ -12,11 +12,12 @@ from sentry.eventstore.models import GroupEvent from sentry.integrations.base import IntegrationInstallation from sentry.integrations.models.external_issue import ExternalIssue +from sentry.integrations.services.assignment_source import AssignmentSource from sentry.integrations.services.integration import integration_service from sentry.integrations.tasks.sync_status_inbound import ( sync_status_inbound as sync_status_inbound_task, ) -from sentry.integrations.utils import where_should_sync +from sentry.integrations.utils.sync import where_should_sync from sentry.issues.grouptype import GroupCategory from sentry.models.group import Group from sentry.models.grouplink import GroupLink @@ -62,7 +63,7 @@ def from_resolve_unresolve( class IssueBasicIntegration(IntegrationInstallation, ABC): - def should_sync(self, attribute): + def should_sync(self, attribute, sync_source: AssignmentSource | None = None): return False def get_group_title(self, group, event, **kwargs): @@ -378,10 +379,17 @@ class IssueSyncIntegration(IssueBasicIntegration, ABC): outbound_assignee_key: ClassVar[str | None] = None inbound_assignee_key: ClassVar[str | None] = None - def should_sync(self, attribute: str) -> bool: + def should_sync(self, attribute: str, sync_source: AssignmentSource | None = None) -> bool: key = getattr(self, f"{attribute}_key", None) if key is None or self.org_integration is None: return False + + # Check that the assignment source isn't this same integration in order to + # prevent sync-cycles from occurring. This should still allow other + # integrations to propagate changes outward. + if sync_source and sync_source.integration_id == self.org_integration.integration_id: + return False + value: bool = self.org_integration.config.get(key, False) return value @@ -400,7 +408,14 @@ def sync_assignee_outbound( raise NotImplementedError @abstractmethod - def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs): + def sync_status_outbound( + self, + external_issue, + is_resolved, + project_id, + assignment_source: AssignmentSource | None = None, + **kwargs, + ): """ Propagate a sentry issue's status to a linked issue's status. """ diff --git a/src/sentry/integrations/mixins/notifications.py b/src/sentry/integrations/mixins/notifications.py index 0565d6f1291df..80c94602facae 100644 --- a/src/sentry/integrations/mixins/notifications.py +++ b/src/sentry/integrations/mixins/notifications.py @@ -1,5 +1,7 @@ import logging +from sentry_sdk import capture_message + from sentry.integrations.models.external_actor import ExternalActor from sentry.models.team import Team @@ -22,6 +24,21 @@ def notify_remove_external_team(self, external_team: ExternalActor, team: Team) """ Notify through the integration that an external team has been removed. 
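+        If the external team record has no external_id to message, this logs
+        and reports a warning instead of attempting to send.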
""" + if not external_team.external_id: + logger.info( + "notify.external_team_missing_external_id", + extra={ + "external_team_id": external_team.id, + "team_id": team.id, + "team_slug": team.slug, + }, + ) + capture_message( + f"External team {external_team.id} has no external_id", + level="warning", + ) + return + self.send_message( channel_id=external_team.external_id, message=SUCCESS_UNLINKED_TEAM_MESSAGE.format(team=team.slug), diff --git a/src/sentry/integrations/models/external_actor.py b/src/sentry/integrations/models/external_actor.py index e21955a55d329..58c4183d8da8a 100644 --- a/src/sentry/integrations/models/external_actor.py +++ b/src/sentry/integrations/models/external_actor.py @@ -6,6 +6,7 @@ from django.utils import timezone from sentry.backup.scopes import RelocationScope +from sentry.constants import ObjectStatus from sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, region_silo_model from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey from sentry.hybridcloud.outbox.base import ReplicatedRegionModel @@ -68,7 +69,9 @@ def delete(self, *args, **kwargs): # TODO: Extract this out of the delete method into the endpoint / controller instead. if self.team is not None: - integration = integration_service.get_integration(integration_id=self.integration_id) + integration = integration_service.get_integration( + integration_id=self.integration_id, status=ObjectStatus.ACTIVE + ) if integration: install = integration.get_installation(organization_id=self.organization.id) team = self.team diff --git a/src/sentry/integrations/models/external_issue.py b/src/sentry/integrations/models/external_issue.py index 1671e2c5439db..9e12f67fe5f54 100644 --- a/src/sentry/integrations/models/external_issue.py +++ b/src/sentry/integrations/models/external_issue.py @@ -7,6 +7,7 @@ from django.utils import timezone from sentry.backup.scopes import RelocationScope +from sentry.constants import ObjectStatus from sentry.db.models import FlexibleForeignKey, JSONField, Model, region_silo_model, sane_repr from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey from sentry.db.models.manager.base import BaseManager @@ -89,7 +90,9 @@ class Meta: def get_installation(self) -> Any: from sentry.integrations.services.integration import integration_service - integration = integration_service.get_integration(integration_id=self.integration_id) + integration = integration_service.get_integration( + integration_id=self.integration_id, status=ObjectStatus.ACTIVE + ) assert integration, "Integration is required to get an installation" return integration.get_installation(organization_id=self.organization_id) diff --git a/src/sentry/integrations/msteams/notifications.py b/src/sentry/integrations/msteams/notifications.py index fca1a6152a81c..0152243dabc5b 100644 --- a/src/sentry/integrations/msteams/notifications.py +++ b/src/sentry/integrations/msteams/notifications.py @@ -82,9 +82,7 @@ def send_notification_as_msteams( ) return - with sentry_sdk.start_span( - op="notification.send_msteams", description="gen_channel_integration_map" - ): + with sentry_sdk.start_span(op="notification.send_msteams", name="gen_channel_integration_map"): data = get_integrations_by_channel_by_recipient( organization=notification.organization, recipients=recipients, @@ -92,13 +90,11 @@ def send_notification_as_msteams( ) for recipient, integrations_by_channel in data.items(): - with sentry_sdk.start_span(op="notification.send_msteams", description="send_one"): + 
with sentry_sdk.start_span(op="notification.send_msteams", name="send_one"): extra_context = (extra_context_by_actor or {}).get(recipient, {}) context = get_context(notification, recipient, shared_context, extra_context) - with sentry_sdk.start_span( - op="notification.send_msteams", description="gen_attachments" - ): + with sentry_sdk.start_span(op="notification.send_msteams", name="gen_attachments"): card = get_notification_card(notification, context, recipient) for channel, integration in integrations_by_channel.items(): @@ -107,7 +103,7 @@ def send_notification_as_msteams( client = MsTeamsClient(integration) try: with sentry_sdk.start_span( - op="notification.send_msteams", description="notify_recipient" + op="notification.send_msteams", name="notify_recipient" ): client.send_card(conversation_id, card) diff --git a/src/sentry/integrations/msteams/parsing.py b/src/sentry/integrations/msteams/parsing.py index 69e5a98687f0d..67e778c870bda 100644 --- a/src/sentry/integrations/msteams/parsing.py +++ b/src/sentry/integrations/msteams/parsing.py @@ -2,6 +2,7 @@ from collections.abc import Mapping from typing import Any +from sentry.constants import ObjectStatus from sentry.integrations.msteams.spec import PROVIDER from sentry.integrations.services.integration import integration_service from sentry.integrations.services.integration.model import RpcIntegration @@ -23,14 +24,18 @@ def get_integration_from_channel_data(data: Mapping[str, Any]) -> RpcIntegration team_id = _infer_team_id_from_channel_data(data=data) if team_id is None: return None - return integration_service.get_integration(provider=PROVIDER, external_id=team_id) + return integration_service.get_integration( + provider=PROVIDER, external_id=team_id, status=ObjectStatus.ACTIVE + ) def get_integration_for_tenant(data: Mapping[str, Any]) -> RpcIntegration | None: try: channel_data = data["channelData"] tenant_id = channel_data["tenant"]["id"] - return integration_service.get_integration(provider=PROVIDER, external_id=tenant_id) + return integration_service.get_integration( + provider=PROVIDER, external_id=tenant_id, status=ObjectStatus.ACTIVE + ) except Exception as err: logger.info("failed to get tenant id from request data", exc_info=err, extra={"data": data}) return None @@ -56,7 +61,9 @@ def get_integration_from_card_action(data: Mapping[str, Any]) -> RpcIntegration integration_id = _infer_integration_id_from_card_action(data=data) if integration_id is None: return None - return integration_service.get_integration(integration_id=integration_id) + return integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) def can_infer_integration(data: Mapping[str, Any]) -> bool: diff --git a/src/sentry/integrations/msteams/webhook.py b/src/sentry/integrations/msteams/webhook.py index dbfe7fd00f318..fb40c4b04e05c 100644 --- a/src/sentry/integrations/msteams/webhook.py +++ b/src/sentry/integrations/msteams/webhook.py @@ -2,7 +2,8 @@ import logging import time -from collections.abc import Callable, Mapping +from collections.abc import Callable, Iterable, Mapping +from dataclasses import dataclass from enum import Enum from typing import Any, cast @@ -18,10 +19,23 @@ from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import Endpoint, all_silo_endpoint +from sentry.constants import ObjectStatus from sentry.identity.services.identity import identity_service from sentry.identity.services.identity.model import RpcIdentity +from 
sentry.integrations.messaging import commands +from sentry.integrations.messaging.commands import ( + CommandInput, + CommandNotMatchedError, + MessagingIntegrationCommand, + MessagingIntegrationCommandDispatcher, +) +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) +from sentry.integrations.messaging.spec import MessagingIntegrationSpec from sentry.integrations.msteams import parsing -from sentry.integrations.msteams.spec import PROVIDER +from sentry.integrations.msteams.spec import PROVIDER, MsTeamsMessagingSpec from sentry.integrations.services.integration import integration_service from sentry.models.activity import ActivityIntegration from sentry.models.apikey import ApiKey @@ -447,22 +461,21 @@ def _make_action_data(self, data: Mapping[str, Any], user_id: int) -> dict[str, action_data = {"assignedTo": ""} return action_data + _ACTION_TYPES = { + ACTION_TYPE.RESOLVE: ("resolve", MessagingInteractionType.RESOLVE), + ACTION_TYPE.IGNORE: ("ignore", MessagingInteractionType.IGNORE), + ACTION_TYPE.ASSIGN: ("assign", MessagingInteractionType.ASSIGN), + ACTION_TYPE.UNRESOLVE: ("unresolve", MessagingInteractionType.UNRESOLVE), + ACTION_TYPE.UNASSIGN: ("unassign", MessagingInteractionType.UNASSIGN), + } + def _issue_state_change(self, group: Group, identity: RpcIdentity, data) -> Response: event_write_key = ApiKey( organization_id=group.project.organization_id, scope_list=["event:write"] ) - # undoing the enum structure of ACTION_TYPE to - # get a more sensible analytics_event - action_types = { - ACTION_TYPE.RESOLVE: "resolve", - ACTION_TYPE.IGNORE: "ignore", - ACTION_TYPE.ASSIGN: "assign", - ACTION_TYPE.UNRESOLVE: "unresolve", - ACTION_TYPE.UNASSIGN: "unassign", - } action_data = self._make_action_data(data, identity.user_id) - status = action_types[data["payload"]["actionType"]] + status, interaction_type = self._ACTION_TYPES[data["payload"]["actionType"]] analytics_event = f"integrations.msteams.{status}" analytics.record( analytics_event, @@ -470,13 +483,19 @@ def _issue_state_change(self, group: Group, identity: RpcIdentity, data) -> Resp organization_id=group.project.organization.id, ) - return client.put( - path=f"/projects/{group.project.organization.slug}/{group.project.slug}/issues/", - params={"id": group.id}, - data=action_data, - user=user_service.get_user(user_id=identity.user_id), - auth=event_write_key, - ) + with MessagingInteractionEvent( + interaction_type, MsTeamsMessagingSpec() + ).capture() as lifecycle: + response = client.put( + path=f"/projects/{group.project.organization.slug}/{group.project.slug}/issues/", + params={"id": group.id}, + data=action_data, + user=user_service.get_user(user_id=identity.user_id), + auth=event_write_key, + ) + if response.status_code >= 400: + lifecycle.record_failure() + return response def _handle_action_submitted(self, request: Request) -> Response: # pull out parameters @@ -506,7 +525,9 @@ def _handle_action_submitted(self, request: Request) -> Response: group = Group.objects.select_related("project__organization").filter(id=group_id).first() if group: - integration = integration_service.get_integration(integration_id=integration.id) + integration = integration_service.get_integration( + integration_id=integration.id, status=ObjectStatus.ACTIVE + ) if integration is None: group = None @@ -602,27 +623,54 @@ def _handle_channel_message(self, request: Request) -> Response: def _handle_personal_message(self, request: Request) -> Response: data = request.data command_text = 
data.get("text", "").strip() - lowercase_command = command_text.lower() - conversation_id = data["conversation"]["id"] - teams_user_id = data["from"]["id"] - - # only supporting unlink for now - if "unlink" in lowercase_command: - unlink_url = build_unlinking_url(conversation_id, data["serviceUrl"], teams_user_id) - card = build_unlink_identity_card(unlink_url) - elif "help" in lowercase_command: - card = build_help_command_card() - elif "link" == lowercase_command: # don't to match other types of link commands - has_linked_identity = ( - identity_service.get_identity(filter={"identity_ext_id": teams_user_id}) is not None - ) - if has_linked_identity: - card = build_already_linked_identity_command_card() - else: - card = build_link_identity_command_card() - else: + + dispatcher = MsTeamsCommandDispatcher(data) + try: + card = dispatcher.dispatch(CommandInput(command_text)) + except CommandNotMatchedError: card = build_unrecognized_command_card(command_text) client = get_preinstall_client(data["serviceUrl"]) - client.send_card(conversation_id, card) + client.send_card(dispatcher.conversation_id, card) return self.respond(status=204) + + +@dataclass(frozen=True) +class MsTeamsCommandDispatcher(MessagingIntegrationCommandDispatcher[AdaptiveCard]): + data: dict[str, Any] + + @property + def integration_spec(self) -> MessagingIntegrationSpec: + return MsTeamsMessagingSpec() + + @property + def conversation_id(self) -> str: + return self.data["conversation"]["id"] + + @property + def teams_user_id(self) -> str: + return self.data["from"]["id"] + + @property + def command_handlers( + self, + ) -> Iterable[tuple[MessagingIntegrationCommand, Callable[[CommandInput], AdaptiveCard]]]: + yield commands.HELP, (lambda _: build_help_command_card()) + yield commands.LINK_IDENTITY, self.link_identity + yield commands.UNLINK_IDENTITY, self.unlink_identity + + def link_identity(self, _: CommandInput) -> AdaptiveCard: + linked_identity = identity_service.get_identity( + filter={"identity_ext_id": self.teams_user_id} + ) + has_linked_identity = linked_identity is not None + if has_linked_identity: + return build_already_linked_identity_command_card() + else: + return build_link_identity_command_card() + + def unlink_identity(self, _: CommandInput) -> AdaptiveCard: + unlink_url = build_unlinking_url( + self.conversation_id, self.data["serviceUrl"], self.teams_user_id + ) + return build_unlink_identity_card(unlink_url) diff --git a/src/sentry/integrations/on_call/metrics.py b/src/sentry/integrations/on_call/metrics.py new file mode 100644 index 0000000000000..1df97a5a6c744 --- /dev/null +++ b/src/sentry/integrations/on_call/metrics.py @@ -0,0 +1,57 @@ +from enum import Enum + +from attr import dataclass + +from sentry.integrations.base import IntegrationDomain +from sentry.integrations.on_call.spec import OnCallSpec +from sentry.integrations.utils.metrics import EventLifecycleMetric, EventLifecycleOutcome +from sentry.models.organization import Organization +from sentry.organizations.services.organization import RpcOrganization +from sentry.users.models import User +from sentry.users.services.user import RpcUser + + +class OnCallInteractionType(Enum): + """ + A way in which a user can interact with Sentry through an on-call app. 
+ """ + + # General interactions + ADD_KEY = "ADD_KEY" + POST_INSTALL = "POST_INSTALL" + # Interacting with external alerts + CREATE = "CREATE" # create an alert in Opsgenie/Pagerduty + RESOLVE = "RESOLVE" # resolve an alert in Opsgenie/Pagerduty + + # Opsgenie only + VERIFY_KEYS = "VERIFY_KEYS" + VERIFY_TEAM = "VERIFY_TEAM" + MIGRATE_PLUGIN = "MIGRATE_PLUGIN" + + # PagerDuty only + VALIDATE_SERVICE = "VALIDATE_SERVICE" + + def __str__(self) -> str: + return self.value.lower() + + +@dataclass +class OnCallInteractionEvent(EventLifecycleMetric): + """ + An instance to be recorded of a user interacting with Sentry through an on-call app. + """ + + interaction_type: OnCallInteractionType + spec: OnCallSpec + + # Optional attributes to populate extras + user: User | RpcUser | None = None + organization: Organization | RpcOrganization | None = None + + def get_key(self, outcome: EventLifecycleOutcome) -> str: + return self.get_standard_key( + domain=IntegrationDomain.ON_CALL_SCHEDULING, + integration_name=self.spec.provider_slug, + interaction_type=str(self.interaction_type), + outcome=outcome, + ) diff --git a/src/sentry/integrations/on_call/spec.py b/src/sentry/integrations/on_call/spec.py new file mode 100644 index 0000000000000..130c537976e8d --- /dev/null +++ b/src/sentry/integrations/on_call/spec.py @@ -0,0 +1,35 @@ +from abc import ABC, abstractmethod + +from sentry.models.notificationaction import ActionService + + +class OnCallSpec(ABC): + @property + @abstractmethod + def provider_slug(self): + raise NotImplementedError + + @property + @abstractmethod + def action_service(self): + raise NotImplementedError + + +class OpsgenieOnCallSpec(OnCallSpec): + @property + def provider_slug(self): + return "opsgenie" + + @property + def action_service(self): + return ActionService.OPSGENIE + + +class PagerDutyOnCallSpec(OnCallSpec): + @property + def provider_slug(self): + return "pagerduty" + + @property + def action_service(self): + return ActionService.PAGERDUTY diff --git a/src/sentry/integrations/opsgenie/actions/form.py b/src/sentry/integrations/opsgenie/actions/form.py index b2b6284dd0d9b..a6a29d9d20814 100644 --- a/src/sentry/integrations/opsgenie/actions/form.py +++ b/src/sentry/integrations/opsgenie/actions/form.py @@ -1,19 +1,16 @@ from __future__ import annotations from collections.abc import Mapping -from typing import Any, cast +from typing import Any from django import forms from django.utils.translation import gettext_lazy as _ -from sentry.integrations.opsgenie.integration import OpsgenieIntegration +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.opsgenie.metrics import record_event from sentry.integrations.opsgenie.utils import get_team from sentry.integrations.services.integration import integration_service -from sentry.integrations.services.integration.model import ( - RpcIntegration, - RpcOrganizationIntegration, -) -from sentry.shared_integrations.exceptions import ApiError +from sentry.integrations.services.integration.model import RpcOrganizationIntegration INVALID_TEAM = 1 INVALID_KEY = 2 @@ -59,65 +56,41 @@ def __init__(self, *args, **kwargs): def _get_team_status( self, team_id: str | None, - integration: RpcIntegration, org_integration: RpcOrganizationIntegration, ) -> int: team = get_team(team_id, org_integration) if not team or not team_id: return INVALID_TEAM - install = cast( - "OpsgenieIntegration", - integration.get_installation(organization_id=org_integration.organization_id), - ) - client = 
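# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example): recording an on-call interaction
# with the new metric types. The same `capture()` context-manager protocol as
# the messaging metrics is assumed; `page_on_call_service` is a hypothetical
# operation being measured.
from sentry.integrations.on_call.metrics import OnCallInteractionEvent, OnCallInteractionType
from sentry.integrations.on_call.spec import PagerDutyOnCallSpec

with OnCallInteractionEvent(OnCallInteractionType.CREATE, PagerDutyOnCallSpec()).capture():
    page_on_call_service()
# ---------------------------------------------------------------------------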
install.get_keyring_client(keyid=team_id) - # the integration should be of type "sentry" - # there's no way to authenticate that a key is an integration key - # without specifying the type... even though the type is arbitrary - # and all integration keys do the same thing - try: - client.authorize_integration(type="sentry") - except ApiError: - return INVALID_KEY - return VALID_TEAM def _validate_team(self, team_id: str | None, integration_id: int | None) -> None: - params = { - "account": dict(self.fields["account"].choices).get(integration_id), - "team": dict(self.fields["team"].choices).get(team_id), - } - integration = integration_service.get_integration( - integration_id=integration_id, provider="opsgenie" - ) - org_integration = integration_service.get_organization_integration( - integration_id=integration_id, - organization_id=self.org_id, - ) - if integration is None or org_integration is None: - raise forms.ValidationError( - _("The Opsgenie integration does not exist."), - code="invalid_integration", - params=params, - ) - team_status = self._get_team_status( - team_id=team_id, integration=integration, org_integration=org_integration - ) - if team_status == INVALID_TEAM: - raise forms.ValidationError( - _('The team "%(team)s" does not belong to the %(account)s Opsgenie account.'), - code="invalid_team", - params=params, + with record_event(OnCallInteractionType.VERIFY_TEAM).capture(): + params = { + "account": dict(self.fields["account"].choices).get(integration_id), + "team": dict(self.fields["team"].choices).get(team_id), + } + integration = integration_service.get_integration( + integration_id=integration_id, provider="opsgenie" ) - elif team_status == INVALID_KEY: - raise forms.ValidationError( - _( - 'The provided API key is invalid. Please make sure that the Opsgenie API \ - key is an integration key of type "Sentry" that has configuration access.' 
- ), - code="invalid_key", - params=params, + org_integration = integration_service.get_organization_integration( + integration_id=integration_id, + organization_id=self.org_id, ) + if integration is None or org_integration is None: + raise forms.ValidationError( + _("The Opsgenie integration does not exist."), + code="invalid_integration", + params=params, + ) + + team_status = self._get_team_status(team_id=team_id, org_integration=org_integration) + if team_status == INVALID_TEAM: + raise forms.ValidationError( + _('The team "%(team)s" does not belong to the %(account)s Opsgenie account.'), + code="invalid_team", + params=params, + ) def clean(self) -> dict[str, Any] | None: cleaned_data = super().clean() diff --git a/src/sentry/integrations/opsgenie/actions/notification.py b/src/sentry/integrations/opsgenie/actions/notification.py index a408d3f0420d4..c7c4ea429af54 100644 --- a/src/sentry/integrations/opsgenie/actions/notification.py +++ b/src/sentry/integrations/opsgenie/actions/notification.py @@ -72,7 +72,10 @@ def send_notification(event, futures): try: rules = [f.rule for f in futures] resp = client.send_notification( - data=event, priority=priority, rules=rules, notification_uuid=notification_uuid + data=event, + priority=priority, + rules=rules, + notification_uuid=notification_uuid, ) except ApiError as e: logger.info( diff --git a/src/sentry/integrations/opsgenie/client.py b/src/sentry/integrations/opsgenie/client.py index f474e18be7392..a7bd3755418b1 100644 --- a/src/sentry/integrations/opsgenie/client.py +++ b/src/sentry/integrations/opsgenie/client.py @@ -5,6 +5,8 @@ from sentry.eventstore.models import Event, GroupEvent from sentry.integrations.client import ApiClient from sentry.integrations.models.integration import Integration +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.opsgenie.metrics import record_event from sentry.integrations.services.integration.model import RpcIntegration from sentry.models.group import Group from sentry.shared_integrations.client.base import BaseApiResponseX @@ -36,11 +38,6 @@ def get_alerts(self, limit: int | None = 1) -> BaseApiResponseX: path = f"/alerts?limit={limit}" return self.get(path=path, headers=self._get_auth_headers()) - def authorize_integration(self, type: str) -> BaseApiResponseX: - body = {"type": type} - path = "/integrations/authenticate" - return self.post(path=path, headers=self._get_auth_headers(), data=body) - def _get_rule_urls(self, group, rules): organization = group.project.organization rule_urls = [] @@ -97,6 +94,7 @@ def send_notification( notification_uuid: str | None = None, ): headers = self._get_auth_headers() + interaction_type = OnCallInteractionType.CREATE if isinstance(data, (Event, GroupEvent)): group = data.group event = data @@ -111,6 +109,7 @@ def send_notification( else: # if we're acknowledging the alert—meaning that the Sentry alert was resolved if data.get("identifier"): + interaction_type = OnCallInteractionType.RESOLVE alias = data["identifier"] resp = self.post( f"/alerts/{alias}/acknowledge", @@ -121,5 +120,6 @@ def send_notification( return resp # this is a metric alert payload = data - resp = self.post("/alerts", data=payload, headers=headers) + with record_event(interaction_type).capture(): + resp = self.post("/alerts", data=payload, headers=headers) return resp diff --git a/src/sentry/integrations/opsgenie/integration.py b/src/sentry/integrations/opsgenie/integration.py index 29315fd8334c9..8842f60895613 100644 --- 
a/src/sentry/integrations/opsgenie/integration.py +++ b/src/sentry/integrations/opsgenie/integration.py @@ -10,6 +10,7 @@ from rest_framework.request import Request from rest_framework.serializers import ValidationError +from sentry.constants import ObjectStatus from sentry.integrations.base import ( FeatureDescription, IntegrationFeatures, @@ -19,6 +20,8 @@ ) from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.opsgenie.metrics import record_event from sentry.integrations.opsgenie.tasks import migrate_opsgenie_plugin from sentry.organizations.services.organization import RpcOrganizationSummary from sentry.pipeline import PipelineView @@ -119,7 +122,7 @@ def dispatch(self, request: Request, pipeline) -> HttpResponse: # type: ignore[ class OpsgenieIntegration(IntegrationInstallation): - def get_keyring_client(self, keyid: str) -> OpsgenieClient: + def get_keyring_client(self, keyid: int | str) -> OpsgenieClient: org_integration = self.org_integration assert org_integration, "OrganizationIntegration is required" team = get_team(team_id=keyid, org_integration=org_integration) @@ -169,7 +172,7 @@ def update_organization_config(self, data: MutableMapping[str, Any]) -> None: } integration = integration_service.get_integration( - organization_integration_id=self.org_integration.id + organization_integration_id=self.org_integration.id, status=ObjectStatus.ACTIVE ) if not integration: raise IntegrationError("Integration does not exist") @@ -180,40 +183,37 @@ def update_organization_config(self, data: MutableMapping[str, Any]) -> None: team["id"] = str(self.org_integration.id) + "-" + team["team"] invalid_keys = [] - for team in teams: - # skip if team, key pair already exist in config - if (team["team"], team["integration_key"]) in existing_team_key_pairs: - continue - - integration_key = team["integration_key"] - - # validate integration keys - client = OpsgenieClient( - integration=integration, - integration_key=integration_key, - ) - # call an API to test the integration key - try: - client.get_alerts() - except ApiError as e: - logger.info( - "opsgenie.authorization_error", - extra={"error": str(e), "status_code": e.code}, + with record_event(OnCallInteractionType.VERIFY_KEYS).capture(): + for team in teams: + # skip if team, key pair already exist in config + if (team["team"], team["integration_key"]) in existing_team_key_pairs: + continue + + integration_key = team["integration_key"] + + # validate integration keys + client = OpsgenieClient( + integration=integration, + integration_key=integration_key, ) - if e.code == 429: - raise ApiRateLimitedError( - "Too many requests. Please try updating one team/key at a time." - ) - elif e.code == 401: - invalid_keys.append(integration_key) - pass - elif e.json and e.json.get("message"): - raise ApiError(e.json["message"]) - else: - raise - - if invalid_keys: - raise ApiUnauthorized(f"Invalid integration key: {str(invalid_keys)}") + # call an API to test the integration key + try: + client.get_alerts() + except ApiError as e: + if e.code == 429: + raise ApiRateLimitedError( + "Too many requests. Please try updating one team/key at a time." 
+ ) + elif e.code == 401: + invalid_keys.append(integration_key) + pass + elif e.json and e.json.get("message"): + raise ApiError(e.json["message"]) + else: + raise + + if invalid_keys: + raise ApiUnauthorized(f"Invalid integration key: {str(invalid_keys)}") return super().update_organization_config(data) @@ -256,21 +256,22 @@ def post_install( organization: RpcOrganizationSummary, extra: Any | None = None, ) -> None: - try: - org_integration = OrganizationIntegration.objects.get( - integration=integration, organization_id=organization.id - ) + with record_event(OnCallInteractionType.POST_INSTALL).capture(): + try: + org_integration = OrganizationIntegration.objects.get( + integration=integration, organization_id=organization.id + ) - except OrganizationIntegration.DoesNotExist: - logger.exception("The Opsgenie post_install step failed.") - return + except OrganizationIntegration.DoesNotExist: + logger.exception("The Opsgenie post_install step failed.") + return - key = integration.metadata["api_key"] - team_table = [] - if key: - team_name = "my-first-key" - team_id = f"{org_integration.id}-{team_name}" - team_table.append({"team": team_name, "id": team_id, "integration_key": key}) + key = integration.metadata["api_key"] + team_table = [] + if key: + team_name = "my-first-key" + team_id = f"{org_integration.id}-{team_name}" + team_table.append({"team": team_name, "id": team_id, "integration_key": key}) - org_integration.config.update({"team_table": team_table}) - org_integration.update(config=org_integration.config) + org_integration.config.update({"team_table": team_table}) + org_integration.update(config=org_integration.config) diff --git a/src/sentry/integrations/opsgenie/metrics.py b/src/sentry/integrations/opsgenie/metrics.py new file mode 100644 index 0000000000000..fcdde5ce27258 --- /dev/null +++ b/src/sentry/integrations/opsgenie/metrics.py @@ -0,0 +1,6 @@ +from sentry.integrations.on_call.metrics import OnCallInteractionEvent, OnCallInteractionType +from sentry.integrations.on_call.spec import OpsgenieOnCallSpec + + +def record_event(event: OnCallInteractionType): + return OnCallInteractionEvent(event, OpsgenieOnCallSpec()) diff --git a/src/sentry/integrations/opsgenie/tasks.py b/src/sentry/integrations/opsgenie/tasks.py index 1de4561016100..dc30dbc9d88b0 100644 --- a/src/sentry/integrations/opsgenie/tasks.py +++ b/src/sentry/integrations/opsgenie/tasks.py @@ -4,11 +4,12 @@ from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.opsgenie.metrics import record_event from sentry.integrations.services.integration.service import integration_service from sentry.models.project import Project from sentry.models.rule import Rule from sentry.tasks.base import instrumented_task, retry -from sentry.utils import metrics ALERT_LEGACY_INTEGRATIONS = {"id": "sentry.rules.actions.notify_event.NotifyEventAction"} ALERT_LEGACY_INTEGRATIONS_WITH_NAME = { @@ -26,99 +27,101 @@ ) @retry(exclude=(Integration.DoesNotExist, OrganizationIntegration.DoesNotExist)) def migrate_opsgenie_plugin(integration_id: int, organization_id: int) -> None: - from sentry_plugins.opsgenie.plugin import OpsGeniePlugin + with record_event(OnCallInteractionType.MIGRATE_PLUGIN).capture(): + from sentry_plugins.opsgenie.plugin import OpsGeniePlugin - result = integration_service.organization_context( - organization_id=organization_id, 
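# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example): call sites use the provider-local
# `record_event` helper (defined just above) instead of naming the spec each
# time; the PagerDuty module later in this diff is identical apart from the
# spec class. `verify_keys` is a hypothetical body being measured.
from sentry.integrations.on_call.metrics import OnCallInteractionType
from sentry.integrations.opsgenie.metrics import record_event

with record_event(OnCallInteractionType.VERIFY_KEYS).capture():
    verify_keys()
# ---------------------------------------------------------------------------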
integration_id=integration_id - ) - integration = result.integration - organization_integration = result.organization_integration - if not integration: - raise Integration.DoesNotExist - if not organization_integration: - raise OrganizationIntegration.DoesNotExist + result = integration_service.organization_context( + organization_id=organization_id, integration_id=integration_id + ) + integration = result.integration + organization_integration = result.organization_integration + if not integration: + raise Integration.DoesNotExist + if not organization_integration: + raise OrganizationIntegration.DoesNotExist - config = organization_integration.config - team_table = config["team_table"] + config = organization_integration.config + team_table = config["team_table"] - seen_keys = {} - for i in range(len(config["team_table"])): - seen_keys[team_table[i]["integration_key"]] = i + seen_keys = {} + for i in range(len(config["team_table"])): + seen_keys[team_table[i]["integration_key"]] = i - all_projects = Project.objects.filter(organization_id=organization_id) - plugin = OpsGeniePlugin() - opsgenie_projects = [ - p for p in all_projects if plugin.is_enabled(project=p) and plugin.is_configured(project=p) - ] + all_projects = Project.objects.filter(organization_id=organization_id) + plugin = OpsGeniePlugin() + opsgenie_projects = [ + p + for p in all_projects + if plugin.is_enabled(project=p) and plugin.is_configured(project=p) + ] - # migrate keys - for project in opsgenie_projects: - api_key = plugin.get_option("api_key", project) - if seen_keys.get(api_key) is None: - seen_keys[api_key] = len(team_table) - team = { - "team": f"{project.name} [MIGRATED]", - "id": f"{str(organization_integration.id)}-{project.name}", - "integration_key": api_key, - } - team_table.append(team) - config.update({"team_table": team_table}) + # migrate keys + for project in opsgenie_projects: + api_key = plugin.get_option("api_key", project) + if seen_keys.get(api_key) is None: + seen_keys[api_key] = len(team_table) + team = { + "team": f"{project.name} [MIGRATED]", + "id": f"{str(organization_integration.id)}-{project.name}", + "integration_key": api_key, + } + team_table.append(team) + config.update({"team_table": team_table}) - oi = integration_service.update_organization_integration( - org_integration_id=organization_integration.id, config=config - ) - if not oi: # the call to update_organization_integration failed - raise Exception("Failed to update team table.") - logger.info( - "api_keys.migrated", - extra={ - "integration_id": integration_id, - "organization_id": organization_id, - "plugin": plugin.slug, - }, - ) + oi = integration_service.update_organization_integration( + org_integration_id=organization_integration.id, config=config + ) + if not oi: # the call to update_organization_integration failed + raise Exception("Failed to update team table.") + logger.info( + "api_keys.migrated", + extra={ + "integration_id": integration_id, + "organization_id": organization_id, + "plugin": plugin.slug, + }, + ) - # migrate alert rules - for project in opsgenie_projects: - api_key = plugin.get_option("api_key", project) - team = team_table[seen_keys[api_key]] - rules_to_migrate = [ - rule - for rule in Rule.objects.filter(project_id=project.id) - if ALERT_LEGACY_INTEGRATIONS in rule.data["actions"] - or ALERT_LEGACY_INTEGRATIONS_WITH_NAME in rule.data["actions"] - ] - with transaction.atomic(router.db_for_write(Rule)): - for rule in rules_to_migrate: - actions = rule.data["actions"] - new_action = { - "id": 
"sentry.integrations.opsgenie.notify_action.OpsgenieNotifyTeamAction", - "account": integration.id, - "team": team["id"], - } - if new_action not in actions: - actions.append(new_action) - logger.info( - "alert_rule.migrated", - extra={ - "integration_id": integration_id, - "organization_id": organization_id, - "project_id": project.id, - "plugin": plugin.slug, - }, - ) - else: - logger.info( - "alert_rule.already_exists", - extra={ - "integration_id": integration_id, - "organization_id": organization_id, - "project_id": project.id, - "plugin": plugin.slug, - }, - ) - rule.save() + # migrate alert rules + for project in opsgenie_projects: + api_key = plugin.get_option("api_key", project) + team = team_table[seen_keys[api_key]] + rules_to_migrate = [ + rule + for rule in Rule.objects.filter(project_id=project.id) + if ALERT_LEGACY_INTEGRATIONS in rule.data["actions"] + or ALERT_LEGACY_INTEGRATIONS_WITH_NAME in rule.data["actions"] + ] + with transaction.atomic(router.db_for_write(Rule)): + for rule in rules_to_migrate: + actions = rule.data["actions"] + new_action = { + "id": "sentry.integrations.opsgenie.notify_action.OpsgenieNotifyTeamAction", + "account": integration.id, + "team": team["id"], + } + if new_action not in actions: + actions.append(new_action) + logger.info( + "alert_rule.migrated", + extra={ + "integration_id": integration_id, + "organization_id": organization_id, + "project_id": project.id, + "plugin": plugin.slug, + }, + ) + else: + logger.info( + "alert_rule.already_exists", + extra={ + "integration_id": integration_id, + "organization_id": organization_id, + "project_id": project.id, + "plugin": plugin.slug, + }, + ) + rule.save() - # disable plugin - plugin.reset_options(project) - metrics.incr("opsgenie.migration_success", skip_internal=False) + # disable plugin + plugin.reset_options(project) diff --git a/src/sentry/integrations/opsgenie/utils.py b/src/sentry/integrations/opsgenie/utils.py index 980fe96be9ca0..62847e8d1bf78 100644 --- a/src/sentry/integrations/opsgenie/utils.py +++ b/src/sentry/integrations/opsgenie/utils.py @@ -59,7 +59,7 @@ def attach_custom_priority( return data -def get_team(team_id: str | None, org_integration: RpcOrganizationIntegration | None): +def get_team(team_id: int | str | None, org_integration: RpcOrganizationIntegration | None): if not org_integration: return None teams = org_integration.config.get("team_table") diff --git a/src/sentry/integrations/pagerduty/actions/form.py b/src/sentry/integrations/pagerduty/actions/form.py index bd9e2a3a7a92e..3ac96179b5479 100644 --- a/src/sentry/integrations/pagerduty/actions/form.py +++ b/src/sentry/integrations/pagerduty/actions/form.py @@ -6,6 +6,8 @@ from django import forms from django.utils.translation import gettext_lazy as _ +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.pagerduty.metrics import record_event from sentry.integrations.services.integration import integration_service from sentry.integrations.types import ExternalProviders @@ -45,32 +47,33 @@ def __init__(self, *args, **kwargs): self.fields["service"].widget.choices = self.fields["service"].choices def _validate_service(self, service_id: int, integration_id: int) -> None: - params = { - "account": dict(self.fields["account"].choices).get(integration_id), - "service": dict(self.fields["service"].choices).get(service_id), - } - - org_integrations = integration_service.get_organization_integrations( - integration_id=integration_id, - providers=[ExternalProviders.PAGERDUTY.name], - ) 
- - if not any( - pds - for oi in org_integrations - for pds in oi.config.get("pagerduty_services", []) - if pds["id"] == service_id - ): - # We need to make sure that the service actually belongs to that integration, - # meaning that it belongs under the appropriate account in PagerDuty. - raise forms.ValidationError( - _( - 'The service "%(service)s" has not been granted access in the %(account)s Pagerduty account.' - ), - code="invalid", - params=params, + with record_event(OnCallInteractionType.VALIDATE_SERVICE).capture(): + params = { + "account": dict(self.fields["account"].choices).get(integration_id), + "service": dict(self.fields["service"].choices).get(service_id), + } + + org_integrations = integration_service.get_organization_integrations( + integration_id=integration_id, + providers=[ExternalProviders.PAGERDUTY.name], ) + if not any( + pds + for oi in org_integrations + for pds in oi.config.get("pagerduty_services", []) + if pds["id"] == service_id + ): + # We need to make sure that the service actually belongs to that integration, + # meaning that it belongs under the appropriate account in PagerDuty. + raise forms.ValidationError( + _( + 'The service "%(service)s" has not been granted access in the %(account)s Pagerduty account.' + ), + code="invalid", + params=params, + ) + def clean(self) -> dict[str, Any] | None: cleaned_data = super().clean() diff --git a/src/sentry/integrations/pagerduty/client.py b/src/sentry/integrations/pagerduty/client.py index 81d4f61402a8a..ffbf667cad5b8 100644 --- a/src/sentry/integrations/pagerduty/client.py +++ b/src/sentry/integrations/pagerduty/client.py @@ -5,6 +5,8 @@ from sentry.api.serializers import ExternalEventSerializer, serialize from sentry.eventstore.models import Event, GroupEvent from sentry.integrations.client import ApiClient +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.pagerduty.metrics import record_event from sentry.shared_integrations.client.base import BaseApiResponseX LEVEL_SEVERITY_MAP = { @@ -78,5 +80,5 @@ def send_trigger( else: # the payload is for a metric alert payload = data - - return self.post("/", data=payload) + with record_event(OnCallInteractionType.CREATE).capture(): + return self.post("/", data=payload) diff --git a/src/sentry/integrations/pagerduty/integration.py b/src/sentry/integrations/pagerduty/integration.py index dbddbb3f2eec2..fcf259d2c5cb0 100644 --- a/src/sentry/integrations/pagerduty/integration.py +++ b/src/sentry/integrations/pagerduty/integration.py @@ -19,6 +19,8 @@ ) from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration +from sentry.integrations.on_call.metrics import OnCallInteractionType +from sentry.integrations.pagerduty.metrics import record_event from sentry.organizations.services.organization import RpcOrganizationSummary from sentry.pipeline import PipelineView from sentry.shared_integrations.exceptions import IntegrationError @@ -67,7 +69,7 @@ class PagerDutyIntegration(IntegrationInstallation): - def get_keyring_client(self, keyid: str) -> PagerDutyClient: + def get_keyring_client(self, keyid: int | str) -> PagerDutyClient: org_integration = self.org_integration assert org_integration, "Cannot get client without an organization integration" @@ -179,22 +181,23 @@ def post_install( organization: RpcOrganizationSummary, extra: Any | None = None, ) -> None: - services = integration.metadata["services"] - try: - org_integration = 
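# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example): the membership check inside
# `_validate_service` above, reduced to plain data. A service id passes only
# if some org integration's `pagerduty_services` config lists it; the config
# shape here is hypothetical but mirrors the generator expression in the
# patch.
org_integrations = [
    {"config": {"pagerduty_services": [{"id": 11}, {"id": 12}]}},
    {"config": {}},
]

def service_is_known(service_id: int) -> bool:
    return any(
        pds
        for oi in org_integrations
        for pds in oi["config"].get("pagerduty_services", [])
        if pds["id"] == service_id
    )

assert service_is_known(12)
assert not service_is_known(99)
# ---------------------------------------------------------------------------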
OrganizationIntegration.objects.get(
-                integration=integration, organization_id=organization.id
-            )
-        except OrganizationIntegration.DoesNotExist:
-            logger.exception("The PagerDuty post_install step failed.")
-            return
-
-        with transaction.atomic(router.db_for_write(OrganizationIntegration)):
-            for service in services:
-                add_service(
-                    org_integration,
-                    integration_key=service["integration_key"],
-                    service_name=service["name"],
+        with record_event(OnCallInteractionType.POST_INSTALL).capture():
+            services = integration.metadata["services"]
+            try:
+                org_integration = OrganizationIntegration.objects.get(
+                    integration=integration, organization_id=organization.id
                 )
+            except OrganizationIntegration.DoesNotExist:
+                logger.exception("The PagerDuty post_install step failed.")
+                return
+
+            with transaction.atomic(router.db_for_write(OrganizationIntegration)):
+                for service in services:
+                    add_service(
+                        org_integration,
+                        integration_key=service["integration_key"],
+                        service_name=service["name"],
+                    )

     def build_integration(self, state):
         config = orjson.loads(state.get("config"))
diff --git a/src/sentry/integrations/pagerduty/metrics.py b/src/sentry/integrations/pagerduty/metrics.py
new file mode 100644
index 0000000000000..8f82ec36285eb
--- /dev/null
+++ b/src/sentry/integrations/pagerduty/metrics.py
@@ -0,0 +1,6 @@
+from sentry.integrations.on_call.metrics import OnCallInteractionEvent, OnCallInteractionType
+from sentry.integrations.on_call.spec import PagerDutyOnCallSpec
+
+
+def record_event(event: OnCallInteractionType):
+    return OnCallInteractionEvent(event, PagerDutyOnCallSpec())
diff --git a/src/sentry/integrations/pipeline.py b/src/sentry/integrations/pipeline.py
index 34975d7c914b9..38d15d5864bb5 100644
--- a/src/sentry/integrations/pipeline.py
+++ b/src/sentry/integrations/pipeline.py
@@ -19,6 +19,7 @@
 from sentry.shared_integrations.exceptions import IntegrationError, IntegrationProviderError
 from sentry.silo.base import SiloMode
 from sentry.users.models.identity import Identity, IdentityProvider, IdentityStatus
+from sentry.utils import metrics
 from sentry.web.helpers import render_to_response

 __all__ = ["IntegrationPipeline"]
@@ -85,6 +86,13 @@ def get_analytics_entry(self) -> PipelineAnalyticsEntry | None:
         pipeline_type = "reauth" if self.fetch_state("integration_id") else "install"
         return PipelineAnalyticsEntry("integrations.pipeline_step", pipeline_type)

+    def initialize(self) -> None:
+        super().initialize()
+
+        metrics.incr(
+            "sentry.integrations.installation_attempt", tags={"integration": self.provider.key}
+        )
+
     def finish_pipeline(self):
         try:
             data = self.provider.build_integration(self.state.data)
@@ -118,6 +126,11 @@ def finish_pipeline(self):
             )
             self.provider.post_install(self.integration, self.organization, extra=extra)
             self.clear_session()
+
+            metrics.incr(
+                "sentry.integrations.installation_finished", tags={"integration": self.provider.key}
+            )
+
             return response

     def _finish_pipeline(self, data):
diff --git a/src/sentry/integrations/services/assignment_source.py b/src/sentry/integrations/services/assignment_source.py
new file mode 100644
index 0000000000000..fbf4c85bf9f7c
--- /dev/null
+++ b/src/sentry/integrations/services/assignment_source.py
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass, field
+from datetime import datetime
+from typing import TYPE_CHECKING, Any
+
+from django.utils import timezone
+
+if TYPE_CHECKING:
+    from sentry.integrations.models import Integration
+    from sentry.integrations.services.integration import 
RpcIntegration
+
+
+@dataclass(frozen=True)
+class AssignmentSource:
+    source_name: str
+    integration_id: int
+    queued: datetime = field(default_factory=timezone.now)
+
+    @classmethod
+    def from_integration(cls, integration: Integration | RpcIntegration) -> AssignmentSource:
+        return AssignmentSource(
+            source_name=integration.name,
+            integration_id=integration.id,
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        return asdict(self)
+
+    @classmethod
+    def from_dict(cls, input_dict: dict[str, Any]) -> AssignmentSource | None:
+        try:
+            return cls(**input_dict)
+        except (ValueError, TypeError):
+            return None
diff --git a/src/sentry/integrations/services/integration/impl.py b/src/sentry/integrations/services/integration/impl.py
index 169e067997005..cbf01a3c33464 100644
--- a/src/sentry/integrations/services/integration/impl.py
+++ b/src/sentry/integrations/services/integration/impl.py
@@ -9,7 +9,6 @@
 from sentry import analytics
 from sentry.api.paginator import OffsetPaginator
-from sentry.api.serializers import AppPlatformEvent
 from sentry.constants import SentryAppInstallationStatus
 from sentry.hybridcloud.rpc.pagination import RpcPaginationArgs, RpcPaginationResult
 from sentry.incidents.models.incident import INCIDENT_STATUS, IncidentStatus
@@ -35,6 +34,7 @@
     serialize_organization_integration,
 )
 from sentry.rules.actions.notify_event_service import find_alert_rule_action_ui_component
+from sentry.sentry_apps.api.serializers.app_platform_event import AppPlatformEvent
 from sentry.sentry_apps.models.sentry_app import SentryApp
 from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation
 from sentry.shared_integrations.exceptions import ApiError
diff --git a/src/sentry/integrations/slack/actions/notification.py b/src/sentry/integrations/slack/actions/notification.py
index bb50b9e923ef0..45731bb2f0937 100644
--- a/src/sentry/integrations/slack/actions/notification.py
+++ b/src/sentry/integrations/slack/actions/notification.py
@@ -10,6 +10,10 @@
 from sentry.api.serializers.rest_framework.rule import ACTION_UUID_KEY
 from sentry.constants import ISSUE_ALERTS_THREAD_DEFAULT
 from sentry.eventstore.models import GroupEvent
+from sentry.integrations.messaging.metrics import (
+    MessagingInteractionEvent,
+    MessagingInteractionType,
+)
 from sentry.integrations.models.integration import Integration
 from sentry.integrations.repository import get_default_issue_alert_repository
 from sentry.integrations.repository.base import NotificationMessageValidationError
@@ -28,7 +32,9 @@
     SLACK_ISSUE_ALERT_SUCCESS_DATADOG_METRIC,
 )
 from sentry.integrations.slack.sdk_client import SlackSdkClient
+from sentry.integrations.slack.spec import SlackMessagingSpec
 from sentry.integrations.slack.utils.channel import SlackChannelIdData, get_channel_id
+from sentry.integrations.utils.metrics import EventLifecycle
 from sentry.models.options.organization_option import OrganizationOption
 from sentry.models.rule import Rule
 from sentry.notifications.additional_attachment_manager import get_additional_attachment
@@ -122,41 +128,56 @@ def send_notification(event: GroupEvent, futures: Sequence[RuleFuture]) -> None:
                 rule_action_uuid=rule_action_uuid,
             )

-            # We need to search by rule action uuid and rule id, so only search if they exist
-            reply_broadcast = False
-            thread_ts = None
-            if (
-                OrganizationOption.objects.get_value(
-                    organization=self.project.organization,
-                    key="sentry:issue_alerts_thread_flag",
-                    default=ISSUE_ALERTS_THREAD_DEFAULT,
-                )
-                and rule_action_uuid
-                and rule_id
-            ):
-                parent_notification_message = None
+            def 
get_thread_ts(lifecycle: EventLifecycle) -> str | None: + """Find the thread in which to post this notification as a reply. + + Return None to post the notification as a top-level message. + """ + + # We need to search by rule action uuid and rule id, so only search if they exist + if not ( + rule_action_uuid + and rule_id + and OrganizationOption.objects.get_value( + organization=self.project.organization, + key="sentry:issue_alerts_thread_flag", + default=ISSUE_ALERTS_THREAD_DEFAULT, + ) + ): + return None + try: parent_notification_message = self._repository.get_parent_notification_message( rule_id=rule_id, group_id=event.group.id, rule_action_uuid=rule_action_uuid, ) - except Exception: + except Exception as e: + lifecycle.record_halt(e) + # if there's an error trying to grab a parent notification, don't let that error block this flow # we already log at the repository layer, no need to log again here - pass + return None - if parent_notification_message: - # If a parent notification exists for this rule and action, then we can reply in a thread - # Make sure we track that this reply will be in relation to the parent row - new_notification_message_object.parent_notification_message_id = ( - parent_notification_message.id - ) - # To reply to a thread, use the specific key in the payload as referenced by the docs - # https://api.slack.com/methods/chat.postMessage#arg_thread_ts - thread_ts = parent_notification_message.message_identifier - # If this flow is triggered again for the same issue, we want it to be seen in the main channel - reply_broadcast = True + if parent_notification_message is None: + return None + + # If a parent notification exists for this rule and action, then we can reply in a thread + # Make sure we track that this reply will be in relation to the parent row + new_notification_message_object.parent_notification_message_id = ( + parent_notification_message.id + ) + # To reply to a thread, use the specific key in the payload as referenced by the docs + # https://api.slack.com/methods/chat.postMessage#arg_thread_ts + return parent_notification_message.message_identifier + + with MessagingInteractionEvent( + MessagingInteractionType.GET_PARENT_NOTIFICATION, SlackMessagingSpec() + ).capture() as lifecycle: + thread_ts = get_thread_ts(lifecycle) + + # If this flow is triggered again for the same issue, we want it to be seen in the main channel + reply_broadcast = thread_ts is not None client = SlackSdkClient(integration_id=integration.id) text = str(blocks.get("text")) diff --git a/src/sentry/integrations/slack/message_builder/base/block.py b/src/sentry/integrations/slack/message_builder/base/block.py index 47c532614b14f..ecbbfbff11dda 100644 --- a/src/sentry/integrations/slack/message_builder/base/block.py +++ b/src/sentry/integrations/slack/message_builder/base/block.py @@ -210,4 +210,4 @@ def _build_blocks( return blocks def as_payload(self) -> Mapping[str, Any]: - return self.build() # type: ignore[return-value] + return self.build() diff --git a/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py b/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py index 74c202dde397e..0241d925a561f 100644 --- a/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py +++ b/src/sentry/integrations/slack/message_builder/notifications/rule_save_edit.py @@ -48,7 +48,7 @@ def build(self) -> SlackBlock: else: rule_text = "*Alert rule updated*\n\n" rule_text += f"{rule_url} in the {project_url} project was recently 
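# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example): the halt-versus-failure distinction
# used by `get_thread_ts` above. A failed parent-notification lookup is
# recorded as a halt (an expected fallback, not an error) and the alert is
# simply posted as a top-level message. `find_parent_thread` is hypothetical.
from sentry.integrations.messaging.metrics import (
    MessagingInteractionEvent,
    MessagingInteractionType,
)
from sentry.integrations.slack.spec import SlackMessagingSpec

with MessagingInteractionEvent(
    MessagingInteractionType.GET_PARENT_NOTIFICATION, SlackMessagingSpec()
).capture() as lifecycle:
    try:
        thread_ts = find_parent_thread()
    except Exception as e:
        lifecycle.record_halt(e)
        thread_ts = None
# ---------------------------------------------------------------------------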
updated." - # TODO potentially use old name if it's changed? + # TODO: potentially use old name if it's changed? blocks.append(self.get_markdown_block(rule_text)) diff --git a/src/sentry/integrations/slack/message_builder/types.py b/src/sentry/integrations/slack/message_builder/types.py index 0479f4ed77916..1aabe8d29f236 100644 --- a/src/sentry/integrations/slack/message_builder/types.py +++ b/src/sentry/integrations/slack/message_builder/types.py @@ -5,7 +5,7 @@ # TODO(mgaeta): Continue fleshing out these types. SlackAttachment = dict[str, Any] SlackBlock = dict[str, Any] -SlackBody = Union[SlackAttachment, SlackBlock, list[SlackAttachment]] +SlackBody = Union[SlackAttachment, SlackBlock] # Attachment colors used for issues with no actions take. LEVEL_TO_COLOR = { diff --git a/src/sentry/integrations/slack/notifications.py b/src/sentry/integrations/slack/notifications.py index 78c3dcc6e873a..50cfb9e9d62b6 100644 --- a/src/sentry/integrations/slack/notifications.py +++ b/src/sentry/integrations/slack/notifications.py @@ -54,16 +54,14 @@ def send_notification_as_slack( Sending Slack notifications to a channel is in integrations/slack/actions/notification.py""" service = SlackService.default() - with sentry_sdk.start_span( - op="notification.send_slack", description="gen_channel_integration_map" - ): + with sentry_sdk.start_span(op="notification.send_slack", name="gen_channel_integration_map"): data = get_integrations_by_channel_by_recipient( notification.organization, recipients, ExternalProviders.SLACK ) for recipient, integrations_by_channel in data.items(): - with sentry_sdk.start_span(op="notification.send_slack", description="send_one"): - with sentry_sdk.start_span(op="notification.send_slack", description="gen_attachments"): + with sentry_sdk.start_span(op="notification.send_slack", name="send_one"): + with sentry_sdk.start_span(op="notification.send_slack", name="gen_attachments"): attachments = service.get_attachments( notification, recipient, diff --git a/src/sentry/integrations/slack/requests/base.py b/src/sentry/integrations/slack/requests/base.py index 5c4e75fe1db97..f546bd9ff2f25 100644 --- a/src/sentry/integrations/slack/requests/base.py +++ b/src/sentry/integrations/slack/requests/base.py @@ -10,8 +10,10 @@ from slack_sdk.signature import SignatureVerifier from sentry import options +from sentry.constants import ObjectStatus from sentry.identity.services.identity import RpcIdentity, identity_service from sentry.identity.services.identity.model import RpcIdentityProvider +from sentry.integrations.messaging.commands import CommandInput from sentry.integrations.services.integration import RpcIntegration, integration_service from sentry.users.services.user import RpcUser from sentry.users.services.user.service import user_service @@ -224,7 +226,7 @@ def _check_verification_token(self, verification_token: str) -> bool: def validate_integration(self) -> None: if not self._integration: self._integration = integration_service.get_integration( - provider="slack", external_id=self.team_id + provider="slack", external_id=self.team_id, status=ObjectStatus.ACTIVE ) if not self._integration: @@ -276,5 +278,9 @@ def get_command_and_args(self) -> tuple[str, Sequence[str]]: return "", [] return command[0], command[1:] + def get_command_input(self) -> CommandInput: + cmd, args = self.get_command_and_args() + return CommandInput(cmd, tuple(args)) + def _validate_identity(self) -> None: self.user = self.get_identity_user() diff --git a/src/sentry/integrations/slack/service.py 
b/src/sentry/integrations/slack/service.py index e11784fef24b2..9b2dd314ca53f 100644 --- a/src/sentry/integrations/slack/service.py +++ b/src/sentry/integrations/slack/service.py @@ -335,7 +335,7 @@ def notify_recipient( """Send an "activity" or "alert rule" notification to a Slack user or team, but NOT to a channel directly. This is used in the send_notification_as_slack function.""" - with sentry_sdk.start_span(op="notification.send_slack", description="notify_recipient"): + with sentry_sdk.start_span(op="notification.send_slack", name="notify_recipient"): # Make a local copy to which we can append. local_attachments = copy(attachments) diff --git a/src/sentry/integrations/slack/tasks/link_slack_user_identities.py b/src/sentry/integrations/slack/tasks/link_slack_user_identities.py index 1417383534330..6dbc706b17ec9 100644 --- a/src/sentry/integrations/slack/tasks/link_slack_user_identities.py +++ b/src/sentry/integrations/slack/tasks/link_slack_user_identities.py @@ -5,9 +5,10 @@ from django.utils import timezone +from sentry.constants import ObjectStatus from sentry.integrations.services.integration import integration_service from sentry.integrations.slack.utils.users import SlackUserData, get_slack_data_by_user -from sentry.integrations.utils import get_identities_by_user +from sentry.integrations.utils.identities import get_identities_by_user from sentry.organizations.services.organization import organization_service from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task @@ -28,13 +29,20 @@ def link_slack_user_identities( integration_id: int, organization_id: int, ) -> None: - integration = integration_service.get_integration(integration_id=integration_id) + integration = integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) organization_context = organization_service.get_organization_by_id(id=organization_id) organization = organization_context.organization if organization_context else None if organization is None or integration is None: logger.error( "slack.post_install.link_identities.invalid_params", - extra={"organization": organization_id, "integration": integration_id}, + extra={ + "organization_id": organization_id, + "integration_id": integration_id, + "integration": bool(integration), + "organization": bool(organization), + }, ) return None diff --git a/src/sentry/integrations/slack/unfurl/discover.py b/src/sentry/integrations/slack/unfurl/discover.py index 61a2be526cefd..eccb01927e41a 100644 --- a/src/sentry/integrations/slack/unfurl/discover.py +++ b/src/sentry/integrations/slack/unfurl/discover.py @@ -15,9 +15,14 @@ from sentry.charts import backend as charts from sentry.charts.types import ChartType from sentry.discover.arithmetic import is_equation +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service from sentry.integrations.slack.message_builder.discover import SlackDiscoverMessageBuilder +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.unfurl.types import Handler, UnfurlableUrl, UnfurledUrl from sentry.models.apikey import ApiKey from sentry.models.organization import Organization @@ -115,6 +120,18 @@ def unfurl_discover( integration: Integration, links: list[UnfurlableUrl], user: User | None = None, +) -> UnfurledUrl: + event = MessagingInteractionEvent( 
+ MessagingInteractionType.UNFURL_DISCOVER, SlackMessagingSpec(), user=user + ) + with event.capture(): + return _unfurl_discover(integration, links, user) + + +def _unfurl_discover( + integration: Integration, + links: list[UnfurlableUrl], + user: User | None = None, ) -> UnfurledUrl: org_integrations = integration_service.get_organization_integrations( integration_id=integration.id diff --git a/src/sentry/integrations/slack/unfurl/issues.py b/src/sentry/integrations/slack/unfurl/issues.py index 135b9402cbe97..2524cfe30cca3 100644 --- a/src/sentry/integrations/slack/unfurl/issues.py +++ b/src/sentry/integrations/slack/unfurl/issues.py @@ -5,9 +5,14 @@ from django.http.request import HttpRequest from sentry import eventstore +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.unfurl.types import ( Handler, UnfurlableUrl, @@ -37,6 +42,14 @@ def unfurl_issues( for a particular issue by the URL of the yet-unfurled links a user included in their Slack message. """ + event = MessagingInteractionEvent( + MessagingInteractionType.UNFURL_ISSUES, SlackMessagingSpec(), user=user + ) + with event.capture(): + return _unfurl_issues(integration, links) + + +def _unfurl_issues(integration: Integration, links: list[UnfurlableUrl]) -> UnfurledUrl: org_integrations = integration_service.get_organization_integrations( integration_id=integration.id ) diff --git a/src/sentry/integrations/slack/unfurl/metric_alerts.py b/src/sentry/integrations/slack/unfurl/metric_alerts.py index 5153d180531a1..9e0c895abb15f 100644 --- a/src/sentry/integrations/slack/unfurl/metric_alerts.py +++ b/src/sentry/integrations/slack/unfurl/metric_alerts.py @@ -14,9 +14,14 @@ from sentry.incidents.charts import build_metric_alert_chart from sentry.incidents.models.alert_rule import AlertRule from sentry.incidents.models.incident import Incident +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service from sentry.integrations.slack.message_builder.metric_alerts import SlackMetricAlertMessageBuilder +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.unfurl.types import ( Handler, UnfurlableUrl, @@ -43,6 +48,18 @@ def unfurl_metric_alerts( integration: Integration, links: list[UnfurlableUrl], user: User | None = None, +) -> UnfurledUrl: + event = MessagingInteractionEvent( + MessagingInteractionType.UNFURL_METRIC_ALERTS, SlackMessagingSpec(), user=user + ) + with event.capture(): + return _unfurl_metric_alerts(integration, links, user) + + +def _unfurl_metric_alerts( + integration: Integration, + links: list[UnfurlableUrl], + user: User | None = None, ) -> UnfurledUrl: alert_filter_query = Q() incident_filter_query = Q() diff --git a/src/sentry/integrations/slack/webhooks/action.py b/src/sentry/integrations/slack/webhooks/action.py index 1b998f67f26cb..f97a632a8b69b 100644 --- a/src/sentry/integrations/slack/webhooks/action.py +++ b/src/sentry/integrations/slack/webhooks/action.py @@ -1,6 +1,7 @@ from __future__ import annotations import 
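# ---------------------------------------------------------------------------
# Illustrative sketch (editor's example): the wrapper shape shared by the
# discover, issues, and metric-alert unfurl handlers above. The public
# function adds only metric capture; the private worker keeps the original
# logic. `_unfurl_example` and its arguments are hypothetical placeholders.
def unfurl_example(integration, links, user=None):
    event = MessagingInteractionEvent(
        MessagingInteractionType.UNFURL_ISSUES, SlackMessagingSpec(), user=user
    )
    with event.capture():
        return _unfurl_example(integration, links, user)
# ---------------------------------------------------------------------------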
logging +from abc import ABC, abstractmethod from collections.abc import Mapping, MutableMapping, Sequence from typing import Any @@ -15,7 +16,7 @@ from slack_sdk.models.views import View from slack_sdk.webhook import WebhookClient -from sentry import analytics, options +from sentry import analytics from sentry.api import client from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus @@ -24,9 +25,12 @@ from sentry.api.helpers.group_index import update_groups from sentry.auth.access import from_member from sentry.exceptions import UnableToAcceptMemberInvitationException +from sentry.integrations.messaging.metrics import ( + MessagingInteractionEvent, + MessagingInteractionType, +) from sentry.integrations.services.integration import integration_service from sentry.integrations.slack.message_builder.issues import SlackIssuesMessageBuilder -from sentry.integrations.slack.message_builder.types import SlackBody from sentry.integrations.slack.metrics import ( SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC, SLACK_WEBHOOK_GROUP_ACTIONS_SUCCESS_DATADOG_METRIC, @@ -34,6 +38,7 @@ from sentry.integrations.slack.requests.action import SlackActionRequest from sentry.integrations.slack.requests.base import SlackRequestError from sentry.integrations.slack.sdk_client import SlackSdkClient +from sentry.integrations.slack.spec import SlackMessagingSpec from sentry.integrations.slack.utils.errors import MODAL_NOT_FOUND, unpack_slack_api_error from sentry.integrations.types import ExternalProviderEnum from sentry.integrations.utils.scope import bind_org_context_from_integration @@ -44,6 +49,7 @@ from sentry.notifications.services import notifications_service from sentry.notifications.utils.actions import BlockKitMessageAction, MessageAction from sentry.shared_integrations.exceptions import ApiError +from sentry.users.models import User from sentry.users.services.user import RpcUser from sentry.utils import metrics @@ -160,8 +166,7 @@ def get_group(slack_request: SlackActionRequest) -> Group | None: def _is_message(data: Mapping[str, Any]) -> bool: """ - XXX(epurkhiser): Used in coordination with construct_reply. - Bot posted messages will not have the type at all. + Bot posted messages will not have the type at all. 
""" return data.get("original_message", {}).get("type") == "message" @@ -324,251 +329,6 @@ def on_status( user_id=user.id, ) - def build_format_options(self, options: dict[str, str]) -> list[dict[str, Any]]: - return [ - { - "text": { - "type": "plain_text", - "text": text, - "emoji": True, - }, - "value": value, - } - for text, value in options.items() - ] - - def build_modal_payload( - self, - title: str, - action_text: str, - options: dict[str, str], - initial_option_text: str, - initial_option_value: str, - callback_id: str, - metadata: str, - ) -> View: - formatted_options = self.build_format_options(options) - - return View( - type="modal", - title={"type": "plain_text", "text": f"{title} Issue"}, - blocks=[ - { - "type": "section", - "text": {"type": "mrkdwn", "text": action_text}, - "accessory": { - "type": "static_select", - "initial_option": { - "text": { - "type": "plain_text", - "text": initial_option_text, - "emoji": True, - }, - "value": initial_option_value, - }, - "options": formatted_options, - "action_id": "static_select-action", - }, - } - ], - close={"type": "plain_text", "text": "Cancel"}, - submit={"type": "plain_text", "text": title}, - private_metadata=metadata, - callback_id=callback_id, - ) - - def build_resolve_modal_payload(self, callback_id: str, metadata: str) -> View: - return self.build_modal_payload( - title="Resolve", - action_text="Resolve", - options=RESOLVE_OPTIONS, - initial_option_text="Immediately", - initial_option_value="resolved", - callback_id=callback_id, - metadata=metadata, - ) - - def build_archive_modal_payload(self, callback_id: str, metadata: str) -> View: - return self.build_modal_payload( - title="Archive", - action_text="Archive", - options=ARCHIVE_OPTIONS, - initial_option_text="Until escalating", - initial_option_value="ignored:archived_until_escalating", - callback_id=callback_id, - metadata=metadata, - ) - - def _update_modal( - self, - slack_client: SlackSdkClient, - external_id: str, - modal_payload: View, - slack_request: SlackActionRequest, - ) -> None: - try: - slack_client.views_update( - external_id=external_id, - view=modal_payload, - ) - except SlackApiError as e: - # If the external_id is not found, Slack we send `not_found` error - # https://api.slack.com/methods/views.update - if unpack_slack_api_error(e) == MODAL_NOT_FOUND: - metrics.incr( - SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC, - sample_rate=1.0, - tags={"type": "update_modal"}, - ) - logging_data = slack_request.get_logging_data() - _logger.exception( - "slack.action.update-modal-not-found", - extra={ - **logging_data, - "trigger_id": slack_request.data["trigger_id"], - "dialog": "resolve", - }, - ) - # The modal was not found, so we need to open a new one - self._open_modal(slack_client, modal_payload, slack_request) - else: - raise - - def _open_modal( - self, slack_client: SlackSdkClient, modal_payload: View, slack_request: SlackActionRequest - ) -> None: - # Error handling is done in the calling function - slack_client.views_open( - trigger_id=slack_request.data["trigger_id"], - view=modal_payload, - ) - - def open_resolve_dialog(self, slack_request: SlackActionRequest, group: Group) -> None: - # XXX(epurkhiser): In order to update the original message we have to - # keep track of the response_url in the callback_id. 
Definitely hacky, - # but seems like there's no other solutions [1]: - # - # [1]: https://stackoverflow.com/questions/46629852/update-a-bot-message-after-responding-to-a-slack-dialog#comment80795670_46629852 - org = group.project.organization - callback_id_dict = { - "issue": group.id, - "orig_response_url": slack_request.data["response_url"], - "is_message": _is_message(slack_request.data), - } - if slack_request.data.get("channel"): - callback_id_dict["channel_id"] = slack_request.data["channel"]["id"] - callback_id_dict["rule"] = slack_request.callback_data.get("rule") - callback_id = orjson.dumps(callback_id_dict).decode() - - # only add tags to metadata - metadata_dict = callback_id_dict.copy() - metadata_dict["tags"] = list(slack_request.get_tags()) - metadata = orjson.dumps(metadata_dict).decode() - - # XXX(CEO): the second you make a selection (without hitting Submit) it sends a slightly different request - modal_payload = self.build_resolve_modal_payload(callback_id, metadata=metadata) - slack_client = SlackSdkClient(integration_id=slack_request.integration.id) - try: - # We need to use the action_ts as the external_id to update the modal - # We passed this in control when we sent the loading modal to beat the 3 second timeout - external_id = slack_request.get_action_ts() - - if not external_id or not options.get("send-slack-response-from-control-silo"): - # If we don't have an external_id or option is disabled we need to open a new modal - self._open_modal(slack_client, modal_payload, slack_request) - else: - self._update_modal(slack_client, external_id, modal_payload, slack_request) - - metrics.incr( - SLACK_WEBHOOK_GROUP_ACTIONS_SUCCESS_DATADOG_METRIC, - sample_rate=1.0, - tags={"type": "resolve_modal_open"}, - ) - except SlackApiError: - metrics.incr( - SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC, - sample_rate=1.0, - tags={"type": "resolve_modal_open"}, - ) - _logger.exception( - "slack.action.response-error", - extra={ - "organization_id": org.id, - "integration_id": slack_request.integration.id, - "trigger_id": slack_request.data["trigger_id"], - "dialog": "resolve", - }, - ) - - def open_archive_dialog(self, slack_request: SlackActionRequest, group: Group) -> None: - org = group.project.organization - - callback_id_dict = { - "issue": group.id, - "orig_response_url": slack_request.data["response_url"], - "is_message": _is_message(slack_request.data), - "rule": slack_request.callback_data.get("rule"), - } - - if slack_request.data.get("channel"): - callback_id_dict["channel_id"] = slack_request.data["channel"]["id"] - callback_id = orjson.dumps(callback_id_dict).decode() - - # only add tags to metadata - metadata_dict = callback_id_dict.copy() - metadata_dict["tags"] = list(slack_request.get_tags()) - metadata = orjson.dumps(metadata_dict).decode() - - modal_payload = self.build_archive_modal_payload(callback_id, metadata=metadata) - slack_client = SlackSdkClient(integration_id=slack_request.integration.id) - try: - # We need to use the action_ts as the external_id to update the modal - # We passed this in control when we sent the loading modal to beat the 3 second timeout - external_id = slack_request.get_action_ts() - - if not external_id or not options.get("send-slack-response-from-control-silo"): - # If we don't have an external_id or option is disabled we need to open a new modal - self._open_modal(slack_client, modal_payload, slack_request) - else: - self._update_modal(slack_client, external_id, modal_payload, slack_request) - - metrics.incr( - 
SLACK_WEBHOOK_GROUP_ACTIONS_SUCCESS_DATADOG_METRIC, - sample_rate=1.0, - tags={"type": "archive_modal_open"}, - ) - except SlackApiError: - metrics.incr( - SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC, - sample_rate=1.0, - tags={"type": "archive_modal_open"}, - ) - _logger.exception( - "slack.action.response-error", - extra={ - "organization_id": org.id, - "integration_id": slack_request.integration.id, - "trigger_id": slack_request.data["trigger_id"], - "dialog": "archive", - }, - ) - - def construct_reply(self, attachment: SlackBody, is_message: bool = False) -> SlackBody: - # XXX(epurkhiser): Slack is inconsistent about it's expected responses - # for interactive action requests. - # - # * For _unfurled_ action responses, slack expects the entire - # attachment body used to replace the unfurled attachment to be at - # the top level of the json response body. - # - # * For _bot posted message_ action responses, slack expects the - # attachment body used to replace the attachment to be within an - # `attachments` array. - if is_message: - attachment = {"attachments": [attachment]} - - return attachment - def _handle_group_actions( self, slack_request: SlackActionRequest, @@ -677,23 +437,33 @@ def _handle_group_actions( # response_url later to update it. defer_attachment_update = False + def record_event(interaction_type: MessagingInteractionType) -> MessagingInteractionEvent: + user = request.user + return MessagingInteractionEvent( + interaction_type, + SlackMessagingSpec(), + user=(user if isinstance(user, User) else None), + organization=(group.project.organization if group else None), + ) + # Handle interaction actions for action in action_list: try: - if action.name in ( - "status", - "unresolved:ongoing", - ): - self.on_status(request, identity_user, group, action) + if action.name in ("status", "unresolved:ongoing"): + with record_event(MessagingInteractionType.STATUS).capture(): + self.on_status(request, identity_user, group, action) elif ( action.name == "assign" ): # TODO: remove this as it is replaced by the options-load endpoint - self.on_assign(request, identity_user, group, action) + with record_event(MessagingInteractionType.ASSIGN).capture(): + self.on_assign(request, identity_user, group, action) elif action.name == "resolve_dialog": - self.open_resolve_dialog(slack_request, group) + with record_event(MessagingInteractionType.RESOLVE_DIALOG).capture(): + _ResolveDialog().open_dialog(slack_request, group) defer_attachment_update = True elif action.name == "archive_dialog": - self.open_archive_dialog(slack_request, group) + with record_event(MessagingInteractionType.ARCHIVE_DIALOG).capture(): + _ArchiveDialog().open_dialog(slack_request, group) defer_attachment_update = True except client.ApiError as error: return self.api_error(slack_request, group, identity_user, error, action.name) @@ -970,3 +740,204 @@ def handle_member_approval(self, slack_request: SlackActionRequest, action: str) ) return self.respond({"text": message}) + + +class _ModalDialog(ABC): + @property + @abstractmethod + def dialog_type(self) -> str: + raise NotImplementedError + + def _build_format_options(self, options: dict[str, str]) -> list[dict[str, Any]]: + return [ + { + "text": { + "type": "plain_text", + "text": text, + "emoji": True, + }, + "value": value, + } + for text, value in options.items() + ] + + def build_modal_payload( + self, + title: str, + action_text: str, + options: dict[str, str], + initial_option_text: str, + initial_option_value: str, + callback_id: str, + metadata: str, + 
) -> View:
+        formatted_options = self._build_format_options(options)
+
+        return View(
+            type="modal",
+            title={"type": "plain_text", "text": f"{title} Issue"},
+            blocks=[
+                {
+                    "type": "section",
+                    "text": {"type": "mrkdwn", "text": action_text},
+                    "accessory": {
+                        "type": "static_select",
+                        "initial_option": {
+                            "text": {
+                                "type": "plain_text",
+                                "text": initial_option_text,
+                                "emoji": True,
+                            },
+                            "value": initial_option_value,
+                        },
+                        "options": formatted_options,
+                        "action_id": "static_select-action",
+                    },
+                }
+            ],
+            close={"type": "plain_text", "text": "Cancel"},
+            submit={"type": "plain_text", "text": title},
+            private_metadata=metadata,
+            callback_id=callback_id,
+        )
+
+    @abstractmethod
+    def get_modal_payload(self, callback_id: str, metadata: str) -> View:
+        raise NotImplementedError
+
+    def _update_modal(
+        self,
+        slack_client: SlackSdkClient,
+        external_id: str,
+        modal_payload: View,
+        slack_request: SlackActionRequest,
+    ) -> None:
+        try:
+            slack_client.views_update(
+                external_id=external_id,
+                view=modal_payload,
+            )
+        except SlackApiError as e:
+            # If the external_id is not found, Slack will send a `not_found` error
+            # https://api.slack.com/methods/views.update
+            if unpack_slack_api_error(e) == MODAL_NOT_FOUND:
+                metrics.incr(
+                    SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC,
+                    sample_rate=1.0,
+                    tags={"type": "update_modal"},
+                )
+                logging_data = slack_request.get_logging_data()
+                _logger.exception(
+                    "slack.action.update-modal-not-found",
+                    extra={
+                        **logging_data,
+                        "trigger_id": slack_request.data["trigger_id"],
+                        "dialog": self.dialog_type,
+                    },
+                )
+                # The modal was not found, so we need to open a new one
+                self._open_modal(slack_client, modal_payload, slack_request)
+            else:
+                raise
+
+    def _open_modal(
+        self, slack_client: SlackSdkClient, modal_payload: View, slack_request: SlackActionRequest
+    ) -> None:
+        # Error handling is done in the calling function
+        slack_client.views_open(
+            trigger_id=slack_request.data["trigger_id"],
+            view=modal_payload,
+        )
+
+    def open_dialog(self, slack_request: SlackActionRequest, group: Group) -> None:
+        # XXX(epurkhiser): In order to update the original message we have to
+        # keep track of the response_url in the callback_id.
Definitely hacky,
+        # but it seems like there's no other solution [1]:
+        #
+        # [1]: https://stackoverflow.com/questions/46629852/update-a-bot-message-after-responding-to-a-slack-dialog#comment80795670_46629852
+        org = group.project.organization
+
+        callback_id_dict = {
+            "issue": group.id,
+            "orig_response_url": slack_request.data["response_url"],
+            "is_message": _is_message(slack_request.data),
+            "rule": slack_request.callback_data.get("rule"),
+        }
+
+        if slack_request.data.get("channel"):
+            callback_id_dict["channel_id"] = slack_request.data["channel"]["id"]
+        callback_id = orjson.dumps(callback_id_dict).decode()
+
+        # only add tags to metadata
+        metadata_dict = callback_id_dict.copy()
+        metadata_dict["tags"] = list(slack_request.get_tags())
+        metadata = orjson.dumps(metadata_dict).decode()
+
+        # XXX(CEO): the second you make a selection (without hitting Submit) it sends a slightly different request
+        modal_payload = self.get_modal_payload(callback_id, metadata=metadata)
+        slack_client = SlackSdkClient(integration_id=slack_request.integration.id)
+        try:
+            # We need to use the action_ts as the external_id to update the modal
+            # We passed this in control when we sent the loading modal to beat the 3 second timeout
+            external_id = slack_request.get_action_ts()
+
+            if not external_id:
+                # If we don't have an external_id we need to open a new modal
+                self._open_modal(slack_client, modal_payload, slack_request)
+            else:
+                self._update_modal(slack_client, external_id, modal_payload, slack_request)
+
+            metrics.incr(
+                SLACK_WEBHOOK_GROUP_ACTIONS_SUCCESS_DATADOG_METRIC,
+                sample_rate=1.0,
+                tags={"type": f"{self.dialog_type}_modal_open"},
+            )
+        except SlackApiError:
+            metrics.incr(
+                SLACK_WEBHOOK_GROUP_ACTIONS_FAILURE_DATADOG_METRIC,
+                sample_rate=1.0,
+                tags={"type": f"{self.dialog_type}_modal_open"},
+            )
+            _logger.exception(
+                "slack.action.response-error",
+                extra={
+                    "organization_id": org.id,
+                    "integration_id": slack_request.integration.id,
+                    "trigger_id": slack_request.data["trigger_id"],
+                    "dialog": self.dialog_type,
+                },
+            )
+
+
+class _ResolveDialog(_ModalDialog):
+    @property
+    def dialog_type(self) -> str:
+        return "resolve"
+
+    def get_modal_payload(self, callback_id: str, metadata: str) -> View:
+        return self.build_modal_payload(
+            title="Resolve",
+            action_text="Resolve",
+            options=RESOLVE_OPTIONS,
+            initial_option_text="Immediately",
+            initial_option_value="resolved",
+            callback_id=callback_id,
+            metadata=metadata,
+        )
+
+
+class _ArchiveDialog(_ModalDialog):
+    @property
+    def dialog_type(self) -> str:
+        return "archive"
+
+    def get_modal_payload(self, callback_id: str, metadata: str) -> View:
+        return self.build_modal_payload(
+            title="Archive",
+            action_text="Archive",
+            options=ARCHIVE_OPTIONS,
+            initial_option_text="Until escalating",
+            initial_option_value="ignored:archived_until_escalating",
+            callback_id=callback_id,
+            metadata=metadata,
+        )
diff --git a/src/sentry/integrations/slack/webhooks/base.py b/src/sentry/integrations/slack/webhooks/base.py
index 1d2eba49c6ba1..b0663cccebb8d 100644
--- a/src/sentry/integrations/slack/webhooks/base.py
+++ b/src/sentry/integrations/slack/webhooks/base.py
@@ -1,17 +1,30 @@
 from __future__ import annotations

 import abc
+import logging
+from collections.abc import Callable, Iterable
+from dataclasses import dataclass

 from rest_framework import status
 from rest_framework.response import Response

 from sentry.api.base import Endpoint
+from sentry.integrations.messaging import commands
+from
sentry.integrations.messaging.commands import ( + CommandInput, + CommandNotMatchedError, + MessagingIntegrationCommand, + MessagingIntegrationCommandDispatcher, +) +from sentry.integrations.messaging.spec import MessagingIntegrationSpec from sentry.integrations.slack.message_builder.help import SlackHelpMessageBuilder from sentry.integrations.slack.metrics import ( SLACK_WEBHOOK_DM_ENDPOINT_FAILURE_DATADOG_METRIC, SLACK_WEBHOOK_DM_ENDPOINT_SUCCESS_DATADOG_METRIC, ) from sentry.integrations.slack.requests.base import SlackDMRequest, SlackRequestError +from sentry.integrations.slack.spec import SlackMessagingSpec +from sentry.utils import metrics LINK_USER_MESSAGE = ( "<{associate_url}|Link your Slack identity> to your Sentry account to receive notifications. " @@ -24,9 +37,6 @@ NOT_LINKED_MESSAGE = "You do not have a linked identity to unlink." ALREADY_LINKED_MESSAGE = "You are already linked as `{username}`." -import logging - -from sentry.utils import metrics logger = logging.getLogger(__name__) @@ -42,33 +52,21 @@ def post_dispatcher(self, request: SlackDMRequest) -> Response: All Slack commands are handled by this endpoint. This block just validates the request and dispatches it to the right handler. """ - command, args = request.get_command_and_args() - - if command in ["help", "", "support", "docs"]: - return self.respond(SlackHelpMessageBuilder(command=command).build()) - - if command == "link": - if not args: - return self.link_user(request) - - if args[0] == "team": - return self.link_team(request) - - if command == "unlink": - if not args: - return self.unlink_user(request) - - if args[0] == "team": - return self.unlink_team(request) - - # If we cannot interpret the command, print help text. - request_data = request.data - unknown_command = request_data.get("text", "").lower() - return self.respond(SlackHelpMessageBuilder(unknown_command).build()) + cmd_input = request.get_command_input() + try: + return SlackCommandDispatcher(self, request).dispatch(cmd_input) + except CommandNotMatchedError: + # If we cannot interpret the command, print help text. 
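# --- Editor's sketch (illustrative, not part of this diff): the dispatch
# pattern that SlackCommandDispatcher below adopts in place of the old
# if/elif chain. `Dispatcher`, `NotMatched`, and `handlers` are hypothetical
# stand-ins for MessagingIntegrationCommandDispatcher, CommandNotMatchedError,
# and command_handlers; only the shape of the pattern is asserted here.
from collections.abc import Callable, Iterable


class NotMatched(Exception):
    """Raised when no registered command matches the input."""


class Dispatcher:
    def handlers(self) -> Iterable[tuple[str, Callable[[list[str]], str]]]:
        # Each entry pairs a command keyword with the callable serving it,
        # mirroring the (MessagingIntegrationCommand, handler) tuples below.
        yield "help", (lambda args: "help text")
        yield "link", (lambda args: f"linking {args[0] if args else 'user'}")

    def dispatch(self, text: str) -> str:
        keyword, *args = text.lower().split() or [""]
        for candidate, handler in self.handlers():
            if keyword == candidate:
                return handler(args)
        raise NotMatched(keyword)


# Dispatcher().dispatch("link team") -> "linking team"; an unknown command
# raises NotMatched, and the caller falls back to help text, which is the
# control flow post_dispatcher implements above.
# --- End of editor's sketch.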
+ request_data = request.data + unknown_command = request_data.get("text", "").lower() + return self.help(unknown_command) def reply(self, slack_request: SlackDMRequest, message: str) -> Response: raise NotImplementedError + def help(self, command: str) -> Response: + return self.respond(SlackHelpMessageBuilder(command).build()) + def link_user(self, slack_request: SlackDMRequest) -> Response: from sentry.integrations.slack.views.link_identity import build_linking_url @@ -124,3 +122,23 @@ def link_team(self, slack_request: SlackDMRequest) -> Response: def unlink_team(self, slack_request: SlackDMRequest) -> Response: raise NotImplementedError + + +@dataclass(frozen=True) +class SlackCommandDispatcher(MessagingIntegrationCommandDispatcher[Response]): + endpoint: SlackDMEndpoint + request: SlackDMRequest + + @property + def integration_spec(self) -> MessagingIntegrationSpec: + return SlackMessagingSpec() + + @property + def command_handlers( + self, + ) -> Iterable[tuple[MessagingIntegrationCommand, Callable[[CommandInput], Response]]]: + yield commands.HELP, (lambda i: self.endpoint.help(i.cmd_value)) + yield commands.LINK_IDENTITY, (lambda i: self.endpoint.link_user(self.request)) + yield commands.UNLINK_IDENTITY, (lambda i: self.endpoint.unlink_user(self.request)) + yield commands.LINK_TEAM, (lambda i: self.endpoint.link_team(self.request)) + yield commands.UNLINK_TEAM, (lambda i: self.endpoint.unlink_team(self.request)) diff --git a/src/sentry/integrations/slack/webhooks/command.py b/src/sentry/integrations/slack/webhooks/command.py index f9730ef18758f..ea0f111566da4 100644 --- a/src/sentry/integrations/slack/webhooks/command.py +++ b/src/sentry/integrations/slack/webhooks/command.py @@ -6,6 +6,7 @@ from rest_framework.request import Request from rest_framework.response import Response +from sentry import features from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -102,7 +103,10 @@ def link_team(self, slack_request: SlackDMRequest) -> Response: has_valid_role = False for organization_membership in organization_memberships: - if is_team_linked_to_channel(organization_membership.organization, slack_request): + if not features.has( + "organizations:slack-multiple-team-single-channel-linking", + organization_membership.organization, + ) and is_team_linked_to_channel(organization_membership.organization, slack_request): return self.reply(slack_request, CHANNEL_ALREADY_LINKED_MESSAGE) if is_valid_role(organization_membership) or is_team_admin(organization_membership): diff --git a/src/sentry/integrations/source_code_management/commit_context.py b/src/sentry/integrations/source_code_management/commit_context.py index 5b89bc23c6ac4..590431ef4d4c7 100644 --- a/src/sentry/integrations/source_code_management/commit_context.py +++ b/src/sentry/integrations/source_code_management/commit_context.py @@ -4,22 +4,51 @@ from abc import ABC, abstractmethod from collections.abc import Mapping, Sequence from dataclasses import dataclass -from datetime import datetime +from datetime import datetime, timedelta, timezone from typing import Any -from django.utils import timezone +import sentry_sdk +from django.utils import timezone as django_timezone from sentry import analytics from sentry.auth.exceptions import IdentityNotValid from sentry.integrations.models.repository_project_path_config import RepositoryProjectPathConfig -from sentry.models.pullrequest import CommentType, PullRequestComment +from 
sentry.locks import locks +from sentry.models.commit import Commit +from sentry.models.group import Group +from sentry.models.groupowner import GroupOwner +from sentry.models.options.organization_option import OrganizationOption +from sentry.models.project import Project +from sentry.models.pullrequest import ( + CommentType, + PullRequest, + PullRequestComment, + PullRequestCommit, +) from sentry.models.repository import Repository from sentry.users.models.identity import Identity from sentry.utils import metrics +from sentry.utils.cache import cache logger = logging.getLogger(__name__) +def _debounce_pr_comment_cache_key(pullrequest_id: int) -> str: + return f"pr-comment-{pullrequest_id}" + + +def _debounce_pr_comment_lock_key(pullrequest_id: int) -> str: + return f"queue_comment_task:{pullrequest_id}" + + +def _pr_comment_log(integration_name: str, suffix: str) -> str: + return f"{integration_name}.pr_comment.{suffix}" + + +PR_COMMENT_TASK_TTL = timedelta(minutes=5).total_seconds() +PR_COMMENT_WINDOW = 14 # days + + @dataclass class SourceLineInfo: lineno: int @@ -84,6 +113,122 @@ def get_commit_context_all_frames( """ return self.get_blame_for_files(files, extra) + def queue_comment_task_if_needed( + self, + project: Project, + commit: Commit, + group_owner: GroupOwner, + group_id: int, + ) -> None: + if not OrganizationOption.objects.get_value( + organization=project.organization, + key="sentry:github_pr_bot", + default=True, + ): + logger.info( + _pr_comment_log(integration_name=self.integration_name, suffix="disabled"), + extra={"organization_id": project.organization_id}, + ) + return + + repo_query = Repository.objects.filter(id=commit.repository_id).order_by("-date_added") + group = Group.objects.get_from_cache(id=group_id) + if not ( + group.level is not logging.INFO and repo_query.exists() + ): # Don't comment on info level issues + logger.info( + _pr_comment_log( + integration_name=self.integration_name, suffix="incorrect_repo_config" + ), + extra={"organization_id": project.organization_id}, + ) + return + + repo: Repository = repo_query.get() + + logger.info( + _pr_comment_log(integration_name=self.integration_name, suffix="queue_comment_check"), + extra={"organization_id": commit.organization_id, "merge_commit_sha": commit.key}, + ) + from sentry.integrations.github.tasks.pr_comment import github_comment_workflow + + # client will raise an Exception if the request is not successful + try: + client = self.get_client() + merge_commit_sha = client.get_merge_commit_sha_from_commit( + repo=repo.name, sha=commit.key + ) + except Exception as e: + sentry_sdk.capture_exception(e) + return + + if merge_commit_sha is None: + logger.info( + _pr_comment_log( + integration_name=self.integration_name, + suffix="queue_comment_workflow.commit_not_in_default_branch", + ), + extra={ + "organization_id": commit.organization_id, + "repository_id": repo.id, + "commit_sha": commit.key, + }, + ) + return + + pr_query = PullRequest.objects.filter( + organization_id=commit.organization_id, + repository_id=commit.repository_id, + merge_commit_sha=merge_commit_sha, + ) + if not pr_query.exists(): + logger.info( + _pr_comment_log( + integration_name=self.integration_name, + suffix="queue_comment_workflow.missing_pr", + ), + extra={ + "organization_id": commit.organization_id, + "repository_id": repo.id, + "commit_sha": commit.key, + }, + ) + return + + pr = pr_query.first() + assert pr is not None + # need to query explicitly for merged PR comments since we can have multiple comments per PR + 
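# --- Editor's sketch (illustrative, not part of this diff): the debounce
# that queue_comment_task_if_needed applies below before enqueueing the
# comment workflow. The real implementation uses sentry.locks plus
# sentry.utils.cache with PR_COMMENT_TASK_TTL; this stand-alone stand-in
# only shows the shape of the check-and-set.
import time
from collections.abc import Callable

_seen: dict[str, float] = {}  # cache stand-in: key -> expiry timestamp


def enqueue_once(key: str, ttl_seconds: float, enqueue: Callable[[], None]) -> bool:
    """Enqueue at most once per `ttl_seconds` for a given key."""
    # The real code also takes a short distributed lock around this
    # check-and-set so concurrent workers cannot both pass the cache check.
    now = time.monotonic()
    expiry = _seen.get(key)
    if expiry is not None and now < expiry:
        return False  # a task for this PR was queued within the window
    _seen[key] = now + ttl_seconds
    enqueue()
    return True


# enqueue_once(f"pr-comment-{123}", 300, lambda: print("queued")) queues the
# first call and suppresses repeats for five minutes, mirroring
# _debounce_pr_comment_cache_key and PR_COMMENT_TASK_TTL above.
# --- End of editor's sketch.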
merged_pr_comment_query = PullRequestComment.objects.filter( + pull_request_id=pr.id, comment_type=CommentType.MERGED_PR + ) + if pr.date_added >= datetime.now(tz=timezone.utc) - timedelta(days=PR_COMMENT_WINDOW) and ( + not merged_pr_comment_query.exists() + or group_owner.group_id not in merged_pr_comment_query[0].group_ids + ): + lock = locks.get( + _debounce_pr_comment_lock_key(pr.id), duration=10, name="queue_comment_task" + ) + with lock.acquire(): + cache_key = _debounce_pr_comment_cache_key(pullrequest_id=pr.id) + if cache.get(cache_key) is not None: + return + + # create PR commit row for suspect commit and PR + PullRequestCommit.objects.get_or_create(commit=commit, pull_request=pr) + + logger.info( + _pr_comment_log( + integration_name=self.integration_name, suffix="queue_comment_workflow" + ), + extra={"pullrequest_id": pr.id, "project_id": group_owner.project_id}, + ) + + cache.set(cache_key, True, PR_COMMENT_TASK_TTL) + + github_comment_workflow.delay( + pullrequest_id=pr.id, project_id=group_owner.project_id + ) + def create_or_update_comment( self, repo: Repository, @@ -94,6 +239,7 @@ def create_or_update_comment( metrics_base: str, comment_type: int = CommentType.MERGED_PR, language: str | None = None, + github_copilot_actions: list[dict[str, Any]] | None = None, ): client = self.get_client() @@ -105,10 +251,19 @@ def create_or_update_comment( # client will raise ApiError if the request is not successful if pr_comment is None: resp = client.create_comment( - repo=repo.name, issue_id=str(pr_key), data={"body": comment_body} + repo=repo.name, + issue_id=str(pr_key), + data=( + { + "body": comment_body, + "actions": github_copilot_actions, + } + if github_copilot_actions + else {"body": comment_body} + ), ) - current_time = timezone.now() + current_time = django_timezone.now() comment = PullRequestComment.objects.create( external_id=resp.body["id"], pull_request_id=pullrequest_id, @@ -134,12 +289,19 @@ def create_or_update_comment( repo=repo.name, issue_id=str(pr_key), comment_id=pr_comment.external_id, - data={"body": comment_body}, + data=( + { + "body": comment_body, + "actions": github_copilot_actions, + } + if github_copilot_actions + else {"body": comment_body} + ), ) metrics.incr( metrics_base.format(integration=self.integration_name, key="comment_updated") ) - pr_comment.updated_at = timezone.now() + pr_comment.updated_at = django_timezone.now() pr_comment.group_ids = issue_list pr_comment.save() @@ -169,3 +331,7 @@ def update_comment( self, repo: str, issue_id: str, comment_id: str, data: Mapping[str, Any] ) -> Any: raise NotImplementedError + + @abstractmethod + def get_merge_commit_sha_from_commit(self, repo: str, sha: str) -> str | None: + raise NotImplementedError diff --git a/src/sentry/integrations/tasks/create_comment.py b/src/sentry/integrations/tasks/create_comment.py index 2e383b2a26877..e1f82248a0003 100644 --- a/src/sentry/integrations/tasks/create_comment.py +++ b/src/sentry/integrations/tasks/create_comment.py @@ -2,12 +2,11 @@ from sentry.integrations.models.external_issue import ExternalIssue from sentry.integrations.tasks import should_comment_sync from sentry.models.activity import Activity -from sentry.silo.base import SiloMode, region_silo_function +from sentry.silo.base import SiloMode from sentry.tasks.base import instrumented_task from sentry.types.activity import ActivityType -@region_silo_function @instrumented_task( name="sentry.integrations.tasks.create_comment", queue="integrations", diff --git 
a/src/sentry/integrations/tasks/sync_assignee_outbound.py b/src/sentry/integrations/tasks/sync_assignee_outbound.py index 9b68da6c19379..78b24fe9273a2 100644 --- a/src/sentry/integrations/tasks/sync_assignee_outbound.py +++ b/src/sentry/integrations/tasks/sync_assignee_outbound.py @@ -1,6 +1,10 @@ +from typing import Any + from sentry import analytics, features +from sentry.constants import ObjectStatus from sentry.integrations.models.external_issue import ExternalIssue from sentry.integrations.models.integration import Integration +from sentry.integrations.services.assignment_source import AssignmentSource from sentry.integrations.services.integration import integration_service from sentry.models.organization import Organization from sentry.silo.base import SiloMode @@ -24,7 +28,12 @@ Organization.DoesNotExist, ) ) -def sync_assignee_outbound(external_issue_id: int, user_id: int | None, assign: bool) -> None: +def sync_assignee_outbound( + external_issue_id: int, + user_id: int | None, + assign: bool, + assignment_source_dict: dict[str, Any] | None = None, +) -> None: # Sync Sentry assignee to an external issue. external_issue = ExternalIssue.objects.get(id=external_issue_id) @@ -32,7 +41,9 @@ def sync_assignee_outbound(external_issue_id: int, user_id: int | None, assign: has_issue_sync = features.has("organizations:integrations-issue-sync", organization) if not has_issue_sync: return - integration = integration_service.get_integration(integration_id=external_issue.integration_id) + integration = integration_service.get_integration( + integration_id=external_issue.integration_id, status=ObjectStatus.ACTIVE + ) if not integration: return @@ -42,10 +53,15 @@ def sync_assignee_outbound(external_issue_id: int, user_id: int | None, assign: ): return - if installation.should_sync("outbound_assignee"): + parsed_assignment_source = ( + AssignmentSource.from_dict(assignment_source_dict) if assignment_source_dict else None + ) + if installation.should_sync("outbound_assignee", parsed_assignment_source): # Assume unassign if None. 
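# --- Editor's sketch (illustrative, not part of this diff): why the task
# accepts `assignment_source_dict` rather than an object. Celery kwargs must
# be serializable, so the value object crosses the broker as a plain dict
# and is rebuilt on the worker, as AssignmentSource.from_dict does above.
# `Source` and its fields are assumptions, not the real AssignmentSource.
from dataclasses import asdict, dataclass
from typing import Any


@dataclass(frozen=True)
class Source:
    source_name: str
    integration_id: int

    def to_dict(self) -> dict[str, Any]:
        return asdict(self)

    @classmethod
    def from_dict(cls, raw: dict[str, Any]) -> "Source":
        return cls(**raw)


# Producer side: kwargs={"assignment_source_dict": Source("jira", 1).to_dict()}
# Worker side: parsed = Source.from_dict(raw) if raw else None
assert Source.from_dict(Source("jira", 1).to_dict()) == Source("jira", 1)
# --- End of editor's sketch.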
user = user_service.get_user(user_id) if user_id else None - installation.sync_assignee_outbound(external_issue, user, assign=assign) + installation.sync_assignee_outbound( + external_issue, user, assign=assign, assignment_source=parsed_assignment_source + ) analytics.record( "integration.issue.assignee.synced", provider=integration.provider, diff --git a/src/sentry/integrations/tasks/sync_status_inbound.py b/src/sentry/integrations/tasks/sync_status_inbound.py index e738c09ce888d..729428d511821 100644 --- a/src/sentry/integrations/tasks/sync_status_inbound.py +++ b/src/sentry/integrations/tasks/sync_status_inbound.py @@ -7,6 +7,7 @@ from sentry import analytics from sentry.api.helpers.group_index.update import get_current_release_version_of_group +from sentry.constants import ObjectStatus from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service from sentry.models.group import Group, GroupStatus @@ -181,7 +182,9 @@ def sync_status_inbound( ) -> None: from sentry.integrations.mixins import ResolveSyncAction - integration = integration_service.get_integration(integration_id=integration_id) + integration = integration_service.get_integration( + integration_id=integration_id, status=ObjectStatus.ACTIVE + ) if integration is None: raise Integration.DoesNotExist diff --git a/src/sentry/integrations/tasks/sync_status_outbound.py b/src/sentry/integrations/tasks/sync_status_outbound.py index 3ea2807e28ead..7aa1fb3afcc9d 100644 --- a/src/sentry/integrations/tasks/sync_status_outbound.py +++ b/src/sentry/integrations/tasks/sync_status_outbound.py @@ -1,4 +1,5 @@ from sentry import analytics, features +from sentry.constants import ObjectStatus from sentry.integrations.models.external_issue import ExternalIssue from sentry.integrations.models.integration import Integration from sentry.integrations.services.integration import integration_service @@ -34,7 +35,9 @@ def sync_status_outbound(group_id: int, external_issue_id: int) -> bool | None: # Issue link could have been deleted while sync job was in the queue. 
return None - integration = integration_service.get_integration(integration_id=external_issue.integration_id) + integration = integration_service.get_integration( + integration_id=external_issue.integration_id, status=ObjectStatus.ACTIVE + ) if not integration: return None installation = integration.get_installation(organization_id=external_issue.organization_id) diff --git a/src/sentry/integrations/utils/__init__.py b/src/sentry/integrations/utils/__init__.py index ca99167c50117..e69de29bb2d1d 100644 --- a/src/sentry/integrations/utils/__init__.py +++ b/src/sentry/integrations/utils/__init__.py @@ -1,24 +0,0 @@ -__all__ = ( - "AtlassianConnectValidationError", - "authenticate_asymmetric_jwt", - "get_identities_by_user", - "get_identity_or_404", - "get_integration_from_jwt", - "get_integration_from_request", - "get_query_hash", - "sync_group_assignee_inbound", - "sync_group_assignee_outbound", - "verify_claims", - "where_should_sync", -) - -from .atlassian_connect import ( - AtlassianConnectValidationError, - authenticate_asymmetric_jwt, - get_integration_from_jwt, - get_integration_from_request, - get_query_hash, - verify_claims, -) -from .identities import get_identities_by_user, get_identity_or_404 -from .sync import sync_group_assignee_inbound, sync_group_assignee_outbound, where_should_sync diff --git a/src/sentry/integrations/utils/common.py b/src/sentry/integrations/utils/common.py index e7caf84a8b9de..59629f4c100d3 100644 --- a/src/sentry/integrations/utils/common.py +++ b/src/sentry/integrations/utils/common.py @@ -1,8 +1,8 @@ import logging +from sentry.constants import ObjectStatus from sentry.integrations.services.integration import RpcIntegration, integration_service from sentry.integrations.types import ExternalProviderEnum -from sentry.models.organization import OrganizationStatus _default_logger = logging.getLogger(__name__) @@ -13,7 +13,7 @@ def get_active_integration_for_organization( try: return integration_service.get_integration( organization_id=organization_id, - status=OrganizationStatus.ACTIVE, + status=ObjectStatus.ACTIVE, provider=provider.value, ) except Exception as err: diff --git a/src/sentry/integrations/utils/identities.py b/src/sentry/integrations/utils/identities.py index 7d93614d5ddc6..e42eb8e09434c 100644 --- a/src/sentry/integrations/utils/identities.py +++ b/src/sentry/integrations/utils/identities.py @@ -1,6 +1,7 @@ import logging from collections.abc import Iterable, Mapping +from django.contrib.auth.models import AnonymousUser from django.http import Http404 from sentry.constants import ObjectStatus @@ -19,17 +20,17 @@ @control_silo_function def get_identity_or_404( provider: ExternalProviders, - user: User, + user: User | AnonymousUser, integration_id: int, organization_id: int | None = None, ) -> tuple[RpcOrganization, Integration, IdentityProvider]: + """For endpoints, short-circuit with a 404 if we cannot find everything we need.""" logger_metadata = { "integration_provider": provider, "integration_id": integration_id, "organization_id": organization_id, "user_id": user.id, } - """For endpoints, short-circuit with a 404 if we cannot find everything we need.""" if provider not in EXTERNAL_PROVIDERS: _logger.info("provider is not part of supported external providers", extra=logger_metadata) raise Http404 diff --git a/src/sentry/integrations/utils/metrics.py b/src/sentry/integrations/utils/metrics.py new file mode 100644 index 0000000000000..83c2bc755017b --- /dev/null +++ b/src/sentry/integrations/utils/metrics.py @@ -0,0 +1,269 @@ +import 
itertools +import logging +from abc import ABC, abstractmethod +from collections.abc import Mapping +from dataclasses import dataclass +from enum import Enum +from types import TracebackType +from typing import Any, Self + +from django.conf import settings + +from sentry.integrations.base import IntegrationDomain +from sentry.utils import metrics + +logger = logging.getLogger(__name__) + + +class EventLifecycleOutcome(Enum): + STARTED = "STARTED" + HALTED = "HALTED" + SUCCESS = "SUCCESS" + FAILURE = "FAILURE" + + def __str__(self) -> str: + return self.value.lower() + + +class EventLifecycleMetric(ABC): + """Information about an event to be measured. + + This class is intended to be used across different integrations that share the + same business concern. Generally a subclass would represent one business concern + (such as MessagingInteractionEvent, which extends this class and is used in the + `slack`, `msteams`, and `discord` integration packages). + """ + + @abstractmethod + def get_key(self, outcome: EventLifecycleOutcome) -> str: + """Construct the metrics key that will represent this event. + + It is recommended to implement this method by delegating to a + `get_standard_key` call. + """ + + raise NotImplementedError + + @staticmethod + def get_standard_key( + domain: str, + integration_name: str, + interaction_type: str, + outcome: EventLifecycleOutcome, + *extra_tokens: str, + ) -> str: + """Construct a key with a standard cross-integration structure. + + Implementations of `get_key` generally should delegate to this method in + order to ensure consistency across integrations. + + :param domain: a constant string representing the category of business + concern or vertical domain that the integration belongs + to (e.g., "messaging" or "source_code_management") + :param integration_name: the name of the integration (generally should match a + package name from `sentry.integrations`) + :param interaction_type: a key representing the category of interaction being + captured (generally should come from an Enum class) + :param outcome: the object representing the event outcome + :param extra_tokens: additional tokens to add extra context, if needed + :return: a key to represent the event in metrics or logging + """ + + # For now, universally include an "slo" token to distinguish from any + # previously existing metrics keys. + # TODO: Merge with or replace existing keys? + root_tokens = ("sentry", "integrations", "slo") + + specific_tokens = (domain, integration_name, interaction_type, str(outcome)) + return ".".join(itertools.chain(root_tokens, specific_tokens, extra_tokens)) + + def get_extras(self) -> Mapping[str, Any]: + """Get extra data to log.""" + return {} + + def capture(self, assume_success: bool = True) -> "EventLifecycle": + """Open a context to measure the event.""" + return EventLifecycle(self, assume_success) + + +class EventLifecycle: + """Context object that measures an event that may succeed or fail. + + The `assume_success` attribute can be set to False for events where exiting the + context may or may not represent a failure condition. In this state, + if the program exits the context without `record_success` or `record_failure` + being called first, it will log the outcome "halted" in place of "success" or + "failure". "Halted" could mean that we received an ambiguous exception from a + remote service that may have been caused either by a bug or user error, or merely + that inserting `record_failure` calls is still a dev to-do item. 
+ """ + + def __init__(self, payload: EventLifecycleMetric, assume_success: bool = True) -> None: + self.payload = payload + self.assume_success = assume_success + self._state: EventLifecycleOutcome | None = None + self._extra = dict(self.payload.get_extras()) + + def add_extra(self, name: str, value: Any) -> None: + """Add a value to logged "extra" data. + + Overwrites the name with a new value if it was previously used. + """ + self._extra[name] = value + + def record_event( + self, outcome: EventLifecycleOutcome, exc: BaseException | None = None + ) -> None: + """Record a starting or halting event. + + This method is public so that unit tests may mock it, but it should be called + only by the other "record" methods. + """ + + key = self.payload.get_key(outcome) + + sample_rate = ( + 1.0 if outcome == EventLifecycleOutcome.FAILURE else settings.SENTRY_METRICS_SAMPLE_RATE + ) + metrics.incr(key, sample_rate=sample_rate) + + if outcome == EventLifecycleOutcome.FAILURE: + logger.error(key, extra=self._extra, exc_info=exc) + + @staticmethod + def _report_flow_error(message) -> None: + logger.error("EventLifecycle flow error: %s", message) + + def _terminate( + self, new_state: EventLifecycleOutcome, exc: BaseException | None = None + ) -> None: + if self._state is None: + self._report_flow_error("The lifecycle has not yet been entered") + if self._state != EventLifecycleOutcome.STARTED: + self._report_flow_error("The lifecycle has already been exited") + self._state = new_state + self.record_event(new_state, exc) + + def record_success(self) -> None: + """Record that the event halted successfully. + + Exiting the context without raising an exception will call this method + automatically, unless the context was initialized with `assume_success` set + to False. + """ + + self._terminate(EventLifecycleOutcome.SUCCESS) + + def record_failure( + self, exc: BaseException | None = None, extra: dict[str, Any] | None = None + ) -> None: + """Record that the event halted in failure. Additional data may be passed + to be logged. + + There is no need to call this method directly if an exception is raised from + inside the context. It will be called automatically when exiting the context + on an exception. + + This method should be called if we return a soft failure from the event. For + example, if we receive an error status from a remote service and gracefully + display an error response to the user, it would be necessary to manually call + `record_failure` on the context object. + """ + + if extra: + self._extra.update(extra) + self._terminate(EventLifecycleOutcome.FAILURE, exc) + + def record_halt(self, exc: BaseException | None = None) -> None: + """Record that the event halted in an ambiguous state. + + This method can be called in response to a sufficiently ambiguous exception + or other error condition, where it may have been caused by a user error or + other expected condition, but there is some substantial chance that it + represents a bug. + + Such cases usually mean that we want to: + (1) document the ambiguity; + (2) monitor it for sudden spikes in frequency; and + (3) investigate whether more detailed error information is available + (but probably later, as a backlog item). 
+        """
+
+        self._terminate(EventLifecycleOutcome.HALTED, exc)
+
+    def __enter__(self) -> Self:
+        if self._state is not None:
+            self._report_flow_error("The lifecycle has already been entered")
+        self._state = EventLifecycleOutcome.STARTED
+        self.record_event(EventLifecycleOutcome.STARTED)
+        return self
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType,
+    ) -> None:
+        if self._state != EventLifecycleOutcome.STARTED:
+            # The context called record_success or record_failure before closing,
+            # so we can just exit quietly.
+            return
+
+        if exc_value is not None:
+            # We were forced to exit the context by a raised exception.
+            self.record_failure(exc_value)
+        else:
+            # We exited the context without record_success or record_failure being
+            # called. Assume success if we were told to do so. Else, log a halt
+            # indicating that there is no clear success or failure signal.
+            self._terminate(
+                EventLifecycleOutcome.SUCCESS
+                if self.assume_success
+                else EventLifecycleOutcome.HALTED
+            )
+
+
+class IntegrationPipelineViewType(Enum):
+    """A specific step in an integration's pipeline that is not a static page."""
+
+    # IdentityProviderPipeline
+    IDENTITY_LOGIN = "IDENTITY_LOGIN"
+    IDENTITY_LINK = "IDENTITY_LINK"
+
+    # GitHub
+    OAUTH_LOGIN = "OAUTH_LOGIN"
+    GITHUB_INSTALLATION = "GITHUB_INSTALLATION"
+
+    # Bitbucket
+    VERIFY_INSTALLATION = "VERIFY_INSTALLATION"
+
+    # Bitbucket Server
+    # OAUTH_LOGIN = "OAUTH_LOGIN"
+    OAUTH_CALLBACK = "OAUTH_CALLBACK"
+
+    # Azure DevOps
+    ACCOUNT_CONFIG = "ACCOUNT_CONFIG"
+
+    def __str__(self) -> str:
+        return self.value.lower()
+
+
+@dataclass
+class IntegrationPipelineViewEvent(EventLifecycleMetric):
+    """An instance to be recorded of a user going through an integration pipeline view (step)."""
+
+    interaction_type: IntegrationPipelineViewType
+    domain: IntegrationDomain
+    provider_key: str
+
+    def get_key(self, outcome: EventLifecycleOutcome) -> str:
+        # not reporting as SLOs
+        root_tokens = ("sentry", "integrations", "installation")
+        specific_tokens = (
+            self.domain,
+            self.provider_key,
+            str(self.interaction_type),
+            str(outcome),
+        )
+
+        return ".".join(itertools.chain(root_tokens, specific_tokens))
diff --git a/src/sentry/integrations/utils/stacktrace_link.py b/src/sentry/integrations/utils/stacktrace_link.py
index 9ba5efc4cb08d..5aff6b93e60c9 100644
--- a/src/sentry/integrations/utils/stacktrace_link.py
+++ b/src/sentry/integrations/utils/stacktrace_link.py
@@ -3,6 +3,7 @@
 import logging
 from typing import TYPE_CHECKING, NotRequired, TypedDict

+from sentry.constants import ObjectStatus
 from sentry.integrations.models.repository_project_path_config import RepositoryProjectPathConfig
 from sentry.integrations.services.integration import integration_service
 from sentry.integrations.source_code_management.repository import RepositoryIntegration
@@ -30,7 +31,7 @@ def get_link(
     result: RepositoryLinkOutcome = {}

     integration = integration_service.get_integration(
-        organization_integration_id=config.organization_integration_id
+        organization_integration_id=config.organization_integration_id, status=ObjectStatus.ACTIVE
     )
     if not integration:
         result["error"] = "integration_not_found"
diff --git a/src/sentry/integrations/utils/sync.py b/src/sentry/integrations/utils/sync.py
index a97c6dd78faca..a2ac81f567035 100644
--- a/src/sentry/integrations/utils/sync.py
+++ b/src/sentry/integrations/utils/sync.py
@@ -5,6 +5,8 @@
 from typing import TYPE_CHECKING

 from sentry import features
+from
sentry.integrations.models.integration import Integration +from sentry.integrations.services.assignment_source import AssignmentSource from sentry.integrations.services.integration import integration_service from sentry.integrations.tasks.sync_assignee_outbound import sync_assignee_outbound from sentry.models.group import Group @@ -20,7 +22,7 @@ @region_silo_function def where_should_sync( - integration: RpcIntegration, + integration: RpcIntegration | Integration, key: str, organization_id: int | None = None, ) -> Sequence[Organization]: @@ -62,9 +64,9 @@ def get_user_id(projects_by_user: Mapping[int, Sequence[int]], group: Group) -> @region_silo_function def sync_group_assignee_inbound( - integration: RpcIntegration, + integration: RpcIntegration | Integration, email: str | None, - external_issue_key: str, + external_issue_key: str | None, assign: bool = True, ) -> Sequence[Group]: """ @@ -92,7 +94,11 @@ def sync_group_assignee_inbound( if not assign: for group in affected_groups: - GroupAssignee.objects.deassign(group) + GroupAssignee.objects.deassign( + group, + assignment_source=AssignmentSource.from_integration(integration), + ) + return affected_groups users = user_service.get_many_by_email(emails=[email], is_verified=True) @@ -104,14 +110,23 @@ def sync_group_assignee_inbound( user_id = get_user_id(projects_by_user, group) user = users_by_id.get(user_id) if user: - GroupAssignee.objects.assign(group, user) + GroupAssignee.objects.assign( + group, + user, + assignment_source=AssignmentSource.from_integration(integration), + ) groups_assigned.append(group) else: logger.info("assignee-not-found-inbound", extra=log_context) return groups_assigned -def sync_group_assignee_outbound(group: Group, user_id: int | None, assign: bool = True) -> None: +def sync_group_assignee_outbound( + group: Group, + user_id: int | None, + assign: bool = True, + assignment_source: AssignmentSource | None = None, +) -> None: from sentry.models.grouplink import GroupLink external_issue_ids = GroupLink.objects.filter( @@ -120,5 +135,12 @@ def sync_group_assignee_outbound(group: Group, user_id: int | None, assign: bool for external_issue_id in external_issue_ids: sync_assignee_outbound.apply_async( - kwargs={"external_issue_id": external_issue_id, "user_id": user_id, "assign": assign} + kwargs={ + "external_issue_id": external_issue_id, + "user_id": user_id, + "assign": assign, + "assignment_source_dict": assignment_source.to_dict() + if assignment_source + else None, + } ) diff --git a/src/sentry/integrations/vsts/client.py b/src/sentry/integrations/vsts/client.py index 7bab65d8fda58..598d53f5310c4 100644 --- a/src/sentry/integrations/vsts/client.py +++ b/src/sentry/integrations/vsts/client.py @@ -8,6 +8,7 @@ from requests import PreparedRequest from rest_framework.response import Response +from sentry.constants import ObjectStatus from sentry.exceptions import InvalidIdentity from sentry.integrations.base import IntegrationFeatureNotImplementedError from sentry.integrations.client import ApiClient @@ -207,7 +208,7 @@ def _refresh_auth_if_expired(self): from sentry.integrations.vsts.integration import VstsIntegrationProvider integration = integration_service.get_integration( - organization_integration_id=self.org_integration_id + organization_integration_id=self.org_integration_id, status=ObjectStatus.ACTIVE ) # check if integration has migrated to new identity provider migration_version = integration.metadata.get("integration_migration_version", 0) diff --git a/src/sentry/integrations/vsts/integration.py 
b/src/sentry/integrations/vsts/integration.py
index 831f39458bf64..b6b84563f5921 100644
--- a/src/sentry/integrations/vsts/integration.py
+++ b/src/sentry/integrations/vsts/integration.py
@@ -42,6 +42,7 @@
     IntegrationProviderError,
 )
 from sentry.silo.base import SiloMode
+from sentry.utils import metrics
 from sentry.utils.http import absolute_uri
 from sentry.web.helpers import render_to_response
@@ -527,7 +528,24 @@ def build_integration(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
                 status=ObjectStatus.ACTIVE,
             ).exists()

+            metrics.incr(
+                "integrations.migration.vsts_integration_migration",
+                sample_rate=1.0,
+            )
+        except (IntegrationModel.DoesNotExist, AssertionError, KeyError):
+            logger.warning(
+                "vsts.build_integration.error",
+                extra={
+                    "organization_id": (
+                        self.pipeline.organization.id
+                        if self.pipeline and self.pipeline.organization
+                        else None
+                    ),
+                    "user_id": user["id"],
+                    "account": account,
+                },
+            )
         subscription_id, subscription_secret = self.create_subscription(
             base_url=base_url, oauth_data=oauth_data
         )
@@ -564,7 +582,9 @@ def create_subscription(
             raise IntegrationProviderError(
                 "Sentry cannot communicate with this Azure DevOps organization.\n"
                 "Please ensure third-party app access via OAuth is enabled \n"
-                "in the organization's security policy."
+                "in the organization's security policy. \n"
+                "The user installing the integration must have project administrator permissions. \n"
+                "The user installing might also need admin permissions depending on the organization's security policy."
             )
         raise
diff --git a/src/sentry/integrations/vsts/issues.py b/src/sentry/integrations/vsts/issues.py
index 7e897f813d13c..9abfd9b3feb86 100644
--- a/src/sentry/integrations/vsts/issues.py
+++ b/src/sentry/integrations/vsts/issues.py
@@ -6,6 +6,7 @@
 from mistune import markdown
 from rest_framework.response import Response

+from sentry.constants import ObjectStatus
 from sentry.integrations.mixins import ResolveSyncAction
 from sentry.integrations.mixins.issues import IssueSyncIntegration
 from sentry.integrations.services.integration import integration_service
@@ -361,7 +362,7 @@ def search_issues(self, query: str | None, **kwargs) -> dict[str, Any]:
         client = self.get_client()

         integration = integration_service.get_integration(
-            integration_id=self.org_integration.integration_id
+            integration_id=self.org_integration.integration_id, status=ObjectStatus.ACTIVE
         )
         if not integration:
             raise IntegrationError("Azure DevOps integration not found")
diff --git a/src/sentry/integrations/vsts/webhooks.py b/src/sentry/integrations/vsts/webhooks.py
index 43192af80c210..d0d6ea877fd48 100644
--- a/src/sentry/integrations/vsts/webhooks.py
+++ b/src/sentry/integrations/vsts/webhooks.py
@@ -12,9 +12,10 @@
 from sentry.api.api_owners import ApiOwner
 from sentry.api.api_publish_status import ApiPublishStatus
 from sentry.api.base import Endpoint, region_silo_endpoint
+from sentry.constants import ObjectStatus
 from sentry.integrations.mixins.issues import IssueSyncIntegration
 from sentry.integrations.services.integration import integration_service
-from sentry.integrations.utils import sync_group_assignee_inbound
+from sentry.integrations.utils.sync import sync_group_assignee_inbound
 from sentry.utils.email import parse_email

 if TYPE_CHECKING:
@@ -52,7 +53,7 @@ def post(self, request: Request, *args: Any, **kwargs: Any) -> Response:
         # https://docs.microsoft.com/en-us/azure/devops/service-hooks/events?view=azure-devops#workitem.updated
         if event_type == "workitem.updated":
             integration =
integration_service.get_integration( - provider=PROVIDER_KEY, external_id=external_id + provider=PROVIDER_KEY, external_id=external_id, status=ObjectStatus.ACTIVE ) if integration is None: logger.info( diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py index 35285b823c058..161a1f292365c 100644 --- a/src/sentry/interfaces/contexts.py +++ b/src/sentry/interfaces/contexts.py @@ -3,6 +3,7 @@ import string from typing import Any, ClassVar, TypeVar +import sentry_sdk from django.utils.encoding import force_str from sentry.interfaces.base import Interface @@ -232,7 +233,12 @@ def to_python(cls, data, **kwargs): @classmethod def normalize_context(cls, alias, data): ctx_type = data.get("type", alias) - ctx_cls = context_types.get(ctx_type, DefaultContextType) + try: + ctx_cls = context_types.get(ctx_type, DefaultContextType) + except TypeError: + # Debugging information for SENTRY-FOR-SENTRY-2NH2. + sentry_sdk.set_context("ctx_type", ctx_type) + raise return ctx_cls(alias, data) def iter_contexts(self): diff --git a/src/sentry/interfaces/user.py b/src/sentry/interfaces/user.py index 58b7afe5b5503..0923ddcc6ae1b 100644 --- a/src/sentry/interfaces/user.py +++ b/src/sentry/interfaces/user.py @@ -1,12 +1,23 @@ __all__ = ("User",) +from typing import Any, TypedDict + from sentry.interfaces.base import Interface from sentry.interfaces.geo import Geo from sentry.utils.json import prune_empty_keys from sentry.web.helpers import render_to_string +class EventUserApiContext(TypedDict, total=False): + id: str | None + email: str | None + username: str | None + ip_address: str | None + name: str | None + data: dict[str, Any] | None + + class User(Interface): """ An interface which describes the authenticated User for a request. @@ -51,7 +62,7 @@ def to_json(self): } ) - def get_api_context(self, is_public=False, platform=None): + def get_api_context(self, is_public=False, platform=None) -> EventUserApiContext: return { "id": self.id, "email": self.email, diff --git a/src/sentry/issues/attributes.py b/src/sentry/issues/attributes.py index 22ab70d49eab6..902a113961463 100644 --- a/src/sentry/issues/attributes.py +++ b/src/sentry/issues/attributes.py @@ -252,7 +252,7 @@ def process_update_fields(updated_fields) -> set[str]: # we'll need to assume any of the attributes are updated in that case updated_fields = {"all"} else: - VALID_FIELDS = {"status", "substatus", "num_comments"} + VALID_FIELDS = {"status", "substatus", "num_comments", "priority", "first_release"} updated_fields = VALID_FIELDS.intersection(updated_fields or ()) if updated_fields: _log_group_attributes_changed(Operation.UPDATED, "group", "-".join(sorted(updated_fields))) diff --git a/src/sentry/issues/endpoints/__init__.py b/src/sentry/issues/endpoints/__init__.py index 50e5e852f05c3..36c255daf1830 100644 --- a/src/sentry/issues/endpoints/__init__.py +++ b/src/sentry/issues/endpoints/__init__.py @@ -9,6 +9,8 @@ from .group_participants import GroupParticipantsEndpoint from .group_similar_issues import GroupSimilarIssuesEndpoint from .group_similar_issues_embeddings import GroupSimilarIssuesEmbeddingsEndpoint +from .group_tombstone import GroupTombstoneEndpoint +from .group_tombstone_details import GroupTombstoneDetailsEndpoint from .organization_eventid import EventIdLookupEndpoint from .organization_group_index import OrganizationGroupIndexEndpoint from .organization_group_index_stats import OrganizationGroupIndexStatsEndpoint @@ -21,6 +23,7 @@ from .project_group_index import ProjectGroupIndexEndpoint from 
.project_group_stats import ProjectGroupStatsEndpoint from .project_stacktrace_link import ProjectStacktraceLinkEndpoint +from .related_issues import RelatedIssuesEndpoint from .shared_group_details import SharedGroupDetailsEndpoint from .source_map_debug import SourceMapDebugEndpoint from .team_groups_old import TeamGroupsOldEndpoint @@ -39,6 +42,8 @@ "GroupParticipantsEndpoint", "GroupSimilarIssuesEmbeddingsEndpoint", "GroupSimilarIssuesEndpoint", + "GroupTombstoneDetailsEndpoint", + "GroupTombstoneEndpoint", "OrganizationGroupIndexEndpoint", "OrganizationGroupIndexStatsEndpoint", "OrganizationGroupSearchViewsEndpoint", @@ -49,6 +54,7 @@ "ProjectGroupIndexEndpoint", "ProjectGroupStatsEndpoint", "ProjectStacktraceLinkEndpoint", + "RelatedIssuesEndpoint", "SharedGroupDetailsEndpoint", "ShortIdLookupEndpoint", "SourceMapDebugEndpoint", diff --git a/src/sentry/issues/endpoints/group_details.py b/src/sentry/issues/endpoints/group_details.py index cfb080d49b60b..40117285948b6 100644 --- a/src/sentry/issues/endpoints/group_details.py +++ b/src/sentry/issues/endpoints/group_details.py @@ -22,7 +22,6 @@ ) from sentry.api.serializers import GroupSerializer, GroupSerializerSnuba, serialize from sentry.api.serializers.models.group_stream import get_actions, get_available_issue_plugins -from sentry.api.serializers.models.platformexternalissue import PlatformExternalIssueSerializer from sentry.api.serializers.models.plugin import PluginSerializer from sentry.api.serializers.models.team import TeamSerializer from sentry.integrations.api.serializers.models.external_issue import ExternalIssueSerializer @@ -38,10 +37,13 @@ from sentry.models.groupowner import get_owner_details from sentry.models.groupseen import GroupSeen from sentry.models.groupsubscription import GroupSubscriptionManager -from sentry.models.platformexternalissue import PlatformExternalIssue from sentry.models.team import Team from sentry.models.userreport import UserReport from sentry.plugins.base import plugins +from sentry.sentry_apps.api.serializers.platform_external_issue import ( + PlatformExternalIssueSerializer, +) +from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue from sentry.tasks.post_process import fetch_buffered_group_stats from sentry.types.ratelimit import RateLimit, RateLimitCategory from sentry.users.services.user.service import user_service @@ -382,7 +384,7 @@ def put(self, request: Request, group) -> Response: ) return Response(e.body, status=e.status_code) - def delete(self, request: Request, group) -> Response: + def delete(self, request: Request, group: Group) -> Response: """ Remove an Issue ``````````````` @@ -394,7 +396,11 @@ def delete(self, request: Request, group) -> Response: """ from sentry.utils import snuba - if group.issue_category != GroupCategory.ERROR: + issue_platform_deletion_allowed = features.has( + "organizations:issue-platform-deletion", group.project.organization, actor=request.user + ) + + if group.issue_category != GroupCategory.ERROR and not issue_platform_deletion_allowed: raise ValidationError(detail="Only error issues can be deleted.") try: diff --git a/src/sentry/issues/endpoints/group_event_details.py b/src/sentry/issues/endpoints/group_event_details.py index bd8521b6df6c8..fffb47f40ea3b 100644 --- a/src/sentry/issues/endpoints/group_event_details.py +++ b/src/sentry/issues/endpoints/group_event_details.py @@ -4,6 +4,8 @@ from collections.abc import Sequence from django.contrib.auth.models import AnonymousUser +from drf_spectacular.types import 
OpenApiTypes +from drf_spectacular.utils import OpenApiParameter, extend_schema from rest_framework.request import Request from rest_framework.response import Response from snuba_sdk import Condition, Or @@ -17,8 +19,20 @@ from sentry.api.helpers.group_index import parse_and_convert_issue_search_query from sentry.api.helpers.group_index.validators import ValidationError from sentry.api.serializers import EventSerializer, serialize +from sentry.apidocs.constants import ( + RESPONSE_BAD_REQUEST, + RESPONSE_FORBIDDEN, + RESPONSE_NOT_FOUND, + RESPONSE_UNAUTHORIZED, +) +from sentry.apidocs.examples.event_examples import EventExamples +from sentry.apidocs.parameters import GlobalParams, IssueParams +from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.eventstore.models import Event, GroupEvent -from sentry.issues.endpoints.project_event_details import wrap_event_response +from sentry.issues.endpoints.project_event_details import ( + GroupEventDetailsResponse, + wrap_event_response, +) from sentry.issues.grouptype import GroupCategory from sentry.models.environment import Environment from sentry.models.group import Group @@ -99,10 +113,11 @@ def issue_search_query_to_conditions( return snql_conditions +@extend_schema(tags=["Events"]) @region_silo_endpoint class GroupEventDetailsEndpoint(GroupEndpoint): publish_status = { - "GET": ApiPublishStatus.UNKNOWN, + "GET": ApiPublishStatus.PUBLIC, } enforce_rate_limit = True rate_limits = { @@ -113,14 +128,36 @@ class GroupEventDetailsEndpoint(GroupEndpoint): } } + @extend_schema( + operation_id="Retrieve an Issue Event", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + IssueParams.ISSUES_OR_GROUPS, + IssueParams.ISSUE_ID, + GlobalParams.ENVIRONMENT, + OpenApiParameter( + name="event_id", + type=OpenApiTypes.STR, + location=OpenApiParameter.PATH, + description="The ID of the event to retrieve, or 'latest', 'oldest', or 'recommended'.", + required=True, + enum=["latest", "oldest", "recommended"], + ), + ], + responses={ + 200: inline_sentry_response_serializer( + "IssueEventDetailsResponse", GroupEventDetailsResponse + ), + 400: RESPONSE_BAD_REQUEST, + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + examples=EventExamples.GROUP_EVENT_DETAILS, + ) def get(self, request: Request, group: Group, event_id: str) -> Response: """ - Retrieve the latest(most recent), oldest, or most helpful Event for an Issue - `````````````````````````````````````` - - Retrieves the details of the latest/oldest/most-helpful event for an issue. - - :pparam string group_id: the ID of the issue + Retrieves the details of an issue event. 
""" environments = [e for e in get_environments(request, group.project.organization)] environment_names = [e.name for e in environments] @@ -133,7 +170,7 @@ def get(self, request: Request, group: Group, event_id: str) -> Response: elif event_id == "oldest": with metrics.timer("api.endpoints.group_event_details.get", tags={"type": "oldest"}): event = group.get_oldest_event_for_environments(environment_names) - elif event_id in ("helpful", "recommended"): + elif event_id == "recommended": query = request.GET.get("query") if query: with metrics.timer( diff --git a/src/sentry/issues/endpoints/group_events.py b/src/sentry/issues/endpoints/group_events.py index ba559d1ea1b9c..dc3e23c1fe8c3 100644 --- a/src/sentry/issues/endpoints/group_events.py +++ b/src/sentry/issues/endpoints/group_events.py @@ -5,6 +5,8 @@ from typing import TYPE_CHECKING, Any from django.utils import timezone +from drf_spectacular.types import OpenApiTypes +from drf_spectacular.utils import OpenApiParameter, extend_schema from rest_framework.exceptions import ParseError from rest_framework.request import Request from rest_framework.response import Response @@ -19,7 +21,17 @@ from sentry.api.helpers.events import get_direct_hit_response, get_query_builder_for_group from sentry.api.paginator import GenericOffsetPaginator from sentry.api.serializers import EventSerializer, SimpleEventSerializer, serialize +from sentry.api.serializers.models.event import SimpleEventSerializerResponse from sentry.api.utils import get_date_range_from_params +from sentry.apidocs.constants import ( + RESPONSE_BAD_REQUEST, + RESPONSE_FORBIDDEN, + RESPONSE_NOT_FOUND, + RESPONSE_UNAUTHORIZED, +) +from sentry.apidocs.examples.event_examples import EventExamples +from sentry.apidocs.parameters import GlobalParams, IssueParams +from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.eventstore.models import Event from sentry.exceptions import InvalidParams, InvalidSearchQuery from sentry.search.events.types import ParamsType @@ -38,29 +50,60 @@ class GroupEventsError(Exception): pass +@extend_schema(tags=["Events"]) @region_silo_endpoint class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin): publish_status = { - "GET": ApiPublishStatus.UNKNOWN, + "GET": ApiPublishStatus.PUBLIC, } owner = ApiOwner.ISSUES + @extend_schema( + operation_id="List an Issue's Events", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + IssueParams.ISSUES_OR_GROUPS, + IssueParams.ISSUE_ID, + GlobalParams.START, + GlobalParams.END, + GlobalParams.STATS_PERIOD, + GlobalParams.ENVIRONMENT, + OpenApiParameter( + name="full", + type=OpenApiTypes.BOOL, + location=OpenApiParameter.QUERY, + description="Specify true to include the full event body, including the stacktrace, in the event payload.", + required=False, + ), + OpenApiParameter( + name="sample", + type=OpenApiTypes.BOOL, + location=OpenApiParameter.QUERY, + description="Return events in pseudo-random order. 
This is deterministic so an identical query will always return the same events in the same order.", + required=False, + ), + OpenApiParameter( + name="query", + location=OpenApiParameter.QUERY, + type=OpenApiTypes.STR, + description="An optional search query for filtering events.", + required=False, + ), + ], + responses={ + 200: inline_sentry_response_serializer( + "GroupEventsResponseDict", list[SimpleEventSerializerResponse] + ), + 400: RESPONSE_BAD_REQUEST, + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + examples=EventExamples.GROUP_EVENTS_SIMPLE, + ) def get(self, request: Request, group: Group) -> Response: """ - List an Issue's Events - `````````````````````` - - This endpoint lists an issue's events. - :qparam bool full: if this is set to true then the event payload will - include the full event body, including the stacktrace. - Set to 1 to enable. - - :qparam bool sample: return events in pseudo-random order. This is deterministic, - same query will return the same events in the same order. - - :pparam string issue_id: the ID of the issue to retrieve. - - :auth: required + Return a list of error events bound to an issue. """ try: diff --git a/src/sentry/issues/endpoints/group_hashes.py b/src/sentry/issues/endpoints/group_hashes.py index 3a881243f9c7a..afb6a1d4320f9 100644 --- a/src/sentry/issues/endpoints/group_hashes.py +++ b/src/sentry/issues/endpoints/group_hashes.py @@ -18,6 +18,7 @@ @region_silo_endpoint class GroupHashesEndpoint(GroupEndpoint): publish_status = { + "PUT": ApiPublishStatus.PRIVATE, "DELETE": ApiPublishStatus.PRIVATE, "GET": ApiPublishStatus.PRIVATE, } @@ -91,6 +92,41 @@ def delete(self, request: Request, group) -> Response: return Response(status=202) + def put(self, request: Request, group) -> Response: + """ + Perform an unmerge by reassigning events with hash values corresponding to the given + grouphash ids from being part of the given group to being part of a new group. + + Note that if multiple grouphash ids are given, all their corresponding events will end up in + a single new group together, rather than each hash's events ending up in their own new group.
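A hedged sketch of invoking this unmerge endpoint: the handler reads grouphash ids from repeated `id` query parameters even though the method is PUT. The route is assumed to mirror the existing GET/DELETE hashes route; all values below are placeholders:

    # Reassign events for two hashes out of issue 12345 into a new group.
    # Expect 202 on success and 409 if the hashes are already being unmerged.
    import requests

    resp = requests.put(
        "https://sentry.io/api/0/organizations/my-org/issues/12345/hashes/",
        params={"id": ["c0ffee00aabb", "deadbeef1122"]},  # repeated ?id=... params
        headers={"Authorization": "Bearer YOUR_API_TOKEN"},
        timeout=10,
    )
    print(resp.status_code)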
+ """ + grouphash_ids = request.GET.getlist("id") + if not grouphash_ids: + return Response() + + grouphashes = list( + GroupHash.objects.filter( + project_id=group.project_id, group=group.id, hash__in=grouphash_ids + ) + .exclude(state=GroupHash.State.LOCKED_IN_MIGRATION) + .values_list("hash", flat=True) + ) + if not grouphashes: + return Response({"detail": "Already being unmerged"}, status=409) + + metrics.incr( + "grouping.unmerge_issues", + sample_rate=1.0, + # We assume that if someone's merged groups, they were all from the same platform + tags={"platform": group.platform or "unknown", "sdk": group.sdk or "unknown"}, + ) + + unmerge.delay( + group.project_id, group.id, None, grouphashes, request.user.id if request.user else None + ) + + return Response(status=202) + def __handle_results(self, project_id, group_id, user, results): return [self.__handle_result(user, project_id, group_id, result) for result in results] diff --git a/src/sentry/issues/endpoints/group_notes_details.py b/src/sentry/issues/endpoints/group_notes_details.py index 65fb6012f2eed..7097802a0f0f0 100644 --- a/src/sentry/issues/endpoints/group_notes_details.py +++ b/src/sentry/issues/endpoints/group_notes_details.py @@ -84,7 +84,7 @@ def put(self, request: Request, group, note_id) -> Response: if serializer.is_valid(): payload = serializer.validated_data - # TODO adding mentions to a note doesn't send notifications. Should it? + # TODO: adding mentions to a note doesn't send notifications. Should it? # Remove mentions as they shouldn't go into the database payload.pop("mentions", []) diff --git a/src/sentry/api/endpoints/group_tombstone.py b/src/sentry/issues/endpoints/group_tombstone.py similarity index 100% rename from src/sentry/api/endpoints/group_tombstone.py rename to src/sentry/issues/endpoints/group_tombstone.py diff --git a/src/sentry/api/endpoints/group_tombstone_details.py b/src/sentry/issues/endpoints/group_tombstone_details.py similarity index 100% rename from src/sentry/api/endpoints/group_tombstone_details.py rename to src/sentry/issues/endpoints/group_tombstone_details.py diff --git a/src/sentry/issues/endpoints/project_event_details.py b/src/sentry/issues/endpoints/project_event_details.py index 83a716da203f4..518b20d67083b 100644 --- a/src/sentry/issues/endpoints/project_event_details.py +++ b/src/sentry/issues/endpoints/project_event_details.py @@ -10,15 +10,21 @@ from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectEndpoint from sentry.api.serializers import IssueEventSerializer, serialize +from sentry.api.serializers.models.event import IssueEventSerializerResponse from sentry.eventstore.models import Event, GroupEvent +class GroupEventDetailsResponse(IssueEventSerializerResponse): + nextEventID: str | None + previousEventID: str | None + + def wrap_event_response( request_user: Any, event: Event | GroupEvent, environments: list[str], include_full_release_data: bool = False, -): +) -> GroupEventDetailsResponse: event_data = serialize( event, request_user, diff --git a/src/sentry/issues/endpoints/project_events.py b/src/sentry/issues/endpoints/project_events.py index b236c19271310..63e830ee08fde 100644 --- a/src/sentry/issues/endpoints/project_events.py +++ b/src/sentry/issues/endpoints/project_events.py @@ -2,6 +2,7 @@ from functools import partial from django.utils import timezone +from drf_spectacular.utils import OpenApiParameter, extend_schema from rest_framework.request import Request from rest_framework.response import Response @@ -11,16 +12,22 @@ 
from sentry.api.base import region_silo_endpoint from sentry.api.bases.project import ProjectEndpoint from sentry.api.serializers import EventSerializer, SimpleEventSerializer, serialize +from sentry.api.serializers.models.event import SimpleEventSerializerResponse +from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED +from sentry.apidocs.examples.event_examples import EventExamples +from sentry.apidocs.parameters import CursorQueryParam, GlobalParams +from sentry.apidocs.utils import inline_sentry_response_serializer from sentry.models.project import Project from sentry.snuba.events import Columns from sentry.types.ratelimit import RateLimit, RateLimitCategory +@extend_schema(tags=["Events"]) @region_silo_endpoint class ProjectEventsEndpoint(ProjectEndpoint): owner = ApiOwner.ISSUES publish_status = { - "GET": ApiPublishStatus.EXPERIMENTAL, + "GET": ApiPublishStatus.PUBLIC, } enforce_rate_limit = True rate_limits = { @@ -31,26 +38,42 @@ class ProjectEventsEndpoint(ProjectEndpoint): } } + @extend_schema( + operation_id="List a Project's Error Events", + parameters=[ + GlobalParams.ORG_ID_OR_SLUG, + GlobalParams.PROJECT_ID_OR_SLUG, + CursorQueryParam, + OpenApiParameter( + name="full", + description="If this is set to true, the event payload will include the full event body, including the stacktrace. Set to 1 to enable.", + required=False, + type=bool, + location="query", + default=False, + ), + OpenApiParameter( + name="sample", + description="Return events in pseudo-random order. This is deterministic so an identical query will always return the same events in the same order.", + required=False, + type=bool, + location="query", + default=False, + ), + ], + responses={ + 200: inline_sentry_response_serializer( + "ProjectEventsResponseDict", list[SimpleEventSerializerResponse] + ), + 401: RESPONSE_UNAUTHORIZED, + 403: RESPONSE_FORBIDDEN, + 404: RESPONSE_NOT_FOUND, + }, + examples=EventExamples.PROJECT_EVENTS_SIMPLE, + ) def get(self, request: Request, project: Project) -> Response: """ - List a Project's Error Events - ``````````````````````` - Return a list of events bound to a project. - - Note: This endpoint is experimental and may be removed without notice. - - :qparam bool full: if this is set to true then the event payload will - include the full event body, including the stacktrace. - Set to 1 to enable. - - :qparam bool sample: return events in pseudo-random order. This is deterministic, - same query will return the same events in the same order. - - :pparam string organization_id_or_slug: the id or slug of the organization the - groups belong to. - :pparam string project_id_or_slug: the id or slug of the project the groups - belong to. 
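Since this endpoint is now public and cursor-paginated, a small sketch of walking all pages may help; the project route and token are placeholders, and the rel="next" / results="true" Link-header convention is Sentry's usual pagination style:

    # Page through a project's error events using the cursor from the Link header.
    import requests

    url = "https://sentry.io/api/0/projects/my-org/my-project/events/"
    headers = {"Authorization": "Bearer YOUR_API_TOKEN"}
    params = {"full": "0"}  # set to "1" to include full event bodies

    while url:
        resp = requests.get(url, headers=headers, params=params, timeout=10)
        resp.raise_for_status()
        for event in resp.json():
            print(event["eventID"])
        nxt = resp.links.get("next", {})
        url = nxt["url"] if nxt.get("results") == "true" else None
        params = None  # the cursor is already encoded in the next URL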
""" from sentry.api.paginator import GenericOffsetPaginator diff --git a/src/sentry/api/endpoints/issues/related_issues.py b/src/sentry/issues/endpoints/related_issues.py similarity index 100% rename from src/sentry/api/endpoints/issues/related_issues.py rename to src/sentry/issues/endpoints/related_issues.py diff --git a/src/sentry/issues/grouptype.py b/src/sentry/issues/grouptype.py index 91887dceaa23e..37f8ab41eb16e 100644 --- a/src/sentry/issues/grouptype.py +++ b/src/sentry/issues/grouptype.py @@ -1,5 +1,6 @@ from __future__ import annotations +import importlib from collections import defaultdict from dataclasses import dataclass, field from datetime import timedelta @@ -7,6 +8,7 @@ from typing import TYPE_CHECKING, Any import sentry_sdk +from django.apps import apps from redis.client import StrictRedis from rediscluster import RedisCluster @@ -20,6 +22,10 @@ from sentry.models.organization import Organization from sentry.models.project import Project from sentry.users.models.user import User + from sentry.workflow_engine.models.detector import DetectorHandler +import logging + +logger = logging.getLogger(__name__) class GroupCategory(Enum): @@ -30,6 +36,7 @@ class GroupCategory(Enum): REPLAY = 5 FEEDBACK = 6 UPTIME = 7 + METRIC_ALERT = 8 GROUP_CATEGORIES_CUSTOM_EMAIL = ( @@ -147,8 +154,10 @@ class GroupType: enable_auto_resolve: bool = True # Allow escalation forecasts and detection enable_escalation_detection: bool = True + # Quota around many of these issue types can be created per project in a given time window creation_quota: Quota = Quota(3600, 60, 5) # default 5 per hour, sliding window of 60 seconds notification_config: NotificationConfig = NotificationConfig() + detector_handler: type[DetectorHandler] | None = None def __init_subclass__(cls: type[GroupType], **kwargs: Any) -> None: super().__init_subclass__(**kwargs) @@ -627,3 +636,19 @@ def should_create_group( else: client.expire(key, noise_config.expiry_seconds) return False + + +def import_grouptype(): + """ + Ensures that grouptype.py is imported in any apps that implement it. We do this to make sure that all implemented + grouptypes are loaded and registered. 
+ """ + for app_config in apps.get_app_configs(): + grouptype_module = f"{app_config.name}.grouptype" + try: + # Try to import the module + importlib.import_module(grouptype_module) + logger.debug("Imported module", extra={"module_name": grouptype_module}) + except ModuleNotFoundError: + # If the module is not found, continue without any issues + logger.debug("No grouptypes found for app", extra={"app": app_config.name}) diff --git a/src/sentry/issues/highlights.py b/src/sentry/issues/highlights.py index c55e9971a9b48..2e05a5aabddee 100644 --- a/src/sentry/issues/highlights.py +++ b/src/sentry/issues/highlights.py @@ -2,12 +2,15 @@ from collections.abc import Mapping from typing import TypedDict +from drf_spectacular.types import OpenApiTypes +from drf_spectacular.utils import extend_schema_field from rest_framework import serializers from sentry.models.project import Project from sentry.utils.platform_categories import BACKEND, FRONTEND, MOBILE +@extend_schema_field(field=OpenApiTypes.OBJECT) class HighlightContextField(serializers.Field): def to_internal_value(self, data): if not isinstance(data, dict): diff --git a/src/sentry/issues/occurrence_consumer.py b/src/sentry/issues/occurrence_consumer.py index 5d10c2eba2440..f2b76bf7cf0a3 100644 --- a/src/sentry/issues/occurrence_consumer.py +++ b/src/sentry/issues/occurrence_consumer.py @@ -351,7 +351,6 @@ def _process_message( with sentry_sdk.start_transaction( op="_process_message", name="issues.occurrence_consumer", - sampled=True, ) as txn: try: # Messages without payload_type default to an OCCURRENCE payload diff --git a/src/sentry/issues/ongoing.py b/src/sentry/issues/ongoing.py index 9e4bad9234342..71318a2b09231 100644 --- a/src/sentry/issues/ongoing.py +++ b/src/sentry/issues/ongoing.py @@ -20,7 +20,7 @@ def bulk_transition_group_to_ongoing( group_ids: list[int], activity_data: Mapping[str, Any] | None = None, ) -> None: - with sentry_sdk.start_span(description="groups_to_transistion") as span: + with sentry_sdk.start_span(name="groups_to_transistion") as span: # make sure we don't update the Group when its already updated by conditionally updating the Group groups_to_transistion = Group.objects.filter( id__in=group_ids, status=from_status, substatus=from_substatus @@ -28,7 +28,7 @@ def bulk_transition_group_to_ongoing( span.set_tag("group_ids", group_ids) span.set_tag("groups_to_transistion count", len(groups_to_transistion)) - with sentry_sdk.start_span(description="update_group_status"): + with sentry_sdk.start_span(name="update_group_status"): Group.objects.update_group_status( groups=groups_to_transistion, status=GroupStatus.UNRESOLVED, @@ -51,10 +51,10 @@ def bulk_transition_group_to_ongoing( sender=bulk_transition_group_to_ongoing, ) - with sentry_sdk.start_span(description="bulk_remove_groups_from_inbox"): + with sentry_sdk.start_span(name="bulk_remove_groups_from_inbox"): bulk_remove_groups_from_inbox(groups_to_transistion) - with sentry_sdk.start_span(description="post_save_send_robust"): + with sentry_sdk.start_span(name="post_save_send_robust"): if not options.get("groups.enable-post-update-signal"): for group in groups_to_transistion: post_save.send_robust( diff --git a/src/sentry/issues/run.py b/src/sentry/issues/run.py index 0eac7116a06b1..057a7023436c3 100644 --- a/src/sentry/issues/run.py +++ b/src/sentry/issues/run.py @@ -48,7 +48,7 @@ def __init__( self.pool = MultiprocessingPool(num_processes) self.worker = None - def crate_parallel_worker( + def create_parallel_worker( self, commit: Commit, ) -> 
ProcessingStrategy[KafkaPayload]: @@ -63,7 +63,7 @@ def crate_parallel_worker( output_block_size=self.output_block_size, ) - def creat_batched_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: + def create_batched_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: assert self.worker is not None batch_processor = RunTask( function=functools.partial(process_batch, self.worker), @@ -81,9 +81,9 @@ def create_with_partitions( partitions: Mapping[Partition, int], ) -> ProcessingStrategy[KafkaPayload]: if self.batched: - return self.creat_batched_parallel_worker(commit) + return self.create_batched_parallel_worker(commit) else: - return self.crate_parallel_worker(commit) + return self.create_parallel_worker(commit) def shutdown(self) -> None: if self.pool: diff --git a/src/sentry/lang/native/processing.py b/src/sentry/lang/native/processing.py index 4e6ee42b7989c..70497f056ca24 100644 --- a/src/sentry/lang/native/processing.py +++ b/src/sentry/lang/native/processing.py @@ -5,6 +5,7 @@ from collections.abc import Callable, Mapping from typing import Any +import sentry_sdk from symbolic.debuginfo import normalize_debug_id from symbolic.exceptions import ParseDebugIdError @@ -287,6 +288,14 @@ def process_minidump(symbolicator: Symbolicator, data: Any) -> Any: if _handle_response_status(data, response): _merge_full_response(data, response) + # Emit Apple symbol stats + apple_symbol_stats = response.get("apple_symbol_stats") + if apple_symbol_stats: + try: + emit_apple_symbol_stats(apple_symbol_stats, data) + except Exception as e: + sentry_sdk.capture_exception(e) + return data @@ -302,6 +311,14 @@ def process_applecrashreport(symbolicator: Symbolicator, data: Any) -> Any: if _handle_response_status(data, response): _merge_full_response(data, response) + # Emit Apple symbol stats + apple_symbol_stats = response.get("apple_symbol_stats") + if apple_symbol_stats: + try: + emit_apple_symbol_stats(apple_symbol_stats, data) + except Exception as e: + sentry_sdk.capture_exception(e) + return data @@ -409,6 +426,14 @@ def process_native_stacktraces(symbolicator: Symbolicator, data: Any) -> Any: if not _handle_response_status(data, response): return data + # Emit Apple symbol stats + apple_symbol_stats = response.get("apple_symbol_stats") + if apple_symbol_stats: + try: + emit_apple_symbol_stats(apple_symbol_stats, data) + except Exception as e: + sentry_sdk.capture_exception(e) + assert len(modules) == len(response["modules"]), (modules, response) os = get_os_from_event(data) @@ -455,6 +480,52 @@ def process_native_stacktraces(symbolicator: Symbolicator, data: Any) -> Any: return data +def emit_apple_symbol_stats(apple_symbol_stats, data): + os_name = get_path(data, "contexts", "os", "name") or get_path( + data, "contexts", "os", "raw_description" + ) + os_version = get_path(data, "contexts", "os", "version") + + if os_version: + os_version = os_version.split(".", 1)[0] + + if neither := apple_symbol_stats.get("neither"): + metrics.incr( + "apple_symbol_availability_v2", + amount=neither, + tags={"availability": "neither", "os_name": os_name, "os_version": os_version}, + sample_rate=1.0, + ) + + # TODO: This seems to just be wrong + # We want mutual exclusion here, since we don't want to double count. E.g., an event has both symbols, so we + # count it both in `both` and `old` or `symx` which makes it impossible for us to know the percentage of events + # that matched both. 
+ if both := apple_symbol_stats.get("both"): + metrics.incr( + "apple_symbol_availability_v2", + amount=both, + tags={"availability": "both", "os_name": os_name, "os_version": os_version}, + sample_rate=1.0, + ) + + if old := apple_symbol_stats.get("old"): + metrics.incr( + "apple_symbol_availability_v2", + amount=old, + tags={"availability": "old", "os_name": os_name, "os_version": os_version}, + sample_rate=1.0, + ) + + if symx := apple_symbol_stats.get("symx"): + metrics.incr( + "apple_symbol_availability_v2", + amount=symx, + tags={"availability": "symx", "os_name": os_name, "os_version": os_version}, + sample_rate=1.0, + ) + + def get_native_symbolication_function( data: Mapping[str, Any], stacktraces: list[StacktraceInfo] ) -> Callable[[Symbolicator, Any], Any] | None: diff --git a/src/sentry/lang/native/sources.py b/src/sentry/lang/native/sources.py index 5a2e35971f2fb..51152337173c4 100644 --- a/src/sentry/lang/native/sources.py +++ b/src/sentry/lang/native/sources.py @@ -17,7 +17,7 @@ from sentry import features, options from sentry.auth.system import get_system_token from sentry.models.project import Project -from sentry.utils import metrics, redis, safe +from sentry.utils import redis, safe from sentry.utils.http import get_origins logger = logging.getLogger(__name__) @@ -684,7 +684,7 @@ def _process_response(json): just have their IDs. """ try: - capture_apple_symbol_stats(json) + collect_apple_symbol_stats(json) except Exception as e: sentry_sdk.capture_exception(e) for module in json.get("modules") or (): @@ -705,7 +705,7 @@ def _process_response(json): return (sources, _process_response) -def capture_apple_symbol_stats(json): +def collect_apple_symbol_stats(json): eligible_symbols = 0 neither_has_symbol = 0 both_have_symbol = 0 @@ -748,33 +748,11 @@ def capture_apple_symbol_stats(json): # now, we are only interested in rough numbers. if eligible_symbols: - metrics.incr( - "apple_symbol_availability_v2", - amount=neither_has_symbol, - tags={"availability": "neither"}, - sample_rate=1.0, - ) - - # We want mutual exclusion here, since we don't want to double count. E.g., an event has both symbols, so we - # count it both in `both` and `old` or `symx` which makes it impossible for us to know the percentage of events - # that matched both. 
- if both_have_symbol: - metrics.incr( - "apple_symbol_availability_v2", - amount=both_have_symbol, - tags={"availability": "both"}, - sample_rate=1.0, - ) - else: - metrics.incr( - "apple_symbol_availability_v2", - amount=old_has_symbol, - tags={"availability": "old"}, - sample_rate=1.0, - ) - metrics.incr( - "apple_symbol_availability_v2", - amount=symx_has_symbol, - tags={"availability": "symx"}, - sample_rate=1.0, - ) + apple_symbol_stats = { + "both": both_have_symbol, + "neither": neither_has_symbol, + "symx": symx_has_symbol, + "old": old_has_symbol, + } + + json["apple_symbol_stats"] = apple_symbol_stats diff --git a/src/sentry/mail/notifications.py b/src/sentry/mail/notifications.py index 208741ea872a7..23df661b3c9ba 100644 --- a/src/sentry/mail/notifications.py +++ b/src/sentry/mail/notifications.py @@ -2,7 +2,7 @@ import logging from collections.abc import Iterable, Mapping, MutableMapping -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, TypeVar import orjson import sentry_sdk @@ -17,6 +17,7 @@ from sentry.notifications.notify import register_notification_provider from sentry.notifications.types import UnsubscribeContext from sentry.types.actor import Actor +from sentry.users.models.user import User from sentry.utils.email import MessageBuilder, group_id_to_email from sentry.utils.linksign import generate_signed_unsubscribe_link @@ -79,7 +80,7 @@ def _log_message(notification: BaseNotification, recipient: Actor) -> None: def get_context( notification: BaseNotification, - recipient: Actor | Team | RpcUser, + recipient: Actor | Team | RpcUser | User, shared_context: Mapping[str, Any], extra_context: Mapping[str, Any], ) -> Mapping[str, Any]: @@ -113,20 +114,20 @@ def send_notification_as_email( ) -> None: for recipient in recipients: recipient_actor = Actor.from_object(recipient) - with sentry_sdk.start_span(op="notification.send_email", description="one_recipient"): + with sentry_sdk.start_span(op="notification.send_email", name="one_recipient"): if recipient_actor.is_team: # TODO(mgaeta): MessageBuilder only works with Users so filter out Teams for now. 
continue _log_message(notification, recipient_actor) - with sentry_sdk.start_span(op="notification.send_email", description="build_message"): + with sentry_sdk.start_span(op="notification.send_email", name="build_message"): msg = MessageBuilder( **get_builder_args( notification, recipient_actor, shared_context, extra_context_by_actor ) ) - with sentry_sdk.start_span(op="notification.send_email", description="send_message"): + with sentry_sdk.start_span(op="notification.send_email", name="send_message"): # TODO: find better way of handling this add_users_kwargs = {} if isinstance(notification, ProjectNotification): @@ -136,11 +137,14 @@ def send_notification_as_email( notification.record_notification_sent(recipient_actor, ExternalProviders.EMAIL) +RecipientT = TypeVar("RecipientT", Actor, User) + + def get_builder_args( notification: BaseNotification, - recipient: Actor, + recipient: RecipientT, shared_context: Mapping[str, Any] | None = None, - extra_context_by_actor: Mapping[Actor, Mapping[str, Any]] | None = None, + extra_context_by_actor: Mapping[RecipientT, Mapping[str, Any]] | None = None, ) -> Mapping[str, Any]: # TODO: move context logic to single notification class method extra_context = ( diff --git a/src/sentry/mediators/__init__.py b/src/sentry/mediators/__init__.py index a8ff0caba1639..57874a2ed719f 100644 --- a/src/sentry/mediators/__init__.py +++ b/src/sentry/mediators/__init__.py @@ -1,6 +1,4 @@ from .mediator import Mediator # NOQA from .param import Param # NOQA -from .sentry_app_installations import * # NOQA -from .token_exchange.grant_exchanger import GrantExchanger # noqa: F401 from .token_exchange.refresher import Refresher # noqa: F401 from .token_exchange.util import AUTHORIZATION, REFRESH, GrantTypes # noqa: F401 diff --git a/src/sentry/mediators/alert_rule_actions/__init__.py b/src/sentry/mediators/alert_rule_actions/__init__.py deleted file mode 100644 index 77138e849b32d..0000000000000 --- a/src/sentry/mediators/alert_rule_actions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .creator import AlertRuleActionCreator - -__all__ = ("AlertRuleActionCreator",) diff --git a/src/sentry/mediators/external_issues/__init__.py b/src/sentry/mediators/external_issues/__init__.py deleted file mode 100644 index 9b781da85a84f..0000000000000 --- a/src/sentry/mediators/external_issues/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .creator import Creator # NOQA -from .issue_link_creator import IssueLinkCreator # NOQA diff --git a/src/sentry/mediators/external_issues/creator.py b/src/sentry/mediators/external_issues/creator.py deleted file mode 100644 index 8ab273bcbc352..0000000000000 --- a/src/sentry/mediators/external_issues/creator.py +++ /dev/null @@ -1,32 +0,0 @@ -from html import escape - -from django.db import router - -from sentry.mediators.mediator import Mediator -from sentry.mediators.param import Param -from sentry.models.group import Group -from sentry.models.platformexternalissue import PlatformExternalIssue -from sentry.sentry_apps.services.app import RpcSentryAppInstallation - - -class Creator(Mediator): - install = Param(RpcSentryAppInstallation) - group = Param(Group) - web_url = Param(str) - project = Param(str) - identifier = Param(str) - using = router.db_for_write(PlatformExternalIssue) - - def call(self): - self._create_external_issue() - return self.external_issue - - def _create_external_issue(self): - display_name = f"{escape(self.project)}#{escape(self.identifier)}" - self.external_issue = PlatformExternalIssue.objects.create( - group_id=self.group.id, 
- project_id=self.group.project_id, - service_type=self.install.sentry_app.slug, - display_name=display_name, - web_url=self.web_url, - ) diff --git a/src/sentry/mediators/external_issues/issue_link_creator.py b/src/sentry/mediators/external_issues/issue_link_creator.py deleted file mode 100644 index ac23160fc7e81..0000000000000 --- a/src/sentry/mediators/external_issues/issue_link_creator.py +++ /dev/null @@ -1,55 +0,0 @@ -from django.db import router -from django.utils.functional import cached_property - -from sentry.coreapi import APIUnauthorized -from sentry.mediators.external_issues.creator import Creator -from sentry.mediators.external_requests.issue_link_requester import IssueLinkRequester -from sentry.mediators.mediator import Mediator -from sentry.mediators.param import Param -from sentry.models.group import Group -from sentry.models.platformexternalissue import PlatformExternalIssue -from sentry.sentry_apps.services.app import RpcSentryAppInstallation -from sentry.users.services.user import RpcUser - - -class IssueLinkCreator(Mediator): - install = Param(RpcSentryAppInstallation) - group = Param(Group) - action = Param(str) - fields = Param(object) - uri = Param(str) - user = Param(RpcUser) - using = router.db_for_write(PlatformExternalIssue) - - def call(self): - self._verify_action() - self._make_external_request() - self._create_external_issue() - return self.external_issue - - def _verify_action(self): - if self.action not in ["link", "create"]: - raise APIUnauthorized(f"Invalid action '{self.action}'") - - def _make_external_request(self): - self.response = IssueLinkRequester.run( - install=self.install, - uri=self.uri, - group=self.group, - fields=self.fields, - user=self.user, - action=self.action, - ) - - def _create_external_issue(self): - self.external_issue = Creator.run( - install=self.install, - group=self.group, - web_url=self.response["webUrl"], - project=self.response["project"], - identifier=self.response["identifier"], - ) - - @cached_property - def sentry_app(self): - return self.install.sentry_app diff --git a/src/sentry/mediators/external_requests/__init__.py b/src/sentry/mediators/external_requests/__init__.py deleted file mode 100644 index f534bc2263506..0000000000000 --- a/src/sentry/mediators/external_requests/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .alert_rule_action_requester import AlertRuleActionRequester # NOQA -from .issue_link_requester import IssueLinkRequester # NOQA -from .select_requester import SelectRequester # NOQA diff --git a/src/sentry/mediators/sentry_app_installations/__init__.py b/src/sentry/mediators/sentry_app_installations/__init__.py deleted file mode 100644 index 2369e8abea593..0000000000000 --- a/src/sentry/mediators/sentry_app_installations/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .installation_notifier import InstallationNotifier # NOQA -from .updater import Updater # NOQA diff --git a/src/sentry/mediators/sentry_app_installations/installation_notifier.py b/src/sentry/mediators/sentry_app_installations/installation_notifier.py deleted file mode 100644 index bb37880dfcda9..0000000000000 --- a/src/sentry/mediators/sentry_app_installations/installation_notifier.py +++ /dev/null @@ -1,54 +0,0 @@ -from django.db import router -from django.utils.functional import cached_property - -from sentry.api.serializers import AppPlatformEvent, SentryAppInstallationSerializer, serialize -from sentry.coreapi import APIUnauthorized -from sentry.mediators.mediator import Mediator -from sentry.mediators.param import Param -from 
sentry.models.apigrant import ApiGrant -from sentry.sentry_apps.models.sentry_app import SentryApp -from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation -from sentry.users.services.user.model import RpcUser -from sentry.utils.sentry_apps import send_and_save_webhook_request - - -class InstallationNotifier(Mediator): - install = Param(SentryAppInstallation) - user = Param(RpcUser) - action = Param(str) - using = router.db_for_write(SentryAppInstallation) - - def call(self) -> None: - self._verify_action() - self._send_webhook() - - def _verify_action(self) -> None: - if self.action not in ["created", "deleted"]: - raise APIUnauthorized(f"Invalid action '{self.action}'") - - def _send_webhook(self) -> None: - send_and_save_webhook_request(self.sentry_app, self.request) - - @property - def request(self) -> AppPlatformEvent: - data = serialize( - [self.install], - user=self.user, - serializer=SentryAppInstallationSerializer(), - is_webhook=True, - )[0] - return AppPlatformEvent( - resource="installation", - action=self.action, - install=self.install, - data={"installation": data}, - actor=self.user, - ) - - @cached_property - def sentry_app(self) -> SentryApp: - return self.install.sentry_app - - @cached_property - def api_grant(self) -> ApiGrant | None: - return self.install.api_grant_id and self.install.api_grant diff --git a/src/sentry/mediators/sentry_app_installations/updater.py b/src/sentry/mediators/sentry_app_installations/updater.py deleted file mode 100644 index 4a6fac446b8c2..0000000000000 --- a/src/sentry/mediators/sentry_app_installations/updater.py +++ /dev/null @@ -1,32 +0,0 @@ -from django.db import router - -from sentry import analytics -from sentry.constants import SentryAppInstallationStatus -from sentry.mediators.mediator import Mediator -from sentry.mediators.param import Param -from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation -from sentry.sentry_apps.services.app import RpcSentryAppInstallation - - -class Updater(Mediator): - sentry_app_installation = Param(RpcSentryAppInstallation) - status = Param(str, required=False) - using = router.db_for_write(SentryAppInstallation) - - def call(self): - self._update_status() - return self.sentry_app_installation - - def _update_status(self): - # convert from string to integer - if self.status == SentryAppInstallationStatus.INSTALLED_STR: - for install in SentryAppInstallation.objects.filter(id=self.sentry_app_installation.id): - install.update(status=SentryAppInstallationStatus.INSTALLED) - - def record_analytics(self): - analytics.record( - "sentry_app_installation.updated", - sentry_app_installation_id=self.sentry_app_installation.id, - sentry_app_id=self.sentry_app_installation.sentry_app.id, - organization_id=self.sentry_app_installation.organization_id, - ) diff --git a/src/sentry/mediators/token_exchange/__init__.py b/src/sentry/mediators/token_exchange/__init__.py index 6ce401b0e890f..84bcc14774369 100644 --- a/src/sentry/mediators/token_exchange/__init__.py +++ b/src/sentry/mediators/token_exchange/__init__.py @@ -1,4 +1,3 @@ -from .grant_exchanger import GrantExchanger # NOQA from .refresher import Refresher # NOQA from .util import AUTHORIZATION, REFRESH, GrantTypes, token_expiration # NOQA from .validator import Validator # NOQA diff --git a/src/sentry/middleware/devtoolbar.py b/src/sentry/middleware/devtoolbar.py new file mode 100644 index 0000000000000..0d1ef60c8e011 --- /dev/null +++ b/src/sentry/middleware/devtoolbar.py @@ -0,0 +1,63 @@ +import logging 
+ +from django.http import HttpRequest, HttpResponse + +from sentry import analytics, options +from sentry.utils.http import origin_from_request +from sentry.utils.http import query_string as get_query_string +from sentry.utils.urls import parse_id_or_slug_param + +logger = logging.getLogger(__name__) + + +class DevToolbarAnalyticsMiddleware: + def __init__(self, get_response): + self.get_response = get_response + + def __call__(self, request): + response = self.get_response(request) + try: + # Note ordering of conditions to reduce option queries. GET contains the query params, regardless of method. + if request.GET.get("queryReferrer") == "devtoolbar" and options.get( + "devtoolbar.analytics.enabled" + ): + _record_api_request(request, response) + except Exception: + logger.exception("devtoolbar: exception while recording api analytics event.") + + return response + + +def _record_api_request(request: HttpRequest, response: HttpResponse) -> None: + resolver_match = request.resolver_match + if resolver_match is None: + raise ValueError(f"Request URL not resolved: {request.path_info}") + + kwargs, route, view_name = ( + resolver_match.kwargs, + resolver_match.route, + resolver_match.view_name, + ) + + org_id_or_slug = kwargs.get("organization_id_or_slug", kwargs.get("organization_slug")) + org_id, org_slug = parse_id_or_slug_param(org_id_or_slug) + project_id_or_slug = kwargs.get("project_id_or_slug") + project_id, project_slug = parse_id_or_slug_param(project_id_or_slug) + + origin = origin_from_request(request) + query_string: str = get_query_string(request) # starts with ? if non-empty + + analytics.record( + "devtoolbar.api_request", + view_name=view_name, + route=route, + query_string=query_string, + origin=origin, + method=request.method, + status_code=response.status_code, + organization_id=org_id or None, + organization_slug=org_slug, + project_id=project_id or None, + project_slug=project_slug, + user_id=request.user.id if hasattr(request, "user") and request.user else None, + ) diff --git a/src/sentry/middleware/integrations/parsers/slack.py b/src/sentry/middleware/integrations/parsers/slack.py index 7c39ef9a34e95..e8c12815cbd9e 100644 --- a/src/sentry/middleware/integrations/parsers/slack.py +++ b/src/sentry/middleware/integrations/parsers/slack.py @@ -11,7 +11,6 @@ from rest_framework.request import Request from slack_sdk.errors import SlackApiError -from sentry import options from sentry.hybridcloud.outbox.category import WebhookProviderIdentifier from sentry.integrations.middleware.hybrid_cloud.parser import ( BaseRequestParser, @@ -186,10 +185,7 @@ def get_async_region_response(self, regions: Sequence[Region]) -> HttpResponseBa # if we are able to send a response to Slack from control itself to beat the 3 second timeout, we should do so try: - if ( - options.get("send-slack-response-from-control-silo") - and self.action_option in CONTROL_RESPONSE_ACTIONS - ): + if self.action_option in CONTROL_RESPONSE_ACTIONS: CONTROL_RESPONSE_ACTIONS[self.action_option](self.request, self.action_option) except ValueError: logger.exception( diff --git a/src/sentry/middleware/locale.py b/src/sentry/middleware/locale.py index a4c62acf16d7a..b9b769431cb2c 100644 --- a/src/sentry/middleware/locale.py +++ b/src/sentry/middleware/locale.py @@ -8,7 +8,7 @@ class SentryLocaleMiddleware(LocaleMiddleware): def process_request(self, request: HttpRequest) -> None: - with sentry_sdk.start_span(op="middleware.locale", description="process_request"): + with sentry_sdk.start_span(op="middleware.locale", 
name="process_request"): # No locale for static media # This avoids touching user session, which means we avoid # setting `Vary: Cookie` as a response header which will diff --git a/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py b/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py index 76ce0088bbaed..5592f4a25a27a 100644 --- a/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py +++ b/src/sentry/migrations/0001_squashed_0484_break_org_member_user_fk.py @@ -26,12 +26,12 @@ import sentry.db.models.fields.text import sentry.db.models.fields.uuid import sentry.db.models.indexes +import sentry.deletions.models.scheduleddeletion import sentry.models.apiapplication import sentry.models.apigrant import sentry.models.apitoken import sentry.models.broadcast import sentry.models.groupshare -import sentry.models.scheduledeletion import sentry.sentry_apps.models.sentry_app import sentry.sentry_apps.models.sentry_app_installation import sentry.sentry_apps.models.servicehook @@ -2162,7 +2162,7 @@ class Migration(CheckedMigration): ( "guid", models.CharField( - default=sentry.models.scheduledeletion.default_guid, + default=sentry.deletions.models.scheduleddeletion.default_guid, max_length=32, unique=True, ), @@ -2174,7 +2174,7 @@ class Migration(CheckedMigration): ( "date_scheduled", models.DateTimeField( - default=sentry.models.scheduledeletion.default_date_schedule + default=sentry.deletions.models.scheduleddeletion.default_date_schedule ), ), ("actor_id", sentry.db.models.fields.bounded.BoundedBigIntegerField(null=True)), @@ -8993,7 +8993,7 @@ class Migration(CheckedMigration): ( "guid", models.CharField( - default=sentry.models.scheduledeletion.default_guid, + default=sentry.deletions.models.scheduleddeletion.default_guid, max_length=32, unique=True, ), @@ -9005,7 +9005,7 @@ class Migration(CheckedMigration): ( "date_scheduled", models.DateTimeField( - default=sentry.models.scheduledeletion.default_date_schedule + default=sentry.deletions.models.scheduleddeletion.default_date_schedule ), ), ("actor_id", sentry.db.models.fields.bounded.BoundedBigIntegerField(null=True)), diff --git a/src/sentry/migrations/0507_delete_pending_deletion_rules.py b/src/sentry/migrations/0507_delete_pending_deletion_rules.py index 03920d2d91164..3fce0584bc82e 100644 --- a/src/sentry/migrations/0507_delete_pending_deletion_rules.py +++ b/src/sentry/migrations/0507_delete_pending_deletion_rules.py @@ -11,8 +11,8 @@ from sentry.utils.query import RangeQuerySetWrapperWithProgressBar if TYPE_CHECKING: + from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.models.rule import Rule - from sentry.models.scheduledeletion import RegionScheduledDeletion class ObjectStatus: diff --git a/src/sentry/migrations/0515_slugify_invalid_monitors.py b/src/sentry/migrations/0515_slugify_invalid_monitors.py index 693701cf440b8..556d126238907 100644 --- a/src/sentry/migrations/0515_slugify_invalid_monitors.py +++ b/src/sentry/migrations/0515_slugify_invalid_monitors.py @@ -13,8 +13,8 @@ from sentry.utils.query import RangeQuerySetWrapperWithProgressBar if TYPE_CHECKING: + from sentry.deletions.models.scheduleddeletion import BaseScheduledDeletion from sentry.models.rule import Rule - from sentry.models.scheduledeletion import BaseScheduledDeletion from sentry.monitors.models import Monitor diff --git a/src/sentry/migrations/0766_fix_substatus_for_pending_merge.py b/src/sentry/migrations/0766_fix_substatus_for_pending_merge.py new file mode 100644 index 
0000000000000..b134194b7683f --- /dev/null +++ b/src/sentry/migrations/0766_fix_substatus_for_pending_merge.py @@ -0,0 +1,53 @@ +# Generated by Django 5.1.1 on 2024-09-24 17:28 + +from django.apps.registry import Apps +from django.db import migrations +from django.db.backends.base.schema import BaseDatabaseSchemaEditor + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.utils.query import RangeQuerySetWrapperWithProgressBarApprox + + +class GroupStatus: + PENDING_MERGE = 5 + + +# End copy + + +def fix_substatus_for_pending_merge(apps: Apps, schema_editor: BaseDatabaseSchemaEditor) -> None: + Group = apps.get_model("sentry", "Group") + + for group in RangeQuerySetWrapperWithProgressBarApprox( + Group.objects.filter(status=GroupStatus.PENDING_MERGE, substatus__isnull=False) + ): + group.substatus = None + group.save(update_fields=["substatus"]) + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = True + + dependencies = [ + ("sentry", "0765_add_org_to_api_auth"), + ] + + operations = [ + migrations.RunPython( + fix_substatus_for_pending_merge, + migrations.RunPython.noop, + hints={"tables": ["sentry_groupedmessage", "sentry_grouphistory"]}, + ), + ] diff --git a/src/sentry/migrations/0767_add_selected_aggregate_to_dashboards_widget_query.py b/src/sentry/migrations/0767_add_selected_aggregate_to_dashboards_widget_query.py new file mode 100644 index 0000000000000..84c2064cefac4 --- /dev/null +++ b/src/sentry/migrations/0767_add_selected_aggregate_to_dashboards_widget_query.py @@ -0,0 +1,33 @@ +# Generated by Django 5.1.1 on 2024-09-24 19:39 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0766_fix_substatus_for_pending_merge"), + ] + + operations = [ + migrations.AddField( + model_name="dashboardwidgetquery", + name="selected_aggregate", + field=models.IntegerField(null=True), + ), + ] diff --git a/src/sentry/migrations/0768_fix_old_group_first_seen_dates.py b/src/sentry/migrations/0768_fix_old_group_first_seen_dates.py new file mode 100644 index 0000000000000..5f2637e9dfd88 --- /dev/null +++ b/src/sentry/migrations/0768_fix_old_group_first_seen_dates.py @@ -0,0 +1,47 @@ +# Generated by Django 5.1.1 on 2024-09-24 20:28 + +from datetime import datetime, timezone + +from django.apps.registry import Apps +from django.db import migrations +from django.db.backends.base.schema import BaseDatabaseSchemaEditor + +from sentry.new_migrations.migrations import CheckedMigration + +OLD_FIRST_SEEN_CUTOFF = datetime(2000, 1, 1, tzinfo=timezone.utc) + + +def update_old_first_seen_dates(apps: Apps, schema_editor: BaseDatabaseSchemaEditor) -> None: + Group = apps.get_model("sentry", "Group") + + for group in Group.objects.filter(first_seen__lt=OLD_FIRST_SEEN_CUTOFF): + group.first_seen = group.active_at + group.save(update_fields=["first_seen"]) + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = True + + dependencies = [ + ("sentry", "0767_add_selected_aggregate_to_dashboards_widget_query"), + ] + + operations = [ + migrations.RunPython( + update_old_first_seen_dates, + migrations.RunPython.noop, + hints={"tables": ["sentry_groupedmessage"]}, + ), + ] diff --git a/src/sentry/migrations/0769_add_seer_fields_to_grouphash_metadata.py b/src/sentry/migrations/0769_add_seer_fields_to_grouphash_metadata.py new file mode 100644 index 0000000000000..b623da7f2fa36 --- /dev/null +++ b/src/sentry/migrations/0769_add_seer_fields_to_grouphash_metadata.py @@ -0,0 +1,60 @@ +# Generated by Django 5.1.1 on 2024-09-27 21:29 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.foreignkey +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. 
+ # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0768_fix_old_group_first_seen_dates"), + ] + + operations = [ + migrations.AddField( + model_name="grouphashmetadata", + name="seer_date_sent", + field=models.DateTimeField(null=True), + ), + migrations.AddField( + model_name="grouphashmetadata", + name="seer_event_sent", + field=models.CharField(max_length=32, null=True), + ), + migrations.AddField( + model_name="grouphashmetadata", + name="seer_match_distance", + field=models.FloatField(null=True), + ), + migrations.AddField( + model_name="grouphashmetadata", + name="seer_matched_grouphash", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + null=True, + on_delete=django.db.models.deletion.DO_NOTHING, + related_name="seer_matchees", + to="sentry.grouphash", + ), + ), + migrations.AddField( + model_name="grouphashmetadata", + name="seer_model", + field=models.CharField(null=True), + ), + ] diff --git a/src/sentry/migrations/0770_increase_project_slug_max_length.py b/src/sentry/migrations/0770_increase_project_slug_max_length.py new file mode 100644 index 0000000000000..c131b8b6fe76a --- /dev/null +++ b/src/sentry/migrations/0770_increase_project_slug_max_length.py @@ -0,0 +1,34 @@ +# Generated by Django 5.1.1 on 2024-09-30 19:46 + +from django.db import migrations + +import sentry.db.models.fields.slug +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = True + + dependencies = [ + ("sentry", "0769_add_seer_fields_to_grouphash_metadata"), + ] + + operations = [ + migrations.AlterField( + model_name="project", + name="slug", + field=sentry.db.models.fields.slug.SentrySlugField(max_length=100, null=True), + ), + ] diff --git a/src/sentry/migrations/0771_add_grouping_config_to_grouphash_metadata.py b/src/sentry/migrations/0771_add_grouping_config_to_grouphash_metadata.py new file mode 100644 index 0000000000000..48bfb3c9b48d4 --- /dev/null +++ b/src/sentry/migrations/0771_add_grouping_config_to_grouphash_metadata.py @@ -0,0 +1,33 @@ +# Generated by Django 5.1.1 on 2024-10-01 02:06 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0770_increase_project_slug_max_length"), + ] + + operations = [ + migrations.AddField( + model_name="grouphashmetadata", + name="latest_grouping_config", + field=models.CharField(null=True), + ), + ] diff --git a/src/sentry/migrations/0772_backfill_grouphash_metadata_grouping_config.py b/src/sentry/migrations/0772_backfill_grouphash_metadata_grouping_config.py new file mode 100644 index 0000000000000..6e2474e507c8f --- /dev/null +++ b/src/sentry/migrations/0772_backfill_grouphash_metadata_grouping_config.py @@ -0,0 +1,45 @@ +# Generated by Django 5.1.1 on 2024-10-01 00:47 + +from django.apps.registry import Apps +from django.db import migrations +from django.db.backends.base.schema import BaseDatabaseSchemaEditor + +from sentry.new_migrations.migrations import CheckedMigration + +DEFAULT_GROUPING_CONFIG = "newstyle:2023-01-11" + + +def fill_in_missing_grouping_config(apps: Apps, schema_editor: BaseDatabaseSchemaEditor) -> None: + GroupHashMetadata = apps.get_model("sentry", "GroupHashMetadata") + + for gh_metadata in GroupHashMetadata.objects.filter(latest_grouping_config=None): + gh_metadata.latest_grouping_config = DEFAULT_GROUPING_CONFIG + gh_metadata.save(update_fields=["latest_grouping_config"]) + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. 
+ # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0771_add_grouping_config_to_grouphash_metadata"), + ] + + operations = [ + migrations.RunPython( + fill_in_missing_grouping_config, + migrations.RunPython.noop, + hints={"tables": ["sentry_groupedhashmetadata"]}, + ), + ] diff --git a/src/sentry/migrations/0773_make_group_score_nullable.py b/src/sentry/migrations/0773_make_group_score_nullable.py new file mode 100644 index 0000000000000..7bad3299992d0 --- /dev/null +++ b/src/sentry/migrations/0773_make_group_score_nullable.py @@ -0,0 +1,34 @@ +# Generated by Django 5.1.1 on 2024-10-08 16:00 + +from django.db import migrations + +import sentry.db.models.fields.bounded +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0772_backfill_grouphash_metadata_grouping_config"), + ] + + operations = [ + migrations.AlterField( + model_name="group", + name="score", + field=sentry.db.models.fields.bounded.BoundedIntegerField(default=0, null=True), + ), + ] diff --git a/src/sentry/migrations/0774_drop_group_score_in_state_only.py b/src/sentry/migrations/0774_drop_group_score_in_state_only.py new file mode 100644 index 0000000000000..1a4295bcf78b8 --- /dev/null +++ b/src/sentry/migrations/0774_drop_group_score_in_state_only.py @@ -0,0 +1,34 @@ +# Generated by Django 5.1.1 on 2024-10-09 15:39 + +from django.db import migrations + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. 
Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0773_make_group_score_nullable"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + database_operations=[], + state_operations=[ + migrations.RemoveField(model_name="group", name="score"), + ], + ) + ] diff --git a/src/sentry/migrations/0775_add_dashboard_permissions_model.py b/src/sentry/migrations/0775_add_dashboard_permissions_model.py new file mode 100644 index 0000000000000..ac35c3644f555 --- /dev/null +++ b/src/sentry/migrations/0775_add_dashboard_permissions_model.py @@ -0,0 +1,51 @@ +# Generated by Django 5.1.1 on 2024-10-10 18:10 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.bounded +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0774_drop_group_score_in_state_only"), + ] + + operations = [ + migrations.CreateModel( + name="DashboardPermissions", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("is_creator_only_editable", models.BooleanField(default=False)), + ( + "dashboard", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, to="sentry.dashboard" + ), + ), + ], + options={ + "db_table": "sentry_dashboardpermissions", + }, + ), + ] diff --git a/src/sentry/migrations/0776_drop_group_score_in_database.py b/src/sentry/migrations/0776_drop_group_score_in_database.py new file mode 100644 index 0000000000000..53ed5f75b6f0d --- /dev/null +++ b/src/sentry/migrations/0776_drop_group_score_in_database.py @@ -0,0 +1,42 @@ +# Generated by Django 5.1.1 on 2024-10-10 20:34 + +from django.db import migrations + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. 
+ # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = True + + dependencies = [ + ("sentry", "0775_add_dashboard_permissions_model"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + database_operations=[ + migrations.RunSQL( + """ + ALTER TABLE "sentry_groupedmessage" DROP COLUMN "score"; + """, + reverse_sql=""" + ALTER TABLE "sentry_groupedmessage" ADD COLUMN "score" int NULL; + """, + hints={"tables": ["sentry_groupedmessage"]}, + ) + ], + state_operations=[], + ) + ] diff --git a/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py b/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py new file mode 100644 index 0000000000000..4617ea31099ea --- /dev/null +++ b/src/sentry/migrations/0777_add_related_name_to_dashboard_permissions.py @@ -0,0 +1,38 @@ +# Generated by Django 5.1.1 on 2024-10-15 18:09 + +import django.db.models.deletion +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0776_drop_group_score_in_database"), + ] + + operations = [ + migrations.AlterField( + model_name="dashboardpermissions", + name="dashboard", + field=models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="permissions", + to="sentry.dashboard", + ), + ), + ] diff --git a/src/sentry/models/__init__.py b/src/sentry/models/__init__.py index f300d344c9a82..cf171644ad67a 100644 --- a/src/sentry/models/__init__.py +++ b/src/sentry/models/__init__.py @@ -1,3 +1,4 @@ +from ..sentry_apps.models.platformexternalissue import * # NOQA from .activity import * # NOQA from .apiapplication import * # NOQA from .apiauthorization import * # NOQA @@ -19,6 +20,7 @@ from .commitfilechange import CommitFileChange # noqa from .counter import * # NOQA from .dashboard import * # NOQA +from .dashboard_permissions import * # NOQA from .dashboard_widget import * # NOQA from .debugfile import * # NOQA from .deletedentry import * # NOQA @@ -75,7 +77,6 @@ from .organizationslugreservation import * # NOQA from .organizationslugreservationreplica import * # NOQA from .orgauthtoken import * # NOQA -from .platformexternalissue import * # NOQA from .project import * # NOQA from .projectbookmark import * # NOQA from .projectcodeowners import * # NOQA @@ -104,7 +105,6 @@ from .rulefirehistory import RuleFireHistory # NOQA from .rulesnooze import RuleSnooze # NOQA from .savedsearch import * # NOQA -from .scheduledeletion import * # NOQA from .search_common import * # NOQA from .sentryshot import * # NOQA from .sourcemapprocessingissue import * # NOQA diff --git a/src/sentry/models/dashboard.py b/src/sentry/models/dashboard.py index 80c35e5709831..5252a0321d277 100644 --- a/src/sentry/models/dashboard.py +++ b/src/sentry/models/dashboard.py @@ -186,9 +186,11 @@ def get_prebuilt_dashboards(organization, user) -> list[dict[str, Any]]: "queries": [ { "name": "Known Users", - "conditions": "has:user.email" - if has_discover_split - else "has:user.email !event.type:transaction", + "conditions": ( + "has:user.email" + if has_discover_split + else "has:user.email !event.type:transaction" + ), "fields": ["count_unique(user)"], "aggregates": ["count_unique(user)"], "columns": [], @@ -196,9 +198,11 @@ def get_prebuilt_dashboards(organization, user) -> list[dict[str, Any]]: }, { "name": "Anonymous Users", - "conditions": "!has:user.email" - if has_discover_split - else "!has:user.email !event.type:transaction", + "conditions": ( + "!has:user.email" + if has_discover_split + else "!has:user.email !event.type:transaction" + ), "fields": ["count_unique(user)"], "aggregates": ["count_unique(user)"], "columns": [], @@ -238,9 +242,11 @@ def get_prebuilt_dashboards(organization, user) -> list[dict[str, Any]]: "queries": [ { "name": "Error counts", - "conditions": "has:geo.country_code" - if has_discover_split - else "has:geo.country_code !event.type:transaction", + "conditions": ( + "has:geo.country_code" + if has_discover_split + else "has:geo.country_code !event.type:transaction" + ), "fields": ["geo.country_code", "geo.region", "count()"], "aggregates": ["count()"], "columns": ["geo.country_code", "geo.region"], @@ -256,9 +262,11 @@ def get_prebuilt_dashboards(organization, user) -> list[dict[str, Any]]: "queries": [ { "name": "", - "conditions": "has:browser.name" - if has_discover_split - else "has:browser.name !event.type:transaction", + 
"conditions": ( + "has:browser.name" + if has_discover_split + else "has:browser.name !event.type:transaction" + ), "fields": ["browser.name", "count()"], "aggregates": ["count()"], "columns": ["browser.name"], diff --git a/src/sentry/models/dashboard_permissions.py b/src/sentry/models/dashboard_permissions.py new file mode 100644 index 0000000000000..56d98180b9963 --- /dev/null +++ b/src/sentry/models/dashboard_permissions.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import Model, region_silo_model +from sentry.db.models.base import sane_repr + + +@region_silo_model +class DashboardPermissions(Model): + """ + Edit permissions for a Dashboard. + """ + + __relocation_scope__ = RelocationScope.Organization + + is_creator_only_editable = models.BooleanField(default=False) + dashboard = models.OneToOneField( + "sentry.Dashboard", on_delete=models.CASCADE, related_name="permissions" + ) + + class Meta: + app_label = "sentry" + db_table = "sentry_dashboardpermissions" + + __repr__ = sane_repr("is_creator_only_editable") diff --git a/src/sentry/models/dashboard_widget.py b/src/sentry/models/dashboard_widget.py index bb375e7cfc79a..0d8fe3d21c3ae 100644 --- a/src/sentry/models/dashboard_widget.py +++ b/src/sentry/models/dashboard_widget.py @@ -96,6 +96,14 @@ class DatasetSourcesTypes(Enum): Was an ambiguous dataset forced to split (i.e. we picked a default) """ FORCED = 3 + """ + Dataset inferred by split script, version 1 + """ + SPLIT_VERSION_1 = 4 + """ + Dataset inferred by split script, version 2 + """ + SPLIT_VERSION_2 = 5 @classmethod def as_choices(cls): @@ -161,6 +169,8 @@ class DashboardWidgetQuery(Model): date_modified = models.DateTimeField(default=timezone.now) # Whether this query is hidden from the UI, used by metric widgets is_hidden = models.BooleanField(default=False) + # Used by Big Number to select aggregate displayed + selected_aggregate = models.IntegerField(null=True) class Meta: app_label = "sentry" diff --git a/src/sentry/models/files/abstractfile.py b/src/sentry/models/files/abstractfile.py index 2bca88fc73448..e05ded4949002 100644 --- a/src/sentry/models/files/abstractfile.py +++ b/src/sentry/models/files/abstractfile.py @@ -204,7 +204,7 @@ def read(self, n=-1): # Django doesn't permit models to have parent classes that are Generic # this kludge lets satisfy both mypy and django class _Parent(Generic[BlobIndexType, BlobType]): - ... + pass else: diff --git a/src/sentry/models/files/abstractfileblob.py b/src/sentry/models/files/abstractfileblob.py index 48cd7371faefd..f53a4dbb84dbf 100644 --- a/src/sentry/models/files/abstractfileblob.py +++ b/src/sentry/models/files/abstractfileblob.py @@ -31,7 +31,7 @@ # Django doesn't permit models to have parent classes that are Generic # this kludge lets satisfy both mypy and django class _Parent(Generic[BlobOwnerType]): - ... 
+ pass else: diff --git a/src/sentry/models/group.py b/src/sentry/models/group.py index 1cb427a26837a..0e5ff5f5005f4 100644 --- a/src/sentry/models/group.py +++ b/src/sentry/models/group.py @@ -1,7 +1,6 @@ from __future__ import annotations import logging -import math import re import warnings from collections import defaultdict, namedtuple @@ -397,7 +396,7 @@ def get_groups_by_external_issue( self, integration: RpcIntegration, organizations: Iterable[Organization], - external_issue_key: str, + external_issue_key: str | None, ) -> QuerySet[Group]: from sentry.integrations.models.external_issue import ExternalIssue from sentry.integrations.services.integration import integration_service @@ -570,7 +569,6 @@ class Group(Model): active_at = models.DateTimeField(null=True, db_index=True) time_spent_total = BoundedIntegerField(default=0) time_spent_count = BoundedIntegerField(default=0) - score = BoundedIntegerField(default=0) # deprecated, do not use. GroupShare has superseded is_public = models.BooleanField(default=False, null=True) data: models.Field[dict[str, Any] | None, dict[str, Any]] = GzippedDictField( @@ -624,9 +622,6 @@ def save(self, *args, **kwargs): self.message = truncatechars(self.message.splitlines()[0], 255) if self.times_seen is None: self.times_seen = 1 - self.score = type(self).calculate_score( - times_seen=self.times_seen, last_seen=self.last_seen - ) super().save(*args, **kwargs) def get_absolute_url( @@ -769,9 +764,6 @@ def get_share_id(self): # Otherwise it has not been shared yet. return None - def get_score(self): - return type(self).calculate_score(self.times_seen, self.last_seen) - def get_latest_event(self) -> GroupEvent | None: if not hasattr(self, "_latest_event"): self._latest_event = self.get_latest_event_for_environments() @@ -922,10 +914,6 @@ def count_users_seen(self, referrer=Referrer.TAGSTORE_GET_GROUPS_USER_COUNTS.val referrer=referrer, )[self.id] - @classmethod - def calculate_score(cls, times_seen, last_seen): - return math.log(float(times_seen or 1)) * 600 + float(last_seen.strftime("%s")) - def get_assignee(self) -> Team | RpcUser | None: from sentry.models.groupassignee import GroupAssignee diff --git a/src/sentry/models/groupassignee.py b/src/sentry/models/groupassignee.py index e3c979eb3eb56..eeea257073f4a 100644 --- a/src/sentry/models/groupassignee.py +++ b/src/sentry/models/groupassignee.py @@ -12,6 +12,7 @@ from sentry.db.models import FlexibleForeignKey, Model, region_silo_model, sane_repr from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey from sentry.db.models.manager.base import BaseManager +from sentry.integrations.services.assignment_source import AssignmentSource from sentry.models.grouphistory import GroupHistoryStatus, record_group_history from sentry.models.groupowner import GroupOwner from sentry.models.groupsubscription import GroupSubscription @@ -134,8 +135,9 @@ def assign( create_only: bool = False, extra: dict[str, str] | None = None, force_autoassign: bool = False, + assignment_source: AssignmentSource | None = None, ): - from sentry.integrations.utils import sync_group_assignee_outbound + from sentry.integrations.utils.sync import sync_group_assignee_outbound from sentry.models.activity import Activity from sentry.models.groupsubscription import GroupSubscription @@ -187,7 +189,9 @@ def assign( if assignee_type == "user" and features.has( "organizations:integrations-issue-sync", group.organization, actor=acting_user ): - sync_group_assignee_outbound(group, assigned_to.id, assign=True) + 
sync_group_assignee_outbound( + group, assigned_to.id, assign=True, assignment_source=assignment_source + ) if not created: # aka re-assignment self.remove_old_assignees(group, assignee, assigned_to_id, assignee_type) @@ -200,8 +204,9 @@ def deassign( acting_user: User | RpcUser | None = None, assigned_to: Team | RpcUser | None = None, extra: dict[str, str] | None = None, + assignment_source: AssignmentSource | None = None, ) -> None: - from sentry.integrations.utils import sync_group_assignee_outbound + from sentry.integrations.utils.sync import sync_group_assignee_outbound from sentry.models.activity import Activity from sentry.models.projectownership import ProjectOwnership @@ -230,7 +235,9 @@ def deassign( if features.has( "organizations:integrations-issue-sync", group.organization, actor=acting_user ): - sync_group_assignee_outbound(group, None, assign=False) + sync_group_assignee_outbound( + group, None, assign=False, assignment_source=assignment_source + ) issue_unassigned.send_robust( project=group.project, group=group, user=acting_user, sender=self.__class__ diff --git a/src/sentry/models/grouphashmetadata.py b/src/sentry/models/grouphashmetadata.py index b661e2178c0c2..10bbb9bd2c09d 100644 --- a/src/sentry/models/grouphashmetadata.py +++ b/src/sentry/models/grouphashmetadata.py @@ -4,6 +4,7 @@ from sentry.backup.scopes import RelocationScope from sentry.db.models import Model, region_silo_model from sentry.db.models.base import sane_repr +from sentry.db.models.fields.foreignkey import FlexibleForeignKey @region_silo_model @@ -11,11 +12,33 @@ class GroupHashMetadata(Model): __relocation_scope__ = RelocationScope.Excluded # GENERAL + grouphash = models.OneToOneField( "sentry.GroupHash", related_name="_metadata", on_delete=models.CASCADE ) date_added = models.DateTimeField(default=timezone.now) + # HASHING + + # Most recent config to produce this hash + latest_grouping_config = models.CharField(null=True) + + # SEER + + # When this hash was sent to Seer. This will be different than `date_added` if we send it to + # Seer as part of a backfill rather than during ingest. 
+    seer_date_sent = models.DateTimeField(null=True)
+    # ID of the event whose stacktrace was sent to Seer
+    seer_event_sent = models.CharField(max_length=32, null=True)
+    # The version of the Seer model used to process this hash value
+    seer_model = models.CharField(null=True)
+    # The `GroupHash` record for the match Seer sent back (if any)
+    seer_matched_grouphash = FlexibleForeignKey(
+        "sentry.GroupHash", related_name="seer_matchees", on_delete=models.DO_NOTHING, null=True
+    )
+    # The similarity between this hash's stacktrace and the parent (matched) hash's stacktrace
+    seer_match_distance = models.FloatField(null=True)
+
     class Meta:
         app_label = "sentry"
         db_table = "sentry_grouphashmetadata"
diff --git a/src/sentry/models/groupinbox.py b/src/sentry/models/groupinbox.py
index d4ab7737c13a2..9f91b1218b79a 100644
--- a/src/sentry/models/groupinbox.py
+++ b/src/sentry/models/groupinbox.py
@@ -111,7 +111,7 @@ def remove_group_from_inbox(group, action=None, user=None, referrer=None):


 def bulk_remove_groups_from_inbox(groups, action=None, user=None, referrer=None):
-    with sentry_sdk.start_span(description="bulk_remove_groups_from_inbox"):
+    with sentry_sdk.start_span(name="bulk_remove_groups_from_inbox"):
         try:
             group_inbox = GroupInbox.objects.filter(group__in=groups)
             group_inbox.delete()
diff --git a/src/sentry/models/project.py b/src/sentry/models/project.py
index 6a1a43cd28046..8cadf8fad3a5a 100644
--- a/src/sentry/models/project.py
+++ b/src/sentry/models/project.py
@@ -54,6 +54,7 @@
 from sentry.users.models.user import User

 SENTRY_USE_SNOWFLAKE = getattr(settings, "SENTRY_USE_SNOWFLAKE", False)
+PROJECT_SLUG_MAX_LENGTH = 100

 # NOTE:
 # - When you modify this list, ensure that the platform IDs listed in "sentry/static/app/data/platforms.tsx" match.
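# An illustrative aside, not part of this patch: a cap like PROJECT_SLUG_MAX_LENGTH
# (introduced above and applied to the slug field in the next hunk) is typically
# enforced when a slug is derived from a name. A minimal sketch, mirroring the
# slugify(...)[:max_length].strip("-") pattern this change uses for monitor slugs;
# the function name is hypothetical.
from django.utils.text import slugify

PROJECT_SLUG_MAX_LENGTH = 100

def normalized_slug(name: str) -> str:
    # Truncate to the cap, then strip any dash left dangling by the cut.
    return slugify(name)[:PROJECT_SLUG_MAX_LENGTH].strip("-")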
@@ -232,7 +233,7 @@ class Project(Model, PendingDeletionMixin): __relocation_scope__ = RelocationScope.Organization - slug = SentrySlugField(null=True) + slug = SentrySlugField(null=True, max_length=PROJECT_SLUG_MAX_LENGTH) # DEPRECATED do not use, prefer slug name = models.CharField(max_length=200) forced_color = models.CharField(max_length=6, null=True, blank=True) @@ -469,6 +470,7 @@ def get_full_name(self): return self.slug def transfer_to(self, organization): + from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.incidents.models.alert_rule import AlertRule from sentry.integrations.models.external_issue import ExternalIssue from sentry.models.environment import Environment, EnvironmentProject @@ -476,7 +478,6 @@ def transfer_to(self, organization): from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment from sentry.models.releases.release_project import ReleaseProject from sentry.models.rule import Rule - from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.monitors.models import Monitor old_org_id = self.organization_id diff --git a/src/sentry/models/projectownership.py b/src/sentry/models/projectownership.py index 6dbb55cd2fef0..456d51ae6c77c 100644 --- a/src/sentry/models/projectownership.py +++ b/src/sentry/models/projectownership.py @@ -5,6 +5,7 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any +import sentry_sdk from django.db import models from django.db.models.signals import post_delete, post_save from django.utils import timezone @@ -61,7 +62,7 @@ class Meta: __repr__ = sane_repr("project_id", "is_active") @classmethod - def get_cache_key(self, project_id): + def get_cache_key(self, project_id) -> str: return f"projectownership_project_id:1:{project_id}" @classmethod @@ -134,17 +135,21 @@ def get_owners( owners = {o for rule in rules for o in rule.owners} owners_to_actors = resolve_actors(owners, project_id) - ordered_actors = [] + ordered_actors: list[Actor] = [] for rule in rules: for o in rule.owners: - if o in owners and owners_to_actors.get(o) is not None: - ordered_actors.append(owners_to_actors[o]) - owners.remove(o) + if o in owners: + actor = owners_to_actors.get(o) + if actor is not None: + ordered_actors.append(actor) + owners.remove(o) return ordered_actors, rules @classmethod - def _hydrate_rules(cls, project_id, rules, type: str = OwnerRuleType.OWNERSHIP_RULE.value): + def _hydrate_rules( + cls, project_id: int, rules: Sequence[Rule], type: str = OwnerRuleType.OWNERSHIP_RULE.value + ): """ Get the last matching rule to take the most precedence. """ @@ -165,8 +170,10 @@ def _hydrate_rules(cls, project_id, rules, type: str = OwnerRuleType.OWNERSHIP_R return result @classmethod + @metrics.wraps("projectownership.get_issue_owners") + @sentry_sdk.trace def get_issue_owners( - cls, project_id, data, limit=2 + cls, project_id: int, data: Mapping[str, Any], limit: int = 2 ) -> Sequence[tuple[Rule, Sequence[Team | RpcUser], str]]: """ Get the issue owners for a project if there are any. 
@@ -177,41 +184,44 @@ def get_issue_owners( """ from sentry.models.projectcodeowners import ProjectCodeOwners - with metrics.timer("projectownership.get_autoassign_owners"): - ownership = cls.get_ownership_cached(project_id) - codeowners = ProjectCodeOwners.get_codeowners_cached(project_id) - if not (ownership or codeowners): - return [] - - if not ownership: - ownership = cls(project_id=project_id) + ownership = cls.get_ownership_cached(project_id) + codeowners = ProjectCodeOwners.get_codeowners_cached(project_id) + if not (ownership or codeowners): + return [] - ownership_rules = cls._matching_ownership_rules(ownership, data) - codeowners_rules = cls._matching_ownership_rules(codeowners, data) if codeowners else [] + if not ownership: + ownership = cls(project_id=project_id) - if not (codeowners_rules or ownership_rules): - return [] + # rules_with_owners is ordered by priority, descending, see also: + # https://docs.sentry.io/product/issues/ownership-rules/#evaluation-flow + rules_with_owners = [] + with metrics.timer("projectownership.get_issue_owners_ownership_rules"): + ownership_rules = list(reversed(cls._matching_ownership_rules(ownership, data))) hydrated_ownership_rules = cls._hydrate_rules( project_id, ownership_rules, OwnerRuleType.OWNERSHIP_RULE.value ) - hydrated_codeowners_rules = cls._hydrate_rules( - project_id, codeowners_rules, OwnerRuleType.CODEOWNERS.value - ) + for item in hydrated_ownership_rules: + if item[1]: # actors + rules_with_owners.append(item) + if len(rules_with_owners) == limit: + return rules_with_owners - rules_in_evaluation_order = [ - *hydrated_ownership_rules[::-1], - *hydrated_codeowners_rules[::-1], - ] + if not codeowners: + return rules_with_owners - rules_with_owners = list( - filter( - lambda item: len(item[1]) > 0, - rules_in_evaluation_order, - ) + with metrics.timer("projectownership.get_issue_owners_codeowners_rules"): + codeowners_rules = list(reversed(cls._matching_ownership_rules(codeowners, data))) + hydrated_codeowners_rules = cls._hydrate_rules( + project_id, codeowners_rules, OwnerRuleType.CODEOWNERS.value ) + for item in hydrated_codeowners_rules: + if item[1]: # actors + rules_with_owners.append(item) + if len(rules_with_owners) == limit: + return rules_with_owners - return rules_with_owners[:limit] + return rules_with_owners @classmethod def _get_autoassignment_types(cls, ownership): @@ -236,7 +246,7 @@ def handle_auto_assignment( organization_id: int | None = None, force_autoassign: bool = False, logging_extra: dict[str, str | bool | int] | None = None, - ): + ) -> None: """ Get the auto-assign owner for a project if there are any. We combine the schemas from IssueOwners and CodeOwners. 
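# An illustrative aside, not part of this patch: the reworked get_issue_owners
# above collects at most `limit` owning rules and exits early, instead of
# hydrating every rule and slicing at the end. A minimal, self-contained sketch
# of that pattern; the tuple shape (rule, actors, rule_type) follows the
# hydrated rules above, and the literal values are hypothetical.
def collect_rules_with_owners(hydrated_rules, limit=2):
    collected = []
    for rule, actors, rule_type in hydrated_rules:
        if actors:  # skip rules that resolved to no actors
            collected.append((rule, actors, rule_type))
            if len(collected) == limit:
                break  # early exit: lower-priority rules are never evaluated
    return collected

# Only the first two rules that resolve to actors are returned.
assert collect_rules_with_owners(
    [
        ("r1", [], "ownership"),
        ("r2", ["team-a"], "ownership"),
        ("r3", ["user-1"], "codeowners"),
        ("r4", ["user-2"], "codeowners"),
    ]
) == [("r2", ["team-a"], "ownership"), ("r3", ["user-1"], "codeowners")]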
@@ -358,12 +368,10 @@ def _matching_ownership_rules( cls, ownership: ProjectOwnership | ProjectCodeOwners, data: Mapping[str, Any], - ) -> Sequence[Rule]: + ) -> list[Rule]: rules = [] if ownership.schema is not None: - munged_data = None - if options.get("ownership.munge_data_for_performance"): - munged_data = Matcher.munge_if_needed(data) + munged_data = Matcher.munge_if_needed(data) for rule in load_schema(ownership.schema): if rule.test(data, munged_data): rules.append(rule) diff --git a/src/sentry/monitors/constants.py b/src/sentry/monitors/constants.py index 8f06d3589dff4..fb19678754fc4 100644 --- a/src/sentry/monitors/constants.py +++ b/src/sentry/monitors/constants.py @@ -16,9 +16,6 @@ # being marked as missed DEFAULT_CHECKIN_MARGIN = 1 -# Enforced maximum length of the monitor slug -MAX_SLUG_LENGTH = 50 - class PermitCheckInStatus(Enum): ACCEPT = 0 diff --git a/src/sentry/monitors/endpoints/base_monitor_details.py b/src/sentry/monitors/endpoints/base_monitor_details.py index f220997994ad8..8f9a2c366f333 100644 --- a/src/sentry/monitors/endpoints/base_monitor_details.py +++ b/src/sentry/monitors/endpoints/base_monitor_details.py @@ -13,10 +13,10 @@ from sentry.api.helpers.environments import get_environments from sentry.api.serializers import serialize from sentry.constants import ObjectStatus +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.models.environment import Environment from sentry.models.project import Project from sentry.models.rule import Rule, RuleActivity, RuleActivityType -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.monitors.models import ( CheckInStatus, Monitor, diff --git a/src/sentry/monitors/endpoints/base_monitor_environment_details.py b/src/sentry/monitors/endpoints/base_monitor_environment_details.py index b090001a5cbc8..bbdb5364a5a76 100644 --- a/src/sentry/monitors/endpoints/base_monitor_environment_details.py +++ b/src/sentry/monitors/endpoints/base_monitor_environment_details.py @@ -7,7 +7,7 @@ from sentry.api.base import BaseEndpointMixin from sentry.api.serializers import serialize from sentry.constants import ObjectStatus -from sentry.models.scheduledeletion import RegionScheduledDeletion +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.monitors.models import MonitorEnvironment, MonitorStatus diff --git a/src/sentry/monitors/endpoints/organization_monitor_index.py b/src/sentry/monitors/endpoints/organization_monitor_index.py index b7dc52b928598..585a37cf26d5c 100644 --- a/src/sentry/monitors/endpoints/organization_monitor_index.py +++ b/src/sentry/monitors/endpoints/organization_monitor_index.py @@ -34,6 +34,8 @@ from sentry.models.environment import Environment from sentry.models.organization import Organization from sentry.monitors.models import ( + DEFAULT_STATUS_ORDER, + MONITOR_ENVIRONMENT_ORDERING, Monitor, MonitorEnvironment, MonitorLimitsExceeded, @@ -66,18 +68,6 @@ def map_value_to_constant(constant, value): from rest_framework.request import Request from rest_framework.response import Response -DEFAULT_ORDERING = [ - MonitorStatus.ERROR, - MonitorStatus.OK, - MonitorStatus.ACTIVE, - MonitorStatus.DISABLED, -] - -MONITOR_ENVIRONMENT_ORDERING = Case( - *[When(status=s, then=Value(i)) for i, s in enumerate(DEFAULT_ORDERING)], - output_field=IntegerField(), -) - def flip_sort_direction(sort_field: str) -> str: if sort_field[0] == "-": @@ -163,8 +153,8 @@ def get(self, request: Request, organization: Organization) -> Response: 
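# An illustrative aside, not part of this patch: how a Case/When annotation like
# MONITOR_ENVIRONMENT_ORDERING above is consumed. Each status maps to its index
# in DEFAULT_STATUS_ORDER, muted environments sort after everything else, and
# querysets order by the computed column. The integer status values here are
# hypothetical stand-ins for the MonitorStatus constants, and the commented
# query mirrors the serializers.py hunk later in this diff.
from django.db.models import Case, IntegerField, Value, When

DEFAULT_STATUS_ORDER = [3, 1, 0, 2]  # e.g. ERROR, OK, ACTIVE, DISABLED constants

ordering = Case(
    # The first matching When wins, so the muted clause takes precedence.
    When(is_muted=True, then=Value(len(DEFAULT_STATUS_ORDER) + 1)),
    *[When(status=s, then=Value(i)) for i, s in enumerate(DEFAULT_STATUS_ORDER)],
    output_field=IntegerField(),
)

# MonitorEnvironment.objects.annotate(status_ordering=ordering).order_by(
#     "status_ordering", "-last_checkin", "environment_id"
# )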
queryset = queryset.annotate( environment_status_ordering=Case( # Sort DISABLED and is_muted monitors to the bottom of the list - When(status=ObjectStatus.DISABLED, then=Value(len(DEFAULT_ORDERING) + 1)), - When(is_muted=True, then=Value(len(DEFAULT_ORDERING))), + When(status=ObjectStatus.DISABLED, then=Value(len(DEFAULT_STATUS_ORDER) + 1)), + When(is_muted=True, then=Value(len(DEFAULT_STATUS_ORDER))), default=Subquery( monitor_environments_query.annotate( status_ordering=MONITOR_ENVIRONMENT_ORDERING diff --git a/src/sentry/monitors/models.py b/src/sentry/monitors/models.py index 0690ee7089152..966e707819308 100644 --- a/src/sentry/monitors/models.py +++ b/src/sentry/monitors/models.py @@ -11,7 +11,7 @@ import jsonschema from django.conf import settings from django.db import models -from django.db.models import Q +from django.db.models import Case, IntegerField, Q, Value, When from django.db.models.signals import post_delete, pre_save from django.dispatch import receiver from django.utils import timezone @@ -31,13 +31,12 @@ sane_repr, ) from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey -from sentry.db.models.fields.slug import SentrySlugField +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH, SentrySlugField from sentry.db.models.manager.base import BaseManager from sentry.db.models.utils import slugify_instance from sentry.locks import locks from sentry.models.environment import Environment from sentry.models.rule import Rule, RuleSource -from sentry.monitors.constants import MAX_SLUG_LENGTH from sentry.monitors.types import CrontabSchedule, IntervalSchedule from sentry.types.actor import Actor from sentry.utils.retries import TimedRetryPolicy @@ -170,6 +169,20 @@ def as_choices(cls): ) +DEFAULT_STATUS_ORDER = [ + MonitorStatus.ERROR, + MonitorStatus.OK, + MonitorStatus.ACTIVE, + MonitorStatus.DISABLED, +] + +MONITOR_ENVIRONMENT_ORDERING = Case( + When(is_muted=True, then=Value(len(DEFAULT_STATUS_ORDER) + 1)), + *[When(status=s, then=Value(i)) for i, s in enumerate(DEFAULT_STATUS_ORDER)], + output_field=IntegerField(), +) + + class MonitorType: # In the future we may have other types of monitors such as health check # monitors. But for now we just have CRON_JOB style monitors. 
@@ -296,14 +309,12 @@ def save(self, *args, **kwargs): self, self.name, organization_id=self.organization_id, - max_length=MAX_SLUG_LENGTH, + max_length=DEFAULT_SLUG_MAX_LENGTH, ) return super().save(*args, **kwargs) @property def owner_actor(self) -> Actor | None: - if not (self.owner_user_id or self.owner_team_id): - return None return Actor.from_id(user_id=self.owner_user_id, team_id=self.owner_team_id) @property diff --git a/src/sentry/monitors/processing_errors/manager.py b/src/sentry/monitors/processing_errors/manager.py index 0d6e5c08fdad6..af4d1e839710a 100644 --- a/src/sentry/monitors/processing_errors/manager.py +++ b/src/sentry/monitors/processing_errors/manager.py @@ -10,7 +10,7 @@ from redis.client import StrictRedis from rediscluster import RedisCluster -from sentry import analytics, features +from sentry import analytics from sentry.models.organization import Organization from sentry.models.project import Project from sentry.monitors.models import Monitor @@ -180,8 +180,6 @@ def handle_processing_errors(item: CheckinItem, error: ProcessingErrorsException try: project = Project.objects.get_from_cache(id=item.message["project_id"]) organization = Organization.objects.get_from_cache(id=project.organization_id) - if not features.has("organizations:crons-write-user-feedback", organization): - return metrics.incr( "monitors.checkin.handle_processing_error", diff --git a/src/sentry/monitors/serializers.py b/src/sentry/monitors/serializers.py index bb213eb73150a..78d04987dbf19 100644 --- a/src/sentry/monitors/serializers.py +++ b/src/sentry/monitors/serializers.py @@ -10,6 +10,7 @@ from sentry.models.environment import Environment from sentry.models.project import Project from sentry.monitors.models import ( + MONITOR_ENVIRONMENT_ORDERING, Monitor, MonitorCheckIn, MonitorEnvBrokenDetection, @@ -198,7 +199,8 @@ def get_attrs(self, item_list, user, **kwargs): monitor_environments_qs = ( MonitorEnvironment.objects.filter(monitor__in=item_list) - .order_by("-last_checkin") + .annotate(status_ordering=MONITOR_ENVIRONMENT_ORDERING) + .order_by("status_ordering", "-last_checkin", "environment_id") .exclude( status__in=[MonitorStatus.PENDING_DELETION, MonitorStatus.DELETION_IN_PROGRESS] ) diff --git a/src/sentry/monitors/types.py b/src/sentry/monitors/types.py index 7a1832983c490..50f17140da855 100644 --- a/src/sentry/monitors/types.py +++ b/src/sentry/monitors/types.py @@ -8,7 +8,7 @@ from django.utils.text import slugify from sentry_kafka_schemas.schema_types.ingest_monitors_v1 import CheckIn -from sentry.monitors.constants import MAX_SLUG_LENGTH +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH class CheckinTrace(TypedDict): @@ -70,7 +70,7 @@ class CheckinItem: @cached_property def valid_monitor_slug(self): - return slugify(self.payload["monitor_slug"])[:MAX_SLUG_LENGTH].strip("-") + return slugify(self.payload["monitor_slug"])[:DEFAULT_SLUG_MAX_LENGTH].strip("-") @property def processing_key(self): diff --git a/src/sentry/monitors/validators.py b/src/sentry/monitors/validators.py index 931c497e6cead..417540964f9fb 100644 --- a/src/sentry/monitors/validators.py +++ b/src/sentry/monitors/validators.py @@ -16,7 +16,8 @@ from sentry.api.serializers.rest_framework.project import ProjectField from sentry.constants import ObjectStatus from sentry.db.models import BoundedPositiveIntegerField -from sentry.monitors.constants import MAX_SLUG_LENGTH, MAX_THRESHOLD, MAX_TIMEOUT +from sentry.db.models.fields.slug import DEFAULT_SLUG_MAX_LENGTH +from sentry.monitors.constants import 
MAX_THRESHOLD, MAX_TIMEOUT from sentry.monitors.models import CheckInStatus, Monitor, MonitorType, ScheduleType from sentry.monitors.schedule import get_next_schedule, get_prev_schedule from sentry.monitors.types import CrontabSchedule @@ -246,7 +247,7 @@ class MonitorValidator(CamelSnakeSerializer): help_text="Name of the monitor. Used for notifications.", ) slug = SentrySerializerSlugField( - max_length=MAX_SLUG_LENGTH, + max_length=DEFAULT_SLUG_MAX_LENGTH, required=False, help_text="Uniquely identifies your monitor within your organization. Changing this slug will require updates to any instrumented check-in calls.", ) diff --git a/src/sentry/nodestore/bigtable/backend.py b/src/sentry/nodestore/bigtable/backend.py index aa7580bd9403f..fc45086f21712 100644 --- a/src/sentry/nodestore/bigtable/backend.py +++ b/src/sentry/nodestore/bigtable/backend.py @@ -63,6 +63,7 @@ def __init__( self.automatic_expiry = automatic_expiry self.skip_deletes = automatic_expiry and "_SENTRY_CLEANUP" in os.environ + @sentry_sdk.tracing.trace def _get_bytes(self, id: str) -> bytes | None: return self.store.get(id) diff --git a/src/sentry/notifications/notifications/base.py b/src/sentry/notifications/notifications/base.py index d7c5d023c93ad..f752f9f5e2a93 100644 --- a/src/sentry/notifications/notifications/base.py +++ b/src/sentry/notifications/notifications/base.py @@ -171,7 +171,7 @@ def record_analytics(self, event_name: str, *args: Any, **kwargs: Any) -> None: analytics.record(event_name, *args, **kwargs) def record_notification_sent(self, recipient: Actor, provider: ExternalProviders) -> None: - with sentry_sdk.start_span(op="notification.send", description="record_notification_sent"): + with sentry_sdk.start_span(op="notification.send", name="record_notification_sent"): # may want to explicitly pass in the parameters for this event self.record_analytics( f"integrations.{provider.name}.notification_sent", @@ -284,14 +284,14 @@ def send(self) -> None: """The default way to send notifications that respects Notification Settings.""" from sentry.notifications.notify import notify - with sentry_sdk.start_span(op="notification.send", description="get_participants"): + with sentry_sdk.start_span(op="notification.send", name="get_participants"): participants_by_provider = self.get_participants() if not participants_by_provider: return context = self.get_context() for provider, recipients in participants_by_provider.items(): - with sentry_sdk.start_span(op="notification.send", description=f"send_for_{provider}"): + with sentry_sdk.start_span(op="notification.send", name=f"send_for_{provider}"): safe_execute(notify, provider, self, recipients, context) diff --git a/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py b/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py index b14f3e33a6415..809e79a1ccf48 100644 --- a/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py +++ b/src/sentry/onboarding_tasks/backends/organization_onboarding_task.py @@ -2,7 +2,9 @@ from django.db.models import Q from django.utils import timezone +from sentry import analytics from sentry.models.options.organization_option import OrganizationOption +from sentry.models.organization import Organization from sentry.models.organizationonboardingtask import ( OnboardingTaskStatus, OrganizationOnboardingTask, @@ -45,5 +47,13 @@ def try_mark_onboarding_complete(self, organization_id): key="onboarding:complete", value={"updated": json.datetime_to_str(timezone.now())}, ) + + organization = 
Organization.objects.get(id=organization_id) + analytics.record( + "onboarding.complete", + user_id=organization.default_owner_id, + organization_id=organization_id, + referrer="onboarding_tasks", + ) except IntegrityError: pass diff --git a/src/sentry/options/defaults.py b/src/sentry/options/defaults.py index 0df79019e0ee9..18550131c90cd 100644 --- a/src/sentry/options/defaults.py +++ b/src/sentry/options/defaults.py @@ -424,30 +424,32 @@ flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, ) -# Replay Options -# -# Replay storage backend configuration (only applicable if the direct-storage driver is used) +# Flag Options register( - "replay.storage.backend", - default=None, + "flags:options-audit-log-is-enabled", + default=True, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, + type=Bool, ) register( - "replay.storage.options", - type=Dict, + "flags:options-audit-log-organization-id", default=None, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, + type=Int, ) -# Replay Analyzer service. + +# Replay Options +# +# Replay storage backend configuration (only applicable if the direct-storage driver is used) register( - "replay.analyzer_service_url", + "replay.storage.backend", default=None, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, ) register( - "organizations:session-replay-accessibility-issues-enabled", - type=Bool, - default=True, + "replay.storage.options", + type=Dict, + default=None, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, ) # Globally disables replay-video. @@ -473,6 +475,14 @@ flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, ) +# Dev Toolbar Options +register( + "devtoolbar.analytics.enabled", + type=Bool, + default=False, + flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE, +) + # Extract spans only from a random fraction of transactions. # @@ -502,9 +512,6 @@ register("slack.signing-secret", flags=FLAG_CREDENTIAL | FLAG_PRIORITIZE_DISK) -# Slack Middleware Parser -register("send-slack-response-from-control-silo", default=False, flags=FLAG_AUTOMATOR_MODIFIABLE) - # Codecov Integration register("codecov.client-secret", flags=FLAG_CREDENTIAL | FLAG_PRIORITIZE_DISK) @@ -581,6 +588,14 @@ register("vsts-limited.client-id", flags=FLAG_PRIORITIZE_DISK | FLAG_AUTOMATOR_MODIFIABLE) register("vsts-limited.client-secret", flags=FLAG_CREDENTIAL | FLAG_PRIORITIZE_DISK) +# Azure DevOps Integration Social Login Flow +register( + "vsts.social-auth-migration", + default=False, + type=Bool, + flags=FLAG_MODIFIABLE_BOOL | FLAG_AUTOMATOR_MODIFIABLE, +) + # PagerDuty Integration register("pagerduty.app-id", default="", flags=FLAG_AUTOMATOR_MODIFIABLE) @@ -1785,6 +1800,12 @@ default=3, flags=FLAG_AUTOMATOR_MODIFIABLE, ) +register( + "performance.traces.trace-explorer-skip-recent-seconds", + type=Int, + default=0, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) register( "performance.traces.span_query_minimum_spans", type=Int, @@ -1846,6 +1867,13 @@ default=[], flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, ) +# Used for the z-score when calculating the margin of error in performance +register( + "performance.extrapolation.confidence.z-score", + type=Float, + default=1.96, + flags=FLAG_ALLOW_EMPTY | FLAG_AUTOMATOR_MODIFIABLE, +) # Used for enabling flags in ST. Should be removed once Flagpole works in all STs. 
register("performance.use_metrics.enabled", default=False, flags=FLAG_AUTOMATOR_MODIFIABLE) @@ -2390,12 +2418,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -register( - "grouping.config_transition.killswitch_enabled", - type=Bool, - default=False, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) # Sample rate for double writing to experimental dsn register( @@ -2696,6 +2718,11 @@ default=10000, flags=FLAG_AUTOMATOR_MODIFIABLE, ) +register( + "celery_split_queue_task_rollout", + default={}, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) register( "grouping.grouphash_metadata.ingestion_writes_enabled", @@ -2715,13 +2742,6 @@ flags=FLAG_AUTOMATOR_MODIFIABLE, ) -register( - "ownership.munge_data_for_performance", - type=Bool, - default=False, - flags=FLAG_AUTOMATOR_MODIFIABLE, -) - # Restrict uptime issue creation for specific host provider identifiers. Items # in this list map to the `host_provider_id` column in the UptimeSubscription # table. @@ -2741,3 +2761,23 @@ default=False, flags=FLAG_AUTOMATOR_MODIFIABLE, ) + +register( + "celery_split_queue_legacy_mode", + default=["post_process_transactions"], + flags=FLAG_AUTOMATOR_MODIFIABLE, +) + +register( + "celery_split_queue_rollout", + default={"post_process_transactions": 1.0}, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) + +# Secret Scanning. Allows to temporarily disable signature verification. +register( + "secret-scanning.github.enable-signature-verification", + type=Bool, + default=True, + flags=FLAG_AUTOMATOR_MODIFIABLE, +) diff --git a/src/sentry/options/manager.py b/src/sentry/options/manager.py index f8d2b98df824b..aeaf12ff78594 100644 --- a/src/sentry/options/manager.py +++ b/src/sentry/options/manager.py @@ -298,11 +298,6 @@ def get(self, key: str, silent=False): if not (opt.flags & FLAG_NOSTORE): result = self.store.get(opt, silent=silent) if result is not None: - # HACK(mattrobenolt): SENTRY_URL_PREFIX must be kept in sync - # when reading values from the database. This should - # be replaced by a signal. 
- if key == "system.url-prefix": - settings.SENTRY_URL_PREFIX = result return result # Some values we don't want to allow them to be configured through diff --git a/src/sentry/organizations/services/organization/impl.py b/src/sentry/organizations/services/organization/impl.py index 2d6d44cd5f685..136ec177bad66 100644 --- a/src/sentry/organizations/services/organization/impl.py +++ b/src/sentry/organizations/services/organization/impl.py @@ -12,6 +12,7 @@ from sentry.api.serializers import serialize from sentry.backup.dependencies import merge_users_for_model_in_org from sentry.db.postgres.transactions import enforce_constraints +from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion from sentry.hybridcloud.models.outbox import ControlOutbox, outbox_context from sentry.hybridcloud.outbox.category import OutboxCategory, OutboxScope from sentry.hybridcloud.rpc import OptionValue, logger @@ -37,7 +38,6 @@ from sentry.models.rule import Rule, RuleActivity from sentry.models.rulesnooze import RuleSnooze from sentry.models.savedsearch import SavedSearch -from sentry.models.scheduledeletion import RegionScheduledDeletion from sentry.models.team import Team, TeamStatus from sentry.monitors.models import Monitor from sentry.organizations.services.organization import ( diff --git a/src/sentry/ownership/grammar.py b/src/sentry/ownership/grammar.py index c1fb8d32fff08..d31f3b11f1707 100644 --- a/src/sentry/ownership/grammar.py +++ b/src/sentry/ownership/grammar.py @@ -3,11 +3,12 @@ import re from collections import namedtuple from collections.abc import Callable, Iterable, Mapping, Sequence -from typing import Any, NamedTuple +from typing import TYPE_CHECKING, Any, NamedTuple from parsimonious.exceptions import ParseError from parsimonious.grammar import Grammar -from parsimonious.nodes import Node, NodeVisitor +from parsimonious.nodes import Node +from parsimonious.nodes import NodeVisitor as BaseNodeVisitor from rest_framework.serializers import ValidationError from sentry.eventstore.models import EventSubjectTemplateData @@ -18,10 +19,15 @@ from sentry.utils.codeowners import codeowners_match from sentry.utils.event_frames import find_stack_frames, get_sdk_name, munged_filename_and_frames from sentry.utils.glob import glob_match -from sentry.utils.safe import PathSearchable, get_path +from sentry.utils.safe import get_path __all__ = ("parse_rules", "dump_schema", "load_schema") +if TYPE_CHECKING: + NodeVisitor = BaseNodeVisitor[str] +else: + NodeVisitor = BaseNodeVisitor + VERSION = 1 URL = "url" @@ -90,12 +96,9 @@ def load(cls, data: Mapping[str, Any]) -> Rule: def test( self, data: Mapping[str, Any], - munged_data: tuple[Sequence[Mapping[str, Any]], Sequence[str]] | None, + munged_data: tuple[Sequence[Mapping[str, Any]], Sequence[str]], ) -> bool | Any: - if munged_data: - return self.matcher.test_with_munged(data, munged_data) - else: - return self.matcher.test(data) + return self.matcher.test(data, munged_data) class Matcher(namedtuple("Matcher", "type pattern")): @@ -124,7 +127,9 @@ def load(cls, data: Mapping[str, str]) -> Matcher: return cls(data["type"], data["pattern"]) @staticmethod - def munge_if_needed(data: PathSearchable) -> tuple[Sequence[Mapping[str, Any]], Sequence[str]]: + def munge_if_needed( + data: Mapping[str, Any] + ) -> tuple[Sequence[Mapping[str, Any]], Sequence[str]]: keys = ["filename", "abs_path"] platform = data.get("platform") sdk_name = get_sdk_name(data) @@ -137,13 +142,11 @@ def munge_if_needed(data: PathSearchable) -> 
tuple[Sequence[Mapping[str, Any]], return frames, keys - def test_with_munged( - self, data: PathSearchable, munged_data: tuple[Sequence[Mapping[str, Any]], Sequence[str]] + def test( + self, + data: Mapping[str, Any], + munged_data: tuple[Sequence[Mapping[str, Any]], Sequence[str]], ) -> bool: - """ - Temporary function to test pre-munging data performance in production. will remove - and combine with test if prod deployment goes well. - """ if self.type == URL: return self.test_url(data) elif self.type == PATH: @@ -164,31 +167,7 @@ def test_with_munged( ) return False - def test(self, data: PathSearchable) -> bool: - if self.type == URL: - return self.test_url(data) - elif self.type == PATH: - return self.test_frames(*self.munge_if_needed(data)) - elif self.type == MODULE: - return self.test_frames(find_stack_frames(data), ["module"]) - elif self.type.startswith("tags."): - return self.test_tag(data) - elif self.type == CODEOWNERS: - return self.test_frames( - *self.munge_if_needed(data), - # Codeowners has a slightly different syntax compared to issue owners - # As such we need to match it using gitignore logic. - # See syntax documentation here: - # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners - match_frame_value_func=lambda val, pattern: bool(codeowners_match(val, pattern)), - match_frame_func=lambda frame: frame.get("in_app") is not False, - ) - return False - - def test_url(self, data: PathSearchable) -> bool: - if not isinstance(data, Mapping): - return False - + def test_url(self, data: Mapping[str, Any]) -> bool: url = get_path(data, "request", "url") return url and bool(glob_match(url, self.pattern, ignorecase=True)) @@ -201,7 +180,7 @@ def test_frames( ), match_frame_func: Callable[[Mapping[str, Any]], bool] = lambda _: True, ) -> bool: - for frame in (f for f in frames if isinstance(f, Mapping)): + for frame in frames: if not match_frame_func(frame): continue @@ -215,7 +194,7 @@ def test_frames( return False - def test_tag(self, data: PathSearchable) -> bool: + def test_tag(self, data: Mapping[str, Any]) -> bool: tag = self.type[5:] # inspect the event-payload User interface first before checking tags.user @@ -296,7 +275,7 @@ def visit_matcher_tag(self, node: Node, children: Sequence[Any]) -> str: def visit_owners(self, node: Node, children: tuple[Any, Sequence[Owner]]) -> list[Owner]: _, owners = children - return owners + return list(owners) def visit_owner(self, node: Node, children: tuple[Node, bool, str]) -> Owner: _, is_team, pattern = children @@ -320,7 +299,7 @@ def visit_quoted_identifier(self, node: Node, children: Sequence[Any]) -> str: return str(node.text[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape")) def generic_visit(self, node: Node, children: Sequence[Any]) -> list[Node] | Node: - return children or node + return list(children) or node def parse_rules(data: str) -> Any: @@ -462,7 +441,7 @@ def convert_codeowners_syntax( return result -def resolve_actors(owners: Iterable[Owner], project_id: int) -> dict[Owner, Actor]: +def resolve_actors(owners: Iterable[Owner], project_id: int) -> dict[Owner, Actor | None]: """Convert a list of Owner objects into a dictionary of {Owner: Actor} pairs. 
Actors not identified are returned as None.""" @@ -563,8 +542,9 @@ def create_schema_from_issue_owners( try: rules = parse_rules(issue_owners) except ParseError as e: + rule_name = e.expr.name if e.expr else str(e.expr) raise ValidationError( - {"raw": f"Parse error: {e.expr.name} (line {e.line()}, column {e.column()})"} + {"raw": f"Parse error: {rule_name} (line {e.line()}, column {e.column()})"} ) schema = dump_schema(rules) diff --git a/src/sentry/plugins/__init__.py b/src/sentry/plugins/__init__.py index 1a7d079a76166..0632b0da1e3c0 100644 --- a/src/sentry/plugins/__init__.py +++ b/src/sentry/plugins/__init__.py @@ -6,4 +6,5 @@ "jira", "pagerduty", "opsgenie", + "phabricator", ) diff --git a/src/sentry/profiles/task.py b/src/sentry/profiles/task.py index f888cf2bda68d..0149b9159366d 100644 --- a/src/sentry/profiles/task.py +++ b/src/sentry/profiles/task.py @@ -187,7 +187,10 @@ def process_profile_task( if not project.flags.has_profiles: first_profile_received.send_robust(project=project, sender=Project) try: - _track_duration_outcome(profile=profile, project=project) + if quotas.backend.should_emit_profile_duration_outcome( + organization=organization, profile=profile + ): + _track_duration_outcome(profile=profile, project=project) except Exception as e: sentry_sdk.capture_exception(e) if profile.get("version") != "2": diff --git a/src/sentry/projectoptions/manager.py b/src/sentry/projectoptions/manager.py index 7256eab87904f..af5b3e572722e 100644 --- a/src/sentry/projectoptions/manager.py +++ b/src/sentry/projectoptions/manager.py @@ -15,8 +15,11 @@ def get_default(self, project=None, epoch=None): epoch = 1 else: epoch = project.get_option("sentry:option-epoch") or 1 + # Find where in the ordered epoch list the project's epoch would go idx = bisect.bisect(self._epoch_default_list, epoch) if idx > 0: + # Return the value corresponding to the highest epoch which doesn't exceed the + # project epoch return self.epoch_defaults[self._epoch_default_list[idx - 1]] return self.default diff --git a/src/sentry/projects/services/project/impl.py b/src/sentry/projects/services/project/impl.py index 8ca8c2a44af67..b5d73ae6e2815 100644 --- a/src/sentry/projects/services/project/impl.py +++ b/src/sentry/projects/services/project/impl.py @@ -35,6 +35,14 @@ def get_by_id(self, *, organization_id: int, id: int) -> RpcProject | None: return serialize_project(project) return None + def get_by_slug(self, *, organization_id: int, slug: str) -> RpcProject | None: + project: Project | None = Project.objects.filter( + slug=slug, organization=organization_id + ).first() + if project: + return serialize_project(project) + return None + def get_many_by_organizations( self, *, diff --git a/src/sentry/projects/services/project/service.py b/src/sentry/projects/services/project/service.py index a5d2928d1ca5b..a4626db47b8f6 100644 --- a/src/sentry/projects/services/project/service.py +++ b/src/sentry/projects/services/project/service.py @@ -59,6 +59,11 @@ def delete_option(self, *, project: RpcProject, key: str) -> None: def get_by_id(self, *, organization_id: int, id: int) -> RpcProject | None: pass + @regional_rpc_method(resolve=ByOrganizationId()) + @abstractmethod + def get_by_slug(self, *, organization_id: int, slug: str) -> RpcProject | None: + pass + @regional_rpc_method(resolve=ByOrganizationId()) @abstractmethod def serialize_many( diff --git a/src/sentry/queue/routers.py b/src/sentry/queue/routers.py new file mode 100644 index 0000000000000..b1ad6081ec9f5 --- /dev/null +++ b/src/sentry/queue/routers.py @@ 
-0,0 +1,140 @@
+import logging
+import random
+from collections.abc import Iterator, Mapping, Sequence
+from itertools import cycle
+from typing import Any, NamedTuple
+
+from django.conf import settings
+
+from sentry import options
+from sentry.celery import app
+from sentry.conf.types.celery import SplitQueueSize
+from sentry.utils.celery import build_queue_names
+
+logger = logging.getLogger(__name__)
+
+
+def _get_known_queues() -> set[str]:
+    return {c_queue.name for c_queue in app.conf.CELERY_QUEUES}
+
+
+def _validate_destinations(destinations: Sequence[str]) -> None:
+    for dest in destinations:
+        assert dest in _get_known_queues(), f"Queue {dest} in split queue config is not declared."
+
+
+class TaskRoute(NamedTuple):
+    default_queue: str
+    queues: Iterator[str]
+
+
+def _build_destination_names(default_queue: str, queue_size_conf: SplitQueueSize) -> Sequence[str]:
+    """
+    Validates the configuration and builds the list of queues to cycle through.
+
+    If no valid configuration is provided, it returns an empty sequence.
+    It is up to the call site to decide how to route messages in that case.
+    """
+
+    known_queues = _get_known_queues()
+
+    assert (
+        default_queue in known_queues
+    ), f"Queue {default_queue} in split queue config is not declared."
+
+    assert queue_size_conf["in_use"] <= queue_size_conf["total"]
+    if queue_size_conf["in_use"] >= 2:
+        destinations = build_queue_names(default_queue, queue_size_conf["in_use"])
+        _validate_destinations(destinations)
+        return destinations
+    else:
+        logger.error(
+            "Invalid configuration for queue %s: in_use is not greater than 1 (%d). Falling back to the source queue",
+            default_queue,
+            queue_size_conf["in_use"],
+        )
+        return []
+
+
+class SplitQueueTaskRouter:
+    """
+    Routes tasks to split queues.
+
+    As with `SplitQueueRouter`, this is meant to spread the load of a queue
+    across a number of split queues.
+
+    The main difference is that this is a router used directly by Celery.
+    It is configured as the main router via the `CELERY_ROUTES` setting.
+    Every time a task that does not define a queue is scheduled, this router
+    is used to map the task to a queue.
+
+    Split queues can be rolled out individually via options.
+    """
+
+    def __init__(self) -> None:
+        self.__task_routers = {}
+        for task, dest_config in settings.CELERY_SPLIT_QUEUE_TASK_ROUTES.items():
+            default_destination = dest_config["default_queue"]
+            destinations: Sequence[str] = []
+            if "queues_config" in dest_config:
+                destinations = _build_destination_names(
+                    dest_config["default_queue"], dest_config["queues_config"]
+                )
+
+            if not destinations:
+                destinations = [dest_config["default_queue"]]
+
+            # It is critical to add a TaskRoute even if the configuration is
+            # invalid or the setting does not contain a queues spec. In that
+            # case the task does not define the queue name itself, so the
+            # router has to provide the default one.
+            self.__task_routers[task] = TaskRoute(default_destination, cycle(destinations))
+
+    def route_for_task(self, task: str, *args: Any, **kwargs: Any) -> Mapping[str, str] | None:
+        route = self.__task_routers.get(task)
+
+        if route is None:
+            return None
+
+        rollout_rate = options.get("celery_split_queue_task_rollout").get(task, 0.0)
+        if random.random() >= rollout_rate:
+            return {"queue": route.default_queue}
+
+        return {"queue": next(route.queues)}
+
+
+class SplitQueueRouter:
+    """
+    Returns the split queue to use for a Celery queue.
+    Split queues allow us to spread the load of a queue across multiple ones.
+
+    This takes a queue name as input and returns the split queue to use. It
+    is meant to be used by the code that schedules the task.
+    Each split queue can be individually rolled out via options.
+
+    WARNING: Do not forget to configure your workers to listen to the
+    queues appropriately before you start routing messages.
+    """
+
+    def __init__(self) -> None:
+        self.__queue_routers = {}
+        for source, dest_config in settings.CELERY_SPLIT_QUEUE_ROUTES.items():
+            destinations = _build_destination_names(source, dest_config)
+            if destinations:
+                self.__queue_routers[source] = cycle(destinations)
+
+    def route_for_queue(self, queue: str) -> str:
+        rollout_rate = options.get("celery_split_queue_rollout").get(queue, 0.0)
+        if random.random() >= rollout_rate:
+            return queue
+
+        if queue in set(options.get("celery_split_queue_legacy_mode")):
+            # Use the legacy route. This mode requires the routing logic to be
+            # defined inside the settings file.
+            return settings.SENTRY_POST_PROCESS_QUEUE_SPLIT_ROUTER.get(queue, lambda: queue)()
+        else:
+            router = self.__queue_routers.get(queue)
+            if router is not None:
+                return next(router)
+            else:
+                return queue
diff --git a/src/sentry/quotas/base.py b/src/sentry/quotas/base.py
index 4b00ff1496582..376a69acfa3a6 100644
--- a/src/sentry/quotas/base.py
+++ b/src/sentry/quotas/base.py
@@ -15,9 +15,11 @@
 from sentry.utils.services import Service

 if TYPE_CHECKING:
+    from sentry.models.organization import Organization
     from sentry.models.project import Project
     from sentry.models.projectkey import ProjectKey
     from sentry.monitors.models import Monitor
+    from sentry.profiles.task import Profile


 @unique
@@ -653,3 +655,11 @@ def update_monitor_slug(self, previous_slug: str, new_slug: str, project_id: int
         """
         Updates a monitor seat assignment's slug.
         """
+
+    def should_emit_profile_duration_outcome(
+        self, organization: Organization, profile: Profile
+    ) -> bool:
+        """
+        Determines if the profile duration outcome should be emitted.
+ """ + return True diff --git a/src/sentry/receivers/features.py b/src/sentry/receivers/features.py index c4183c0838991..7eb48ad997171 100644 --- a/src/sentry/receivers/features.py +++ b/src/sentry/receivers/features.py @@ -306,6 +306,7 @@ def record_alert_rule_created( alert_rule_ui_component=None, duplicate_rule=None, wizard_v3=None, + query_type=None, **kwargs, ): # NOTE: This intentionally does not fire for the default issue alert rule @@ -334,6 +335,7 @@ def record_alert_rule_created( alert_rule_ui_component=alert_rule_ui_component, duplicate_rule=duplicate_rule, wizard_v3=wizard_v3, + query_type=query_type, ) diff --git a/src/sentry/receivers/outbox/control.py b/src/sentry/receivers/outbox/control.py index 46931fe263dbc..a9b7d9c3f7db4 100644 --- a/src/sentry/receivers/outbox/control.py +++ b/src/sentry/receivers/outbox/control.py @@ -24,7 +24,7 @@ from sentry.receivers.outbox import maybe_process_tombstone from sentry.relocation.services.relocation_export.service import region_relocation_export_service from sentry.sentry_apps.models.sentry_app import SentryApp -from sentry.tasks.sentry_apps import clear_region_cache +from sentry.sentry_apps.tasks.sentry_apps import clear_region_cache logger = logging.getLogger(__name__) diff --git a/src/sentry/receivers/sentry_apps.py b/src/sentry/receivers/sentry_apps.py index 8bb125988d960..5a46a0878eeea 100644 --- a/src/sentry/receivers/sentry_apps.py +++ b/src/sentry/receivers/sentry_apps.py @@ -12,6 +12,7 @@ from sentry.models.team import Team from sentry.sentry_apps.logic import consolidate_events from sentry.sentry_apps.services.app import RpcSentryAppInstallation, app_service +from sentry.sentry_apps.tasks.sentry_apps import build_comment_webhook, workflow_notification from sentry.signals import ( comment_created, comment_deleted, @@ -22,7 +23,6 @@ issue_resolved, issue_unresolved, ) -from sentry.tasks.sentry_apps import build_comment_webhook, workflow_notification from sentry.users.models.user import User from sentry.users.services.user import RpcUser diff --git a/src/sentry/relay/config/__init__.py b/src/sentry/relay/config/__init__.py index ed7a4c6cce0fe..bd7c9a003de89 100644 --- a/src/sentry/relay/config/__init__.py +++ b/src/sentry/relay/config/__init__.py @@ -60,9 +60,6 @@ "organizations:session-replay-video-disabled", "organizations:session-replay", "organizations:standalone-span-ingestion", - "organizations:transaction-name-mark-scrubbed-as-sanitized", - "organizations:transaction-name-normalize", - "organizations:user-feedback-ingest", "projects:discard-transaction", "projects:profiling-ingest-unsampled-profiles", "projects:span-metrics-extraction", diff --git a/src/sentry/relay/config/metric_extraction.py b/src/sentry/relay/config/metric_extraction.py index fa0b987964a8f..168f67824c316 100644 --- a/src/sentry/relay/config/metric_extraction.py +++ b/src/sentry/relay/config/metric_extraction.py @@ -258,7 +258,10 @@ def _get_widget_metric_specs( widget_queries = ( DashboardWidgetQuery.objects.filter( widget__dashboard__organization=project.organization, - widget__widget_type=DashboardWidgetTypes.DISCOVER, + widget__widget_type__in=[ + DashboardWidgetTypes.DISCOVER, + DashboardWidgetTypes.TRANSACTION_LIKE, + ], ) .prefetch_related("dashboardwidgetqueryondemand_set", "widget") .order_by("-widget__dashboard__last_visited", "widget__order") diff --git a/src/sentry/relay/globalconfig.py b/src/sentry/relay/globalconfig.py index d9d1e81c8a004..6e58750ec3bdf 100644 --- a/src/sentry/relay/globalconfig.py +++ b/src/sentry/relay/globalconfig.py 
@@ -7,7 +7,7 @@ MetricExtractionGroups, global_metric_extraction_groups, ) -from sentry.relay.types import GenericFiltersConfig +from sentry.relay.types import GenericFiltersConfig, RuleCondition from sentry.utils import metrics # List of options to include in the global config. @@ -28,11 +28,21 @@ ] +class SpanOpDefaultRule(TypedDict): + condition: RuleCondition + value: str + + +class SpanOpDefaults(TypedDict): + rules: list[SpanOpDefaultRule] + + class GlobalConfig(TypedDict, total=False): measurements: MeasurementsConfig aiModelCosts: AIModelCosts metricExtraction: MetricExtractionGroups filters: GenericFiltersConfig | None + spanOpDefaults: SpanOpDefaults options: dict[str, Any] @@ -43,6 +53,25 @@ def get_global_generic_filters() -> GenericFiltersConfig: } +def span_op_defaults() -> SpanOpDefaults: + return { + "rules": [ + { + # If span.data[messaging.system] is set, use span.op "message": + "condition": { + "op": "not", + "inner": { + "op": "eq", + "name": "span.data.messaging\\.system", + "value": None, + }, + }, + "value": "message", + } + ] + } + + @metrics.wraps("relay.globalconfig.get") def get_global_config(): """Return the global configuration for Relay.""" @@ -51,6 +80,7 @@ def get_global_config(): "measurements": get_measurements_config(), "aiModelCosts": ai_model_costs_config(), "metricExtraction": global_metric_extraction_groups(), + "spanOpDefaults": span_op_defaults(), } filters = get_global_generic_filters() diff --git a/src/sentry/remote_config/README.md b/src/sentry/remote_config/README.md deleted file mode 100644 index db945aa508a00..0000000000000 --- a/src/sentry/remote_config/README.md +++ /dev/null @@ -1 +0,0 @@ -# Remote Configuration Product diff --git a/src/sentry/remote_config/docs/api.md b/src/sentry/remote_config/docs/api.md deleted file mode 100644 index fc05ee85c44d6..0000000000000 --- a/src/sentry/remote_config/docs/api.md +++ /dev/null @@ -1,157 +0,0 @@ -# Configurations API - -Host: https://sentry.io/api/0 - -**Authors.** - -@cmanallen - -## Configuration [/projects///configuration/] - -### Get Configuration [GET] - -Retrieve the project's configuration. - -**Attributes** - -| Column | Type | Description | -| -------- | -------------- | --------------------------------------------- | -| features | array[Feature] | Custom, user-defined configuration container. | -| options | Option | Sentry SDK options container. | - -**Feature Object** - -| Field | Type | Description | -| ----- | ------ | ---------------------------------- | -| key | string | The name used to lookup a feature. | -| value | any | A JSON value. | - -**Option Object** - -| Field | Type | Description | -| ------------------ | ----- | --------------------------------------------------- | -| sample_rate | float | Error sample rate. A numeric value between 0 and 1. | -| traces_sample_rate | float | Trace sample rate. A numeric value between 0 and 1. | - -**If an existing configuration exists** - -- Response 200 - - ```json - { - "data": { - "features": [ - { - "key": "hello", - "value": "world" - }, - { - "key": "has_access", - "value": true - } - ], - "options": { - "sample_rate": 1.0, - "traces_sample_rate": 0.5 - } - } - } - ``` - -**If no existing configuration exists** - -- Response 404 - -### Set Configuration [POST] - -Set the project's configuration. 
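As an aside on the `spanOpDefaults` rule added to `src/sentry/relay/globalconfig.py` above: the `not`-wrapped `eq None` condition reads as "the span has a `messaging.system` attribute". A minimal sketch of that evaluation logic, using a simplified evaluator rather than Relay's real one (the actual matching happens inside Relay):

```python
# Minimal sketch of evaluating the spanOpDefaults rule condition above.
# Illustrative only; this is not Relay's implementation.

def evaluate(condition: dict, span_data: dict) -> bool:
    op = condition["op"]
    if op == "not":
        return not evaluate(condition["inner"], span_data)
    if op == "eq":
        # "span.data.messaging\\.system" refers to span.data["messaging.system"];
        # the backslash escapes the literal dot inside the attribute name.
        field = condition["name"].removeprefix("span.data.").replace("\\.", ".")
        return span_data.get(field) == condition["value"]
    raise NotImplementedError(op)

rule = {
    "op": "not",
    "inner": {"op": "eq", "name": "span.data.messaging\\.system", "value": None},
}

assert evaluate(rule, {"messaging.system": "kafka"})  # defaults span.op to "message"
assert not evaluate(rule, {})                         # span keeps its original op
```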
- -- Request - - ```json - { - "data": { - "features": [ - { - "key": "hello", - "value": "world" - }, - { - "key": "has_access", - "value": true - } - ], - "options": { - "sample_rate": 1.0, - "traces_sample_rate": 0.5 - } - } - } - ``` - -- Response 201 - - ```json - { - "data": { - "features": [ - { - "key": "hello", - "value": "world" - }, - { - "key": "has_access", - "value": true - } - ], - "options": { - "sample_rate": 1.0, - "traces_sample_rate": 0.5 - } - } - } - ``` - -### Delete Configuration [DELETE] - -Delete the project's configuration. - -- Response 204 - -## Configuration Proxy [/remote-config/projects//] - -Temporary configuration proxy resource. - -### Get Configuration [GET] - -Fetch a project's configuration. Responses should be proxied exactly to the SDK. - -- Response 200 - - - Headers - - Cache-Control: public, max-age=3600 - Content-Type: application/json - ETag: a7966bf58e23583c9a5a4059383ff850 - - - Body - - ```json - { - "features": [ - { - "key": "hello", - "value": "world" - }, - { - "key": "has_access", - "value": true - } - ], - "options": { - "sample_rate": 1.0, - "traces_sample_rate": 0.5 - }, - "version": 1 - } - ``` diff --git a/src/sentry/remote_config/docs/protocol.md b/src/sentry/remote_config/docs/protocol.md deleted file mode 100644 index 30885911c3167..0000000000000 --- a/src/sentry/remote_config/docs/protocol.md +++ /dev/null @@ -1,106 +0,0 @@ -# Remote Configuration Protocol - -Host: https://o1300299.ingest.us.sentry.io - -**Authors.** - -@cmanallen - -## Configuration [/api//configuration/] - -### Get Configuration [GET] - -Retrieve a project's configuration. - -**Attributes** - -| Field | Type | Description | -| -------- | -------------- | --------------------------------------------- | -| features | array[Feature] | Custom, user-defined configuration container. | -| options | Option | Sentry SDK options container. | -| version | number | The version of the protocol. | - -**Feature Object** - -| Field | Type | Description | -| ----- | ------ | ---------------------------------- | -| key | string | The name used to lookup a feature. | -| value | any | A JSON value. | - -**Option Object** - -| Field | Type | Description | -| ------------------ | ----- | ------------------ | -| sample_rate | float | Error sample rate. | -| traces_sample_rate | float | Trace sample rate. | - -**Server ETag Matches** - -If the server's ETag matches the request's a 304 (NOT MODIFIED) response is returned. - -- Request - - - Headers - - Accept: application/json - If-None-Match: 8832040536272351350 - -- Response 304 - - - Headers - - Cache-Control: public, max-age=60 - Content-Type: application/json - ETag: 8832040536272351350 - -**Server ETag Does Not Match or If-None-Match Omitted** - -If the server's ETag does not match the request's a 200 response is returned. 
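For illustration, the conditional-request flow this (now-removed) protocol describes is the standard ETag exchange; a hypothetical SDK-side fetch using `requests` might look like the sketch below (the URL and helper name are placeholders, not part of the protocol):

```python
# Hypothetical sketch of the If-None-Match flow from the removed protocol doc.
import requests

def fetch_config(
    url: str, cached_body: dict | None, cached_etag: str | None
) -> tuple[dict | None, str | None]:
    headers = {"Accept": "application/json"}
    if cached_etag is not None:
        headers["If-None-Match"] = cached_etag
    response = requests.get(url, headers=headers)
    if response.status_code == 304:
        # Server ETag matched the one we sent: keep the cached copy.
        return cached_body, cached_etag
    if response.status_code == 404:
        # No configuration exists for the project.
        return None, None
    response.raise_for_status()
    return response.json(), response.headers.get("ETag")
```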
- -- Request - - - Headers - - Accept: application/json - If-None-Match: ABC - -- Response 200 - - - Headers - - Cache-Control: public, max-age=60 - Content-Type: application/json - ETag: 8832040536272351350 - - - Body - - ```json - { - "features": [ - { - "key": "hello", - "value": "world" - }, - { - "key": "has_access", - "value": true - } - ], - "options": { - "sample_rate": 1.0, - "traces_sample_rate": 0.5 - }, - "version": 1 - } - ``` - -**No Configuration Exists for the Project** - -- Request - - - Headers - - Accept: application/json - If-None-Match: ABC - -- Response 404 diff --git a/src/sentry/remote_config/endpoints.py b/src/sentry/remote_config/endpoints.py deleted file mode 100644 index e2d8fe8c29730..0000000000000 --- a/src/sentry/remote_config/endpoints.py +++ /dev/null @@ -1,152 +0,0 @@ -import hashlib - -from django.contrib.auth.models import AnonymousUser -from rest_framework import serializers -from rest_framework.authentication import BasicAuthentication -from rest_framework.request import Request -from rest_framework.response import Response -from rest_framework.serializers import Serializer - -from sentry import features -from sentry.api.api_owners import ApiOwner -from sentry.api.api_publish_status import ApiPublishStatus -from sentry.api.authentication import AuthenticationSiloLimit -from sentry.api.base import Endpoint, region_silo_endpoint -from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission -from sentry.api.permissions import RelayPermission -from sentry.models.project import Project -from sentry.remote_config.storage import make_api_backend, make_configuration_backend -from sentry.silo.base import SiloMode -from sentry.utils import json, metrics - - -class OptionsValidator(Serializer): - sample_rate = serializers.FloatField(max_value=1.0, min_value=0, required=True) - traces_sample_rate = serializers.FloatField(max_value=1.0, min_value=0, required=True) - - -class FeatureValidator(Serializer): - key = serializers.CharField(required=True) - value = serializers.JSONField(required=True, allow_null=True) - - -class ConfigurationValidator(Serializer): - id = serializers.UUIDField(read_only=True) - features: serializers.ListSerializer = serializers.ListSerializer( - child=FeatureValidator(), required=True - ) - options = OptionsValidator(required=True) - - -class ConfigurationContainerValidator(Serializer): - data = ConfigurationValidator(required=True) # type: ignore[assignment] - - -@region_silo_endpoint -class ProjectConfigurationEndpoint(ProjectEndpoint): - owner = ApiOwner.REMOTE_CONFIG - permission_classes = (ProjectEventPermission,) - publish_status = { - "GET": ApiPublishStatus.EXPERIMENTAL, - "POST": ApiPublishStatus.EXPERIMENTAL, - "DELETE": ApiPublishStatus.EXPERIMENTAL, - } - - def get(self, request: Request, project: Project) -> Response: - """Get remote configuration from project options.""" - if not features.has( - "organizations:remote-config", project.organization, actor=request.user - ): - return Response("Disabled", status=404) - - remote_config, source = make_api_backend(project).get() - if remote_config is None: - return Response("Not found.", status=404) - - return Response( - {"data": remote_config}, - status=200, - headers={"X-Sentry-Data-Source": source}, - ) - - def post(self, request: Request, project: Project) -> Response: - """Set remote configuration in project options.""" - if not features.has( - "organizations:remote-config", project.organization, actor=request.user - ): - return Response("Disabled", status=404) 
- - validator = ConfigurationContainerValidator(data=request.data) - if not validator.is_valid(): - return self.respond(validator.errors, status=400) - - result = validator.validated_data["data"] - - make_api_backend(project).set(result) - metrics.incr("remote_config.configuration.write") - return Response({"data": result}, status=201) - - def delete(self, request: Request, project: Project) -> Response: - """Delete remote configuration from project options.""" - if not features.has( - "organizations:remote-config", project.organization, actor=request.user - ): - return Response("Disabled", status=404) - - make_api_backend(project).pop() - metrics.incr("remote_config.configuration.delete") - return Response("", status=204) - - -@AuthenticationSiloLimit(SiloMode.REGION) -class RelayAuthentication(BasicAuthentication): - """Same as default Relay authentication except without body signing.""" - - def authenticate(self, request: Request): - return (AnonymousUser(), None) - - -class RemoteConfigRelayPermission(RelayPermission): - def has_permission(self, request: Request, view: object) -> bool: - # Relay has permission to do everything! Except the only thing we expose is a simple - # read endpoint full of public data... - return True - - -@region_silo_endpoint -class ProjectConfigurationProxyEndpoint(Endpoint): - publish_status = { - "GET": ApiPublishStatus.EXPERIMENTAL, - } - owner = ApiOwner.REMOTE_CONFIG - authentication_classes = (RelayAuthentication,) - permission_classes = (RemoteConfigRelayPermission,) - enforce_rate_limit = False - - def get(self, request: Request, project_id: int) -> Response: - metrics.incr("remote_config.configuration.requested") - - project = Project.objects.select_related("organization").get(pk=project_id) - if not features.has("organizations:remote-config", project.organization, actor=None): - metrics.incr("remote_config.configuration.flag_disabled") - return Response("Disabled", status=404) - - result, source = make_configuration_backend(project).get() - if result is None: - metrics.incr("remote_config.configuration.not_found") - return Response("Not found", status=404) - - result_str = json.dumps(result) - metrics.incr("remote_config.configuration.returned") - metrics.distribution("remote_config.configuration.size", value=len(result_str)) - - # Emulating cache headers just because. 
- return Response( - result, - status=200, - headers={ - "Cache-Control": "public, max-age=3600", - "ETag": hashlib.sha1(result_str.encode()).hexdigest(), - "X-Sentry-Data-Source": source, - }, - ) diff --git a/src/sentry/remote_config/storage.py b/src/sentry/remote_config/storage.py deleted file mode 100644 index 86a74da327445..0000000000000 --- a/src/sentry/remote_config/storage.py +++ /dev/null @@ -1,162 +0,0 @@ -from io import BytesIO -from typing import TypedDict - -from sentry import options -from sentry.cache import default_cache -from sentry.models.files.utils import get_storage -from sentry.models.project import Project -from sentry.utils import json, metrics - -JSONValue = str | int | float | bool | None | list["JSONValue"] | dict[str, "JSONValue"] - - -class Options(TypedDict): - sample_rate: float - traces_sample_rate: float - - -class Feature(TypedDict): - key: str - value: JSONValue - - -class StorageFormat(TypedDict): - features: list[Feature] - options: Options - version: int - - -class APIFormat(TypedDict): - features: list[Feature] - options: Options - - -class ConfigurationCache: - def __init__(self, key: str) -> None: - self.key = key - - def get(self) -> StorageFormat | None: - cache_result = default_cache.get(self.key) - - if cache_result is None: - metrics.incr("remote_config.configuration.cache_miss") - else: - metrics.incr("remote_config.configuration.cache_hit") - - return cache_result - - def set(self, value: StorageFormat) -> None: - default_cache.set(self.key, value=value, timeout=None) - - def pop(self) -> None: - try: - default_cache.delete(self.key) - except Exception: - pass - - -class ConfigurationStorage: - def __init__(self, key: str) -> None: - self.key = key - - @property - def storage(self): - return get_storage(self._make_storage_config()) - - def get(self) -> StorageFormat | None: - try: - blob = self.storage.open(self.key) - result = blob.read() - blob.close() - except Exception: - return None - - if result is None: - return None - return json.loads(result) - - def set(self, value: StorageFormat) -> None: - self.storage.save(self.key, BytesIO(json.dumps(value).encode())) - - def pop(self) -> None: - try: - self.storage.delete(self.key) - except Exception: - return None - - def _make_storage_config(self) -> dict | None: - backend = options.get("configurations.storage.backend") - if backend: - return { - "backend": backend, - "options": options.get("configurations.storage.options"), - } - else: - return None - - -class ConfigurationBackend: - def __init__(self, project: Project) -> None: - self.project = project - self.key = f"configurations/{self.project.id}/production" - - self.cache = ConfigurationCache(self.key) - self.storage = ConfigurationStorage(self.key) - - def get(self) -> tuple[StorageFormat | None, str]: - cache_result = self.cache.get() - if cache_result is not None: - return (cache_result, "cache") - - storage_result = self.storage.get() - if storage_result: - self.cache.set(storage_result) - - return (storage_result, "store") - - def set(self, value: StorageFormat) -> None: - self.storage.set(value) - self.cache.set(value) - - def pop(self) -> None: - self.cache.pop() - self.storage.pop() - - -class APIBackendDecorator: - def __init__(self, backend: ConfigurationBackend) -> None: - self.driver = backend - - def get(self) -> tuple[APIFormat | None, str]: - result, source = self.driver.get() - return self._deserialize(result), source - - def set(self, value: APIFormat) -> None: - self.driver.set(self._serialize(value)) - - def pop(self) 
-> None: - self.driver.pop() - - def _deserialize(self, result: StorageFormat | None) -> APIFormat | None: - if result is None: - return None - - return { - "features": result["features"], - "options": result["options"], - } - - def _serialize(self, result: APIFormat) -> StorageFormat: - return { - "features": result["features"], - "options": result["options"], - "version": 1, - } - - -def make_configuration_backend(project: Project): - return ConfigurationBackend(project) - - -def make_api_backend(project: Project): - return APIBackendDecorator(make_configuration_backend(project)) diff --git a/src/sentry/remote_subscriptions/consumers/result_consumer.py b/src/sentry/remote_subscriptions/consumers/result_consumer.py index bffccf80e371e..6e7ddcdf99b6c 100644 --- a/src/sentry/remote_subscriptions/consumers/result_consumer.py +++ b/src/sentry/remote_subscriptions/consumers/result_consumer.py @@ -9,7 +9,7 @@ from arroyo.processing.strategies.abstract import ProcessingStrategy, ProcessingStrategyFactory from arroyo.processing.strategies.commit import CommitOffsets from arroyo.processing.strategies.run_task import RunTask -from arroyo.types import BrokerValue, Commit, FilteredPayload, Message, Partition +from arroyo.types import Commit, FilteredPayload, Message, Partition from sentry.conf.types.kafka_definition import Topic, get_topic_codec from sentry.remote_subscriptions.models import BaseRemoteSubscription @@ -23,30 +23,12 @@ class ResultProcessor(abc.ABC, Generic[T, U]): - def __init__(self): - self.codec = get_topic_codec(self.topic_for_codec) - @property @abc.abstractmethod def subscription_model(self) -> type[U]: pass - @property - @abc.abstractmethod - def topic_for_codec(self) -> Topic: - pass - - def __call__(self, message: Message[KafkaPayload | FilteredPayload]): - assert not isinstance(message.payload, FilteredPayload) - assert isinstance(message.value, BrokerValue) - - try: - result = self.codec.decode(message.payload.value) - except Exception: - logger.exception( - "Failed to decode message payload", - extra={"payload": message.payload.value}, - ) + def __call__(self, result: T): try: # TODO: Handle subscription not existing - we should remove the subscription from # the remote system in that case. 
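The upshot of this `result_consumer.py` refactor is that Kafka decoding moves out of `ResultProcessor.__call__` and into the strategy factory, so processors only ever see decoded results. A sketch of a concrete pairing under the new split, mirroring the uptime consumer changes near the end of this diff (the last two import paths are assumptions for illustration):

```python
# Sketch of a concrete processor/factory pairing under the new split.
from sentry.conf.types.kafka_definition import Topic
from sentry.remote_subscriptions.consumers.result_consumer import (
    ResultProcessor,
    ResultsStrategyFactory,
)
from sentry.uptime.models import UptimeSubscription  # assumed import path
from sentry_kafka_schemas.schema_types.uptime_results_v1 import CheckResult  # assumed

class UptimeResultProcessor(ResultProcessor[CheckResult, UptimeSubscription]):
    # Domain-only concerns live here now; no codec or raw Kafka message handling.
    subscription_model = UptimeSubscription

    def get_subscription_id(self, result: CheckResult) -> str:
        return result["subscription_id"]

class UptimeResultsStrategyFactory(ResultsStrategyFactory[CheckResult, UptimeSubscription]):
    # The factory owns the codec: it decodes payloads and hands results over.
    result_processor_cls = UptimeResultProcessor
    topic_for_codec = Topic.UPTIME_RESULTS
```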
@@ -74,18 +56,43 @@ def handle_result(self, subscription: U | None, result: T): class ResultsStrategyFactory(ProcessingStrategyFactory[KafkaPayload], Generic[T, U]): def __init__(self) -> None: self.result_processor = self.result_processor_cls() + self.codec = get_topic_codec(self.topic_for_codec) + + @property + @abc.abstractmethod + def topic_for_codec(self) -> Topic: + pass @property @abc.abstractmethod def result_processor_cls(self) -> type[ResultProcessor[T, U]]: pass + def decode_payload(self, payload: KafkaPayload | FilteredPayload) -> T | None: + assert not isinstance(payload, FilteredPayload) + try: + return self.codec.decode(payload.value) + except Exception: + logger.exception( + "Failed to decode message payload", + extra={"payload": payload.value}, + ) + return None + + def process_single(self, message: Message[KafkaPayload | FilteredPayload]): + result = self.decode_payload(message.payload) + if result is not None: + self.result_processor(result) + + def create_serial_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: + return RunTask( + function=self.process_single, + next_step=CommitOffsets(commit), + ) + def create_with_partitions( self, commit: Commit, partitions: Mapping[Partition, int], ) -> ProcessingStrategy[KafkaPayload]: - return RunTask( - function=self.result_processor, - next_step=CommitOffsets(commit), - ) + return self.create_serial_worker(commit) diff --git a/src/sentry/replays/blueprints/api.md b/src/sentry/replays/blueprints/api.md index 1e58fa793cca5..3df4cbaa5396c 100644 --- a/src/sentry/replays/blueprints/api.md +++ b/src/sentry/replays/blueprints/api.md @@ -244,98 +244,6 @@ Deletes a replay instance. - Response 204 -## Replay Accessibility Issues [/projects///replays//accessibility-issues] - -This resource does not accept any URI parameters and is not paginated. Responses are ingested whole. - -### Fetch Replay Accessibility Issues [GET] - -- Parameters - - - timestamp (optional, number) - A UNIX timestamp (seconds since epoch) marking the last moment to render a replay for accessibility analysis. - -Retrieve a collection of accessibility issues. - -**Attributes** - -Issue Type: - -| Column | Type | Description | -| --------- | ---------------------- | --------------------------------------------------- | -| elements | array[IssueElement] | Array of elements matching the accessibility issue. | -| help | string | - | -| help_url | string | - | -| id | string | - | -| impact | Optional[enum[string]] | One of: 'minor', 'moderate', 'serious', 'critical' | -| timestamp | number | - | - -IssueElement Type: - -| Column | Type | Description | -| ------------ | ------------------------------ | --------------------------------------------------- | -| alternatives | array[IssueElementAlternative] | Array of solutions which could solve the problem. | -| element | string | Array of elements matching the accessibility issue. | -| target | array[string] | Array of elements matching the accessibility issue. | - -IssueElementAlternative Type: - -| Column | Type | Description | -| ------- | ------ | ------------------------------------- | -| id | string | String ID of the accessibility issue. | -| message | string | Message explaining the problem. 
|
-
-- Response 200
-
-  - Headers
-
-    X-Hits=1
-
-  - Body
-
-  ```json
-  {
-    "data": [
-      [
-        {
-          "elements": [
-            {
-              "alternatives": [
-                {
-                  "id": "button-has-visible-text",
-                  "message": "Element does not have inner text that is visible to screen readers"
-                },
-                {
-                  "id": "aria-label",
-                  "message": "aria-label attribute does not exist or is empty"
-                },
-                {
-                  "id": "aria-labelledby",
-                  "message": "aria-labelledby attribute does not exist, references elements that do not exist or references elements that are empty"
-                },
-                {
-                  "id": "non-empty-title",
-                  "message": "Element has no title attribute"
-                },
-                {
-                  "id": "presentational-role",
-                  "message": "Element's default semantics were not overridden with role=\"none\" or role=\"presentation\""
-                }
-              ],
-              "element": "
+
+
+  {% script %}
+
-  {% endscript %}
+
+{% comment %}
+No need to close `body`. If we do then middleware will inject some extra markup
+we don't need. Browsers can figure out when it's missing and deal with it.
+{% endcomment %}
diff --git a/src/sentry/templates/sentry/toolbar/login-success.html b/src/sentry/templates/sentry/toolbar/login-success.html
index 10ec6ee3ac917..90529a776a2a2 100644
--- a/src/sentry/templates/sentry/toolbar/login-success.html
+++ b/src/sentry/templates/sentry/toolbar/login-success.html
@@ -1,25 +1,47 @@
-{% load sentry_helpers %}
+{# Auth redirect template for Dev Toolbar. Returned after successfully logging in to a requested organization. #}
 {% load sentry_assets %}
-
-
-
-
-
-

You are logged in!

-

If this window stays open, check the console for errors.

-
- {% script %} - - {% endscript %} - + + + Sentry - Login Success + + + +
+

You are logged in!

+

This window will automatically close after 3 seconds. If it doesn't, check the console for errors.

+ +
+ + {% script %} + + {% endscript %} + diff --git a/src/sentry/templatetags/sentry_helpers.py b/src/sentry/templatetags/sentry_helpers.py index 4115246663bd9..cb35982177941 100644 --- a/src/sentry/templatetags/sentry_helpers.py +++ b/src/sentry/templatetags/sentry_helpers.py @@ -22,8 +22,7 @@ register = template.Library() -truncatechars = register.filter(stringfilter(truncatechars)) -truncatechars.is_safe = True +truncatechars = register.filter(stringfilter(truncatechars), is_safe=True) @register.filter @@ -254,7 +253,7 @@ def date(dt, arg=None): @register.simple_tag def percent(value, total, format=None): if not (value and total): - result = 0 + result = 0.0 else: result = int(value) / float(total) * 100 diff --git a/src/sentry/templatetags/sentry_plugins.py b/src/sentry/templatetags/sentry_plugins.py deleted file mode 100644 index 1936199fcc9ba..0000000000000 --- a/src/sentry/templatetags/sentry_plugins.py +++ /dev/null @@ -1,43 +0,0 @@ -from django import template - -from sentry.api.serializers.models.plugin import is_plugin_deprecated -from sentry.plugins.base import Annotation, plugins -from sentry.utils.safe import safe_execute - -register = template.Library() - - -@register.filter -def get_actions(group, request): - project = group.project - - action_list = [] - for plugin in plugins.for_project(project, version=1): - results = safe_execute(plugin.actions, request, group, action_list) - - if not results: - continue - - action_list = results - - for plugin in plugins.for_project(project, version=2): - for action in safe_execute(plugin.get_actions, request, group) or (): - action_list.append(action) - - return [(a[0], a[1]) for a in action_list] - - -@register.filter -def get_annotations(group, request=None) -> list[dict[str, str]]: - project = group.project - - annotation_list = [] - for plugin in plugins.for_project(project, version=2): - if is_plugin_deprecated(plugin, project): - continue - for value in safe_execute(plugin.get_annotations, group=group) or (): - annotation = safe_execute(Annotation, **value) - if annotation: - annotation_list.append(annotation) - - return annotation_list diff --git a/src/sentry/testutils/cases.py b/src/sentry/testutils/cases.py index 97c39f023bfad..87d28fd6db66b 100644 --- a/src/sentry/testutils/cases.py +++ b/src/sentry/testutils/cases.py @@ -1452,7 +1452,6 @@ def __wrap_group(self, group): "last_seen", "first_seen", "data", - "score", "project_id", "time_spent_total", "time_spent_count", @@ -1474,7 +1473,6 @@ def __wrap_group(self, group): self.to_snuba_time_format(group.last_seen), self.to_snuba_time_format(group.first_seen), group.data, - group.score, group.project.id, group.time_spent_total, group.time_spent_count, @@ -1553,6 +1551,7 @@ def store_segment( payload["tags"] = tags if transaction_id: payload["event_id"] = transaction_id + payload["segment_id"] = transaction_id[:16] if profile_id: payload["profile_id"] = profile_id if measurements: @@ -1594,6 +1593,8 @@ def store_indexed_span( store_metrics_summary: Mapping[str, Sequence[Mapping[str, Any]]] | None = None, group: str = "00", category: str | None = None, + organization_id: int = 1, + is_eap: bool = False, ): if span_id is None: span_id = self._random_span_id() @@ -1602,7 +1603,7 @@ def store_indexed_span( payload = { "project_id": project_id, - "organization_id": 1, + "organization_id": organization_id, "span_id": span_id, "trace_id": trace_id, "duration_ms": int(duration), @@ -1628,6 +1629,7 @@ def store_indexed_span( } if transaction_id: payload["event_id"] = transaction_id + 
payload["segment_id"] = transaction_id[:16] if profile_id: payload["profile_id"] = profile_id if store_metrics_summary: @@ -1640,7 +1642,7 @@ def store_indexed_span( # We want to give the caller the possibility to store only a summary since the database does not deduplicate # on the span_id which makes the assumptions of a unique span_id in the database invalid. if not store_only_summary: - self.store_span(payload) + self.store_span(payload, is_eap=is_eap) if "_metrics_summary" in payload: self.store_metrics_summary(payload) @@ -2725,6 +2727,8 @@ def assert_serialized_widget_query(self, data, widget_data_source): assert data["columns"] == widget_data_source.columns if "fieldAliases" in data: assert data["fieldAliases"] == widget_data_source.field_aliases + if "selectedAggregate" in data: + assert data["selectedAggregate"] == widget_data_source.selected_aggregate def get_widgets(self, dashboard_id): return DashboardWidget.objects.filter(dashboard_id=dashboard_id).order_by("order") diff --git a/src/sentry/testutils/factories.py b/src/sentry/testutils/factories.py index cf9b019fcee39..8eefba329bf02 100644 --- a/src/sentry/testutils/factories.py +++ b/src/sentry/testutils/factories.py @@ -75,7 +75,6 @@ from sentry.integrations.models.repository_project_path_config import RepositoryProjectPathConfig from sentry.integrations.types import ExternalProviders from sentry.issues.grouptype import get_group_type_by_type_id -from sentry.mediators.token_exchange.grant_exchanger import GrantExchanger from sentry.models.activity import Activity from sentry.models.apikey import ApiKey from sentry.models.apitoken import ApiToken @@ -114,7 +113,6 @@ from sentry.models.organizationmemberteam import OrganizationMemberTeam from sentry.models.organizationslugreservation import OrganizationSlugReservation from sentry.models.orgauthtoken import OrgAuthToken -from sentry.models.platformexternalissue import PlatformExternalIssue from sentry.models.project import Project from sentry.models.projectbookmark import ProjectBookmark from sentry.models.projectcodeowners import ProjectCodeOwners @@ -136,6 +134,7 @@ SentryAppInstallationTokenCreator, ) from sentry.sentry_apps.logic import SentryAppCreator +from sentry.sentry_apps.models.platformexternalissue import PlatformExternalIssue from sentry.sentry_apps.models.sentry_app import SentryApp from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation from sentry.sentry_apps.models.sentry_app_installation_for_provider import ( @@ -144,6 +143,7 @@ from sentry.sentry_apps.models.servicehook import ServiceHook from sentry.sentry_apps.services.app.serial import serialize_sentry_app_installation from sentry.sentry_apps.services.hook import hook_service +from sentry.sentry_apps.token_exchange.grant_exchanger import GrantExchanger from sentry.signals import project_created from sentry.silo.base import SiloMode from sentry.snuba.dataset import Dataset @@ -171,11 +171,17 @@ from sentry.utils import loremipsum from sentry.utils.performance_issues.performance_problem import PerformanceProblem from sentry.workflow_engine.models import ( + Action, + DataCondition, + DataConditionGroup, + DataConditionGroupAction, DataSource, DataSourceDetector, Detector, + DetectorState, + DetectorWorkflow, Workflow, - WorkflowAction, + WorkflowDataConditionGroup, ) from social_auth.models import UserSocialAuth @@ -946,16 +952,22 @@ def store_event( data, project_id: int, assert_no_errors: bool = True, - event_type: EventType = EventType.DEFAULT, + default_event_type: EventType | 
None = None, sent_at: datetime | None = None, ) -> Event: """ Like `create_event`, but closer to how events are actually ingested. Prefer to use this method over `create_event` """ - if event_type == EventType.ERROR: + + # this creates a basic message event + if default_event_type == EventType.DEFAULT: data.update({"stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"])}) + # this creates an error event + elif default_event_type == EventType.ERROR: + data.update({"exception": [{"value": "BadError"}]}) + manager = EventManager(data, sent_at=sent_at) manager.normalize() if assert_no_errors: @@ -1202,12 +1214,13 @@ def create_sentry_app_installation( ): assert install.api_grant is not None assert install.sentry_app.application is not None - GrantExchanger.run( + assert install.sentry_app.proxy_user is not None + GrantExchanger( install=rpc_install, code=install.api_grant.code, client_id=install.sentry_app.application.client_id, user=install.sentry_app.proxy_user, - ) + ).run() install = SentryAppInstallation.objects.get(id=install.id) return install @@ -1973,6 +1986,7 @@ def create_uptime_subscription( @staticmethod def create_project_uptime_subscription( project: Project, + env: Environment | None, uptime_subscription: UptimeSubscription, mode: ProjectUptimeSubscriptionMode, name: str, @@ -1990,6 +2004,7 @@ def create_project_uptime_subscription( return ProjectUptimeSubscription.objects.create( uptime_subscription=uptime_subscription, project=project, + environment=env, mode=mode, name=name, owner_team_id=owner_team_id, @@ -2065,13 +2080,34 @@ def create_workflow( @staticmethod @assume_test_silo_mode(SiloMode.REGION) - def create_workflowaction( + def create_data_condition_group( + **kwargs, + ) -> DataConditionGroup: + return DataConditionGroup.objects.create(**kwargs) + + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_workflow_data_condition_group( workflow: Workflow | None = None, + condition_group: DataConditionGroup | None = None, **kwargs, - ) -> WorkflowAction: + ) -> WorkflowDataConditionGroup: if workflow is None: workflow = Factories.create_workflow() - return WorkflowAction.objects.create(workflow=workflow, **kwargs) + + if not condition_group: + condition_group = Factories.create_data_condition_group() + + return WorkflowDataConditionGroup.objects.create( + workflow=workflow, condition_group=condition_group + ) + + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_data_condition( + **kwargs, + ) -> DataCondition: + return DataCondition.objects.create(**kwargs) @staticmethod @assume_test_silo_mode(SiloMode.REGION) @@ -2103,9 +2139,24 @@ def create_detector( if name is None: name = petname.generate(2, " ", letters=10).title() return Detector.objects.create( - organization=organization, name=name, owner_user_id=owner_user_id, owner_team=owner_team + organization=organization, + name=name, + owner_user_id=owner_user_id, + owner_team=owner_team, + **kwargs, ) + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_detector_state( + detector: Detector | None = None, + **kwargs, + ) -> DetectorState: + if detector is None: + detector = Factories.create_detector() + + return DetectorState.objects.create(detector=detector, **kwargs) + @staticmethod @assume_test_silo_mode(SiloMode.REGION) def create_data_source_detector( @@ -2118,3 +2169,36 @@ def create_data_source_detector( if detector is None: detector = Factories.create_detector() return DataSourceDetector.objects.create(data_source=data_source, detector=detector) + + 
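Circling back to the `store_event` change above: `default_event_type` now seeds the payload, with `EventType.DEFAULT` producing a basic message event (default stacktrace data) and `EventType.ERROR` producing a minimal exception. A hypothetical test call, assuming `EventType` is importable from the same factories module and that `project` exists in the test:

```python
# Hypothetical usage of the reworked store_event parameter.
from sentry.testutils.factories import EventType, Factories  # EventType path assumed

error_event = Factories.store_event(
    data={"message": "boom"},
    project_id=project.id,  # `project` assumed to exist in the test
    default_event_type=EventType.ERROR,  # seeds {"exception": [{"value": "BadError"}]}
)
message_event = Factories.store_event(
    data={"message": "just a log line"},
    project_id=project.id,
    default_event_type=EventType.DEFAULT,  # seeds default stacktrace data instead
)
```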
@staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_action(**kwargs) -> Action: + return Action.objects.create(**kwargs) + + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_detector_workflow( + detector: Detector | None = None, + workflow: Workflow | None = None, + **kwargs, + ) -> DetectorWorkflow: + if detector is None: + detector = Factories.create_detector() + if workflow is None: + workflow = Factories.create_workflow() + return DetectorWorkflow.objects.create(detector=detector, workflow=workflow, **kwargs) + + @staticmethod + @assume_test_silo_mode(SiloMode.REGION) + def create_data_condition_group_action( + action: Action | None = None, + condition_group: DataConditionGroup | None = None, + **kwargs, + ) -> DataConditionGroupAction: + if action is None: + action = Factories.create_action() + if condition_group is None: + condition_group = Factories.create_data_condition_group() + return DataConditionGroupAction.objects.create( + action=action, condition_group=condition_group, **kwargs + ) diff --git a/src/sentry/testutils/fixtures.py b/src/sentry/testutils/fixtures.py index 63b1fe891185b..e9226424f2333 100644 --- a/src/sentry/testutils/fixtures.py +++ b/src/sentry/testutils/fixtures.py @@ -14,6 +14,7 @@ from sentry.integrations.models.integration import Integration from sentry.integrations.models.organization_integration import OrganizationIntegration from sentry.models.activity import Activity +from sentry.models.environment import Environment from sentry.models.grouprelease import GroupRelease from sentry.models.organization import Organization from sentry.models.organizationmember import OrganizationMember @@ -43,6 +44,7 @@ from sentry.users.models.identity import Identity, IdentityProvider from sentry.users.models.user import User from sentry.users.services.user import RpcUser +from sentry.workflow_engine.models import DataSource, Detector, Workflow class Fixtures: @@ -633,21 +635,43 @@ def create_dashboard_widget(self, *args, **kwargs): def create_dashboard_widget_query(self, *args, **kwargs): return Factories.create_dashboard_widget_query(*args, **kwargs) - def create_workflow(self, *args, **kwargs): - return Factories.create_workflow(*args, **kwargs) + def create_workflow_action(self, *args, **kwargs) -> Workflow: + return Factories.create_workflow_action(*args, **kwargs) - def create_workflowaction(self, *args, **kwargs): - return Factories.create_workflowaction(*args, **kwargs) + def create_workflow(self, *args, **kwargs) -> Workflow: + return Factories.create_workflow(*args, **kwargs) - def create_data_source(self, *args, **kwargs): + def create_data_source(self, *args, **kwargs) -> DataSource: return Factories.create_data_source(*args, **kwargs) - def create_detector(self, *args, **kwargs): + def create_data_condition(self, *args, **kwargs): + return Factories.create_data_condition(*args, **kwargs) + + def create_detector(self, *args, **kwargs) -> Detector: return Factories.create_detector(*args, **kwargs) + def create_detector_state(self, *args, **kwargs) -> Detector: + return Factories.create_detector_state(*args, **kwargs) + def create_data_source_detector(self, *args, **kwargs): return Factories.create_data_source_detector(*args, **kwargs) + def create_data_condition_group(self, *args, **kwargs): + return Factories.create_data_condition_group(*args, **kwargs) + + def create_data_condition_group_action(self, *args, **kwargs): + return Factories.create_data_condition_group_action(*args, **kwargs) + + def 
create_detector_workflow(self, *args, **kwargs): + return Factories.create_detector_workflow(*args, **kwargs) + + def create_workflow_data_condition_group(self, *args, **kwargs): + return Factories.create_workflow_data_condition_group(*args, **kwargs) + + # workflow_engine action + def create_action(self, *args, **kwargs): + return Factories.create_action(*args, **kwargs) + def create_uptime_subscription( self, type: str = "test", @@ -667,7 +691,7 @@ def create_uptime_subscription( if date_updated is None: date_updated = timezone.now() if headers is None: - headers = {} + headers = [] return Factories.create_uptime_subscription( type=type, @@ -688,6 +712,7 @@ def create_uptime_subscription( def create_project_uptime_subscription( self, project: Project | None = None, + env: Environment | None = None, uptime_subscription: UptimeSubscription | None = None, mode=ProjectUptimeSubscriptionMode.AUTO_DETECTED_ACTIVE, name="Test Name", @@ -696,11 +721,14 @@ def create_project_uptime_subscription( ) -> ProjectUptimeSubscription: if project is None: project = self.project + if env is None: + env = self.environment if uptime_subscription is None: uptime_subscription = self.create_uptime_subscription() return Factories.create_project_uptime_subscription( project, + env, uptime_subscription, mode, name, diff --git a/src/sentry/testutils/helpers/backups.py b/src/sentry/testutils/helpers/backups.py index 7b6f60ec7d0bb..0f1d38134c234 100644 --- a/src/sentry/testutils/helpers/backups.py +++ b/src/sentry/testutils/helpers/backups.py @@ -65,6 +65,7 @@ from sentry.models.authprovider import AuthProvider from sentry.models.counter import Counter from sentry.models.dashboard import Dashboard, DashboardTombstone +from sentry.models.dashboard_permissions import DashboardPermissions from sentry.models.dashboard_widget import ( DashboardWidget, DashboardWidgetQuery, @@ -111,6 +112,7 @@ from sentry.users.models.userip import UserIP from sentry.users.models.userrole import UserRole, UserRoleUser from sentry.utils import json +from sentry.workflow_engine.models import Action, DataConditionGroup __all__ = [ "export_to_file", @@ -532,8 +534,11 @@ def create_exhaustive_organization( # Dashboard dashboard = Dashboard.objects.create( - title=f"Dashboard 1 for {slug}", created_by_id=owner_id, organization=org + title=f"Dashboard 1 for {slug}", + created_by_id=owner_id, + organization=org, ) + DashboardPermissions.objects.create(is_creator_only_editable=False, dashboard=dashboard) widget = DashboardWidget.objects.create( dashboard=dashboard, order=1, @@ -608,13 +613,57 @@ def create_exhaustive_organization( access_end=timezone.now() + timedelta(days=1), ) + # Setup a test 'Issue Rule' and 'Automation' workflow = self.create_workflow(organization=org) - self.create_workflowaction(workflow=workflow) - self.create_workflow(organization=org) - self.create_data_source_detector( - self.create_data_source(organization=org), - self.create_detector(organization=org), + detector = self.create_detector(organization=org) + self.create_detector_workflow(detector=detector, workflow=workflow) + self.create_detector_state(detector=detector) + + notification_condition_group = self.create_data_condition_group( + logic_type=DataConditionGroup.Type.ANY, + organization=org, + ) + + send_notification_action = self.create_action(type=Action.Type.Notification, data="") + self.create_data_condition_group_action( + action=send_notification_action, + condition_group=notification_condition_group, + ) + + # TODO @saponifi3d: Update comparison to be 
DetectorState.Critical + self.create_data_condition( + condition="eq", + comparison="critical", + type="WorkflowCondition", + condition_result="True", + condition_group=notification_condition_group, + ) + + self.create_workflow_data_condition_group( + workflow=workflow, condition_group=notification_condition_group + ) + + data_source = self.create_data_source(organization=org) + + self.create_data_source_detector(data_source, detector) + detector_conditions = self.create_data_condition_group( + logic_type=DataConditionGroup.Type.ALL, + organization=org, + ) + + # TODO @saponifi3d: Create or define trigger workflow action type + trigger_workflows_action = self.create_action(type=Action.Type.TriggerWorkflow, data="") + self.create_data_condition_group_action( + action=trigger_workflows_action, condition_group=detector_conditions + ) + self.create_data_condition( + condition="eq", + comparison="critical", + type="DetectorCondition", + condition_result="True", + condition_group=detector_conditions, ) + detector.workflow_condition_group = detector_conditions return org diff --git a/src/sentry/testutils/helpers/datetime.py b/src/sentry/testutils/helpers/datetime.py index c68d1db640b2c..da1af95d8c5c5 100644 --- a/src/sentry/testutils/helpers/datetime.py +++ b/src/sentry/testutils/helpers/datetime.py @@ -8,7 +8,7 @@ __all__ = ["iso_format", "before_now", "timestamp_format"] -def iso_format(date): +def iso_format(date: datetime) -> str: return date.isoformat()[:19] diff --git a/src/sentry/testutils/helpers/features.py b/src/sentry/testutils/helpers/features.py index a7cf343f21c71..13df0462f3479 100644 --- a/src/sentry/testutils/helpers/features.py +++ b/src/sentry/testutils/helpers/features.py @@ -2,7 +2,7 @@ import functools import logging -from collections.abc import Generator, Mapping +from collections.abc import Generator, Mapping, Sequence from contextlib import contextmanager from unittest.mock import patch @@ -24,7 +24,7 @@ @contextmanager -def Feature(names): +def Feature(names: str | Sequence[str] | dict[str, bool]) -> Generator[None]: """ Control whether a feature is enabled. 
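The widened `Feature` annotation above matches the three ways the helper is called in tests; for illustration (the flag names here are made up):

```python
# Illustrative usage; Feature patches sentry.features.has for the block.
from sentry.testutils.helpers.features import Feature

with Feature("organizations:made-up-flag"):  # a bare string enables one flag
    ...

with Feature(["organizations:made-up-flag", "organizations:other-flag"]):
    ...  # a sequence enables every named flag

with Feature({"organizations:made-up-flag": True, "organizations:other-flag": False}):
    ...  # a dict sets each flag explicitly, as the new annotation suggests
```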
@@ -102,14 +102,18 @@ def features_override(name, *args, **kwargs): logger.info("Flag defaulting to %s: %s", default_value, repr(name)) return default_value - def batch_features_override(_feature_names, projects=None, organization=None, *args, **kwargs): + def batch_features_override( + _feature_names: Sequence[str], projects=None, organization=None, *args, **kwargs + ): feature_results = {name: names[name] for name in _feature_names if name in names} default_feature_names = [name for name in _feature_names if name not in names] - default_feature_results = {} + default_feature_results: dict[str, dict[str, bool | None]] = {} if default_feature_names: - default_feature_results = default_batch_has( + defaults = default_batch_has( default_feature_names, projects=projects, organization=organization, **kwargs ) + if defaults: + default_feature_results.update(defaults) if projects: results = {} @@ -122,13 +126,13 @@ def batch_features_override(_feature_names, projects=None, organization=None, *a return results elif organization: result_key = f"organization:{organization.id}" - results = {**feature_results, **default_feature_results[result_key]} - results = { + results_for_org = {**feature_results, **default_feature_results[result_key]} + results_for_org = { name: resolve_feature_name_value_for_org(organization, val) - for name, val in results.items() + for name, val in results_for_org.items() if name.startswith("organization") } - return {result_key: results} + return {result_key: results_for_org} with patch("sentry.features.has") as features_has: features_has.side_effect = features_override diff --git a/src/sentry/testutils/helpers/on_demand.py b/src/sentry/testutils/helpers/on_demand.py index fa149f633757c..ae131847340d9 100644 --- a/src/sentry/testutils/helpers/on_demand.py +++ b/src/sentry/testutils/helpers/on_demand.py @@ -20,6 +20,7 @@ def create_widget( dashboard: Dashboard | None = None, widget: DashboardWidget | None = None, discover_widget_split: int | None = None, + widget_type: int = DashboardWidgetTypes.DISCOVER, ) -> tuple[DashboardWidgetQuery, DashboardWidget, Dashboard]: columns = columns or [] dashboard = dashboard or Dashboard.objects.create( @@ -31,7 +32,7 @@ def create_widget( widget = widget or DashboardWidget.objects.create( dashboard=dashboard, order=order, - widget_type=DashboardWidgetTypes.DISCOVER, + widget_type=widget_type, display_type=DashboardWidgetDisplayTypes.LINE_CHART, discover_widget_split=discover_widget_split, ) diff --git a/src/sentry/testutils/hybrid_cloud.py b/src/sentry/testutils/hybrid_cloud.py index f07b5937eded1..20105205ecac5 100644 --- a/src/sentry/testutils/hybrid_cloud.py +++ b/src/sentry/testutils/hybrid_cloud.py @@ -14,9 +14,12 @@ from django.db.backends.base.base import BaseDatabaseWrapper from sentry.db.postgres.transactions import in_test_transaction_enforcement +from sentry.deletions.models.scheduleddeletion import ( + BaseScheduledDeletion, + get_regional_scheduled_deletion, +) from sentry.models.organizationmember import OrganizationMember from sentry.models.organizationmembermapping import OrganizationMemberMapping -from sentry.models.scheduledeletion import BaseScheduledDeletion, get_regional_scheduled_deletion from sentry.silo.base import SiloMode from sentry.testutils.silo import assume_test_silo_mode diff --git a/src/sentry/testutils/pytest/fixtures.py b/src/sentry/testutils/pytest/fixtures.py index 150f94da9c54e..fb50304db1b01 100644 --- a/src/sentry/testutils/pytest/fixtures.py +++ b/src/sentry/testutils/pytest/fixtures.py @@ -9,8 
+9,10 @@ import os import re import sys +from collections.abc import Callable, Generator from concurrent.futures import ThreadPoolExecutor from string import Template +from typing import Any, Protocol import pytest import requests @@ -193,14 +195,29 @@ def read_snapshot_file(reference_file: str) -> tuple[str, str]: return (header, refval) +InequalityComparator = Callable[[str, str], bool | str] +default_comparator = lambda refval, output: refval != output + + +class InstaSnapshotter(Protocol): + def __call__( + self, + output: str | Any, + reference_file: str | None = None, + subname: str | None = None, + inequality_comparator: InequalityComparator = default_comparator, + ) -> None: + ... + + @pytest.fixture -def insta_snapshot(request, log): +def insta_snapshot(request: pytest.FixtureRequest) -> Generator[InstaSnapshotter]: def inner( - output, - reference_file=None, - subname=None, - inequality_comparator=lambda refval, output: refval != output, - ): + output: str | Any, + reference_file: str | None = None, + subname: str | None = None, + inequality_comparator: InequalityComparator = default_comparator, + ) -> None: from sentry.testutils.silo import strip_silo_mode_test_suffix if reference_file is None: diff --git a/tests/sentry/remote_config/__init__.py b/src/sentry/toolbar/__init__.py similarity index 100% rename from tests/sentry/remote_config/__init__.py rename to src/sentry/toolbar/__init__.py diff --git a/src/sentry/toolbar/iframe_view.py b/src/sentry/toolbar/iframe_view.py deleted file mode 100644 index c76a93cccb195..0000000000000 --- a/src/sentry/toolbar/iframe_view.py +++ /dev/null @@ -1,36 +0,0 @@ -from typing import Any - -from django.http import HttpRequest, HttpResponse - -from sentry.web.frontend.base import OrganizationView, region_silo_view - - -@region_silo_view -class IframeView(OrganizationView): - # TODO: For perms check. This is just an example and we might not need it. 
-    # required_scope = "org:read,org:integrations"
-
-    security_headers = {"X-Frame-Options": "ALLOWALL"}
-
-    def respond(self, template: str, context: dict[str, Any] | None = None, status: int = 200):
-        response = super().respond(template, context=context, status=status)
-        for header, val in IframeView.security_headers.items():
-            response[header] = val
-        return response
-
-    def handle_auth_required(self, request: HttpRequest, *args, **kwargs):
-        # Override redirects to login
-        if request.method == "GET":
-            self.default_context = {}
-            return self.respond("sentry/toolbar/iframe.html", status=401)
-        return HttpResponse(status=401)
-
-    def handle_permission_required(self, request: HttpRequest, *args, **kwargs):
-        # Override redirects to login
-        if request.method == "GET":
-            self.default_context = {}
-            return self.respond("sentry/toolbar/iframe.html", status=403)
-        return HttpResponse(status=403)
-
-    def get(self, request: HttpRequest, organization, project_id_or_slug):
-        return self.respond("sentry/toolbar/iframe.html", status=200)
diff --git a/src/sentry/toolbar/login_success_view.py b/src/sentry/toolbar/login_success_view.py
deleted file mode 100644
index 5725aaa1f5643..0000000000000
--- a/src/sentry/toolbar/login_success_view.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from django.http import HttpRequest
-
-from sentry.web.frontend.base import OrganizationView, region_silo_view
-
-
-@region_silo_view
-class LoginSuccessView(OrganizationView):
-    def get(self, request: HttpRequest, organization, project_id_or_slug):
-        return self.respond("sentry/toolbar/login-success.html", status=200)
diff --git a/src/sentry/toolbar/utils/url.py b/src/sentry/toolbar/utils/url.py
new file mode 100644
index 0000000000000..abd27de0227fb
--- /dev/null
+++ b/src/sentry/toolbar/utils/url.py
@@ -0,0 +1,89 @@
+import re
+from urllib.parse import ParseResult, urlparse
+
+# Let's break down the regexp:
+# 0. Anchor the start of the string with `^`
+# 1. The scheme section: `((?P<scheme>https?)?://)?`
+#    - optionally we'll capture `http://` or `https://` or `://`
+#    - if we have `http` or `https` then we'll put them in the group named "scheme"
+# 2. The hostname section: `(?P<hostname>[^:/?]+)`
+#    - capture all characters but stop at the first of `:` or `/` or `?`
+#    - put the string in a group named "hostname"
+# 3. The port section: `(:(?P<port>[^/?]+))?`
+#    - optionally we'll capture all the characters starting with `:` and ending before `/` or `?`
+#    - put the part after `:` in a group named `port`
+# 4. Match anything else: `.*$`
+#    - This is everything after `/` or `?` which we might've found in step #2 or #3
+#    - Whether there is a match or not, we don't name it. It will be ignored.
+#
+# Test and view it with tools like these:
+# https://regex101.com/r/rWQyb9/1
+# https://regex-vis.com/?r=%5E%28%28https%3F%29%3F%3A%2F%2F%29%3F%28%5B%5E%3A%2F%3F%5D%2B%29%28%3A%28%5B%5E%2F%3F%5D%2B%29%29%3F.*%24
+pattern = re.compile("^((?P<scheme>https?)?://)?(?P<hostname>[^:/?]+)(:(?P<port>[^/?]+))?.*$", re.I)
+
+
+def url_matches(source: ParseResult, target: str) -> bool:
+    """
+    Matches a referrer url with a user-provided one. Checks 3 fields:
+    * hostname: must equal target.hostname. The first subdomain in target may be a wildcard "*".
+    * port: must equal target.port, unless it is excluded from target.
+    * scheme: must equal target.scheme, unless it is excluded from target.
+    Note that both urls' paths are ignored.
+ """ + + match = re.match(pattern, target) + if not match: + return False + + scheme = match.group("scheme") + hostname = match.group("hostname") + port = match.group("port") + + if not source.hostname or not hostname: + return False + + is_wildcard_scheme = scheme == "://" or scheme is None + if not is_wildcard_scheme and source.scheme != scheme: + return False + + is_wildcard_subdomain = hostname.startswith("*.") or hostname.startswith(".") + if is_wildcard_subdomain: + source_root = source.hostname.split(".", 1)[1] + target_root = hostname.split(".", 1)[1] + if source_root != target_root: + return False + elif source.hostname != hostname: + return False + + is_default_port = port is None + source_port = _get_port(source) + if not is_default_port and source_port != port: + return False + + return True + + +def _get_port(parsed: ParseResult) -> str: + if parsed.port: + return str(parsed.port) + elif parsed.scheme == "http": + return "80" + elif parsed.scheme == "https": + return "443" + return "" + + +def is_origin_allowed(referrer: str, allowed_origins: list[str]) -> bool: + # Empty referrer is always invalid + if not referrer: + return False + + # The input referrer must be a well-formed url with a valid scheme. + if not referrer.startswith("http://") and not referrer.startswith("https://"): + return False + + parsed_referrer = urlparse(referrer) + for origin in allowed_origins: + if url_matches(parsed_referrer, origin): + return True + return False diff --git a/tests/sentry/remote_config/endpoints/__init__.py b/src/sentry/toolbar/views/__init__.py similarity index 100% rename from tests/sentry/remote_config/endpoints/__init__.py rename to src/sentry/toolbar/views/__init__.py diff --git a/src/sentry/toolbar/views/iframe_view.py b/src/sentry/toolbar/views/iframe_view.py new file mode 100644 index 0000000000000..ba0ff1c5115fd --- /dev/null +++ b/src/sentry/toolbar/views/iframe_view.py @@ -0,0 +1,73 @@ +from typing import Any + +from django.http import HttpRequest, HttpResponse +from django.http.response import HttpResponseBase + +from sentry.models.organization import Organization +from sentry.models.project import Project +from sentry.toolbar.utils.url import is_origin_allowed +from sentry.web.frontend.base import ProjectView, region_silo_view + +TEMPLATE = "sentry/toolbar/iframe.html" + + +def _get_referrer(request) -> str | None: + # 1 R is because of legacy http reasons: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referer + return request.META.get("HTTP_REFERER") + + +@region_silo_view +class IframeView(ProjectView): + default_context = {} + + def dispatch(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponseBase: + self.organization_slug = kwargs.get("organization_slug", "") + self.project_id_or_slug = kwargs.get("project_id_or_slug", "") + return super().dispatch(request, *args, **kwargs) + + def handle_disabled_member(self, organization: Organization) -> HttpResponse: + return self._respond_with_state("logged-out") + + def handle_not_2fa_compliant(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: + return self._respond_with_state("logged-out") + + def handle_sudo_required(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: + return self._respond_with_state("logged-out") + + def handle_auth_required(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: + return self._respond_with_state("logged-out") + + def handle_permission_required(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: + return 
self._respond_with_state("missing-project") + + def get( + self, request: HttpRequest, organization: Organization, project: Project, *args, **kwargs + ) -> HttpResponse: + referrer = _get_referrer(request) or "" + allowed_origins: list[str] = project.get_option("sentry:toolbar_allowed_origins") + + if referrer and is_origin_allowed(referrer, allowed_origins): + return self._respond_with_state("success") + + return self._respond_with_state("invalid-domain") + + def _respond_with_state(self, state: str): + response = self.respond( + TEMPLATE, + status=200, # always return 200 so the html will render inside the iframe + context={ + "referrer": _get_referrer(self.request) or "", + "state": state, + "logging": self.request.GET.get("logging", ""), + "organization_slug": self.organization_slug, + "project_id_or_slug": self.project_id_or_slug, + }, + ) + + referrer = _get_referrer(self.request) or "" + + # This is an alternative to @csp_replace - we need to use this pattern to access the referrer. + response._csp_replace = {"frame-ancestors": [referrer.strip("/") or "'none'"]} # type: ignore[attr-defined] + response["X-Frame-Options"] = "DENY" if referrer == "" else "ALLOWALL" + + return response diff --git a/src/sentry/toolbar/views/login_success_view.py b/src/sentry/toolbar/views/login_success_view.py new file mode 100644 index 0000000000000..e581bed6e4f7b --- /dev/null +++ b/src/sentry/toolbar/views/login_success_view.py @@ -0,0 +1,21 @@ +from django.conf import settings +from django.http import HttpRequest + +from sentry.web.frontend.base import OrganizationView, region_silo_view + +TEMPLATE = "sentry/toolbar/login-success.html" + +session_cookie_name = settings.SESSION_COOKIE_NAME + + +@region_silo_view +class LoginSuccessView(OrganizationView): + def get(self, request: HttpRequest, organization, project_id_or_slug): + return self.respond( + TEMPLATE, + status=200, + context={ + "delay": int(request.GET.get("delay", 3000)), + "cookie": f"{session_cookie_name}={request.COOKIES.get(session_cookie_name)}", + }, + ) diff --git a/src/sentry/tsdb/base.py b/src/sentry/tsdb/base.py index 85d46c6987e9e..176e713145a84 100644 --- a/src/sentry/tsdb/base.py +++ b/src/sentry/tsdb/base.py @@ -122,6 +122,7 @@ class BaseTSDB(Service): "get_most_frequent_series", "get_frequency_series", "get_frequency_totals", + "get_distinct_counts_totals_with_conditions", ] ) @@ -554,6 +555,25 @@ def get_distinct_counts_totals( """ raise NotImplementedError + def get_distinct_counts_totals_with_conditions( + self, + model: TSDBModel, + keys: Sequence[int], + start: datetime, + end: datetime | None = None, + rollup: int | None = None, + environment_id: int | None = None, + use_cache: bool = False, + jitter_value: int | None = None, + tenant_ids: dict[str, int | str] | None = None, + referrer_suffix: str | None = None, + conditions: list[dict[str, Any]] | None = None, + ) -> dict[int, Any]: + """ + Count distinct items during a time range with conditions. 
+ """ + raise NotImplementedError + def get_distinct_counts_union( self, model: TSDBModel, diff --git a/src/sentry/tsdb/dummy.py b/src/sentry/tsdb/dummy.py index a4ce1eba59a99..3370876e9a83e 100644 --- a/src/sentry/tsdb/dummy.py +++ b/src/sentry/tsdb/dummy.py @@ -76,6 +76,23 @@ def get_distinct_counts_totals( self.validate_arguments([model], [environment_id]) return {k: 0 for k in keys} + def get_distinct_counts_totals_with_conditions( + self, + model, + keys: Sequence[int], + start, + end=None, + rollup=None, + environment_id=None, + use_cache=False, + jitter_value=None, + tenant_ids=None, + referrer_suffix=None, + conditions=None, + ): + self.validate_arguments([model], [environment_id]) + return 0 + def get_distinct_counts_union( self, model: TSDBModel, diff --git a/src/sentry/tsdb/redissnuba.py b/src/sentry/tsdb/redissnuba.py index ecef8fc7d7fcc..266e612fbec13 100644 --- a/src/sentry/tsdb/redissnuba.py +++ b/src/sentry/tsdb/redissnuba.py @@ -30,6 +30,7 @@ def dont_do_this(callargs): "get_sums": (READ, single_model_argument), "get_distinct_counts_series": (READ, single_model_argument), "get_distinct_counts_totals": (READ, single_model_argument), + "get_distinct_counts_totals_with_conditions": (READ, single_model_argument), "get_distinct_counts_union": (READ, single_model_argument), "get_most_frequent": (READ, single_model_argument), "get_most_frequent_series": (READ, single_model_argument), diff --git a/src/sentry/tsdb/snuba.py b/src/sentry/tsdb/snuba.py index 29fd5225efc9d..50d58b1e3cff3 100644 --- a/src/sentry/tsdb/snuba.py +++ b/src/sentry/tsdb/snuba.py @@ -800,6 +800,38 @@ def get_distinct_counts_totals( referrer_suffix=referrer_suffix, ) + def get_distinct_counts_totals_with_conditions( + self, + model: TSDBModel, + keys: Sequence[int], + start: datetime, + end: datetime | None = None, + rollup: int | None = None, + environment_id: int | None = None, + use_cache: bool = False, + jitter_value: int | None = None, + tenant_ids: dict[str, int | str] | None = None, + referrer_suffix: str | None = None, + conditions: list[dict[str, Any]] | None = None, + ) -> dict[int, Any]: + """ + Count distinct items during a time range with conditions. 
+ """ + return self.get_data( + model, + keys, + start, + end, + rollup, + [environment_id] if environment_id is not None else None, + aggregation="uniq", + use_cache=use_cache, + jitter_value=jitter_value, + tenant_ids=tenant_ids, + referrer_suffix=referrer_suffix, + conditions=conditions, + ) + def get_distinct_counts_union( self, model, keys, start, end=None, rollup=None, environment_id=None, tenant_ids=None ): diff --git a/src/sentry/types/region.py b/src/sentry/types/region.py index 50a8614d1875a..2fb0c7944804b 100644 --- a/src/sentry/types/region.py +++ b/src/sentry/types/region.py @@ -1,6 +1,6 @@ from __future__ import annotations -from collections.abc import Collection, Container, Iterable +from collections.abc import Collection, Iterable from enum import Enum from typing import Any from urllib.parse import urljoin @@ -321,7 +321,7 @@ def _find_orgs_for_user(user_id: int) -> set[int]: @control_silo_function -def find_regions_for_orgs(org_ids: Container[int]) -> set[str]: +def find_regions_for_orgs(org_ids: Iterable[int]) -> set[str]: from sentry.models.organizationmapping import OrganizationMapping if SiloMode.get_current_mode() == SiloMode.MONOLITH: diff --git a/src/sentry/uptime/consumers/results_consumer.py b/src/sentry/uptime/consumers/results_consumer.py index ea3ace1031a9d..7af6259ddacd8 100644 --- a/src/sentry/uptime/consumers/results_consumer.py +++ b/src/sentry/uptime/consumers/results_consumer.py @@ -69,7 +69,6 @@ def build_active_consecutive_status_key( class UptimeResultProcessor(ResultProcessor[CheckResult, UptimeSubscription]): subscription_model = UptimeSubscription - topic_for_codec = Topic.UPTIME_RESULTS def get_subscription_id(self, result: CheckResult) -> str: return result["subscription_id"] @@ -333,3 +332,4 @@ def has_reached_status_threshold( class UptimeResultsStrategyFactory(ResultsStrategyFactory[CheckResult, UptimeSubscription]): result_processor_cls = UptimeResultProcessor + topic_for_codec = Topic.UPTIME_RESULTS diff --git a/src/sentry/uptime/detectors/tasks.py b/src/sentry/uptime/detectors/tasks.py index 279feec3987fa..1e37a61d45799 100644 --- a/src/sentry/uptime/detectors/tasks.py +++ b/src/sentry/uptime/detectors/tasks.py @@ -41,6 +41,8 @@ URL_MIN_PERCENT = 0.05 # Default value for how often we should run these subscriptions when onboarding them ONBOARDING_SUBSCRIPTION_INTERVAL_SECONDS = int(timedelta(minutes=60).total_seconds()) +# Default timeout for auto-detected uptime monitors +ONBOARDING_SUBSCRIPTION_TIMEOUT_MS = 10_000 logger = logging.getLogger("sentry.uptime-url-autodetection") @@ -244,8 +246,12 @@ def monitor_url_for_project(project: Project, url: str): ) get_or_create_project_uptime_subscription( project, + # TODO(epurkhiser): This is where we would put the environment object + # from autodetection if we decide to do that. 
+ environment=None, url=url, interval_seconds=ONBOARDING_SUBSCRIPTION_INTERVAL_SECONDS, + timeout_ms=ONBOARDING_SUBSCRIPTION_TIMEOUT_MS, mode=ProjectUptimeSubscriptionMode.AUTO_DETECTED_ONBOARDING, ) metrics.incr("uptime.detectors.candidate_url.monitor_created", sample_rate=1.0) diff --git a/src/sentry/uptime/endpoints/project_uptime_alert_details.py b/src/sentry/uptime/endpoints/project_uptime_alert_details.py index ec019ac8f6645..65e008add79b1 100644 --- a/src/sentry/uptime/endpoints/project_uptime_alert_details.py +++ b/src/sentry/uptime/endpoints/project_uptime_alert_details.py @@ -92,6 +92,7 @@ def put( instance=uptime_subscription, context={ "organization": project.organization, + "project": project, "access": request.access, "request": request, }, diff --git a/src/sentry/uptime/endpoints/project_uptime_alert_index.py b/src/sentry/uptime/endpoints/project_uptime_alert_index.py index 76bc4b0d469d3..978a8822fbb1b 100644 --- a/src/sentry/uptime/endpoints/project_uptime_alert_index.py +++ b/src/sentry/uptime/endpoints/project_uptime_alert_index.py @@ -2,11 +2,11 @@ from rest_framework.request import Request from rest_framework.response import Response -from sentry import features from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint from sentry.api.bases import ProjectEndpoint +from sentry.api.bases.project import ProjectAlertRulePermission from sentry.api.serializers import serialize from sentry.apidocs.constants import ( RESPONSE_BAD_REQUEST, @@ -27,6 +27,7 @@ class ProjectUptimeAlertIndexEndpoint(ProjectEndpoint): "POST": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.CRONS + permission_classes = (ProjectAlertRulePermission,) @extend_schema( operation_id="Create an Uptime Monitor", @@ -44,10 +45,6 @@ def post(self, request: Request, project: Project) -> Response: """ Create a new monitor. 
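+
+        Requires alert rule permissions on the project
+        (`ProjectAlertRulePermission`).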
""" - if not features.has( - "organizations:uptime-api-create-update", project.organization, actor=request.user - ): - return Response(status=404) validator = UptimeMonitorValidator( data=request.data, context={ diff --git a/src/sentry/uptime/endpoints/serializers.py b/src/sentry/uptime/endpoints/serializers.py index 6d1e4e66d9232..96bfc573f411b 100644 --- a/src/sentry/uptime/endpoints/serializers.py +++ b/src/sentry/uptime/endpoints/serializers.py @@ -12,6 +12,7 @@ class ProjectUptimeSubscriptionSerializerResponse(TypedDict): id: str projectSlug: str + environment: str | None name: str status: int mode: int @@ -32,7 +33,7 @@ def __init__(self, expand=None): def get_attrs( self, item_list: Sequence[ProjectUptimeSubscription], user: Any, **kwargs: Any ) -> MutableMapping[Any, Any]: - prefetch_related_objects(item_list, "uptime_subscription", "project") + prefetch_related_objects(item_list, "uptime_subscription", "project", "environment") owners = list(filter(None, [item.owner for item in item_list])) owners_serialized = serialize( Actor.resolve_many(owners, filter_none=False), user, ActorSerializer() @@ -58,6 +59,7 @@ def serialize( return { "id": str(obj.id), "projectSlug": obj.project.slug, + "environment": obj.environment.name if obj.environment else None, "name": obj.name or f"Uptime Monitoring for {obj.uptime_subscription.url}", "status": obj.uptime_status, "mode": obj.mode, diff --git a/src/sentry/uptime/endpoints/validators.py b/src/sentry/uptime/endpoints/validators.py index 41a287b3f3fe8..9a3634baa3e50 100644 --- a/src/sentry/uptime/endpoints/validators.py +++ b/src/sentry/uptime/endpoints/validators.py @@ -10,6 +10,7 @@ from sentry.api.fields import ActorField from sentry.api.serializers.rest_framework import CamelSnakeSerializer from sentry.auth.superuser import is_active_superuser +from sentry.models.environment import Environment from sentry.uptime.detectors.url_extraction import extract_domain_parts from sentry.uptime.models import ProjectUptimeSubscription, ProjectUptimeSubscriptionMode from sentry.uptime.subscriptions.subscriptions import ( @@ -35,6 +36,16 @@ SUPPORTED_HTTP_METHODS = ["GET", "POST", "HEAD", "PUT", "DELETE", "PATCH", "OPTIONS"] MAX_REQUEST_SIZE_BYTES = 1000 +# This matches the jsonschema for the check config +VALID_INTERVALS = [ + timedelta(minutes=1), + timedelta(minutes=5), + timedelta(minutes=10), + timedelta(minutes=20), + timedelta(minutes=30), + timedelta(minutes=60), +] + HEADERS_LIST_SCHEMA = { "type": "array", "items": { @@ -47,13 +58,17 @@ } -def compute_http_request_size(method: str, url: str, headers: Sequence[tuple[str, str]], body: str): +def compute_http_request_size( + method: str, url: str, headers: Sequence[tuple[str, str]], body: str | None +): request_line_size = len(f"{method} {url} HTTP/1.1\r\n") headers_size = sum( len(key) + len(value.encode("utf-8")) + len("\r\n") for key, value in headers ) - body_size = len(body.encode("utf-8")) - return request_line_size + headers_size + len("\r\n") + body_size + body_size = 0 + if body is not None: + body_size = len(body.encode("utf-8")) + len("\r\n") + return request_line_size + headers_size + body_size @extend_schema_serializer() @@ -64,30 +79,41 @@ class UptimeMonitorValidator(CamelSnakeSerializer): help_text="Name of the uptime monitor", ) owner = ActorField( - required=True, + required=False, allow_null=True, help_text="The ID of the team or user that owns the uptime monitor. (eg. 
user:51 or team:6)", ) + environment = serializers.CharField( + max_length=64, + required=False, + allow_null=True, + help_text="Name of the environment", + ) url = URLField(required=True, max_length=255) - interval_seconds = serializers.IntegerField( - required=True, min_value=60, max_value=int(timedelta(days=1).total_seconds()) + interval_seconds = serializers.ChoiceField( + required=True, choices=[int(i.total_seconds()) for i in VALID_INTERVALS] + ) + timeout_ms = serializers.IntegerField( + required=True, + min_value=1000, + max_value=30_000, ) mode = serializers.IntegerField(required=False) method = serializers.ChoiceField( required=False, choices=list(zip(SUPPORTED_HTTP_METHODS, SUPPORTED_HTTP_METHODS)) ) headers = serializers.JSONField(required=False) - body = serializers.CharField(required=False) + body = serializers.CharField(required=False, allow_null=True) def validate(self, attrs): headers = [] method = "GET" - body = "" + body = None url = "" if self.instance: headers = self.instance.uptime_subscription.headers method = self.instance.uptime_subscription.method - body = self.instance.uptime_subscription.body or "" + body = self.instance.uptime_subscription.body url = self.instance.uptime_subscription.url request_size = compute_http_request_size( @@ -134,17 +160,27 @@ def validate_mode(self, mode): ) def create(self, validated_data): + if validated_data.get("environment") is not None: + environment = Environment.get_or_create( + project=self.context["project"], + name=validated_data["environment"], + ) + else: + environment = None + method_headers_body = { k: v for k, v in validated_data.items() if k in {"method", "headers", "body"} } try: uptime_monitor, created = get_or_create_project_uptime_subscription( project=self.context["project"], + environment=environment, url=validated_data["url"], interval_seconds=validated_data["interval_seconds"], + timeout_ms=validated_data["timeout_ms"], name=validated_data["name"], mode=validated_data.get("mode", ProjectUptimeSubscriptionMode.MANUAL), - owner=validated_data["owner"], + owner=validated_data.get("owner"), **method_headers_body, ) except MaxManualUptimeSubscriptionsReached: @@ -171,19 +207,32 @@ def update(self, instance: ProjectUptimeSubscription, data): if "interval_seconds" in data else instance.uptime_subscription.interval_seconds ) + timeout_ms = ( + data["timeout_ms"] if "timeout_ms" in data else instance.uptime_subscription.timeout_ms + ) method = data["method"] if "method" in data else instance.uptime_subscription.method headers = data["headers"] if "headers" in data else instance.uptime_subscription.headers body = data["body"] if "body" in data else instance.uptime_subscription.body name = data["name"] if "name" in data else instance.name owner = data["owner"] if "owner" in data else instance.owner + if "environment" in data: + environment = Environment.get_or_create( + project=self.context["project"], + name=data["environment"], + ) + else: + environment = instance.environment + if "mode" in data: raise serializers.ValidationError("Mode can only be specified on creation (for now)") update_project_uptime_subscription( uptime_monitor=instance, + environment=environment, url=url, interval_seconds=interval_seconds, + timeout_ms=timeout_ms, method=method, headers=headers, body=body, diff --git a/src/sentry/uptime/issue_platform.py b/src/sentry/uptime/issue_platform.py index a74e652812ee6..9b72df0aa80da 100644 --- a/src/sentry/uptime/issue_platform.py +++ b/src/sentry/uptime/issue_platform.py @@ -80,6 +80,7 @@ def 
build_occurrence_from_result( culprit="", # TODO: The url? detection_time=datetime.now(timezone.utc), level="error", + assignee=project_subscription.owner, ) @@ -88,8 +89,13 @@ def build_event_data_for_occurrence( project_subscription: ProjectUptimeSubscription, occurrence: IssueOccurrence, ): + # Default environment when it hasn't been configured + env = "prod" + if project_subscription.environment: + env = project_subscription.environment.name + return { - "environment": "prod", # TODO: Include the environment here when we have it + "environment": env, "event_id": occurrence.event_id, "fingerprint": occurrence.fingerprint, "platform": "other", diff --git a/src/sentry/uptime/migrations/0014_add_uptime_enviromnet.py b/src/sentry/uptime/migrations/0014_add_uptime_enviromnet.py new file mode 100644 index 0000000000000..a4d8ba9f26f78 --- /dev/null +++ b/src/sentry/uptime/migrations/0014_add_uptime_enviromnet.py @@ -0,0 +1,41 @@ +# Generated by Django 5.1.1 on 2024-09-30 16:23 + +import django.db.models.deletion +from django.db import migrations + +import sentry.db.models.fields.foreignkey +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0768_fix_old_group_first_seen_dates"), + ("uptime", "0013_uptime_subscription_new_unique"), + ] + + operations = [ + migrations.AddField( + model_name="projectuptimesubscription", + name="environment", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + db_constraint=False, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="sentry.environment", + ), + ), + ] diff --git a/src/sentry/uptime/migrations/0015_headers_deafult_empty_list.py b/src/sentry/uptime/migrations/0015_headers_deafult_empty_list.py new file mode 100644 index 0000000000000..1226be4d8b055 --- /dev/null +++ b/src/sentry/uptime/migrations/0015_headers_deafult_empty_list.py @@ -0,0 +1,34 @@ +# Generated by Django 5.1.1 on 2024-10-02 19:09 + +from django.db import migrations + +import sentry.db.models.fields.jsonfield +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. 
Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("uptime", "0014_add_uptime_enviromnet"), + ] + + operations = [ + migrations.AlterField( + model_name="uptimesubscription", + name="headers", + field=sentry.db.models.fields.jsonfield.JSONField(db_default=[], default=dict), + ), + ] diff --git a/src/sentry/uptime/migrations/0016_translate_uptime_object_headers_to_lists.py b/src/sentry/uptime/migrations/0016_translate_uptime_object_headers_to_lists.py new file mode 100644 index 0000000000000..278170b716b14 --- /dev/null +++ b/src/sentry/uptime/migrations/0016_translate_uptime_object_headers_to_lists.py @@ -0,0 +1,45 @@ +# Generated by Django 5.1.1 on 2024-10-02 16:06 +from django.db import migrations +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.migrations.state import StateApps + +from sentry.new_migrations.migrations import CheckedMigration +from sentry.utils.query import RangeQuerySetWrapperWithProgressBar + + +def migrate_header_objects_to_lists( + apps: StateApps, + schema_editor: BaseDatabaseSchemaEditor, +) -> None: + UptimeSubscription = apps.get_model("uptime", "UptimeSubscription") + for sub in RangeQuerySetWrapperWithProgressBar(UptimeSubscription.objects.filter(headers={})): + sub.headers = [] + sub.save() + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = True + + dependencies = [ + ("uptime", "0015_headers_deafult_empty_list"), + ] + + operations = [ + migrations.RunPython( + migrate_header_objects_to_lists, + migrations.RunPython.noop, + hints={"tables": ["uptime_uptimesubscription"]}, + ), + ] diff --git a/src/sentry/uptime/migrations/0017_unique_on_timeout.py b/src/sentry/uptime/migrations/0017_unique_on_timeout.py new file mode 100644 index 0000000000000..cce1a75157d03 --- /dev/null +++ b/src/sentry/uptime/migrations/0017_unique_on_timeout.py @@ -0,0 +1,48 @@ +# Generated by Django 5.1.1 on 2024-10-08 19:37 + +import django.db.models.functions.comparison +import django.db.models.functions.text +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("uptime", "0016_translate_uptime_object_headers_to_lists"), + ] + + operations = [ + migrations.RemoveConstraint( + model_name="uptimesubscription", + name="uptime_uptimesubscription_unique_subscription_check", + ), + migrations.AddConstraint( + model_name="uptimesubscription", + constraint=models.UniqueConstraint( + models.F("url"), + models.F("interval_seconds"), + models.F("timeout_ms"), + models.F("method"), + django.db.models.functions.text.MD5("headers"), + django.db.models.functions.comparison.Coalesce( + django.db.models.functions.text.MD5("body"), models.Value("") + ), + name="uptime_uptimesubscription_unique_subscription_check", + ), + ), + ] diff --git a/src/sentry/uptime/models.py b/src/sentry/uptime/models.py index 20a34e666c370..1ed3dc2aa3867 100644 --- a/src/sentry/uptime/models.py +++ b/src/sentry/uptime/models.py @@ -54,7 +54,7 @@ class UptimeSubscription(BaseRemoteSubscription, DefaultFieldsModelExisting): # HTTP method to perform the check with method = models.CharField(max_length=20, db_default="GET") # HTTP headers to send when performing the check - headers = JSONField(json_dumps=headers_json_encoder, db_default={}) + headers = JSONField(json_dumps=headers_json_encoder, db_default=[]) # HTTP body to send when performing the check body = models.TextField(null=True) @@ -71,6 +71,7 @@ class Meta: models.UniqueConstraint( "url", "interval_seconds", + "timeout_ms", "method", MD5("headers"), Coalesce(MD5("body"), Value("")), @@ -100,6 +101,9 @@ class ProjectUptimeSubscription(DefaultFieldsModelExisting): __relocation_scope__ = RelocationScope.Excluded project = 
FlexibleForeignKey("sentry.Project") + environment = FlexibleForeignKey( + "sentry.Environment", db_index=True, db_constraint=False, null=True + ) uptime_subscription = FlexibleForeignKey("uptime.UptimeSubscription", on_delete=models.PROTECT) mode = models.SmallIntegerField(default=ProjectUptimeSubscriptionMode.MANUAL.value) uptime_status = models.PositiveSmallIntegerField(default=UptimeStatus.OK.value) diff --git a/src/sentry/uptime/subscriptions/subscriptions.py b/src/sentry/uptime/subscriptions/subscriptions.py index 5412a4b311e0d..a8d580d1312e2 100644 --- a/src/sentry/uptime/subscriptions/subscriptions.py +++ b/src/sentry/uptime/subscriptions/subscriptions.py @@ -7,6 +7,7 @@ from django.db.models.expressions import Value from django.db.models.functions import MD5, Coalesce +from sentry.models.environment import Environment from sentry.models.project import Project from sentry.types.actor import Actor from sentry.uptime.detectors.url_extraction import extract_domain_parts @@ -27,8 +28,6 @@ UPTIME_SUBSCRIPTION_TYPE = "uptime_monitor" MAX_AUTO_SUBSCRIPTIONS_PER_ORG = 1 MAX_MANUAL_SUBSCRIPTIONS_PER_ORG = 100 -# Default timeout for all subscriptions -DEFAULT_SUBSCRIPTION_TIMEOUT_MS = 10000 class MaxManualUptimeSubscriptionsReached(ValueError): @@ -38,6 +37,7 @@ class MaxManualUptimeSubscriptionsReached(ValueError): def retrieve_uptime_subscription( url: str, interval_seconds: int, + timeout_ms: int, method: str, headers: Sequence[tuple[str, str]], body: str | None, @@ -45,7 +45,10 @@ def retrieve_uptime_subscription( try: subscription = ( UptimeSubscription.objects.filter( - url=url, interval_seconds=interval_seconds, method=method + url=url, + interval_seconds=interval_seconds, + timeout_ms=timeout_ms, + method=method, ) .annotate( headers_md5=MD5("headers", output_field=TextField()), @@ -65,7 +68,7 @@ def retrieve_uptime_subscription( def get_or_create_uptime_subscription( url: str, interval_seconds: int, - timeout_ms: int = DEFAULT_SUBSCRIPTION_TIMEOUT_MS, + timeout_ms: int, method: str = "GET", headers: Sequence[tuple[str, str]] | None = None, body: str | None = None, @@ -80,7 +83,9 @@ def get_or_create_uptime_subscription( # domain. 
result = extract_domain_parts(url) - subscription = retrieve_uptime_subscription(url, interval_seconds, method, headers, body) + subscription = retrieve_uptime_subscription( + url, interval_seconds, timeout_ms, method, headers, body + ) created = False if subscription is None: @@ -101,7 +106,7 @@ def get_or_create_uptime_subscription( except IntegrityError: # Handle race condition where we tried to retrieve an existing subscription while it was being created subscription = retrieve_uptime_subscription( - url, interval_seconds, method, headers, body + url, interval_seconds, timeout_ms, method, headers, body ) if subscription is None: @@ -142,9 +147,10 @@ def delete_uptime_subscription(uptime_subscription: UptimeSubscription): def get_or_create_project_uptime_subscription( project: Project, + environment: Environment | None, url: str, interval_seconds: int, - timeout_ms: int = DEFAULT_SUBSCRIPTION_TIMEOUT_MS, + timeout_ms: int, method: str = "GET", headers: Sequence[tuple[str, str]] | None = None, body: str | None = None, @@ -174,6 +180,7 @@ def get_or_create_project_uptime_subscription( owner_team_id = owner.id return ProjectUptimeSubscription.objects.get_or_create( project=project, + environment=environment, uptime_subscription=uptime_subscription, mode=mode.value, name=name, @@ -184,8 +191,10 @@ def get_or_create_project_uptime_subscription( def update_project_uptime_subscription( uptime_monitor: ProjectUptimeSubscription, + environment: Environment | None, url: str, interval_seconds: int, + timeout_ms: int, method: str, headers: Sequence[tuple[str, str]], body: str | None, @@ -197,7 +206,7 @@ def update_project_uptime_subscription( """ cur_uptime_subscription = uptime_monitor.uptime_subscription new_uptime_subscription = get_or_create_uptime_subscription( - url, interval_seconds, cur_uptime_subscription.timeout_ms, method, headers, body + url, interval_seconds, timeout_ms, method, headers, body ) updated_subscription = cur_uptime_subscription.id != new_uptime_subscription.id @@ -217,6 +226,7 @@ def update_project_uptime_subscription( owner_user_id = None uptime_monitor.update( + environment=environment, uptime_subscription=new_uptime_subscription, name=name, mode=mode, diff --git a/src/sentry/users/api/endpoints/user_authenticator_details.py b/src/sentry/users/api/endpoints/user_authenticator_details.py index f0dd463623587..731ce1a811802 100644 --- a/src/sentry/users/api/endpoints/user_authenticator_details.py +++ b/src/sentry/users/api/endpoints/user_authenticator_details.py @@ -131,7 +131,7 @@ def put( :auth required: """ - # TODO temporary solution for both renaming and regenerating recovery code. + # TODO: temporary solution for both renaming and regenerating recovery code. 
         # Need to find new home for regenerating recovery codes as it doesn't really do what put is supposed to do
         try:
             authenticator = Authenticator.objects.get(user=user, id=auth_id)
diff --git a/src/sentry/users/api/serializers/user.py b/src/sentry/users/api/serializers/user.py
index 87f2b7a54bdbd..4d987a9bc19c6 100644
--- a/src/sentry/users/api/serializers/user.py
+++ b/src/sentry/users/api/serializers/user.py
@@ -61,7 +61,7 @@ class _Identity(TypedDict):
 class _UserOptions(TypedDict):
     theme: str  # TODO: enum/literal for theme options
     language: str
-    stacktraceOrder: int  # TODO enum/literal
+    stacktraceOrder: int  # TODO: enum/literal
     defaultIssueEvent: str
     timezone: str
     clock24Hours: bool
diff --git a/src/sentry/utils/celery.py b/src/sentry/utils/celery.py
index 05e27fec298bf..f576a099bcb8a 100644
--- a/src/sentry/utils/celery.py
+++ b/src/sentry/utils/celery.py
@@ -1,9 +1,74 @@
+from collections.abc import Mapping, MutableSequence, Sequence
 from random import randint
 from typing import Any
 
 from celery.schedules import crontab
+from kombu import Queue
+
+from sentry.conf.types.celery import SplitQueueSize, SplitQueueTaskRoute
 
 
 def crontab_with_minute_jitter(*args: Any, **kwargs: Any) -> crontab:
     kwargs["minute"] = randint(0, 59)
     return crontab(*args, **kwargs)
+
+
+def build_queue_names(base_name: str, quantity: int) -> Sequence[str]:
+    ret = []
+    for index in range(quantity):
+        name = f"{base_name}_{index + 1}"
+        ret.append(name)
+    return ret
+
+
+def _build_queues(base: str, quantity: int) -> Sequence[Queue]:
+    return [Queue(name=name, routing_key=name) for name in build_queue_names(base, quantity)]
+
+
+def make_split_task_queues(config: Mapping[str, SplitQueueTaskRoute]) -> list[Queue]:
+    """
+    Generates the split queue definitions from the mapping between
+    a task name and a config expressed as `SplitQueueTaskRoute`.
+    """
+    ret: list[Queue] = []
+    for conf in config.values():
+        if "queues_config" in conf:
+            ret.extend(_build_queues(conf["default_queue"], conf["queues_config"]["total"]))
+    return ret
+
+
+def make_split_queues(config: Mapping[str, SplitQueueSize]) -> Sequence[Queue]:
+    """
+    Generates the split queue definitions from the mapping between
+    base queue and split queue config.
+    """
+    ret: MutableSequence[Queue] = []
+    for base_name, conf in config.items():
+        ret.extend(_build_queues(base_name, conf["total"]))
+
+    return ret
+
+
+def safe_append(queues: MutableSequence[Queue], queue: Queue) -> None:
+    """
+    Queues are defined as lists in the configuration, and we allow the
+    config to be overridden per environment. Unfortunately, if you add a
+    queue with the same name twice to the Celery config, Celery just
+    creates the queue twice. This can be undesired behavior depending on
+    the Celery backend, so this method appends a queue to the list only
+    if it is not already present.
+    """
+    existing_queue_names = {q.name for q in queues}
+    if queue.name not in existing_queue_names:
+        queues.append(queue)
+
+
+def safe_extend(queues: MutableSequence[Queue], to_add: Sequence[Queue]) -> None:
+    """
+    Like `safe_append`, but extends the config with multiple queues
+    at once.
+ """ + existing_queue_names = {q.name for q in queues} + for q in to_add: + if q.name not in existing_queue_names: + queues.append(q) diff --git a/src/sentry/utils/eventuser.py b/src/sentry/utils/eventuser.py index a8580f98da68f..99d342e0597ea 100644 --- a/src/sentry/utils/eventuser.py +++ b/src/sentry/utils/eventuser.py @@ -7,7 +7,7 @@ from datetime import datetime from functools import cached_property from ipaddress import IPv4Address, IPv6Address, ip_address -from typing import Any +from typing import Any, TypedDict from django.db.models import QuerySet from snuba_sdk import ( @@ -87,6 +87,15 @@ def get_ip_address_conditions(ip_addresses: list[str]) -> list[Condition]: return conditions +class SerializedEventUser(TypedDict): + id: str + username: str | None + email: str | None + name: str | None + ipAddress: str | None + avatarUrl: str | None + + @dataclass class EventUser: project_id: int | None @@ -334,7 +343,7 @@ def iter_attributes(self): for key in KEYWORD_MAP.keys(): yield key, getattr(self, key) - def serialize(self): + def serialize(self) -> SerializedEventUser: return { "id": str(self.id) if self.id else str(self.user_ident), "username": self.username, diff --git a/src/sentry/utils/flag.py b/src/sentry/utils/flag.py index a19a4829fffdf..7a878a0c7f3c5 100644 --- a/src/sentry/utils/flag.py +++ b/src/sentry/utils/flag.py @@ -4,7 +4,7 @@ flag_manager = ContextVar("flag_manager") # type: ignore[var-annotated] -def initialize_flag_manager(capacity: int = 10) -> None: +def initialize_flag_manager(capacity: int = 100) -> None: flag_manager.set(FlagManager(capacity=capacity)) diff --git a/src/sentry/utils/github.py b/src/sentry/utils/github.py new file mode 100644 index 0000000000000..9c8eab15f1171 --- /dev/null +++ b/src/sentry/utils/github.py @@ -0,0 +1,45 @@ +import base64 +import binascii +from typing import Any + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import ec +from pydantic import BaseModel + +from sentry import options + +from .github_client import GitHubClient + + +class GitHubKeysPayload(BaseModel): + public_keys: list[dict[str, Any]] + + +def verify_signature(payload: str, signature: str, key_id: str, subpath: str) -> None: + if not payload or not signature or not key_id: + raise ValueError("Invalid payload, signature, or key_id") + + client_id = options.get("github-login.client-id") + client_secret = options.get("github-login.client-secret") + client = GitHubClient(client_id=client_id, client_secret=client_secret) + response = client.get(f"/meta/public_keys/{subpath}") + keys = GitHubKeysPayload.parse_obj(response) + + public_key = next((k for k in keys.public_keys if k["key_identifier"] == key_id), None) + if not public_key: + raise ValueError("No public key found matching key identifier") + + key = serialization.load_pem_public_key(public_key["key"].encode()) + + if not isinstance(key, ec.EllipticCurvePublicKey): + raise ValueError("Invalid public key type") + + try: + # Decode the base64 signature to bytes + signature_bytes = base64.b64decode(signature) + key.verify(signature_bytes, payload.encode(), ec.ECDSA(hashes.SHA256())) + except InvalidSignature: + raise ValueError("Signature does not match payload") + except binascii.Error: + raise ValueError("Invalid signature encoding") diff --git a/src/sentry/utils/github_client.py b/src/sentry/utils/github_client.py new file mode 100644 index 0000000000000..37a079c545338 --- /dev/null +++ 
b/src/sentry/utils/github_client.py @@ -0,0 +1,80 @@ +from requests.exceptions import HTTPError + +from sentry.http import build_session +from sentry.utils import json + + +class ApiError(Exception): + code = None + json = None + xml = None + + def __init__(self, text, code=None): + if code is not None: + self.code = code + self.text = text + # TODO(dcramer): pull in XML support from Jira + if text: + try: + self.json = json.loads(text) + except (json.JSONDecodeError, ValueError): + self.json = None + else: + self.json = None + super().__init__(text[:128]) + + @classmethod + def from_response(cls, response): + if response.status_code == 401: + return ApiUnauthorized(response.text) + return cls(response.text, response.status_code) + + +class ApiUnauthorized(ApiError): + code = 401 + + +class GitHubClient: + ApiError = ApiError + + url = "https://api.github.com" + + def __init__(self, url=None, token=None, client_id=None, client_secret=None): + if url is not None: + self.url = url.rstrip("/") + self.token = token + self.client_id = client_id + self.client_secret = client_secret + + def _request(self, method, path, headers=None, data=None, params=None, auth=None): + with build_session() as session: + try: + resp = getattr(session, method.lower())( + url=f"{self.url}{path}", + headers=headers, + json=data, + params=params, + allow_redirects=True, + auth=auth, + ) + resp.raise_for_status() + except HTTPError as e: + raise ApiError.from_response(e.response) + return resp.json() + + def request(self, method, path, data=None, params=None, auth=None): + headers = {"Accept": "application/vnd.github.valkyrie-preview+json"} + + if self.token: + headers.setdefault("Authorization", f"token {self.token}") + + elif auth is None and self.client_id and self.client_secret: + auth = (self.client_id, self.client_secret) + + return self._request(method, path, headers=headers, data=data, params=params, auth=auth) + + def get(self, *args, **kwargs): + return self.request("GET", *args, **kwargs) + + def post(self, *args, **kwargs): + return self.request("POST", *args, **kwargs) diff --git a/src/sentry/utils/kvstore/bigtable.py b/src/sentry/utils/kvstore/bigtable.py index 824f191cd0f1b..bc7f38c3bff6f 100644 --- a/src/sentry/utils/kvstore/bigtable.py +++ b/src/sentry/utils/kvstore/bigtable.py @@ -6,6 +6,7 @@ from threading import Lock from typing import Any +import sentry_sdk from django.utils import timezone from google.api_core import exceptions, retry from google.cloud import bigtable @@ -114,7 +115,8 @@ def _get_table(self, admin: bool = False) -> Table: return table def get(self, key: str) -> bytes | None: - row = self._get_table().read_row(key) + with sentry_sdk.start_span(op="bigtable.get"): + row = self._get_table().read_row(key) if row is None: return None diff --git a/src/sentry/utils/performance_issues/detectors/consecutive_db_detector.py b/src/sentry/utils/performance_issues/detectors/consecutive_db_detector.py index 907b2cbf112ce..4307e668b631f 100644 --- a/src/sentry/utils/performance_issues/detectors/consecutive_db_detector.py +++ b/src/sentry/utils/performance_issues/detectors/consecutive_db_detector.py @@ -125,7 +125,7 @@ def _store_performance_problem(self) -> None: self.stored_problems[fingerprint] = PerformanceProblem( fingerprint, "db", - desc=query, # TODO - figure out which query to use for description + desc=query, # TODO: figure out which query to use for description type=PerformanceConsecutiveDBQueriesGroupType, cause_span_ids=cause_span_ids, parent_span_ids=None, diff --git 
a/src/sentry/utils/performance_issues/detectors/http_overhead_detector.py b/src/sentry/utils/performance_issues/detectors/http_overhead_detector.py index 7e4eb0f7f3b62..697a1e1a38d3f 100644 --- a/src/sentry/utils/performance_issues/detectors/http_overhead_detector.py +++ b/src/sentry/utils/performance_issues/detectors/http_overhead_detector.py @@ -65,11 +65,13 @@ def visit_span(self, span: Span) -> None: url = span_data.get("url", "") span_start = span.get("start_timestamp", 0) * 1000 - request_start = span_data.get("http.request.request_start", 0) * 1000 + request_start = span_data.get("http.request.request_start", 0) if not url or not span_start or not request_start: return + request_start *= 1000 + if url.startswith("/"): location = "/" else: diff --git a/src/sentry/utils/performance_issues/detectors/io_main_thread_detector.py b/src/sentry/utils/performance_issues/detectors/io_main_thread_detector.py index 5755e56109f94..b4919bf0a144e 100644 --- a/src/sentry/utils/performance_issues/detectors/io_main_thread_detector.py +++ b/src/sentry/utils/performance_issues/detectors/io_main_thread_detector.py @@ -193,7 +193,8 @@ def _is_io_on_main_thread(self, span: Span) -> bool: data = span.get("data", {}) if data is None: return False - file_path = data.get("file.path", "").lower() + file_path = (data.get("file.path") or "").lower() + if any(glob_match(file_path, ignored_pattern) for ignored_pattern in self.IGNORED_LIST): return False # doing is True since the value can be any type diff --git a/src/sentry/utils/performance_issues/detectors/large_payload_detector.py b/src/sentry/utils/performance_issues/detectors/large_payload_detector.py index 755e4c319b9bd..a4c2dd37cbf31 100644 --- a/src/sentry/utils/performance_issues/detectors/large_payload_detector.py +++ b/src/sentry/utils/performance_issues/detectors/large_payload_detector.py @@ -54,6 +54,10 @@ def visit_span(self, span: Span) -> None: return payload_size_threshold = self.settings.get("payload_size_threshold") + + if isinstance(encoded_body_size, str): + encoded_body_size = int(encoded_body_size) + if encoded_body_size > payload_size_threshold: self._store_performance_problem(span) diff --git a/src/sentry/utils/performance_issues/performance_detection.py b/src/sentry/utils/performance_issues/performance_detection.py index db1f02b915b78..dd7571b5e98e6 100644 --- a/src/sentry/utils/performance_issues/performance_detection.py +++ b/src/sentry/utils/performance_issues/performance_detection.py @@ -124,9 +124,7 @@ def detect_performance_problems( sentry_sdk.set_tag("_did_analyze_performance_issue", "true") with ( metrics.timer("performance.detect_performance_issue", sample_rate=0.01), - sentry_sdk.start_span( - op="py.detect_performance_issue", description="none" - ) as sdk_span, + sentry_sdk.start_span(op="py.detect_performance_issue", name="none") as sdk_span, ): return _detect_performance_problems( data, sdk_span, project, is_standalone_spans=is_standalone_spans @@ -338,10 +336,10 @@ def _detect_performance_problems( ) -> list[PerformanceProblem]: event_id = data.get("event_id", None) - with sentry_sdk.start_span(op="function", description="get_detection_settings"): + with sentry_sdk.start_span(op="function", name="get_detection_settings"): detection_settings = get_detection_settings(project.id) - with sentry_sdk.start_span(op="initialize", description="PerformanceDetector"): + with sentry_sdk.start_span(op="initialize", name="PerformanceDetector"): detectors: list[PerformanceDetector] = [ detector_class(detection_settings, data) for 
detector_class in DETECTOR_CLASSES @@ -350,11 +348,11 @@ def _detect_performance_problems( for detector in detectors: with sentry_sdk.start_span( - op="function", description=f"run_detector_on_data.{detector.type.value}" + op="function", name=f"run_detector_on_data.{detector.type.value}" ): run_detector_on_data(detector, data) - with sentry_sdk.start_span(op="function", description="report_metrics_for_detectors"): + with sentry_sdk.start_span(op="function", name="report_metrics_for_detectors"): # Metrics reporting only for detection, not created issues. report_metrics_for_detectors( data, @@ -368,7 +366,7 @@ def _detect_performance_problems( organization = project.organization problems: list[PerformanceProblem] = [] - with sentry_sdk.start_span(op="performance_detection", description="is_creation_allowed"): + with sentry_sdk.start_span(op="performance_detection", name="is_creation_allowed"): for detector in detectors: if all( [ diff --git a/src/sentry/utils/performance_issues/performance_problem.py b/src/sentry/utils/performance_issues/performance_problem.py index ecd5b4a276b65..11c978539f346 100644 --- a/src/sentry/utils/performance_issues/performance_problem.py +++ b/src/sentry/utils/performance_issues/performance_problem.py @@ -18,7 +18,7 @@ class PerformanceProblem: # The actual bad spans offender_span_ids: Sequence[str] # Evidence to be used for the group - # TODO make evidence_data and evidence_display required once all detectors have been migrated to platform + # TODO: make evidence_data and evidence_display required once all detectors have been migrated to platform # We can't make it required until we stop loading these from nodestore via EventPerformanceProblem, # since there's legacy data in there that won't have these fields. # So until we disable transaction based perf issues we'll need to keep this optional. 
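For reference, the split-queue helpers added above in `src/sentry/utils/celery.py` compose like this. A minimal usage sketch, assuming a `SplitQueueSize` config that carries at least a `"total"` count (the `post_process_transactions` base queue name is purely illustrative):

```python
from kombu import Queue

from sentry.utils.celery import build_queue_names, make_split_queues, safe_append, safe_extend

# build_queue_names appends a 1-based suffix to the base name.
assert build_queue_names("post_process_transactions", 3) == [
    "post_process_transactions_1",
    "post_process_transactions_2",
    "post_process_transactions_3",
]

queues = [Queue(name="default", routing_key="default")]

# Expand the base queue into its numbered split queues; make_split_queues
# only reads the "total" key of each SplitQueueSize entry.
split = make_split_queues({"post_process_transactions": {"total": 3}})

# safe_extend skips queues whose names are already present, so applying the
# same per-environment override twice does not register duplicates.
safe_extend(queues, split)
safe_extend(queues, split)  # second call is a no-op

# safe_append gives the same guarantee for a single queue.
safe_append(queues, Queue(name="default", routing_key="default"))  # no-op

assert [q.name for q in queues] == [
    "default",
    "post_process_transactions_1",
    "post_process_transactions_2",
    "post_process_transactions_3",
]
```

This is exactly the failure mode the docstrings describe: per-environment overrides re-apply the same queue lists, and registering the same `Queue` name twice is undesirable on some Celery backends.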
diff --git a/src/sentry/utils/prompts.py b/src/sentry/utils/prompts.py
index b1aa15112077a..07241483be6fd 100644
--- a/src/sentry/utils/prompts.py
+++ b/src/sentry/utils/prompts.py
@@ -20,6 +20,7 @@
     "data_consent_priority": {"required_fields": ["organization_id"]},
     "issue_replay_inline_onboarding": {"required_fields": ["organization_id", "project_id"]},
     "issue_feedback_hidden": {"required_fields": ["organization_id", "project_id"]},
+    "issue_views_add_view_banner": {"required_fields": ["organization_id"]},
 }
diff --git a/src/sentry/utils/registry.py b/src/sentry/utils/registry.py
new file mode 100644
index 0000000000000..1174cb876ac86
--- /dev/null
+++ b/src/sentry/utils/registry.py
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+from typing import Generic, TypeVar
+
+
+class AlreadyRegisteredError(ValueError):
+    pass
+
+
+class NoRegistrationExistsError(ValueError):
+    pass
+
+
+T = TypeVar("T")
+
+
+class Registry(Generic[T]):
+    def __init__(self):
+        self.registrations: dict[str, T] = {}
+
+    def register(self, key: str):
+        def inner(item: T) -> T:
+            if key in self.registrations:
+                raise AlreadyRegisteredError(
+                    f"A registration already exists for {key}: {self.registrations[key]}"
+                )
+            self.registrations[key] = item
+            return item
+
+        return inner
+
+    def get(self, key: str) -> T:
+        if key not in self.registrations:
+            raise NoRegistrationExistsError(f"No registration exists for {key}")
+        return self.registrations[key]
diff --git a/src/sentry/utils/retries.py b/src/sentry/utils/retries.py
index 2b144568d6167..28d780846dad7 100644
--- a/src/sentry/utils/retries.py
+++ b/src/sentry/utils/retries.py
@@ -1,6 +1,7 @@
 import functools
 import itertools
 import logging
+import math
 import random
 import time
 from abc import ABC, abstractmethod
@@ -66,6 +67,49 @@ def delay(attempt: int) -> float:
     return delay
 
 
+def sigmoid_delay(offset: int = -5, midpoint: int = 0, step: int = 1) -> Callable[[int], float]:
+    """
+    Returns an S-Curve function.
+
+    A sigmoid is the intersection of these two behaviors:
+    `while(true): retry()  # immediate retry`
+    and
+    `while(true): sleep(1); retry()  # static-wait then retry`
+
+    The intersection of these two worlds is an exponential function which
+    gradually ramps the program up to (or down to) a stable state (the s-curve).
+    The sharpness of the curve is controlled with step. A step of 0 flattens the
+    curve. A step of infinity turns the curve into a step change (a vertical
+    line).
+
+    The sigmoid is more difficult to intuit than a simple exponential delay but it
+    allows you to cap the maximum amount of time you're willing to wait between
+    retries. The cap is _always_ 1 second regardless of the value of the other
+    arguments. If you want to wait longer than one second, multiply the result of
+    the function by something!
+
+    Consider this expression:
+    [sigmoid_delay()(i) for i in range(-5, 5)]
+    which evaluates to:
+    [0.006, 0.017, 0.0474, 0.119, 0.268, 0.5, 0.731, 0.880, 0.952, 0.982]
+
+    You get the same results with:
+    [sigmoid_delay()(i) for i in range(10)]
+    except the window has changed:
+    [0.5, 0.731, 0.880, 0.952, 0.982, ...]
+
+    Now you see further along the curve. This explains the utility of the `offset`
+    parameter. The offset allows you to slide along the window. A smaller offset
+    gives you faster retries. A larger offset gives you slower retries. An offset
+    pushed too far past the midpoint reduces this function to a static wait.
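+
+    Formally: delay(attempt) = 1 / (1 + exp(-step * ((attempt + offset) - midpoint))).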
+ """ + + def delay(attempt: int) -> float: + return 1 / (1 + math.exp(-step * ((attempt + offset) - midpoint))) + + return delay + + class ConditionalRetryPolicy(RetryPolicy): """ A basic policy that can be used to retry a callable based on the result diff --git a/src/sentry/utils/sdk.py b/src/sentry/utils/sdk.py index 9c5cdbe02ebe9..1d0a6c05462ae 100644 --- a/src/sentry/utils/sdk.py +++ b/src/sentry/utils/sdk.py @@ -47,13 +47,13 @@ "outcomes_consumer.py", ) -# Tasks not included here are not sampled -# If a parent task schedules other tasks you should add it in here or the child -# tasks will not be sampled +# Tasks not included here are sampled with `SENTRY_BACKEND_APM_SAMPLING`. +# If a parent task schedules other tasks, rates propagate to the children. SAMPLED_TASKS = { "sentry.tasks.send_ping": settings.SAMPLED_DEFAULT_RATE, "sentry.tasks.store.process_event": settings.SENTRY_PROCESS_EVENT_APM_SAMPLING, "sentry.tasks.store.process_event_from_reprocessing": settings.SENTRY_PROCESS_EVENT_APM_SAMPLING, + "sentry.tasks.store.save_event": settings.SENTRY_PROCESS_EVENT_APM_SAMPLING, "sentry.tasks.store.save_event_transaction": settings.SENTRY_PROCESS_EVENT_APM_SAMPLING, "sentry.tasks.process_suspect_commits": settings.SENTRY_SUSPECT_COMMITS_APM_SAMPLING, "sentry.tasks.process_commit_context": settings.SENTRY_SUSPECT_COMMITS_APM_SAMPLING, @@ -65,22 +65,27 @@ "sentry.tasks.relay.invalidate_project_config": settings.SENTRY_RELAY_TASK_APM_SAMPLING, "sentry.ingest.transaction_clusterer.tasks.spawn_clusterers": settings.SENTRY_RELAY_TASK_APM_SAMPLING, "sentry.ingest.transaction_clusterer.tasks.cluster_projects": settings.SENTRY_RELAY_TASK_APM_SAMPLING, - "sentry.tasks.process_buffer.process_incr": 0.01, + "sentry.tasks.process_buffer.process_incr": 0.1 * settings.SENTRY_BACKEND_APM_SAMPLING, "sentry.replays.tasks.delete_recording_segments": settings.SAMPLED_DEFAULT_RATE, "sentry.replays.tasks.delete_replay_recording_async": settings.SAMPLED_DEFAULT_RATE, "sentry.tasks.summaries.weekly_reports.schedule_organizations": 1.0, - "sentry.tasks.summaries.weekly_reports.prepare_organization_report": 0.1, - "sentry.profiles.task.process_profile": 0.01, + "sentry.tasks.summaries.weekly_reports.prepare_organization_report": 0.1 + * settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.profiles.task.process_profile": 0.1 * settings.SENTRY_BACKEND_APM_SAMPLING, "sentry.tasks.derive_code_mappings.process_organizations": settings.SAMPLED_DEFAULT_RATE, "sentry.tasks.derive_code_mappings.derive_code_mappings": settings.SAMPLED_DEFAULT_RATE, "sentry.monitors.tasks.clock_pulse": 1.0, "sentry.tasks.auto_enable_codecov": settings.SAMPLED_DEFAULT_RATE, - "sentry.dynamic_sampling.tasks.boost_low_volume_projects": 0.2, - "sentry.dynamic_sampling.tasks.boost_low_volume_transactions": 0.2, - "sentry.dynamic_sampling.tasks.recalibrate_orgs": 0.2, - "sentry.dynamic_sampling.tasks.sliding_window_org": 0.2, - "sentry.dynamic_sampling.tasks.custom_rule_notifications": 0.2, - "sentry.dynamic_sampling.tasks.clean_custom_rule_notifications": 0.2, + "sentry.dynamic_sampling.tasks.boost_low_volume_projects": 0.2 + * settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.dynamic_sampling.tasks.boost_low_volume_transactions": 0.2 + * settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.dynamic_sampling.tasks.recalibrate_orgs": 0.2 * settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.dynamic_sampling.tasks.sliding_window_org": 0.2 * settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.dynamic_sampling.tasks.custom_rule_notifications": 0.2 + * 
settings.SENTRY_BACKEND_APM_SAMPLING, + "sentry.dynamic_sampling.tasks.clean_custom_rule_notifications": 0.2 + * settings.SENTRY_BACKEND_APM_SAMPLING, "sentry.tasks.embeddings_grouping.backfill_seer_grouping_records_for_project": 1.0, } @@ -173,6 +178,10 @@ def get_project_key(): def traces_sampler(sampling_context): + # dont sample warmup requests + if sampling_context.get("wsgi_environ", {}).get("PATH_INFO") == "/_warmup/": + return 0.0 + # Apply sample_rate from custom_sampling_context custom_sample_rate = sampling_context.get("sample_rate") if custom_sample_rate is not None: @@ -271,6 +280,9 @@ def _get_sdk_options() -> tuple[SdkConfig, Dsns]: sdk_options["release"] = ( f"backend@{sdk_options['release']}" if "release" in sdk_options else None ) + sdk_options.setdefault("_experiments", {}).update( + transport_http2=True, + ) # Modify SENTRY_SDK_CONFIG in your deployment scripts to specify your desired DSN dsns = Dsns( @@ -603,7 +615,7 @@ def bind_organization_context(organization: Organization | RpcOrganization) -> N scope = Scope.get_isolation_scope() # XXX(dcramer): this is duplicated in organizationContext.jsx on the frontend - with sentry_sdk.start_span(op="other", description="bind_organization_context"): + with sentry_sdk.start_span(op="other", name="bind_organization_context"): # This can be used to find errors that may have been mistagged check_tag_for_scope_bleed("organization.slug", organization.slug) diff --git a/src/sentry/utils/sentry_apps/webhooks.py b/src/sentry/utils/sentry_apps/webhooks.py index 8e35192a329b4..094e6b5c5a685 100644 --- a/src/sentry/utils/sentry_apps/webhooks.py +++ b/src/sentry/utils/sentry_apps/webhooks.py @@ -21,7 +21,7 @@ from sentry.utils.sentry_apps import SentryAppWebhookRequestsBuffer if TYPE_CHECKING: - from sentry.api.serializers import AppPlatformEvent + from sentry.sentry_apps.api.serializers.app_platform_event import AppPlatformEvent from sentry.sentry_apps.services.app.model import RpcSentryApp diff --git a/src/sentry/utils/snuba.py b/src/sentry/utils/snuba.py index 82b1f61f7d4ff..83814252f2aec 100644 --- a/src/sentry/utils/snuba.py +++ b/src/sentry/utils/snuba.py @@ -13,17 +13,15 @@ from copy import deepcopy from datetime import datetime, timedelta, timezone from hashlib import sha1 -from typing import Any, Protocol, TypeVar +from typing import Any from urllib.parse import urlparse -import sentry_protos.snuba.v1alpha.request_common_pb2 import sentry_sdk import sentry_sdk.scope import urllib3 from dateutil.parser import parse as parse_datetime from django.conf import settings from django.core.cache import cache -from google.protobuf.message import Message as ProtobufMessage from snuba_sdk import DeleteQuery, MetricsQuery, Request from snuba_sdk.legacy import json_to_snql @@ -138,6 +136,7 @@ def log_snuba_info(content): "transaction": "segment_name", "transaction.id": "transaction_id", "segment.id": "segment_id", + "transaction.span_id": "segment_id", "transaction.op": "transaction_op", "user": "user", "user.id": "sentry_tags[user.id]", @@ -177,6 +176,7 @@ def log_snuba_info(content): SPAN_EAP_COLUMN_MAP = { "id": "span_id", "span_id": "span_id", # ideally this would be temporary, but unfortunately its heavily hardcoded in the FE + "parent_span": "parent_span_id", "organization.id": "organization_id", "project": "project_id", "project.id": "project_id", @@ -200,14 +200,24 @@ def log_snuba_info(content): "timestamp": "timestamp", "trace": "trace_id", "transaction": "segment_name", + # `transaction.id` and `segment.id` is going to be replaced 
by `transaction.span_id` please do not use + # transaction.id is "wrong", its pointing to segment_id to return something for the transistion, but represents the + # txn event id(32 char uuid). EAP will no longer be storing this. "transaction.id": "segment_id", + "transaction.span_id": "segment_id", + "transaction.method": "attr_str[transaction.method]", "is_transaction": "is_segment", "segment.id": "segment_id", # We should be able to delete origin.transaction and just use transaction "origin.transaction": "segment_name", + # Copy paste, unsure if this is truth in production + "messaging.destination.name": "attr_str[messaging.destination.name]", + "messaging.message.id": "attr_str[messaging.message.id]", "span.status_code": "attr_str[status_code]", "replay.id": "attr_str[replay_id]", "span.ai.pipeline.group": "attr_str[ai_pipeline_group]", + "trace.status": "attr_str[trace.status]", + "browser.name": "attr_str[browser.name]", "ai.total_tokens.used": "attr_num[ai_total_tokens_used]", "ai.total_cost": "attr_num[ai_total_cost]", } @@ -281,7 +291,7 @@ def log_snuba_info(content): Dataset.MetricsSummaries: METRICS_SUMMARIES_COLUMN_MAP, Dataset.PerformanceMetrics: METRICS_COLUMN_MAP, Dataset.SpansIndexed: SPAN_COLUMN_MAP, - Dataset.SpansEAP: SPAN_EAP_COLUMN_MAP, + Dataset.EventsAnalyticsPlatform: SPAN_EAP_COLUMN_MAP, Dataset.IssuePlatform: ISSUE_PLATFORM_MAP, Dataset.Replays: {}, } @@ -296,7 +306,7 @@ def log_snuba_info(content): Dataset.Sessions: SESSIONS_FIELD_LIST, Dataset.IssuePlatform: list(ISSUE_PLATFORM_MAP.values()), Dataset.SpansIndexed: list(SPAN_COLUMN_MAP.values()), - Dataset.SpansEAP: list(SPAN_EAP_COLUMN_MAP.values()), + Dataset.EventsAnalyticsPlatform: list(SPAN_EAP_COLUMN_MAP.values()), Dataset.MetricsSummaries: list(METRICS_SUMMARIES_COLUMN_MAP.values()), } @@ -778,7 +788,7 @@ def _prepare_query_params(query_params: SnubaQueryParams, referrer: str | None = "groupby": query_params.groupby, "conditions": query_params_conditions, "aggregations": query_params.aggregations, - "granularity": query_params.rollup, # TODO name these things the same + "granularity": query_params.rollup, # TODO: name these things the same } ) kwargs = {k: v for k, v in kwargs.items() if v is not None} @@ -1176,6 +1186,14 @@ def _bulk_snuba_query(snuba_requests: Sequence[SnubaRequest]) -> ResultSet: elif error["type"] == "schema": raise SchemaValidationError(error["message"]) elif error["type"] == "invalid_query": + logger.warning( + "UnqualifiedQueryError", + extra={ + "error": error["message"], + "has_data": "data" in body and body["data"] is not None, + "query": snuba_requests_list[index].request.serialize(), + }, + ) raise UnqualifiedQueryError(error["message"]) elif error["type"] == "clickhouse": raise clickhouse_error_codes_map.get(error["code"], QueryExecutionError)( @@ -1205,66 +1223,6 @@ def _log_request_query(req: Request) -> None: ) -RPCResponseType = TypeVar("RPCResponseType", bound=ProtobufMessage) - - -class SnubaRPCRequest(Protocol): - def SerializeToString(self, deterministic: bool = ...) -> bytes: - ... - - @property - def meta(self) -> sentry_protos.snuba.v1alpha.request_common_pb2.RequestMeta: - ... - - -def rpc(req: SnubaRPCRequest, resp_type: type[RPCResponseType]) -> RPCResponseType: - """ - You want to call a snuba RPC. 
Here's how you do it: - - start_time_proto = ProtobufTimestamp() - start_time_proto.FromDatetime(start) - end_time_proto = ProtobufTimestamp() - end_time_proto.FromDatetime(end) - aggregate_req = AggregateBucketRequest( - meta=RequestMeta( - organization_id=organization.id, - cogs_category="events_analytics_platform", - referrer=referrer, - project_ids=[project.id for project in projects], - start_timestamp=start_time_proto, - end_timestamp=end_time_proto, - ), - aggregate=AggregateBucketRequest.FUNCTION_SUM, - filter=TraceItemFilter( - comparison_filter=ComparisonFilter( - key=AttributeKey(name="op", type=AttributeKey.Type.TYPE_STRING), - value=AttributeValue(val_str="ai.run"), - ) - ), - granularity_secs=60, - key=AttributeKey( - name="duration", type=AttributeKey.TYPE_FLOAT - ), - attribute_key_transform_context=AttributeKeyTransformContext(), - ) - aggregate_resp = snuba.rpc(aggregate_req, AggregateBucketResponse) - """ - referrer = req.meta.referrer - with sentry_sdk.start_span(op="snuba_rpc.run", description=req.__class__.__name__) as span: - span.set_tag("snuba.referrer", referrer) - http_resp = _snuba_pool.urlopen( - "POST", - f"/rpc/{req.__class__.__name__}", - body=req.SerializeToString(), - headers={ - "referer": referrer, - }, - ) - resp = resp_type() - resp.ParseFromString(http_resp.data) - return resp - - RawResult = tuple[str, urllib3.response.HTTPResponse, Translator, Translator] @@ -1332,11 +1290,11 @@ def _raw_delete_query( # Enter hub such that http spans are properly nested with timer("delete_query"): referrer = headers.get("referer", "unknown") - with sentry_sdk.start_span(op="snuba_delete.validation", description=referrer) as span: + with sentry_sdk.start_span(op="snuba_delete.validation", name=referrer) as span: span.set_tag("snuba.referrer", referrer) body = request.serialize() - with sentry_sdk.start_span(op="snuba_delete.run", description=body) as span: + with sentry_sdk.start_span(op="snuba_delete.run", name=body) as span: span.set_tag("snuba.referrer", referrer) return _snuba_pool.urlopen( "DELETE", f"/{query.storage_name}", body=body, headers=headers @@ -1350,11 +1308,11 @@ def _raw_mql_query(request: Request, headers: Mapping[str, str]) -> urllib3.resp # TODO: This can be changed back to just `serialize` after we remove SnQL support for MetricsQuery serialized_req = request.serialize() - with sentry_sdk.start_span(op="snuba_mql.validation", description=referrer) as span: + with sentry_sdk.start_span(op="snuba_mql.validation", name=referrer) as span: span.set_tag("snuba.referrer", referrer) body = serialized_req - with sentry_sdk.start_span(op="snuba_mql.run", description=serialized_req) as span: + with sentry_sdk.start_span(op="snuba_mql.run", name=serialized_req) as span: span.set_tag("snuba.referrer", referrer) return _snuba_pool.urlopen( "POST", f"/{request.dataset}/mql", body=body, headers=headers @@ -1367,11 +1325,11 @@ def _raw_snql_query(request: Request, headers: Mapping[str, str]) -> urllib3.res referrer = headers.get("referer", "") serialized_req = request.serialize() - with sentry_sdk.start_span(op="snuba_snql.validation", description=referrer) as span: + with sentry_sdk.start_span(op="snuba_snql.validation", name=referrer) as span: span.set_tag("snuba.referrer", referrer) body = serialized_req - with sentry_sdk.start_span(op="snuba_snql.run", description=serialized_req) as span: + with sentry_sdk.start_span(op="snuba_snql.run", name=serialized_req) as span: span.set_tag("snuba.referrer", referrer) return _snuba_pool.urlopen( "POST", 
f"/{request.dataset}/snql", body=body, headers=headers @@ -1465,7 +1423,7 @@ def _resolve_column(col): if isinstance(col, int) or isinstance(col, float): return col if ( - dataset != Dataset.SpansEAP + dataset != Dataset.EventsAnalyticsPlatform and isinstance(col, str) and (col.startswith("tags[") or QUOTED_LITERAL_RE.match(col)) ): @@ -1476,7 +1434,7 @@ def _resolve_column(col): if isinstance(col, (list, tuple)) or col in ("project_id", "group_id"): return col - elif dataset == Dataset.SpansEAP: + elif dataset == Dataset.EventsAnalyticsPlatform: if isinstance(col, str) and col.startswith("sentry_tags["): # Replace the first instance of sentry tags with attr str instead return col.replace("sentry_tags", "attr_str", 1) @@ -1508,7 +1466,7 @@ def _resolve_column(col): span_op_breakdown_name = get_span_op_breakdown_name(col) if "span_op_breakdowns_key" in DATASETS[dataset] and span_op_breakdown_name: return f"span_op_breakdowns[{span_op_breakdown_name}]" - if dataset == Dataset.SpansEAP: + if dataset == Dataset.EventsAnalyticsPlatform: return f"attr_str[{col}]" return f"tags[{col}]" @@ -1687,7 +1645,7 @@ def aliased_query_params( ) -# TODO (evanh) Since we are assuming that all string values are columns, +# TODO: (evanh) Since we are assuming that all string values are columns, # this will get tricky if we ever have complex columns where there are # string arguments to the functions that aren't columns def resolve_complex_column(col, resolve_func, ignored): diff --git a/src/sentry/utils/snuba_rpc.py b/src/sentry/utils/snuba_rpc.py new file mode 100644 index 0000000000000..730a78ebb4cb2 --- /dev/null +++ b/src/sentry/utils/snuba_rpc.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +from typing import Protocol, TypeVar + +import sentry_protos.snuba.v1alpha.request_common_pb2 +import sentry_sdk +import sentry_sdk.scope +from google.protobuf.message import Message as ProtobufMessage +from sentry_protos.snuba.v1.error_pb2 import Error as ErrorProto + +from sentry.utils.snuba import SnubaError, _snuba_pool + +RPCResponseType = TypeVar("RPCResponseType", bound=ProtobufMessage) + + +class SnubaRPCError(SnubaError): + pass + + +class SnubaRPCRequest(Protocol): + def SerializeToString(self, deterministic: bool = ...) -> bytes: + ... + + @property + def meta(self) -> sentry_protos.snuba.v1alpha.request_common_pb2.RequestMeta: + ... + + +def rpc(req: SnubaRPCRequest, resp_type: type[RPCResponseType]) -> RPCResponseType: + """ + You want to call a snuba RPC. 
Here's how you do it: + + start_time_proto = ProtobufTimestamp() + start_time_proto.FromDatetime(start) + end_time_proto = ProtobufTimestamp() + end_time_proto.FromDatetime(end) + aggregate_req = AggregateBucketRequest( + meta=RequestMeta( + organization_id=organization.id, + cogs_category="events_analytics_platform", + referrer=referrer, + project_ids=[project.id for project in projects], + start_timestamp=start_time_proto, + end_timestamp=end_time_proto, + ), + aggregate=AggregateBucketRequest.FUNCTION_SUM, + filter=TraceItemFilter( + comparison_filter=ComparisonFilter( + key=AttributeKey(name="op", type=AttributeKey.Type.TYPE_STRING), + value=AttributeValue(val_str="ai.run"), + ) + ), + granularity_secs=60, + key=AttributeKey( + name="duration", type=AttributeKey.TYPE_FLOAT + ), + attribute_key_transform_context=AttributeKeyTransformContext(), + ) + aggregate_resp = snuba.rpc(aggregate_req, AggregateBucketResponse) + """ + referrer = req.meta.referrer + with sentry_sdk.start_span(op="snuba_rpc.run", name=req.__class__.__name__) as span: + span.set_tag("snuba.referrer", referrer) + + cls = req.__class__ + class_name = cls.__name__ + class_version = cls.__module__.split(".", 3)[2] + + http_resp = _snuba_pool.urlopen( + "POST", + f"/rpc/{class_name}/{class_version}", + body=req.SerializeToString(), + headers={ + "referer": referrer, + }, + ) + if http_resp.status != 200: + error = ErrorProto() + error.ParseFromString(http_resp.data) + raise SnubaRPCError(error) + + resp = resp_type() + resp.ParseFromString(http_resp.data) + return resp diff --git a/src/sentry/utils/urls.py b/src/sentry/utils/urls.py index 1711f006a79a2..b222bdfe68b13 100644 --- a/src/sentry/utils/urls.py +++ b/src/sentry/utils/urls.py @@ -73,3 +73,12 @@ def urlsplit_best_effort(s: str) -> tuple[str, str, str, str]: return scheme, netloc, path, query else: return parsed.scheme, parsed.netloc, parsed.path, parsed.query + + +def parse_id_or_slug_param(id_or_slug: str | None) -> tuple[int | None, str | None]: + if not id_or_slug: + return None, None + + if id_or_slug.isnumeric(): + return int(id_or_slug), None + return None, id_or_slug diff --git a/src/sentry/web/client_config.py b/src/sentry/web/client_config.py index 1f4b310f74720..73a9bd1e47c85 100644 --- a/src/sentry/web/client_config.py +++ b/src/sentry/web/client_config.py @@ -216,8 +216,6 @@ def tracing_data(self) -> Mapping[str, str]: @property def enabled_features(self) -> Iterable[str]: - if self.last_org and features.has("organizations:react-router-6", self.last_org): - yield "organizations:react-router-6" if features.has("organizations:create", actor=self.user): yield "organizations:create" if auth.has_user_registration(): diff --git a/src/sentry/web/debug_urls.py b/src/sentry/web/debug_urls.py index 04d457e434c68..200e13f9954ea 100644 --- a/src/sentry/web/debug_urls.py +++ b/src/sentry/web/debug_urls.py @@ -3,6 +3,7 @@ import sentry.web.frontend.debug.mail from sentry.integrations.web.debug.debug_notify_disable import DebugNotifyDisableView +from sentry.sentry_apps.web.debug_sentry_app_notify_disable import DebugSentryAppNotifyDisableView from sentry.web.frontend.debug import debug_auth_views from sentry.web.frontend.debug.debug_assigned_email import ( DebugAssignedEmailView, @@ -62,9 +63,6 @@ DebugResolvedInReleaseEmailView, DebugResolvedInReleaseUpcomingEmailView, ) -from sentry.web.frontend.debug.debug_sentry_app_notify_disable import ( - DebugSentryAppNotifyDisableView, -) from sentry.web.frontend.debug.debug_setup_2fa_email import DebugSetup2faEmailView from 
sentry.web.frontend.debug.debug_sso_link_email import ( DebugSsoLinkedEmailView, diff --git a/src/sentry/web/frontend/cli.py b/src/sentry/web/frontend/cli.py new file mode 100644 index 0000000000000..597d9b7f037eb --- /dev/null +++ b/src/sentry/web/frontend/cli.py @@ -0,0 +1,161 @@ +from urllib.parse import quote_plus + +from django.http import HttpRequest, HttpResponse, HttpResponseRedirect + +from sentry.silo.base import control_silo_function +from sentry.utils import metrics + +SCRIPT = r"""#!/bin/sh +set -eu + +# allow overriding the version +VERSION=${SENTRY_CLI_VERSION:-latest} + +PLATFORM=`uname -s` +ARCH=`uname -m` + +case "$PLATFORM" in + CYGWIN*) PLATFORM="Windows" + ;; + MINGW*) PLATFORM="Windows" + ;; + MSYS*) PLATFORM="Windows" + ;; + Darwin) ARCH="universal" + ;; +esac + +case "$ARCH" in + armv6*) ARCH="armv7" + ;; + armv7*) ARCH="armv7" + ;; + armv8*) ARCH="aarch64" + ;; + armv64*) ARCH="aarch64" + ;; + aarch64*) ARCH="aarch64" + ;; +esac + +# If the install directory is not set, set it to a default +if [ -z ${INSTALL_DIR+x} ]; then + INSTALL_DIR=/usr/local/bin +fi +if [ -z ${INSTALL_PATH+x} ]; then + INSTALL_PATH="${INSTALL_DIR}/sentry-cli" +fi + +DOWNLOAD_URL="https://release-registry.services.sentry.io/apps/sentry-cli/${VERSION}?response=download&arch=${ARCH}&platform=${PLATFORM}&package=sentry-cli" + +echo "This script will automatically install sentry-cli (${VERSION}) for you." +echo "Installation path: ${INSTALL_PATH}" +if [ "x$(id -u)" = "x0" ]; then + echo "Warning: this script is currently running as root. This is dangerous. " + echo " Instead run it as normal user. We will sudo as needed." +fi + +if [ -f "$INSTALL_PATH" ]; then + echo "error: sentry-cli is already installed." + echo " run \"sentry-cli update\" to update to latest version" + exit 1 +fi + +if ! hash curl 2> /dev/null; then + echo "error: you do not have 'curl' installed which is required for this script." + exit 1 +fi + +TEMP_FILE=`mktemp "${TMPDIR:-/tmp}/.sentrycli.XXXXXXXX"` +TEMP_HEADER_FILE=`mktemp "${TMPDIR:-/tmp}/.sentrycli-headers.XXXXXXXX"` + +cleanup() { + rm -f "$TEMP_FILE" + rm -f "$TEMP_HEADER_FILE" +} + +trap cleanup EXIT +HTTP_CODE=$(curl -SL --progress-bar "$DOWNLOAD_URL" -D "$TEMP_HEADER_FILE" --output "$TEMP_FILE" --write-out "%{http_code}") +if [ ${HTTP_CODE} -lt 200 ] || [ ${HTTP_CODE} -gt 299 ]; then + echo "error: your platform and architecture (${PLATFORM}-${ARCH}) are unsupported." + exit 1 +fi + +for PYTHON in python3 python2 python ''; do + if hash "$PYTHON"; then + break + fi +done + +if [ "$PYTHON" ]; then + "$PYTHON" - < /dev/null; then + sudo -k sh -c "mkdir -p \"$(dirname "$INSTALL_PATH")\" && mv \"$TEMP_FILE\" \"$INSTALL_PATH\"" +fi + +echo "Successfully installed $("$INSTALL_PATH" --version)" + +VERSION=$("$INSTALL_PATH" --version | awk '{print $2}') +MAJOR=$(echo "$VERSION" | cut -d. -f1) +MINOR=$(echo "$VERSION" | cut -d. -f2) +if (test -d "${HOME}/.oh-my-zsh") 2>/dev/null && [ $MAJOR -eq 2 ] && [ $MINOR -ge 22 ]; then + echo 'Detected Oh My Zsh, installing Zsh completions...' + if (mkdir -p "${HOME}/.oh-my-zsh/completions") 2>&1 && ("$INSTALL_PATH" completions zsh > "${HOME}/.oh-my-zsh/completions/_sentry_cli") 2>&1; then + echo "Successfully installed Zsh completions." + else + echo 'Warning: failed to install Zsh completions.' + fi +fi + +echo 'Done!' 
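+ +# A hedged usage sketch: the docs fetch this script via the /get-cli/ route registered in urls.py below; the version and install-dir overrides here are illustrative only. +# curl -sL https://sentry.io/get-cli/ | sh +# curl -sL https://sentry.io/get-cli/ | SENTRY_CLI_VERSION=2.36.1 INSTALL_DIR="$HOME/.local/bin" sh 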
+""" + + +def get_cli(request: HttpRequest) -> HttpResponse: + metrics.incr("cli.download_script") + return HttpResponse(SCRIPT, content_type="text/plain") + + +@control_silo_function +def get_cli_download_url(request: HttpRequest, platform: str, arch: str) -> HttpResponseRedirect: + url = "https://release-registry.services.sentry.io/apps/sentry-cli/latest?response=download&arch={}&platform={}&package=sentry-cli".format( + quote_plus(arch), + quote_plus(platform), + ) + return HttpResponseRedirect(url) diff --git a/src/sentry/web/frontend/debug/debug_incident_activity_email.py b/src/sentry/web/frontend/debug/debug_incident_activity_email.py index 4d8509fcb5f95..926376b9378c7 100644 --- a/src/sentry/web/frontend/debug/debug_incident_activity_email.py +++ b/src/sentry/web/frontend/debug/debug_incident_activity_email.py @@ -17,7 +17,10 @@ def get(self, request: HttpRequest) -> HttpResponse: id=2, identifier=123, organization=organization, title="Something broke" ) activity = IncidentActivity( - incident=incident, user=user, type=IncidentActivityType.COMMENT.value, comment="hi" + incident=incident, + user_id=user.id, + type=IncidentActivityType.COMMENT.value, + comment="hi", ) email = generate_incident_activity_email(activity, user) return MailPreview( diff --git a/src/sentry/web/frontend/debug/debug_incident_trigger_email.py b/src/sentry/web/frontend/debug/debug_incident_trigger_email.py index 53ebfb3b00920..db58f88c1884a 100644 --- a/src/sentry/web/frontend/debug/debug_incident_trigger_email.py +++ b/src/sentry/web/frontend/debug/debug_incident_trigger_email.py @@ -41,7 +41,7 @@ def get_context(self, request, incident_trigger_mock, user_option_mock): organization=organization, title="Something broke", alert_rule=alert_rule, - status=IncidentStatus.CRITICAL, + status=IncidentStatus.CRITICAL.value, ) trigger = AlertRuleTrigger(alert_rule=alert_rule) diff --git a/src/sentry/web/frontend/debug/debug_mfa_added_email.py b/src/sentry/web/frontend/debug/debug_mfa_added_email.py index 449e359350f8a..6853396103ca0 100644 --- a/src/sentry/web/frontend/debug/debug_mfa_added_email.py +++ b/src/sentry/web/frontend/debug/debug_mfa_added_email.py @@ -1,5 +1,6 @@ import datetime +from django.contrib.auth.models import AnonymousUser from django.http import HttpRequest, HttpResponse from django.views.generic import View @@ -11,6 +12,9 @@ class DebugMfaAddedEmailView(View): def get(self, request: HttpRequest) -> HttpResponse: + if isinstance(request.user, AnonymousUser): + return HttpResponse(status=401) + authenticator = Authenticator(id=0, type=3, user_id=request.user.id) # u2f email = generate_security_email( diff --git a/src/sentry/web/frontend/debug/debug_mfa_removed_email.py b/src/sentry/web/frontend/debug/debug_mfa_removed_email.py index 0f868207f8e67..03cb98317c83c 100644 --- a/src/sentry/web/frontend/debug/debug_mfa_removed_email.py +++ b/src/sentry/web/frontend/debug/debug_mfa_removed_email.py @@ -1,5 +1,6 @@ import datetime +from django.contrib.auth.models import AnonymousUser from django.http import HttpRequest, HttpResponse from django.views.generic import View @@ -11,6 +12,9 @@ class DebugMfaRemovedEmailView(View): def get(self, request: HttpRequest) -> HttpResponse: + if isinstance(request.user, AnonymousUser): + return HttpResponse(status=401) + authenticator = Authenticator(id=0, type=3, user_id=request.user.id) # u2f email = generate_security_email( diff --git a/src/sentry/web/frontend/debug/mail.py b/src/sentry/web/frontend/debug/mail.py index 29136973378cc..2c83f192d9274 100644 --- 
a/src/sentry/web/frontend/debug/mail.py +++ b/src/sentry/web/frontend/debug/mail.py @@ -33,7 +33,7 @@ from sentry.issues.grouptype import NoiseConfig from sentry.issues.occurrence_consumer import process_event_and_issue_occurrence from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka -from sentry.mail.notifications import get_builder_args +from sentry.mail.notifications import RecipientT, get_builder_args from sentry.models.activity import Activity from sentry.models.group import Group, GroupStatus from sentry.models.organization import Organization @@ -58,7 +58,6 @@ TEST_FEEDBACK_ISSUE_OCCURENCE, TEST_ISSUE_OCCURRENCE, ) -from sentry.types.actor import Actor from sentry.types.group import GroupSubStatus from sentry.users.models.lostpasswordhash import LostPasswordHash from sentry.utils import json, loremipsum @@ -822,7 +821,7 @@ def org_delete_confirm(request): # Used to generate debug email views from a notification def render_preview_email_for_notification( - notification: BaseNotification, recipient: Actor + notification: BaseNotification, recipient: RecipientT ) -> HttpResponse: shared_context = notification.get_context() basic_args = get_builder_args(notification, recipient, shared_context) diff --git a/src/sentry/web/frontend/error_page_embed.py b/src/sentry/web/frontend/error_page_embed.py index fdfc803460ce4..68c608e88303b 100644 --- a/src/sentry/web/frontend/error_page_embed.py +++ b/src/sentry/web/frontend/error_page_embed.py @@ -11,7 +11,7 @@ from django.views.decorators.csrf import csrf_exempt from django.views.generic import View -from sentry import eventstore, features +from sentry import eventstore from sentry.feedback.usecases.create_feedback import FeedbackCreationSource, shim_to_feedback from sentry.models.options.project_option import ProjectOption from sentry.models.project import Project @@ -194,12 +194,7 @@ def dispatch(self, request: HttpRequest) -> HttpResponse: ) project = Project.objects.get(id=report.project_id) - if ( - features.has( - "organizations:user-feedback-ingest", project.organization, actor=request.user - ) - and event is not None - ): + if event is not None: shim_to_feedback( { "name": report.name, diff --git a/src/sentry/web/frontend/setup_wizard.py b/src/sentry/web/frontend/setup_wizard.py index 4c54deefa6a6f..bac02865becdb 100644 --- a/src/sentry/web/frontend/setup_wizard.py +++ b/src/sentry/web/frontend/setup_wizard.py @@ -6,8 +6,9 @@ from urllib.parse import parse_qsl, urlparse, urlunparse from django.conf import settings -from django.http import HttpRequest, HttpResponse +from django.http import Http404, HttpRequest, HttpResponse, HttpResponseBadRequest from django.http.response import HttpResponseBase +from django.shortcuts import get_object_or_404 from sentry.api.endpoints.setup_wizard import SETUP_WIZARD_CACHE_KEY, SETUP_WIZARD_CACHE_TIMEOUT from sentry.api.serializers import serialize @@ -19,12 +20,14 @@ from sentry.models.organizationmapping import OrganizationMapping from sentry.models.organizationmembermapping import OrganizationMemberMapping from sentry.models.orgauthtoken import OrgAuthToken +from sentry.projects.services.project.model import RpcProject from sentry.projects.services.project.service import project_service -from sentry.projects.services.project_key.model import ProjectKeyRole +from sentry.projects.services.project_key.model import ProjectKeyRole, RpcProjectKey from sentry.projects.services.project_key.service import project_key_service from sentry.types.token import AuthTokenType from 
sentry.users.models.user import User from sentry.users.services.user.model import RpcUser +from sentry.utils import json from sentry.utils.http import absolute_uri from sentry.utils.security.orgauthtoken_token import ( SystemUrlPrefixMissingException, @@ -59,10 +62,13 @@ def get(self, request: HttpRequest, wizard_hash) -> HttpResponseBase: This opens a page where with an active session fill stuff into the cache Redirects to organization whenever cache has been deleted """ - context = {"hash": wizard_hash} - key = f"{SETUP_WIZARD_CACHE_KEY}{wizard_hash}" + context = {"hash": wizard_hash, "enableProjectSelection": False} + cache_key = f"{SETUP_WIZARD_CACHE_KEY}{wizard_hash}" + + org_slug = request.GET.get("org_slug") + project_slug = request.GET.get("project_slug") - wizard_data = default_cache.get(key) + wizard_data = default_cache.get(cache_key) if wizard_data is None: return self.redirect_to_org(request) @@ -74,75 +80,121 @@ def get(self, request: HttpRequest, wizard_hash) -> HttpResponseBase: status=OrganizationStatus.ACTIVE, ).order_by("-date_created") - # TODO: Make wizard compatible with hybrid cloud. For now, we combine all region data for these - # responses, but project names/slugs aren't unique across regions which could confuse some users. - # Wizard should display region beside project/orgs or have a step to ask which region. - # {'us': {'org_ids': [...], 'projects': [...], 'keys': [...]}} region_data_map = defaultdict(lambda: defaultdict(list)) org_mappings_map = {} for mapping in org_mappings: region_data_map[mapping.region_name]["org_ids"].append(mapping.organization_id) - status = OrganizationStatus(mapping.status) - serialized_mapping = { - "id": mapping.organization_id, - "name": mapping.name, - "slug": mapping.slug, - "region": mapping.region_name, - "status": {"id": status.name.lower(), "name": status.label}, - } + serialized_mapping = serialize_org_mapping(mapping) org_mappings_map[mapping.organization_id] = serialized_mapping - for region_name, region_data in region_data_map.items(): - org_ids = region_data["org_ids"] - projects = project_service.get_many_by_organizations( - region_name=region_name, organization_ids=org_ids - ) - region_data["projects"] = projects - - keys_map = defaultdict(list) - for region_name, region_data in region_data_map.items(): - project_ids = [rpc_project.id for rpc_project in region_data["projects"]] - keys = project_key_service.get_project_keys_by_region( - region_name=region_name, - project_ids=project_ids, - role=ProjectKeyRole.store, + context["organizations"] = list(org_mappings_map.values()) + + # If org_slug and project_slug are provided, we will use them to select the project + # If the project is not found or the slugs are not provided, we will show the project selection + if org_slug is not None and project_slug is not None: + target_org_mapping = next( + (mapping for mapping in org_mappings if mapping.slug == org_slug), None ) - region_data["keys"] = keys - for key in region_data["keys"]: - serialized_key = { - "dsn": {"public": key.dsn_public}, - "isActive": key.is_active, - } - keys_map[key.project_id].append(serialized_key) - - filled_projects = [] - for region_name, region_data in region_data_map.items(): - for project in region_data["projects"]: - enriched_project = { - "slug": project.slug, - "id": project.id, - "name": project.name, - "platform": project.platform, - "status": STATUS_LABELS.get(project.status, "unknown"), - } - # The wizard only reads the a few fields so serializing the mapping should work fine - 
enriched_project["organization"] = org_mappings_map[project.organization_id] - enriched_project["keys"] = keys_map[project.id] - filled_projects.append(enriched_project) - - # Fetching or creating a token - serialized_token = get_token(org_mappings, request.user) - - result = {"apiKeys": serialized_token, "projects": filled_projects} + if target_org_mapping is not None: + target_project = project_service.get_by_slug( + slug=project_slug, organization_id=target_org_mapping.organization_id + ) - key = f"{SETUP_WIZARD_CACHE_KEY}{wizard_hash}" - default_cache.set(key, result, SETUP_WIZARD_CACHE_TIMEOUT) + if target_project is not None: + cache_data = get_cache_data( + mapping=target_org_mapping, project=target_project, user=request.user + ) + default_cache.set(cache_key, cache_data, SETUP_WIZARD_CACHE_TIMEOUT) - context["organizations"] = list(org_mappings_map.values()) + context["enableProjectSelection"] = False + return render_to_response("sentry/setup-wizard.html", context, request) + + context["enableProjectSelection"] = True return render_to_response("sentry/setup-wizard.html", context, request) + def post(self, request: HttpRequest, wizard_hash=None) -> HttpResponse: + """ + This updates the cache content for a specific hash + """ + json_data = json.loads(request.body) + organization_id = json_data.get("organizationId", None) + project_id = json_data.get("projectId", None) + + if organization_id is None or project_id is None or wizard_hash is None: + return HttpResponseBadRequest() + + member_org_ids = OrganizationMemberMapping.objects.filter( + user_id=request.user.id + ).values_list("organization_id", flat=True) + mapping = get_object_or_404( + OrganizationMapping, + organization_id=organization_id, + organization_id__in=member_org_ids, + ) + + project = project_service.get_by_id(organization_id=mapping.organization_id, id=project_id) + if project is None: + raise Http404() + + cache_data = get_cache_data(mapping=mapping, project=project, user=request.user) + + key = f"{SETUP_WIZARD_CACHE_KEY}{wizard_hash}" + default_cache.set(key, cache_data, SETUP_WIZARD_CACHE_TIMEOUT) + return HttpResponse(status=200) + + +def serialize_org_mapping(mapping: OrganizationMapping): + status = OrganizationStatus(mapping.status) + return { + "id": mapping.organization_id, + "name": mapping.name, + "slug": mapping.slug, + "region": mapping.region_name, + "status": {"id": status.name.lower(), "name": status.label}, + } + + +def serialize_project_key(project_key: RpcProjectKey): + return { + "dsn": {"public": project_key.dsn_public}, + "isActive": project_key.is_active, + } + + +def serialize_project(project: RpcProject, organization: dict, keys: list[dict]): + return { + "slug": project.slug, + "id": project.id, + "name": project.name, + "platform": project.platform, + "status": STATUS_LABELS.get(project.status, "unknown"), + "organization": organization, + "keys": keys, + } + + +def get_cache_data(mapping: OrganizationMapping, project: RpcProject, user: RpcUser): + project_key = project_key_service.get_project_key( + organization_id=mapping.organization_id, + project_id=project.id, + role=ProjectKeyRole.store, + ) + if project_key is None: + raise Http404() + + enriched_project = serialize_project( + project=project, + # The wizard only reads a few fields, so serializing the mapping should work fine + organization=serialize_org_mapping(mapping), + keys=[serialize_project_key(project_key)], + ) + + serialized_token = get_org_token(mapping, user) + + return {"apiKeys": serialized_token, "projects": 
[enriched_project]} + def get_token(mappings: list[OrganizationMapping], user: RpcUser): can_use_org_tokens = len(mappings) == 1 diff --git a/src/sentry/web/urls.py b/src/sentry/web/urls.py index 18ae9ac1aaec9..f4fcb59ad8972 100644 --- a/src/sentry/web/urls.py +++ b/src/sentry/web/urls.py @@ -9,24 +9,29 @@ from django.views.generic import RedirectView from sentry.api.endpoints.oauth_userinfo import OAuthUserInfoEndpoint +from sentry.api.endpoints.warmup import WarmupEndpoint from sentry.auth.providers.saml2.provider import SAML2AcceptACSView, SAML2MetadataView, SAML2SLSView from sentry.charts.endpoints import serve_chartcuterie_config from sentry.integrations.web.doc_integration_avatar import DocIntegrationAvatarPhotoView from sentry.integrations.web.organization_integration_setup import OrganizationIntegrationSetupView -from sentry.toolbar.iframe_view import IframeView -from sentry.toolbar.login_success_view import LoginSuccessView +from sentry.sentry_apps.web.sentryapp_avatar import SentryAppAvatarPhotoView +from sentry.toolbar.views.iframe_view import IframeView +from sentry.toolbar.views.login_success_view import LoginSuccessView from sentry.users.web import accounts from sentry.users.web.account_identity import AccountIdentityAssociateView from sentry.users.web.user_avatar import UserAvatarPhotoView from sentry.web import api -from sentry.web.frontend import generic +from sentry.web.frontend import csrf_failure, generic from sentry.web.frontend.auth_channel_login import AuthChannelLoginView from sentry.web.frontend.auth_close import AuthCloseView from sentry.web.frontend.auth_login import AuthLoginView from sentry.web.frontend.auth_logout import AuthLogoutView from sentry.web.frontend.auth_organization_login import AuthOrganizationLoginView from sentry.web.frontend.auth_provider_login import AuthProviderLoginView +from sentry.web.frontend.cli import get_cli, get_cli_download_url from sentry.web.frontend.disabled_member_view import DisabledMemberView +from sentry.web.frontend.error_404 import Error404View +from sentry.web.frontend.error_500 import Error500View from sentry.web.frontend.error_page_embed import ErrorPageEmbedView from sentry.web.frontend.group_event_json import GroupEventJsonView from sentry.web.frontend.group_plugin_action import GroupPluginActionView @@ -46,7 +51,6 @@ from sentry.web.frontend.react_page import GenericReactPageView, ReactPageView from sentry.web.frontend.reactivate_account import ReactivateAccountView from sentry.web.frontend.release_webhook import ReleaseWebhookView -from sentry.web.frontend.sentryapp_avatar import SentryAppAvatarPhotoView from sentry.web.frontend.setup_wizard import SetupWizardView from sentry.web.frontend.shared_group_details import SharedGroupDetailsView from sentry.web.frontend.sudo import SudoView @@ -60,7 +64,23 @@ generic_react_page_view = GenericReactPageView.as_view() react_page_view = ReactPageView.as_view() -urlpatterns: list[URLResolver | URLPattern] = [] +urlpatterns: list[URLResolver | URLPattern] = [ + re_path( + r"^500/", + Error500View.as_view(), + name="error-500", + ), + re_path( + r"^404/", + Error404View.as_view(), + name="error-404", + ), + re_path( + r"^403-csrf-failure/", + csrf_failure.view, + name="error-403-csrf-failure", + ), +] if getattr(settings, "DEBUG_VIEWS", settings.DEBUG): from sentry.web.debug_urls import urlpatterns as debug_urls @@ -92,6 +112,13 @@ ] urlpatterns += [ + # warmup, used to initialize any connections / pre-load + # the application so that user-initiated requests are faster + re_path( + 
r"^_warmup/$", + WarmupEndpoint.as_view(), + name="sentry-warmup", + ), re_path( r"^api/(?P[\w_-]+)/crossdomain\.xml$", api.crossdomain_xml, @@ -128,6 +155,13 @@ JavaScriptSdkLoader.as_view(), name="sentry-js-sdk-loader", ), + # docs reference this for acquiring the sentry cli + re_path(r"^get-cli/$", get_cli, name="get_cli_script"), + re_path( + r"^get-cli/(?P[^/]+)/(?P[^/]+)/?$", + get_cli_download_url, + name="get_cli_download_url", + ), # Versioned API re_path( r"^api/0/", diff --git a/src/sentry/workflow_engine/migrations/0006_data_conditions.py b/src/sentry/workflow_engine/migrations/0006_data_conditions.py new file mode 100644 index 0000000000000..aabaac0d0b846 --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0006_data_conditions.py @@ -0,0 +1,209 @@ +# Generated by Django 5.1.1 on 2024-09-26 00:11 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.bounded +import sentry.db.models.fields.foreignkey +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("sentry", "0767_add_selected_aggregate_to_dashboards_widget_query"), + ("workflow_engine", "0005_data_source_detector"), + ] + + operations = [ + migrations.CreateModel( + name="Action", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ("required", models.BooleanField(default=False)), + ("type", models.TextField()), + ("data", models.JSONField(default=dict)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="DataConditionGroup", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ("logic_type", models.CharField(default="any", max_length=200)), + ( + "organization", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="sentry.organization" + ), + ), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="DataCondition", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ("condition", models.CharField(max_length=200)), + ("comparison", models.JSONField()), + ("condition_result", models.JSONField()), + ("type", models.CharField(max_length=200)), + ( + "condition_group", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="workflow_engine.dataconditiongroup", + ), + ), + ], + options={ + "abstract": False, + }, + ), + migrations.AddField( + model_name="detector", + name="workflow_condition_group", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="workflow_engine.dataconditiongroup", + unique=True, + ), + ), + migrations.AddField( + model_name="workflow", + name="when_condition_group", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="workflow_engine.dataconditiongroup", + ), + ), + migrations.CreateModel( + name="DataConditionGroupAction", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ( + "action", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.action" + ), + ), + ( + "condition_group", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="workflow_engine.dataconditiongroup", + ), + ), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="DetectorWorkflow", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ( + "detector", + 
sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.detector" + ), + ), + ( + "workflow", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.workflow" + ), + ), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="WorkflowDataConditionGroup", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ( + "condition_group", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="workflow_engine.dataconditiongroup", + unique=True, + ), + ), + ( + "workflow", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.workflow" + ), + ), + ], + options={ + "abstract": False, + }, + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0007_loosen_workflow_action_relationship.py b/src/sentry/workflow_engine/migrations/0007_loosen_workflow_action_relationship.py new file mode 100644 index 0000000000000..11aedf9be5c16 --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0007_loosen_workflow_action_relationship.py @@ -0,0 +1,43 @@ +# Generated by Django 5.1.1 on 2024-09-27 17:47 + +import django.db.models.deletion +from django.db import migrations + +import sentry.db.models.fields.foreignkey +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0006_data_conditions"), + ] + + operations = [ + migrations.AlterField( + model_name="workflowaction", + name="workflow", + field=sentry.db.models.fields.foreignkey.FlexibleForeignKey( + db_constraint=False, + on_delete=django.db.models.deletion.CASCADE, + to="workflow_engine.workflow", + ), + ), + migrations.SeparateDatabaseAndState( + state_operations=[migrations.DeleteModel(name="WorkflowAction")], + database_operations=[], + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0008_detector_state.py b/src/sentry/workflow_engine/migrations/0008_detector_state.py new file mode 100644 index 0000000000000..2b4c0a649af18 --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0008_detector_state.py @@ -0,0 +1,63 @@ +# Generated by Django 5.1.1 on 2024-09-28 00:10 + +import django.db.models.deletion +from django.db import migrations, models + +import sentry.db.models.fields.bounded +import sentry.db.models.fields.foreignkey +import sentry.workflow_engine.models.detector_state +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. 
+ # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + + dependencies = [ + ("workflow_engine", "0007_loosen_workflow_action_relationship"), + ] + + operations = [ + migrations.CreateModel( + name="DetectorState", + fields=[ + ( + "id", + sentry.db.models.fields.bounded.BoundedBigAutoField( + primary_key=True, serialize=False + ), + ), + ("date_updated", models.DateTimeField(auto_now=True)), + ("date_added", models.DateTimeField(auto_now_add=True)), + ("detector_group_key", models.CharField(blank=True, max_length=200, null=True)), + ("active", models.BooleanField(default=False)), + ( + "state", + models.CharField( + default=sentry.workflow_engine.models.detector_state.DetectorStatus["OK"], + max_length=200, + ), + ), + ( + "detector", + sentry.db.models.fields.foreignkey.FlexibleForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="workflow_engine.detector" + ), + ), + ], + options={ + "abstract": False, + }, + ), + ] diff --git a/src/sentry/workflow_engine/migrations/0009_detector_type.py b/src/sentry/workflow_engine/migrations/0009_detector_type.py new file mode 100644 index 0000000000000..25495c3435ebf --- /dev/null +++ b/src/sentry/workflow_engine/migrations/0009_detector_type.py @@ -0,0 +1,37 @@ +# Generated by Django 5.1.1 on 2024-10-02 22:26 + +from django.db import migrations, models + +from sentry.new_migrations.migrations import CheckedMigration + + +class Migration(CheckedMigration): + # This flag is used to mark that a migration shouldn't be automatically run in production. + # This should only be used for operations where it's safe to run the migration after your + # code has deployed. So this should not be used for most operations that alter the schema + # of a table. + # Here are some things that make sense to mark as post deployment: + # - Large data migrations. Typically we want these to be run manually so that they can be + # monitored and not block the deploy for a long period of time while they run. + # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to + # run this outside deployments so that we don't block them. Note that while adding an index + # is a schema change, it's completely safe to run the operation after the code has deployed. + # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment + + is_post_deployment = False + # If you're copying this migration don't do this. It's dangerous to disable the checks unless you know what you're + # doing. 
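+ # (Assumption: checks are disabled here because adding the non-null "type" column with preserve_default=False would otherwise be flagged by the migration checker.) 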
+ checked = False + + dependencies = [ + ("workflow_engine", "0008_detector_state"), + ] + + operations = [ + migrations.AddField( + model_name="detector", + name="type", + field=models.CharField(max_length=200), + preserve_default=False, + ), + ] diff --git a/src/sentry/workflow_engine/models/__init__.py b/src/sentry/workflow_engine/models/__init__.py index ee343230896a5..17dc1b2770049 100644 --- a/src/sentry/workflow_engine/models/__init__.py +++ b/src/sentry/workflow_engine/models/__init__.py @@ -1,7 +1,27 @@ -__all__ = ["DataSource", "DataSourceDetector", "Detector", "Workflow", "WorkflowAction"] +__all__ = [ + "Action", + "DataCondition", + "DataConditionGroup", + "DataConditionGroupAction", + "DataPacket", + "DataSource", + "DataSourceDetector", + "Detector", + "DetectorState", + "DetectorEvaluationResult", + "DetectorWorkflow", + "Workflow", + "WorkflowDataConditionGroup", +] -from .data_source import DataSource +from .action import Action +from .data_condition import DataCondition +from .data_condition_group import DataConditionGroup +from .data_condition_group_action import DataConditionGroupAction +from .data_source import DataPacket, DataSource from .data_source_detector import DataSourceDetector -from .detector import Detector +from .detector import Detector, DetectorEvaluationResult +from .detector_state import DetectorState +from .detector_workflow import DetectorWorkflow from .workflow import Workflow -from .workflow_action import WorkflowAction +from .workflow_data_condition_group import WorkflowDataConditionGroup diff --git a/src/sentry/workflow_engine/models/action.py b/src/sentry/workflow_engine/models/action.py new file mode 100644 index 0000000000000..62d5be7951322 --- /dev/null +++ b/src/sentry/workflow_engine/models/action.py @@ -0,0 +1,30 @@ +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, region_silo_model, sane_repr + + +@region_silo_model +class Action(DefaultFieldsModel): + """ + Actions are operations that can be taken if the conditions of a DataConditionGroup are satisfied. + Examples include: detectors emitting events, sending notifications, creating an issue in the Issue Platform, etc. + """ + + __relocation_scope__ = RelocationScope.Excluded + __repr__ = sane_repr("workflow_id", "type") + + # TODO (@saponifi3d): Don't hardcode these values + class Type(models.TextChoices): + Notification = "SendNotificationAction" + TriggerWorkflow = "TriggerWorkflowAction" + + """ + Required actions cannot be disabled by the user, and will not be displayed in the UI. + These actions will be used internally to trigger other aspects of the system. + For example, creating a new issue in the Issue Platform or a detector emitting an event. 
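+ + A hedged sketch of creating a required action (field values illustrative only): + + action = Action.objects.create( + type=Action.Type.Notification, + required=True, + data={}, + ) 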
+ """ + required = models.BooleanField(default=False) + + type = models.TextField(choices=Type.choices) + data = models.JSONField(default=dict) diff --git a/src/sentry/workflow_engine/models/data_condition.py b/src/sentry/workflow_engine/models/data_condition.py new file mode 100644 index 0000000000000..f7b25c669d9e3 --- /dev/null +++ b/src/sentry/workflow_engine/models/data_condition.py @@ -0,0 +1,33 @@ +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, region_silo_model, sane_repr + +from .data_condition_group import DataConditionGroup + + +@region_silo_model +class DataCondition(DefaultFieldsModel): + """ + A data condition is a way to specify a logic condition, if the condition is met, the condition_result is returned. + """ + + __relocation_scope__ = RelocationScope.Organization + __repr__ = sane_repr("type", "condition") + + # The condition is the logic condition that needs to be met, gt, lt, eq, etc. + condition = models.CharField(max_length=200) + + # The comparison is the value that the condition is compared to for the evaluation, this must be a primitive value + comparison = models.JSONField() + + # The condition_result is the value that is returned if the condition is met, this must be a primitive value + condition_result = models.JSONField() + + # The type of condition, this is used to initialize the condition classes + type = models.CharField(max_length=200) + + condition_group = models.ForeignKey( + DataConditionGroup, + on_delete=models.CASCADE, + ) diff --git a/src/sentry/workflow_engine/models/data_condition_group.py b/src/sentry/workflow_engine/models/data_condition_group.py new file mode 100644 index 0000000000000..3e29aca4b5e69 --- /dev/null +++ b/src/sentry/workflow_engine/models/data_condition_group.py @@ -0,0 +1,22 @@ +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, region_silo_model, sane_repr + + +@region_silo_model +class DataConditionGroup(DefaultFieldsModel): + """ + A data group is a way to specify a group of conditions that must be met for a workflow action to execute + """ + + __relocation_scope__ = RelocationScope.Organization + __repr__ = sane_repr("logic_type") + + class Type(models.TextChoices): + ANY = "any" + ALL = "all" + NONE = "none" + + logic_type = models.CharField(max_length=200, choices=Type.choices, default=Type.ANY) + organization = models.ForeignKey("sentry.Organization", on_delete=models.CASCADE) diff --git a/src/sentry/workflow_engine/models/data_condition_group_action.py b/src/sentry/workflow_engine/models/data_condition_group_action.py new file mode 100644 index 0000000000000..baa4f3494c491 --- /dev/null +++ b/src/sentry/workflow_engine/models/data_condition_group_action.py @@ -0,0 +1,14 @@ +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model + + +@region_silo_model +class DataConditionGroupAction(DefaultFieldsModel): + """ + A model that represents the relationship between a data condition group and an action. 
+ """ + + __relocation_scope__ = RelocationScope.Excluded + + condition_group = FlexibleForeignKey("workflow_engine.DataConditionGroup") + action = FlexibleForeignKey("workflow_engine.Action") diff --git a/src/sentry/workflow_engine/models/data_source.py b/src/sentry/workflow_engine/models/data_source.py index e449d4c37621f..13c409865ae57 100644 --- a/src/sentry/workflow_engine/models/data_source.py +++ b/src/sentry/workflow_engine/models/data_source.py @@ -1,3 +1,6 @@ +import dataclasses +from typing import Generic, TypeVar + from django.db import models from sentry.backup.scopes import RelocationScope @@ -9,6 +12,14 @@ ) from sentry.workflow_engine.models.data_source_detector import DataSourceDetector +T = TypeVar("T") + + +@dataclasses.dataclass +class DataPacket(Generic[T]): + query_id: str + packet: T + @region_silo_model class DataSource(DefaultFieldsModel): @@ -16,6 +27,7 @@ class DataSource(DefaultFieldsModel): class Type(models.IntegerChoices): SNUBA_QUERY_SUBSCRIPTION = 1 + SNUBA_QUERY = 2 organization = FlexibleForeignKey("sentry.Organization") query_id = BoundedBigIntegerField() diff --git a/src/sentry/workflow_engine/models/data_source_detector.py b/src/sentry/workflow_engine/models/data_source_detector.py index b2ab7b7c75c78..7eccb46924038 100644 --- a/src/sentry/workflow_engine/models/data_source_detector.py +++ b/src/sentry/workflow_engine/models/data_source_detector.py @@ -6,6 +6,10 @@ @region_silo_model class DataSourceDetector(DefaultFieldsModel): + """ + Lookup table that maps a DataSource to a Detector. This is used to determine which detectors are available for a given data source. + """ + __relocation_scope__ = RelocationScope.Organization data_source = FlexibleForeignKey("workflow_engine.DataSource") diff --git a/src/sentry/workflow_engine/models/detector.py b/src/sentry/workflow_engine/models/detector.py index 482abf6e781c6..dcabca1c66f54 100644 --- a/src/sentry/workflow_engine/models/detector.py +++ b/src/sentry/workflow_engine/models/detector.py @@ -1,10 +1,24 @@ +from __future__ import annotations + +import abc +import dataclasses +import logging +from typing import TYPE_CHECKING, Any, Generic, TypeVar + from django.db import models from django.db.models import UniqueConstraint from sentry.backup.scopes import RelocationScope from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model +from sentry.issues import grouptype from sentry.models.owner_base import OwnerModel -from sentry.workflow_engine.models.data_source_detector import DataSourceDetector +from sentry.types.group import PriorityLevel +from sentry.workflow_engine.models import DataPacket + +if TYPE_CHECKING: + from sentry.workflow_engine.models.detector_state import DetectorStatus + +logger = logging.getLogger(__name__) @region_silo_model @@ -13,7 +27,22 @@ class Detector(DefaultFieldsModel, OwnerModel): organization = FlexibleForeignKey("sentry.Organization") name = models.CharField(max_length=200) - data_sources = models.ManyToManyField("workflow_engine.DataSource", through=DataSourceDetector) + + # The data sources that the detector is watching + data_sources = models.ManyToManyField( + "workflow_engine.DataSource", through="workflow_engine.DataSourceDetector" + ) + + # The conditions that must be met for the detector to be considered 'active' + # This will emit an event for the workflow to process + workflow_condition_group = FlexibleForeignKey( + "workflow_engine.DataConditionGroup", + blank=True, + null=True, + unique=True, + on_delete=models.SET_NULL, + ) + type = 
models.CharField(max_length=200) class Meta(OwnerModel.Meta): constraints = OwnerModel.Meta.constraints + [ @@ -22,3 +51,63 @@ class Meta(OwnerModel.Meta): name="workflow_engine_detector_org_name", ) ] + + @property + def detector_handler(self) -> DetectorHandler | None: + group_type = grouptype.registry.get_by_slug(self.type) + if not group_type: + logger.error( + "No registered grouptype for detector", + extra={ + "group_type": str(group_type), + "detector_id": self.id, + "detector_type": self.type, + }, + ) + return None + + if not group_type.detector_handler: + logger.error( + "Registered grouptype for detector has no detector_handler", + extra={ + "group_type": str(group_type), + "detector_id": self.id, + "detector_type": self.type, + }, + ) + return None + return group_type.detector_handler(self) + + +@dataclasses.dataclass(frozen=True) +class DetectorStateData: + group_key: str | None + active: bool + status: DetectorStatus + # Stateful detectors always process data packets in order. Once we confirm that a data packet has been fully + # processed and all workflows have run, this value will be used by the stateful detector to prevent + # reprocessing. + dedupe_value: int + # Stateful detectors allow various counts to be tracked. We need to update these after we process workflows, so + # include the updates in the state. + counter_updates: dict[str, int] + + +@dataclasses.dataclass(frozen=True) +class DetectorEvaluationResult: + is_active: bool + priority: PriorityLevel + data: Any + state_update_data: DetectorStateData | None = None + + +T = TypeVar("T") + + +class DetectorHandler(abc.ABC, Generic[T]): + def __init__(self, detector: Detector): + self.detector = detector + + @abc.abstractmethod + def evaluate(self, data_packet: DataPacket[T]) -> list[DetectorEvaluationResult]: + pass diff --git a/src/sentry/workflow_engine/models/detector_state.py b/src/sentry/workflow_engine/models/detector_state.py new file mode 100644 index 0000000000000..dc68964d23dd1 --- /dev/null +++ b/src/sentry/workflow_engine/models/detector_state.py @@ -0,0 +1,27 @@ +from enum import StrEnum + +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model + + +class DetectorStatus(StrEnum): + OK = "ok" + + +@region_silo_model +class DetectorState(DefaultFieldsModel): + __relocation_scope__ = RelocationScope.Organization + + detector = FlexibleForeignKey("workflow_engine.Detector") + + # This key is used when a detector is using group-by; it + # allows us to link to a specific group from a single detector + detector_group_key = models.CharField(max_length=200, blank=True, null=True) + + # If the detector is currently active + active = models.BooleanField(default=False) + + # The current state of the detector + state = models.CharField(max_length=200, default=DetectorStatus.OK) diff --git a/src/sentry/workflow_engine/models/detector_workflow.py b/src/sentry/workflow_engine/models/detector_workflow.py new file mode 100644 index 0000000000000..2b22bcba2575d --- /dev/null +++ b/src/sentry/workflow_engine/models/detector_workflow.py @@ -0,0 +1,16 @@ +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model + + +@region_silo_model +class DetectorWorkflow(DefaultFieldsModel): + """ + A model to represent the relationship between a detector and a workflow. 
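+ + A hedged sketch (detector and workflow are assumed to already exist): + + DetectorWorkflow.objects.create(detector=detector, workflow=workflow) 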
+ """ + + __relocation_scope__ = RelocationScope.Organization + + detector = FlexibleForeignKey("workflow_engine.Detector", on_delete=models.CASCADE) + workflow = FlexibleForeignKey("workflow_engine.Workflow", on_delete=models.CASCADE) diff --git a/src/sentry/workflow_engine/models/workflow.py b/src/sentry/workflow_engine/models/workflow.py index ca7bf3b1044ca..4cb8fde6721cf 100644 --- a/src/sentry/workflow_engine/models/workflow.py +++ b/src/sentry/workflow_engine/models/workflow.py @@ -3,6 +3,8 @@ from sentry.backup.scopes import RelocationScope from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model, sane_repr +from .data_condition_group import DataConditionGroup + @region_silo_model class Workflow(DefaultFieldsModel): @@ -15,6 +17,9 @@ class Workflow(DefaultFieldsModel): name = models.CharField(max_length=200) organization = FlexibleForeignKey("sentry.Organization") + # Required as the 'when' condition for the workflow, this evalutes states emitted from the detectors + when_condition_group = FlexibleForeignKey(DataConditionGroup, blank=True, null=True) + __repr__ = sane_repr("name", "organization_id") class Meta: diff --git a/src/sentry/workflow_engine/models/workflow_action.py b/src/sentry/workflow_engine/models/workflow_action.py deleted file mode 100644 index ccd9ac41a9f5a..0000000000000 --- a/src/sentry/workflow_engine/models/workflow_action.py +++ /dev/null @@ -1,24 +0,0 @@ -from django.db import models - -from sentry.backup.scopes import RelocationScope -from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model, sane_repr - - -@region_silo_model -class WorkflowAction(DefaultFieldsModel): - """ - A workflow action is an action to be taken as part of a workflow. - These will be executed in order as part of a workflow. - """ - - __relocation_scope__ = RelocationScope.Organization - - class Type(models.TextChoices): - NOTIFICATION = "SendNotificationAction" - - required = models.BooleanField(default=False) - workflow = FlexibleForeignKey("workflow_engine.Workflow") - type = models.TextField(choices=Type.choices) - data = models.JSONField(default=dict) - - __repr__ = sane_repr("workflow_id", "type") diff --git a/src/sentry/workflow_engine/models/workflow_data_condition_group.py b/src/sentry/workflow_engine/models/workflow_data_condition_group.py new file mode 100644 index 0000000000000..905a78d7d77e7 --- /dev/null +++ b/src/sentry/workflow_engine/models/workflow_data_condition_group.py @@ -0,0 +1,18 @@ +from django.db import models + +from sentry.backup.scopes import RelocationScope +from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model + + +@region_silo_model +class WorkflowDataConditionGroup(DefaultFieldsModel): + """ + A lookup table for the conditions associated with a workflow. 
+ """ + + __relocation_scope__ = RelocationScope.Organization + + condition_group = FlexibleForeignKey( + "workflow_engine.DataConditionGroup", unique=True, on_delete=models.CASCADE + ) + workflow = FlexibleForeignKey("workflow_engine.Workflow", on_delete=models.CASCADE) diff --git a/src/sentry/workflow_engine/processors/__init__.py b/src/sentry/workflow_engine/processors/__init__.py new file mode 100644 index 0000000000000..700cd48361de4 --- /dev/null +++ b/src/sentry/workflow_engine/processors/__init__.py @@ -0,0 +1,7 @@ +__all__ = [ + "process_data_sources", + "process_detectors", +] + +from .data_source import process_data_sources +from .detector import process_detectors diff --git a/src/sentry/workflow_engine/processors/data_source.py b/src/sentry/workflow_engine/processors/data_source.py new file mode 100644 index 0000000000000..00d580acba44a --- /dev/null +++ b/src/sentry/workflow_engine/processors/data_source.py @@ -0,0 +1,45 @@ +import logging + +import sentry_sdk +from django.db.models import Prefetch + +from sentry.utils import metrics +from sentry.workflow_engine.models import DataPacket, DataSource, Detector + +logger = logging.getLogger("sentry.workflow_engine.process_data_source") + + +def process_data_sources( + data_packets: list[DataPacket], query_type: DataSource.Type = DataSource.Type.SNUBA_QUERY +) -> list[tuple[DataPacket, list[Detector]]]: + metrics.incr("sentry.workflow_engine.process_data_sources", tags={"query_type": query_type}) + + data_packet_ids = {packet.query_id for packet in data_packets} + + # Fetch all data sources and associated detectors for the given data packets + with sentry_sdk.start_span(op="sentry.workflow_engine.process_data_sources.fetch_data_sources"): + data_sources = DataSource.objects.filter( + query_id__in=data_packet_ids, type=query_type + ).prefetch_related(Prefetch("detectors")) + + # Build a lookup dict for query_id to detectors + query_id_to_detectors = {ds.query_id: list(ds.detectors.all()) for ds in data_sources} + + # Create the result tuples + result = [] + for packet in data_packets: + detectors = query_id_to_detectors.get(packet.query_id) + + if detectors: + data_packet_tuple = (packet, detectors) + result.append(data_packet_tuple) + else: + logger.warning( + "No detectors found", extra={"query_id": packet.query_id, "query_type": query_type} + ) + metrics.incr( + "sentry.workflow_engine.process_data_sources.no_detectors", + tags={"query_type": query_type}, + ) + + return result diff --git a/src/sentry/workflow_engine/processors/detector.py b/src/sentry/workflow_engine/processors/detector.py new file mode 100644 index 0000000000000..24f02191b4bda --- /dev/null +++ b/src/sentry/workflow_engine/processors/detector.py @@ -0,0 +1,38 @@ +import logging + +from sentry.workflow_engine.models import DataPacket, Detector, DetectorEvaluationResult + +logger = logging.getLogger(__name__) + + +def process_detectors( + data_packet: DataPacket, detectors: list[Detector] +) -> list[tuple[Detector, list[DetectorEvaluationResult]]]: + results = [] + + for detector in detectors: + handler = detector.detector_handler + + if not handler: + continue + + detector_results = handler.evaluate(data_packet) + detector_group_keys = set() + + for result in detector_results: + if result.state_update_data: + if result.state_update_data.group_key in detector_group_keys: + # This shouldn't happen - log an error and continue on, but we should investigate this. 
+ logger.error( + "Duplicate detector state group keys found", + extra={ + "detector_id": detector.id, + "group_key": result.state_update_data.group_key, + }, + ) + detector_group_keys.add(result.state_update_data.group_key) + + if detector_results: + results.append((detector, detector_results)) + + return results diff --git a/src/sentry/wsgi.py b/src/sentry/wsgi.py index f693389e3461f..98deebd9e95ca 100644 --- a/src/sentry/wsgi.py +++ b/src/sentry/wsgi.py @@ -2,6 +2,8 @@ import os.path import sys +from django.urls import reverse + # Add the project to the python path sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) @@ -21,7 +23,7 @@ # trigger a warmup of the application application( { - "PATH_INFO": "/_health/", + "PATH_INFO": reverse("sentry-warmup"), "REQUEST_METHOD": "GET", "SERVER_NAME": "127.0.0.1", "SERVER_PORT": "9001", diff --git a/static/app/actionCreators/broadcasts.tsx b/static/app/actionCreators/broadcasts.tsx deleted file mode 100644 index 7f9e11f2554a9..0000000000000 --- a/static/app/actionCreators/broadcasts.tsx +++ /dev/null @@ -1,13 +0,0 @@ -import type {Client} from 'sentry/api'; - -export function getAllBroadcasts(api: Client, orgSlug: string) { - return api.requestPromise(`/organizations/${orgSlug}/broadcasts/`, {method: 'GET'}); -} - -export function markBroadcastsAsSeen(api: Client, idList: string[]) { - return api.requestPromise('/broadcasts/', { - method: 'PUT', - query: {id: idList}, - data: {hasSeen: '1'}, - }); -} diff --git a/static/app/actionCreators/events.tsx b/static/app/actionCreators/events.tsx index 15fd1ed64cedd..a4ee3b0e04173 100644 --- a/static/app/actionCreators/events.tsx +++ b/static/app/actionCreators/events.tsx @@ -51,7 +51,7 @@ type Options = { project?: Readonly; query?: string; queryBatching?: QueryBatching; - queryExtras?: Record; + queryExtras?: Record; referrer?: string; start?: DateString; team?: Readonly; diff --git a/static/app/actionCreators/organization.tsx b/static/app/actionCreators/organization.tsx index 2b9c8e033c64b..46882a81c2e82 100644 --- a/static/app/actionCreators/organization.tsx +++ b/static/app/actionCreators/organization.tsx @@ -42,7 +42,7 @@ async function fetchOrg( } FeatureFlagOverrides.singleton().loadOrg(org); - FeatureObserver.singleton().observeFlags({organization: org, bufferSize: 10}); + FeatureObserver.singleton().observeFlags({organization: org, bufferSize: 100}); OrganizationStore.onUpdate(org, {replace: true}); setActiveOrganization(org); diff --git a/static/app/actionCreators/plugins.tsx b/static/app/actionCreators/plugins.tsx index e53d5bd1a1fcd..51e9d16277fb8 100644 --- a/static/app/actionCreators/plugins.tsx +++ b/static/app/actionCreators/plugins.tsx @@ -9,7 +9,7 @@ import {t} from 'sentry/locale'; import PluginsStore from 'sentry/stores/pluginsStore'; import type {Plugin} from 'sentry/types/integrations'; -const activeFetch = {}; +const activeFetch: Record | null> = {}; // PluginsStore always exists, so api client should be independent of component lifecycle const api = new Client(); diff --git a/static/app/actionCreators/projects.tsx b/static/app/actionCreators/projects.tsx index 654b877e218cf..578dda9f82e0a 100644 --- a/static/app/actionCreators/projects.tsx +++ b/static/app/actionCreators/projects.tsx @@ -14,7 +14,7 @@ import LatestContextStore from 'sentry/stores/latestContextStore'; import ProjectsStatsStore from 'sentry/stores/projectsStatsStore'; import ProjectsStore from 'sentry/stores/projectsStore'; import type {Team} from 'sentry/types/organization'; -import type 
{PlatformKey, Project} from 'sentry/types/project'; +import type {Project} from 'sentry/types/project'; import type {ApiQueryKey} from 'sentry/utils/queryClient'; import {setApiQueryData, useApiQuery, useQueryClient} from 'sentry/utils/queryClient'; import useApi from 'sentry/utils/useApi'; @@ -355,29 +355,6 @@ export function removeProject({ }); } -/** - * Load platform documentation specific to the project. The DSN and various - * other project specific secrets will be included in the documentation. - * - * @param api API Client - * @param orgSlug Organization Slug - * @param projectSlug Project Slug - * @param platform Project platform. - */ -export function loadDocs({ - api, - orgSlug, - projectSlug, - platform, -}: { - api: Client; - orgSlug: string; - platform: PlatformKey | 'python-tracing' | 'node-tracing' | 'react-native-tracing'; - projectSlug: string; -}) { - return api.requestPromise(`/projects/${orgSlug}/${projectSlug}/docs/${platform}/`); -} - /** * Load the counts of my projects and all projects for the current user * diff --git a/static/app/actionCreators/tags.tsx b/static/app/actionCreators/tags.tsx index f840e855d9145..7a4224ef8560f 100644 --- a/static/app/actionCreators/tags.tsx +++ b/static/app/actionCreators/tags.tsx @@ -171,10 +171,12 @@ export function fetchSpanFieldValues({ endpointParams, projectIds, search, + dataset, }: { api: Client; fieldKey: string; orgSlug: string; + dataset?: 'spans' | 'spansIndexed'; endpointParams?: Query; projectIds?: string[]; search?: string; @@ -199,6 +201,10 @@ export function fetchSpanFieldValues({ query.statsPeriod = endpointParams.statsPeriod; } } + if (dataset === 'spans') { + query.dataset = 'spans'; + query.type = 'string'; + } return api.requestPromise(url, { method: 'GET', diff --git a/static/app/bootstrap/initializeApp.tsx b/static/app/bootstrap/initializeApp.tsx index b550c7e388d3d..98a0eeb6b28a1 100644 --- a/static/app/bootstrap/initializeApp.tsx +++ b/static/app/bootstrap/initializeApp.tsx @@ -1,7 +1,6 @@ import './legacyTwitterBootstrap'; import './exportGlobals'; -import {routes} from 'sentry/routes'; import type {Config} from 'sentry/types/system'; import {metric} from 'sentry/utils/analytics'; @@ -13,7 +12,7 @@ import {renderOnDomReady} from './renderOnDomReady'; export function initializeApp(config: Config) { commonInitialization(config); - initializeSdk(config, {routes}); + initializeSdk(config); // Used for operational metrics to determine that the application js // bundle was loaded by browser. diff --git a/static/app/bootstrap/initializeSdk.tsx b/static/app/bootstrap/initializeSdk.tsx index cf7a9e7455581..9ddba27c96741 100644 --- a/static/app/bootstrap/initializeSdk.tsx +++ b/static/app/bootstrap/initializeSdk.tsx @@ -1,6 +1,4 @@ -/* eslint-disable simple-import-sort/imports */ -// biome-ignore lint/nursery/noRestrictedImports: ignore warning -import {browserHistory, createRoutes, match} from 'react-router'; +// eslint-disable-next-line simple-import-sort/imports import * as Sentry from '@sentry/react'; import {_browserPerformanceTimeOriginMode} from '@sentry/utils'; import type {Event} from '@sentry/types'; @@ -57,31 +55,19 @@ const shouldOverrideBrowserProfiling = window?.__initialData?.user?.isSuperuser; * having routing instrumentation in order to have a smaller bundle size. * (e.g. `static/views/integrationPipeline`) */ -function getSentryIntegrations(routes?: Function) { - const reactRouterIntegration = window.__SENTRY_USING_REACT_ROUTER_SIX - ? 
Sentry.reactRouterV6BrowserTracingIntegration({ - useEffect: useEffect, - useLocation: useLocation, - useNavigationType: useNavigationType, - createRoutesFromChildren: createRoutesFromChildren, - matchRoutes: matchRoutes, - }) - : Sentry.reactRouterV3BrowserTracingIntegration({ - history: browserHistory as any, - routes: typeof routes === 'function' ? createRoutes(routes()) : [], - match, - enableLongAnimationFrame: true, - _experiments: { - enableInteractions: false, - }, - }); - + function getSentryIntegrations() { const integrations = [ Sentry.extraErrorDataIntegration({ // 6 is arbitrary, seems like a nice number depth: 6, }), - reactRouterIntegration, + Sentry.reactRouterV6BrowserTracingIntegration({ + useEffect: useEffect, + useLocation: useLocation, + useNavigationType: useNavigationType, + createRoutesFromChildren: createRoutesFromChildren, + matchRoutes: matchRoutes, + }), Sentry.browserProfilingIntegration(), Sentry.thirdPartyErrorFilterIntegration({ filterKeys: ['sentry-spa'], @@ -92,13 +78,15 @@ function getSentryIntegrations(routes?: Function) { return integrations; } +// TODO(__SENTRY_USING_REACT_ROUTER_SIX): Remove opts once getsentry has had +// this parameter removed /** * Initialize the Sentry SDK * * React Router instrumentation is now always set up via the v6 integration, * so no `routes` argument is needed. */ -export function initializeSdk(config: Config, {routes}: {routes?: Function} = {}) { +export function initializeSdk(config: Config, _opts?: any) { const {apmSampling, sentryConfig, userIdentity} = config; const tracesSampleRate = apmSampling ?? 0; const extraTracePropagationTargets = SPA_DSN @@ -119,7 +107,7 @@ export function initializeSdk(config: Config, {routes}: {routes?: Function} = {} */ release: SENTRY_RELEASE_VERSION ?? sentryConfig?.release, allowUrls: SPA_DSN ? SPA_MODE_ALLOW_URLS : sentryConfig?.allowUrls, - integrations: getSentryIntegrations(routes), + integrations: getSentryIntegrations(), tracesSampleRate, profilesSampleRate: shouldOverrideBrowserProfiling ? 
1 : 0.1, tracePropagationTargets: ['localhost', /^\//, ...extraTracePropagationTargets], diff --git a/static/app/components/IssueStreamHeaderLabel.tsx b/static/app/components/IssueStreamHeaderLabel.tsx new file mode 100644 index 0000000000000..d4463294a20af --- /dev/null +++ b/static/app/components/IssueStreamHeaderLabel.tsx @@ -0,0 +1,24 @@ +import {css} from '@emotion/react'; +import styled from '@emotion/styled'; + +import {space} from 'sentry/styles/space'; + +const IssueStreamHeaderLabel = styled('div')<{breakpoint?: string}>` + position: relative; + display: inline-block; + margin-right: ${space(2)}; + justify-content: space-between; + font-size: 13px; + font-weight: ${p => p.theme.fontWeightBold}; + color: ${p => p.theme.subText}; + + ${p => + p.breakpoint && + css` + @media (max-width: ${p.breakpoint}) { + display: none; + } + `} +`; + +export default IssueStreamHeaderLabel; diff --git a/static/app/components/acl/access.spec.tsx b/static/app/components/acl/access.spec.tsx index 999498fa8cd5a..b2dd8de38911f 100644 --- a/static/app/components/acl/access.spec.tsx +++ b/static/app/components/acl/access.spec.tsx @@ -134,7 +134,7 @@ describe('Access', function () { }) ); - render({childrenMock}, {organization}); + render({childrenMock}, {organization}); expect(childrenMock).toHaveBeenCalledWith({ hasAccess: true, @@ -149,9 +149,14 @@ describe('Access', function () { }) ); - render({childrenMock}, { - organization, - }); + render( + + {childrenMock} + , + { + organization, + } + ); expect(childrenMock).toHaveBeenCalledWith({ hasAccess: true, @@ -166,9 +171,14 @@ describe('Access', function () { }) ); - render({childrenMock}, { - organization, - }); + render( + + {childrenMock} + , + { + organization, + } + ); expect(childrenMock).toHaveBeenCalledWith({ hasAccess: true, @@ -208,7 +218,7 @@ describe('Access', function () { ); render( - +

The Child

, {organization} @@ -225,7 +235,7 @@ describe('Access', function () { ); render( - +

The Child

, {organization} diff --git a/static/app/components/acl/access.tsx b/static/app/components/acl/access.tsx index e451306368752..8f12d61623a1f 100644 --- a/static/app/components/acl/access.tsx +++ b/static/app/components/acl/access.tsx @@ -1,11 +1,9 @@ -import {Fragment} from 'react'; - import type {Scope} from 'sentry/types/core'; import type {Organization, Team} from 'sentry/types/organization'; import type {Project} from 'sentry/types/project'; import {isRenderFunc} from 'sentry/utils/isRenderFunc'; +import useOrganization from 'sentry/utils/useOrganization'; import {useUser} from 'sentry/utils/useUser'; -import withOrganization from 'sentry/utils/withOrganization'; // Props that function children will get. type ChildRenderProps = { @@ -17,20 +15,23 @@ type ChildRenderProps = { type ChildFunction = (props: ChildRenderProps) => any; type Props = { - organization: Organization; /** * List of required access levels */ - access?: Scope[]; + access: Scope[]; /** * Children can be a node or a function as child. */ - children?: React.ReactNode | ChildFunction; - + children: React.ReactNode | ChildFunction; /** * Requires superuser */ isSuperuser?: boolean; + /** + * Evaluate access against a defined organization. If this is not provided, + * the access is evaluated against the currently active organization. + */ + organization?: Organization; /** * Optional: To be used when you need to check for access to the Project @@ -39,7 +40,7 @@ type Props = { * An "org-member" does not have project:write but if they are "team-admin" for * one of its parent teams, they will have appropriate scopes. */ - project?: Project | null | undefined; + project?: Project; /** * Optional: To be used when you need to check for access to the Team * * An "org-member" does not have team:write but if they are "team-admin" for * the team, they will have appropriate scopes. */ - team?: Team | null | undefined; + team?: Team; }; /** @@ -55,48 +56,53 @@ type Props = { */ function Access({ children, - isSuperuser = false, - access = [], + organization: overrideOrganization, + isSuperuser, + access, team, project, - organization, }: Props) { const user = useUser(); - team = team ?? undefined; - project = project ?? undefined; + const implicitOrganization = useOrganization(); + const organization = overrideOrganization || implicitOrganization; - const hasAccess = hasEveryAccess(access, {organization, team, project}); const hasSuperuser = Boolean(user?.isSuperuser); - - const renderProps: ChildRenderProps = { - hasAccess, - hasSuperuser, - }; - - const render = hasAccess && (!isSuperuser || hasSuperuser); + const hasAccess = hasEveryAccess(access, { + organization, + team, + project, + }); if (isRenderFunc(children)) { - return children(renderProps); + return children({ + hasAccess, + hasSuperuser, + }); } - return {render ? children : null}; + const render = hasAccess && (!isSuperuser || hasSuperuser); + return render ? 
children : null; } export function hasEveryAccess( access: Scope[], - props: {organization?: Organization; project?: Project; team?: Team} -) { - const {organization, team, project} = props; - const {access: orgAccess} = organization || {access: [] as Organization['access']}; - const {access: teamAccess} = team || {access: [] as Team['access']}; - const {access: projAccess} = project || {access: [] as Project['access']}; + entities: { + organization?: Organization | null; + project?: Project | null; + team?: Team | null; + } +): boolean { + const hasOrganizationAccess = entities.organization + ? access.every(acc => entities.organization?.access?.includes(acc)) + : false; + const hasTeamAccess = entities.team + ? access.every(acc => entities.team?.access?.includes(acc)) + : false; + const hasProjectAccess = entities.project + ? access.every(acc => entities.project?.access?.includes(acc)) + : false; - return ( - !access || - access.every(acc => orgAccess.includes(acc)) || - access.every(acc => teamAccess?.includes(acc)) || - access.every(acc => projAccess?.includes(acc)) - ); + return !access.length || hasOrganizationAccess || hasTeamAccess || hasProjectAccess; } -export default withOrganization(Access); +export default Access; diff --git a/static/app/components/acl/role.spec.tsx b/static/app/components/acl/role.spec.tsx deleted file mode 100644 index 81e251035d9ac..0000000000000 --- a/static/app/components/acl/role.spec.tsx +++ /dev/null @@ -1,162 +0,0 @@ -import {OrganizationFixture} from 'sentry-fixture/organization'; -import {UserFixture} from 'sentry-fixture/user'; - -import {act, render, screen} from 'sentry-test/reactTestingLibrary'; - -import {Role} from 'sentry/components/acl/role'; -import ConfigStore from 'sentry/stores/configStore'; -import OrganizationStore from 'sentry/stores/organizationStore'; - -describe('Role', function () { - const organization = OrganizationFixture({ - orgRole: 'admin', - orgRoleList: [ - { - id: 'member', - name: 'Member', - desc: '...', - minimumTeamRole: 'contributor', - isTeamRolesAllowed: true, - }, - { - id: 'admin', - name: 'Admin', - desc: '...', - minimumTeamRole: 'admin', - isTeamRolesAllowed: true, - }, - { - id: 'manager', - name: 'Manager', - desc: '...', - minimumTeamRole: 'admin', - isTeamRolesAllowed: true, - }, - { - id: 'owner', - name: 'Owner', - desc: '...', - minimumTeamRole: 'admin', - isTeamRolesAllowed: true, - }, - ], - }); - - describe('as render prop', function () { - const childrenMock = jest.fn().mockReturnValue(null); - beforeEach(function () { - OrganizationStore.init(); - childrenMock.mockClear(); - }); - - it('has a sufficient role', function () { - render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: true, - }); - }); - - it('has an insufficient role', function () { - render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: false, - }); - }); - - it('gives access to a superuser with insufficient role', function () { - organization.access = ['org:superuser']; - OrganizationStore.onUpdate(organization, {replace: true}); - - render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: true, - }); - }); - - it('does not give access to a made up role', function () { - render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: false, - }); - }); - - it('handles no user', function () { - const user = {...ConfigStore.get('user')}; - 
ConfigStore.set('user', undefined as any); - render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: false, - }); - act(() => ConfigStore.set('user', user)); - }); - - it('updates if user changes', function () { - ConfigStore.set('user', undefined as any); - const {rerender} = render({childrenMock}, { - organization, - }); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: false, - }); - act(() => ConfigStore.set('user', UserFixture())); - - rerender({childrenMock}); - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: true, - }); - }); - - it('handles no organization.orgRoleList', function () { - render( - - {childrenMock} - , - {organization} - ); - - expect(childrenMock).toHaveBeenCalledWith({ - hasRole: false, - }); - }); - }); - - describe('as React node', function () { - it('has a sufficient role', function () { - render( - -
The Child
-
, - {organization} - ); - - expect(screen.getByText('The Child')).toBeInTheDocument(); - }); - - it('has an insufficient role', function () { - render( - -
The Child
-
, - {organization} - ); - - expect(screen.queryByText('The Child')).not.toBeInTheDocument(); - }); - }); -}); diff --git a/static/app/components/acl/role.tsx b/static/app/components/acl/role.tsx deleted file mode 100644 index 2f01646993739..0000000000000 --- a/static/app/components/acl/role.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import {useMemo} from 'react'; - -import ConfigStore from 'sentry/stores/configStore'; -import type {Organization} from 'sentry/types/organization'; -import type {User} from 'sentry/types/user'; -import {isActiveSuperuser} from 'sentry/utils/isActiveSuperuser'; -import {isRenderFunc} from 'sentry/utils/isRenderFunc'; -import withOrganization from 'sentry/utils/withOrganization'; - -type RoleRenderProps = { - hasRole: boolean; -}; - -type ChildrenRenderFn = (props: RoleRenderProps) => React.ReactElement | null; - -function checkUserRole(user: User, organization: Organization, role: RoleProps['role']) { - if (!user) { - return false; - } - - if (isActiveSuperuser()) { - return true; - } - - if (!Array.isArray(organization.orgRoleList)) { - return false; - } - - const roleIds = organization.orgRoleList.map(r => r.id); - - if (!roleIds.includes(role) || !roleIds.includes(organization.orgRole ?? '')) { - return false; - } - - const requiredIndex = roleIds.indexOf(role); - const currentIndex = roleIds.indexOf(organization.orgRole ?? ''); - return currentIndex >= requiredIndex; -} - -interface RoleProps { - /** - * If children is a function then will be treated as a render prop and - * passed RoleRenderProps. - * - * The other interface is more simple, only show `children` if user has - * the minimum required role. - */ - children: React.ReactElement | ChildrenRenderFn; - /** - * Current Organization - */ - organization: Organization; - /** - * Minimum required role - */ - role: string; -} - -function Role({role, organization, children}: RoleProps): React.ReactElement | null { - const user = ConfigStore.get('user'); - - const hasRole = useMemo( - () => checkUserRole(user, organization, role), - // It seems that this returns a stable reference, but - [organization, role, user] - ); - - if (isRenderFunc(children)) { - return children({hasRole}); - } - - return hasRole ? 
children : null; -} - -const withOrganizationRole = withOrganization(Role); - -export {withOrganizationRole as Role}; diff --git a/static/app/components/acl/useRole.spec.tsx b/static/app/components/acl/useRole.spec.tsx new file mode 100644 index 0000000000000..076bb351bd071 --- /dev/null +++ b/static/app/components/acl/useRole.spec.tsx @@ -0,0 +1,79 @@ +import {OrganizationFixture} from 'sentry-fixture/organization'; +import {UserFixture} from 'sentry-fixture/user'; + +import {renderHook} from 'sentry-test/reactTestingLibrary'; + +import {useRole} from 'sentry/components/acl/useRole'; +import ConfigStore from 'sentry/stores/configStore'; +import OrganizationStore from 'sentry/stores/organizationStore'; +import type {Organization} from 'sentry/types/organization'; +import {OrganizationContext} from 'sentry/views/organizationContext'; + +function createWrapper(organization: Organization) { + return function ({children}: {children: React.ReactNode}) { + return ( + + {children} + + ); + }; +} + +describe('useRole', () => { + const organization = OrganizationFixture({ + // User is an admin of this test org + orgRole: 'admin', + // For these tests, attachments will require an admin role + attachmentsRole: 'admin', + debugFilesRole: 'member', + }); + + beforeEach(() => { + ConfigStore.set('user', UserFixture()); + // OrganizationStore is still called directly in isActiveSuperuser() + OrganizationStore.init(); + OrganizationStore.onUpdate(organization, {replace: true}); + }); + + it('has a sufficient role', () => { + const {result} = renderHook(() => useRole({role: 'attachmentsRole'}), { + wrapper: createWrapper(organization), + }); + expect(result.current.hasRole).toBe(true); + expect(result.current.roleRequired).toBe('admin'); + }); + + it('has an insufficient role', () => { + const org = OrganizationFixture({ + ...organization, + orgRole: 'member', + }); + OrganizationStore.onUpdate(org, {replace: true}); + const {result} = renderHook(() => useRole({role: 'attachmentsRole'}), { + wrapper: createWrapper(org), + }); + expect(result.current.hasRole).toBe(false); + }); + + it('gives access to a superuser with insufficient role', () => { + const org = OrganizationFixture({ + ...organization, + orgRole: 'member', + access: ['org:superuser'], + }); + OrganizationStore.onUpdate(org, {replace: true}); + const {result} = renderHook(() => useRole({role: 'attachmentsRole'}), { + wrapper: createWrapper(org), + }); + expect(result.current.hasRole).toBe(true); + }); + + it('handles no organization.orgRoleList', () => { + const org = {...organization, orgRoleList: []}; + OrganizationStore.onUpdate(org, {replace: true}); + const {result} = renderHook(() => useRole({role: 'attachmentsRole'}), { + wrapper: createWrapper(org), + }); + expect(result.current.hasRole).toBe(false); + }); +}); diff --git a/static/app/components/acl/useRole.tsx b/static/app/components/acl/useRole.tsx new file mode 100644 index 0000000000000..029bd4ed21713 --- /dev/null +++ b/static/app/components/acl/useRole.tsx @@ -0,0 +1,55 @@ +import {useMemo} from 'react'; + +import type {Organization} from 'sentry/types/organization'; +import {isActiveSuperuser} from 'sentry/utils/isActiveSuperuser'; +import useOrganization from 'sentry/utils/useOrganization'; + +function hasOrganizationRole(organization: Organization, roleRequired: string): boolean { + if (!Array.isArray(organization.orgRoleList)) { + return false; + } + + const roleIds = organization.orgRoleList.map(r => r.id); + + const requiredIndex = roleIds.indexOf(roleRequired); + const 
currentIndex = roleIds.indexOf(organization.orgRole ?? ''); + + if (requiredIndex === -1 || currentIndex === -1) { + return false; + } + + // If the user has a lower role than the required role, they do not have access + return currentIndex >= requiredIndex; +} + +interface UseRoleOptions { + /** + * Minimum required role. + * The required role ('member', 'admin') is stored in the organization object. + * e.g. Organization.debugFilesRole = 'member' + */ + role: // Extract keys to enforce that they are available on the Organization type + Extract; +} + +interface UseRoleResult { + hasRole: boolean; + /** + * The required role ('member', 'admin') from the organization object. + */ + roleRequired: string; +} + +export function useRole(options: UseRoleOptions): UseRoleResult { + const organization = useOrganization(); + + return useMemo((): UseRoleResult => { + const roleRequired = organization[options.role]; + if (isActiveSuperuser()) { + return {hasRole: true, roleRequired}; + } + + const hasRole = hasOrganizationRole(organization, roleRequired); + return {hasRole, roleRequired}; + }, [organization, options.role]); +} diff --git a/static/app/components/activity/note/header.tsx b/static/app/components/activity/note/header.tsx index 7755bb9851bfe..36ab77cf10b34 100644 --- a/static/app/components/activity/note/header.tsx +++ b/static/app/components/activity/note/header.tsx @@ -5,19 +5,20 @@ import {openConfirmModal} from 'sentry/components/confirm'; import {DropdownMenu} from 'sentry/components/dropdownMenu'; import {IconEllipsis} from 'sentry/icons'; import {t} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import {space} from 'sentry/styles/space'; import type {User} from 'sentry/types/user'; +import {useUser} from 'sentry/utils/useUser'; type Props = { authorName: string; onDelete: () => void; onEdit: () => void; + // Naming is not great here, but this seems to be the author, i.e. the user who wrote the note. 
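+ // (The viewer is resolved separately below via `useUser()`; `user` here is the note's author.)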
user?: User; }; function NoteHeader({authorName, user, onEdit, onDelete}: Props) { - const activeUser = ConfigStore.get('user'); + const activeUser = useUser(); const canEdit = activeUser && (activeUser.isSuperuser || user?.id === activeUser.id); return ( diff --git a/static/app/components/activity/note/inputWithStorage.tsx b/static/app/components/activity/note/inputWithStorage.tsx index 167a2961ebc5c..b828e14b03ec8 100644 --- a/static/app/components/activity/note/inputWithStorage.tsx +++ b/static/app/components/activity/note/inputWithStorage.tsx @@ -6,6 +6,8 @@ import {NoteInput} from 'sentry/components/activity/note/input'; import type {MentionChangeEvent} from 'sentry/components/activity/note/types'; import type {NoteType} from 'sentry/types/alerts'; import localStorage from 'sentry/utils/localStorage'; +import {StreamlinedNoteInput} from 'sentry/views/issueDetails/streamline/note'; +import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; type InputProps = React.ComponentProps; @@ -14,6 +16,7 @@ type Props = { storageKey: string; onLoad?: (data: string) => string; onSave?: (data: string) => string; + source?: string; text?: string; } & InputProps; @@ -54,8 +57,10 @@ function NoteInputWithStorage({ onLoad, onSave, text, + source, ...props }: Props) { + const hasStreamlinedUi = useHasStreamlinedUI(); const value = useMemo(() => { if (text) { return text; @@ -131,6 +136,19 @@ function NoteInputWithStorage({ ); // Make sure `this.props` does not override `onChange` and `onCreate` + if (hasStreamlinedUi && source === 'issue-details') { + return ( + + ); + } + return ( ); diff --git a/static/app/components/activity/note/mentionStyle.tsx b/static/app/components/activity/note/mentionStyle.tsx index 519c1a21fc312..ca9e10b928e92 100644 --- a/static/app/components/activity/note/mentionStyle.tsx +++ b/static/app/components/activity/note/mentionStyle.tsx @@ -5,13 +5,14 @@ import {space} from 'sentry/styles/space'; type Options = { theme: Theme; minHeight?: number; + streamlined?: boolean; }; /** * Note this is an object for `react-mentions` component and * not a styled component/emotion style */ -export function mentionStyle({theme, minHeight}: Options) { +export function mentionStyle({theme, minHeight, streamlined}: Options) { const inputProps = { fontSize: theme.fontSizeMedium, padding: `${space(1.5)} ${space(2)}`, @@ -21,6 +22,16 @@ export function mentionStyle({theme, minHeight}: Options) { overflow: 'auto', }; + const streamlinedInputProps = { + fontSize: theme.fontSizeMedium, + padding: `${space(1)} ${space(1.5)}`, + outline: 0, + border: `1px solid ${theme.border}`, + borderRadius: theme.borderRadius, + minHeight, + overflow: 'auto', + }; + return { control: { backgroundColor: `${theme.background}`, @@ -56,8 +67,8 @@ function mentionStyle({theme, minHeight}: Options) { }, // Use the same props for the highlighter to keep the phantom text aligned - highlighter: inputProps, - input: inputProps, + highlighter: streamlined ? streamlinedInputProps : inputProps, + input: streamlined ? 
streamlinedInputProps : inputProps, }, suggestions: { diff --git a/static/app/components/assigneeSelectorDropdown.spec.tsx b/static/app/components/assigneeSelectorDropdown.spec.tsx index b9988e94b8e1e..675aef098928c 100644 --- a/static/app/components/assigneeSelectorDropdown.spec.tsx +++ b/static/app/components/assigneeSelectorDropdown.spec.tsx @@ -1,5 +1,4 @@ import {GroupFixture} from 'sentry-fixture/group'; -import {MemberFixture} from 'sentry-fixture/member'; import {ProjectFixture} from 'sentry-fixture/project'; import {TeamFixture} from 'sentry-fixture/team'; import {UserFixture} from 'sentry-fixture/user'; @@ -17,18 +16,25 @@ import MemberListStore from 'sentry/stores/memberListStore'; import ProjectsStore from 'sentry/stores/projectsStore'; import TeamStore from 'sentry/stores/teamStore'; import type {Group} from 'sentry/types/group'; +import type {Team} from 'sentry/types/organization'; +import type {Project} from 'sentry/types/project'; +import type {User} from 'sentry/types/user'; jest.mock('sentry/actionCreators/modal', () => ({ openInviteMembersModal: jest.fn(), })); describe('AssigneeSelectorDropdown', () => { - let USER_1, USER_2, USER_3, USER_4; - let TEAM_1, TEAM_2; - let PROJECT_1; - let GROUP_1; - let GROUP_2; - let GROUP_3; + let USER_1: User; + let USER_2: User; + let USER_3: User; + let USER_4: User; + let TEAM_1: Team; + let TEAM_2: Team; + let PROJECT_1: Project; + let GROUP_1: Group; + let GROUP_2: Group; + let GROUP_3: Group; beforeEach(() => { USER_1 = UserFixture({ @@ -46,7 +52,7 @@ describe('AssigneeSelectorDropdown', () => { name: 'Epic Fail', email: 'epicf@example.com', }); - USER_4 = MemberFixture({ + USER_4 = UserFixture({ id: '4', name: 'Git Hub', email: 'github@example.com', diff --git a/static/app/components/assigneeSelectorDropdown.tsx b/static/app/components/assigneeSelectorDropdown.tsx index de6f809553d84..476271fb237d5 100644 --- a/static/app/components/assigneeSelectorDropdown.tsx +++ b/static/app/components/assigneeSelectorDropdown.tsx @@ -19,7 +19,6 @@ import LoadingIndicator from 'sentry/components/loadingIndicator'; import {Tooltip} from 'sentry/components/tooltip'; import {IconAdd, IconUser} from 'sentry/icons'; import {t, tct, tn} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import MemberListStore from 'sentry/stores/memberListStore'; import ProjectsStore from 'sentry/stores/projectsStore'; import {useLegacyStore} from 'sentry/stores/useLegacyStore'; @@ -29,6 +28,7 @@ import type {Group, SuggestedOwnerReason} from 'sentry/types/group'; import type {Team} from 'sentry/types/organization'; import type {User} from 'sentry/types/user'; import {buildTeamId} from 'sentry/utils'; +import {useUser} from 'sentry/utils/useUser'; const suggestedReasonTable: Record = { suspectCommit: t('Suspect Commit'), @@ -215,7 +215,7 @@ export default function AssigneeSelectorDropdown({ trigger, }: AssigneeSelectorDropdownProps) { const memberLists = useLegacyStore(MemberListStore); - const sessionUser = ConfigStore.get('user'); + const sessionUser = useUser(); const currentMemberList = memberList ?? memberLists?.members ?? 
[]; diff --git a/static/app/components/avatar/baseAvatar.tsx b/static/app/components/avatar/baseAvatar.tsx index b564ded6bed73..0b73327b1a4fd 100644 --- a/static/app/components/avatar/baseAvatar.tsx +++ b/static/app/components/avatar/baseAvatar.tsx @@ -13,9 +13,8 @@ import Gravatar from './gravatar'; import type {ImageStyleProps} from './styles'; import {imageStyle} from './styles'; -type AllowedSize = (typeof ALLOWED_SIZES)[number]; +type AllowedSize = 20 | 32 | 36 | 48 | 52 | 64 | 80 | 96 | 120; -const ALLOWED_SIZES = [20, 32, 36, 48, 52, 64, 80, 96, 120] as const; const DEFAULT_REMOTE_SIZE = 120 satisfies AllowedSize; interface BaseAvatarProps extends React.HTMLAttributes { diff --git a/static/app/components/avatar/seenByList.tsx b/static/app/components/avatar/seenByList.tsx index 75d534bd3a651..9d7e456f1653f 100644 --- a/static/app/components/avatar/seenByList.tsx +++ b/static/app/components/avatar/seenByList.tsx @@ -6,9 +6,9 @@ import AvatarList from 'sentry/components/avatar/avatarList'; import {Tooltip} from 'sentry/components/tooltip'; import {IconShow} from 'sentry/icons'; import {t} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import type {AvatarUser, User} from 'sentry/types/user'; import {userDisplayName} from 'sentry/utils/formatters'; +import {useUser} from 'sentry/utils/useUser'; type Props = { // Avatar size @@ -35,7 +35,7 @@ function SeenByList({ iconPosition = 'left', className, }: Props) { - const activeUser = ConfigStore.get('user'); + const activeUser = useUser(); const displayUsers = seenBy.filter(user => activeUser.id !== user.id); if (displayUsers.length === 0) { diff --git a/static/app/components/badge/badge.tsx b/static/app/components/badge/badge.tsx index 66d5e4a933be2..7c8ac2b082a2f 100644 --- a/static/app/components/badge/badge.tsx +++ b/static/app/components/badge/badge.tsx @@ -3,14 +3,14 @@ import styled from '@emotion/styled'; import {space} from 'sentry/styles/space'; -interface Props extends React.HTMLAttributes { +export interface BadgeProps extends React.HTMLAttributes { text?: string | number | null; type?: keyof Theme['badge']; } -const Badge = styled(({children, text, ...props}: Props) => ( +const Badge = styled(({children, text, ...props}: BadgeProps) => ( {children ?? 
text} -))` +))` display: inline-block; height: 20px; min-width: 20px; diff --git a/static/app/components/badge/groupPriority.tsx b/static/app/components/badge/groupPriority.tsx index a4eada534f3f8..1eeb3ceba2dd3 100644 --- a/static/app/components/badge/groupPriority.tsx +++ b/static/app/components/badge/groupPriority.tsx @@ -1,6 +1,7 @@ import {Fragment, useMemo} from 'react'; import type {Theme} from '@emotion/react'; import styled from '@emotion/styled'; +import {VisuallyHidden} from '@react-aria/visually-hidden'; import bannerStar from 'sentry-images/spot/banner-star.svg'; @@ -15,6 +16,7 @@ import HookOrDefault from 'sentry/components/hookOrDefault'; import Placeholder from 'sentry/components/placeholder'; import {Tooltip} from 'sentry/components/tooltip'; import {IconClose} from 'sentry/icons'; +import {IconCellSignal} from 'sentry/icons/iconCellSignal'; import {t, tct} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import type {Activity} from 'sentry/types/group'; @@ -34,6 +36,8 @@ type GroupPriorityDropdownProps = { type GroupPriorityBadgeProps = { priority: PriorityLevel; children?: React.ReactNode; + showLabel?: boolean; + variant?: 'default' | 'signal'; }; const PRIORITY_KEY_TO_LABEL: Record = { @@ -85,21 +89,40 @@ function useLastEditedBy({ export function makeGroupPriorityDropdownOptions({ onChange, + hasIssueStreamTableLayout, }: { + hasIssueStreamTableLayout: boolean; onChange: (value: PriorityLevel) => void; }) { return PRIORITY_OPTIONS.map(priority => ({ textValue: PRIORITY_KEY_TO_LABEL[priority], key: priority, - label: , + label: ( + + ), onAction: () => onChange(priority), })); } -export function GroupPriorityBadge({priority, children}: GroupPriorityBadgeProps) { +export function GroupPriorityBadge({ + priority, + showLabel = true, + variant = 'default', + children, +}: GroupPriorityBadgeProps) { + const bars = + priority === PriorityLevel.HIGH ? 3 : priority === PriorityLevel.MEDIUM ? 2 : 1; + const label = PRIORITY_KEY_TO_LABEL[priority] ?? t('Unknown'); + return ( - - {PRIORITY_KEY_TO_LABEL[priority] ?? t('Unknown')} + } + > + {showLabel ? 
label : {label}} {children} ); @@ -187,9 +210,14 @@ export function GroupPriorityDropdown({ onChange, lastEditedBy, }: GroupPriorityDropdownProps) { + const organization = useOrganization(); + const hasIssueStreamTableLayout = organization.features.includes( + 'issue-stream-table-layout' + ); + const options: MenuItemProps[] = useMemo( - () => makeGroupPriorityDropdownOptions({onChange}), - [onChange] + () => makeGroupPriorityDropdownOptions({onChange, hasIssueStreamTableLayout}), + [onChange, hasIssueStreamTableLayout] ); return ( @@ -207,7 +235,11 @@ export function GroupPriorityDropdown({ aria-label={t('Modify issue priority')} size="zero" > - + diff --git a/static/app/components/charts/baseChart.tsx b/static/app/components/charts/baseChart.tsx index 16f8732ed1c86..52af47d0e8b43 100644 --- a/static/app/components/charts/baseChart.tsx +++ b/static/app/components/charts/baseChart.tsx @@ -704,6 +704,11 @@ const getTooltipStyles = (p: {theme: Theme}) => css` justify-content: space-between; align-items: baseline; } + .tooltip-code-no-margin { + padding-left: 0; + margin-left: 0; + color: ${p.theme.subText}; + } .tooltip-footer { border-top: solid 1px ${p.theme.innerBorder}; text-align: center; diff --git a/static/app/components/charts/chartZoom.tsx b/static/app/components/charts/chartZoom.tsx index 681539c298d29..41a3442289d39 100644 --- a/static/app/components/charts/chartZoom.tsx +++ b/static/app/components/charts/chartZoom.tsx @@ -22,6 +22,7 @@ import type { } from 'sentry/types/echarts'; import type {InjectedRouter} from 'sentry/types/legacyReactRouter'; import {getUtcDateString, getUtcToLocalDateObject} from 'sentry/utils/dates'; +import withSentryRouter from 'sentry/utils/withSentryRouter'; const getDate = date => date ? moment.utc(date).format(moment.HTML5_FMT.DATETIME_LOCAL_SECONDS) : null; @@ -32,16 +33,15 @@ type Period = { start: DateString; }; -const ZoomPropKeys = [ - 'period', - 'xAxis', - 'onChartReady', - 'onDataZoom', - 'onRestore', - 'onFinished', -] as const; +type ZoomPropKeys = + | 'period' + | 'xAxis' + | 'onChartReady' + | 'onDataZoom' + | 'onRestore' + | 'onFinished'; -export interface ZoomRenderProps extends Pick { +export interface ZoomRenderProps extends Pick { dataZoom?: DataZoomComponentOption[]; end?: Date; isGroupedByDate?: boolean; @@ -398,4 +398,4 @@ class ChartZoom extends Component { } } -export default ChartZoom; +export default withSentryRouter(ChartZoom); diff --git a/static/app/components/charts/eventsChart.tsx b/static/app/components/charts/eventsChart.tsx index f7fdbdb5a8f08..07fd7b641dfb9 100644 --- a/static/app/components/charts/eventsChart.tsx +++ b/static/app/components/charts/eventsChart.tsx @@ -46,7 +46,7 @@ import { isEquation, } from 'sentry/utils/discover/fields'; import type {DiscoverDatasets} from 'sentry/utils/discover/types'; -import {decodeList} from 'sentry/utils/queryString'; +import {decodeList, decodeScalar} from 'sentry/utils/queryString'; import EventsRequest from './eventsRequest'; @@ -78,6 +78,7 @@ type ChartProps = { * a list of series names that are also disableable. 
*/ disableableSeries?: string[]; + forceChartType?: string; fromDiscover?: boolean; height?: number; interval?: string; @@ -137,7 +138,7 @@ class Chart extends Component { } getChartComponent(): ChartComponent { - const {showDaily, timeseriesData, yAxis, chartComponent} = this.props; + const {showDaily, timeseriesData, yAxis, chartComponent, forceChartType} = this.props; if (defined(chartComponent)) { return chartComponent; @@ -148,7 +149,7 @@ class Chart extends Component { } if (timeseriesData.length > 1) { - switch (aggregateMultiPlotType(yAxis)) { + switch (forceChartType || aggregateMultiPlotType(yAxis)) { case 'line': return LineChart; case 'area': @@ -541,6 +542,7 @@ class EventsChart extends Component { // Include previous only on relative dates (defaults to relative if no start and end) const includePrevious = !disablePrevious && !start && !end; + const forceChartType = decodeScalar(router.location.query.forceChartType); const yAxisArray = decodeList(yAxis); const yAxisSeriesNames = yAxisArray.map(name => { let yAxisLabel = name && isEquation(name) ? getEquation(name) : name; @@ -590,6 +592,7 @@ class EventsChart extends Component { {isValidElement(chartHeader) && chartHeader} { return ( ; + queryExtras?: Record; /** * A unique name for what's triggering this request, see organization_events_stats for an allowlist */ diff --git a/static/app/components/charts/releaseSeries.spec.tsx b/static/app/components/charts/releaseSeries.spec.tsx index 5d047e11f4755..22151a9547547 100644 --- a/static/app/components/charts/releaseSeries.spec.tsx +++ b/static/app/components/charts/releaseSeries.spec.tsx @@ -1,7 +1,8 @@ +import {Fragment} from 'react'; import {OrganizationFixture} from 'sentry-fixture/organization'; import {RouterFixture} from 'sentry-fixture/routerFixture'; -import {render, waitFor} from 'sentry-test/reactTestingLibrary'; +import {render, screen, waitFor} from 'sentry-test/reactTestingLibrary'; import type {ReleaseSeriesProps} from 'sentry/components/charts/releaseSeries'; import ReleaseSeries from 'sentry/components/charts/releaseSeries'; @@ -14,6 +15,8 @@ describe('ReleaseSeries', function () { let releasesMock; beforeEach(function () { + jest.resetAllMocks(); + releases = [ { version: 'sentry-android-shop@1.2.0', @@ -218,6 +221,28 @@ describe('ReleaseSeries', function () { await waitFor(() => expect(releasesMock).toHaveBeenCalledTimes(2)); }); + it('shares release fetches between components with memoize enabled', async function () { + render( + + + {({releaseSeries}) => { + return releaseSeries.length > 0 ? Series 1 : null; + }} + + + {({releaseSeries}) => { + return releaseSeries.length > 0 ? 
Series 2 : null; + }} + + + ); + + await waitFor(() => expect(screen.getByText('Series 1')).toBeInTheDocument()); + await waitFor(() => expect(screen.getByText('Series 2')).toBeInTheDocument()); + + await waitFor(() => expect(releasesMock).toHaveBeenCalledTimes(1)); + }); + it('generates an eCharts `markLine` series from releases', async function () { render({renderFunc}); diff --git a/static/app/components/charts/releaseSeries.tsx b/static/app/components/charts/releaseSeries.tsx index 6f114b5e02d96..b126c57ee68cc 100644 --- a/static/app/components/charts/releaseSeries.tsx +++ b/static/app/components/charts/releaseSeries.tsx @@ -46,7 +46,7 @@ function getOrganizationReleases( organization: Organization, conditions: ReleaseConditions ) { - const query = {}; + const query: Record = {}; Object.keys(conditions).forEach(key => { let value = conditions[key]; if (value && (key === 'start' || key === 'end')) { @@ -64,6 +64,14 @@ function getOrganizationReleases( }) as Promise<[ReleaseMetaBasic[], any, ResponseMeta]>; } +const getOrganizationReleasesMemoized = memoize( + getOrganizationReleases, + (_, __, conditions) => + Object.values(conditions) + .map(val => JSON.stringify(val)) + .join('-') +); + export interface ReleaseSeriesProps extends WithRouterProps { api: Client; children: (s: State) => React.ReactNode; @@ -130,15 +138,6 @@ class ReleaseSeries extends Component { _isMounted: boolean = false; - getOrganizationReleasesMemoized = memoize( - (api: Client, organization: Organization, conditions: ReleaseConditions) => - getOrganizationReleases(api, organization, conditions), - (_, __, conditions) => - Object.values(conditions) - .map(val => JSON.stringify(val)) - .join('-') - ); - async fetchData() { const { api, @@ -164,7 +163,7 @@ class ReleaseSeries extends Component { while (hasMore) { try { const getReleases = memoized - ? this.getOrganizationReleasesMemoized + ? getOrganizationReleasesMemoized : getOrganizationReleases; const [newReleases, , resp] = await getReleases(api, organization, conditions); releases.push(...newReleases); @@ -293,7 +292,6 @@ class ReleaseSeries extends Component { '', - '', '
', ].join(''); }, diff --git a/static/app/components/charts/series/barSeries.tsx b/static/app/components/charts/series/barSeries.tsx index 99d9ef266e676..6120073c92119 100644 --- a/static/app/components/charts/series/barSeries.tsx +++ b/static/app/components/charts/series/barSeries.tsx @@ -1,11 +1,15 @@ import 'echarts/lib/chart/bar'; -import type {BarSeriesOption} from 'echarts'; +import type {BarSeriesOption, LineSeriesOption} from 'echarts'; -function barSeries(props: BarSeriesOption): BarSeriesOption { +/** + * The return type can be BarSeriesOption or LineSeriesOption so that we can add + * custom lines on top of the event bar chart in `eventGraph.tsx`. + */ +function barSeries(props: BarSeriesOption): BarSeriesOption | LineSeriesOption { return { ...props, - type: 'bar', + type: props.type ?? 'bar', }; } diff --git a/static/app/components/charts/useChartZoom.tsx b/static/app/components/charts/useChartZoom.tsx index 34c69a483474a..99a3d5deeb5e9 100644 --- a/static/app/components/charts/useChartZoom.tsx +++ b/static/app/components/charts/useChartZoom.tsx @@ -1,11 +1,10 @@ -import {useCallback, useMemo, useState} from 'react'; +import {useCallback, useEffect, useMemo, useRef} from 'react'; import type { DataZoomComponentOption, + ECharts, InsideDataZoomComponentOption, ToolboxComponentOption, - XAXisComponentOption, } from 'echarts'; -import moment from 'moment-timezone'; import * as qs from 'query-string'; import {updateDateTime} from 'sentry/actionCreators/pageFilters'; @@ -17,112 +16,138 @@ import type { EChartChartReadyHandler, EChartDataZoomHandler, EChartFinishedHandler, - EChartRestoreHandler, } from 'sentry/types/echarts'; -import type {InjectedRouter} from 'sentry/types/legacyReactRouter'; -import {getUtcDateString, getUtcToLocalDateObject} from 'sentry/utils/dates'; +import {getUtcDateString} from 'sentry/utils/dates'; +import {useLocation} from 'sentry/utils/useLocation'; +import {useNavigate} from 'sentry/utils/useNavigate'; +import useRouter from 'sentry/utils/useRouter'; // TODO: replace usages of ChartZoom with useChartZoom -const getDate = date => - date ? moment.utc(date).format(moment.HTML5_FMT.DATETIME_LOCAL_SECONDS) : null; +type DateTimeUpdate = Parameters[0]; -type Period = { - end: DateString; - period: string | null; - start: DateString; -}; - -const ZoomPropKeys = [ - 'period', - 'xAxis', - 'onChartReady', - 'onDataZoom', - 'onRestore', - 'onFinished', -] as const; - -export interface ZoomRenderProps extends Pick { - dataZoom?: DataZoomComponentOption[]; - end?: Date; - isGroupedByDate?: boolean; - showTimeInTooltip?: boolean; - start?: Date; - toolBox?: ToolboxComponentOption; - utc?: boolean; +/** + * Our API query params expect a specific date format + */ +const getQueryTime = (date: DateString | undefined) => + date ? 
getUtcDateString(date) : null; + +interface ZoomRenderProps { + dataZoom: DataZoomComponentOption[]; + isGroupedByDate: boolean; + onChartReady: EChartChartReadyHandler; + onDataZoom: EChartDataZoomHandler; + onFinished: EChartFinishedHandler; + toolBox: ToolboxComponentOption; } interface Props { children: (props: ZoomRenderProps) => React.ReactNode; chartZoomOptions?: DataZoomComponentOption; + /** + * Disables saving changes to the current period + */ disabled?: boolean; - end?: DateString; - onChartReady?: EChartChartReadyHandler; - onDataZoom?: EChartDataZoomHandler; - onFinished?: EChartFinishedHandler; - onRestore?: EChartRestoreHandler; - onZoom?: (period: Period) => void; - period?: string | null; - router?: InjectedRouter; + onZoom?: (period: DateTimeUpdate) => void; + /** + * Use either `saveOnZoom` or `usePageDate` not both + * Will persist zoom state to page filters + */ saveOnZoom?: boolean; showSlider?: boolean; - start?: DateString; + /** + * Use either `saveOnZoom` or `usePageDate` not both + * Persists zoom state to query params without updating page filters. + * Sets the pageStart and pageEnd query params + */ usePageDate?: boolean; - utc?: boolean | null; - xAxis?: XAXisComponentOption; xAxisIndex?: number | number[]; } +/** + * Adds listeners to the document to allow for cancelling the zoom action + */ +function useChartZoomCancel() { + const chartInstance = useRef(null); + const handleKeyDown = useCallback((evt: KeyboardEvent) => { + if (!chartInstance.current) { + return; + } + + if (evt.key === 'Escape') { + evt.stopPropagation(); + // Mark the component as currently cancelling a zoom selection. This allows + // us to prevent "restore" handlers from running + // "restore" removes the current chart zoom selection + chartInstance.current.dispatchAction({ + type: 'restore', + }); + } + }, []); + + const handleMouseUp = useCallback(() => { + document.body.removeEventListener('mouseup', handleMouseUp); + }, []); + + const handleMouseDown = useCallback(() => { + // Register `mouseup` and `keydown` listeners on mouse down + // This ensures that there is only one live listener at a time + // regardless of how many charts are rendered. NOTE: It's + // important to set `useCapture: true` in the `"keydown"` handler + // otherwise the Escape will close whatever modal or panel the + // chart is in. Those elements register their handlers _earlier_. + document.body.addEventListener('mouseup', handleMouseUp); + document.body.addEventListener('keydown', handleKeyDown, true); + }, [handleKeyDown, handleMouseUp]); + + const handleChartReady = useCallback( + chart => { + if (chartInstance.current) { + // remove listeners from previous chart if called multiple times + chartInstance.current.getDom()?.removeEventListener('mousedown', handleMouseDown); + } + + chartInstance.current = chart; + const chartDom = chart.getDom(); + chartDom.addEventListener('mousedown', handleMouseDown); + }, + [handleMouseDown] + ); + + useEffect(() => { + return () => { + // Cleanup listeners on unmount + document.body.removeEventListener('mouseup', handleMouseUp); + document.body.removeEventListener('keydown', handleKeyDown); + chartInstance.current?.getDom()?.removeEventListener('mousedown', handleMouseDown); + }; + }, [handleMouseDown, handleMouseUp, handleKeyDown]); + + return {handleChartReady}; +} + /** * This hook provides an alternative to using the `ChartZoom` component. It returns * the props that would be passed to the `BaseChart` as zoomRenderProps. 
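 *
 * Minimal usage sketch (assumes a `series` variable in scope):
 *
 *   const zoomRenderProps = useChartZoom({saveOnZoom: true});
 *   return <BaseChart series={series} {...zoomRenderProps} />;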
*/ export function useChartZoom({ - period, - start, - end, - utc, - router, onZoom, usePageDate, saveOnZoom, - onChartReady, - onRestore, - onDataZoom, - onFinished, xAxisIndex, showSlider, chartZoomOptions, - xAxis, - disabled, -}: Omit = {}) { - const [currentPeriod, setCurrentPeriod] = useState({ - period: period!, - start: getDate(start), - end: getDate(end), - }); - const [history, setHistory] = useState([]); - - const [zooming, setZooming] = useState<(() => void) | null>(null); +}: Omit): ZoomRenderProps { + const {handleChartReady} = useChartZoomCancel(); + const location = useLocation(); + const navigate = useNavigate(); + const router = useRouter(); /** - * Save current period state from period in props to be used - * in handling chart's zoom history state + * Used to store the date update function so that we can call it after the chart + * animation is complete */ - const saveCurrentPeriod = useCallback( - (newPeriod: Period) => { - if (disabled) { - return; - } - - setCurrentPeriod({ - period: newPeriod.period, - start: getDate(newPeriod.start), - end: getDate(newPeriod.end), - }); - }, - [disabled] - ); + const zooming = useRef<(() => void) | null>(null); /** * Sets the new period due to a zoom related action @@ -134,14 +159,9 @@ export function useChartZoom({ * Saves a callback function to be called after chart animation is completed */ const setPeriod = useCallback( - (newPeriod, saveHistory = false) => { - const startFormatted = getDate(newPeriod.start); - const endFormatted = getDate(newPeriod.end); - - // Save period so that we can revert back to it when using echarts "back" navigation - if (saveHistory) { - setHistory(curr => [...curr, currentPeriod!]); - } + (newPeriod: DateTimeUpdate) => { + const startFormatted = getQueryTime(newPeriod.start); + const endFormatted = getQueryTime(newPeriod.end); // Callback to let parent component know zoom has changed // This is required for some more perceived responsiveness since @@ -151,23 +171,23 @@ export function useChartZoom({ // URL parameters are changed onZoom?.({ period: newPeriod.period, - start: startFormatted, - end: endFormatted, + start: getQueryTime(newPeriod.start), + end: getQueryTime(newPeriod.end), }); - setZooming(() => { - if (usePageDate && router) { + zooming.current = () => { + if (usePageDate) { const newQuery = { - ...router.location.query, - pageStart: newPeriod.start ? getUtcDateString(newPeriod.start) : undefined, - pageEnd: newPeriod.end ? getUtcDateString(newPeriod.end) : undefined, + ...location.query, + pageStart: startFormatted, + pageEnd: endFormatted, pageStatsPeriod: newPeriod.period ?? undefined, }; // Only push new location if query params has changed because this will cause a heavy re-render - if (qs.stringify(newQuery) !== qs.stringify(router.location.query)) { - router.push({ - pathname: router.location.pathname, + if (qs.stringify(newQuery) !== qs.stringify(location.query)) { + navigate({ + pathname: location.pathname, query: newQuery, }); } @@ -175,69 +195,37 @@ export function useChartZoom({ updateDateTime( { period: newPeriod.period, - start: startFormatted - ? getUtcToLocalDateObject(startFormatted) - : startFormatted, - end: endFormatted ? 
getUtcToLocalDateObject(endFormatted) : endFormatted, + start: startFormatted, + end: endFormatted, }, router, {save: saveOnZoom} ); } - - saveCurrentPeriod(newPeriod); - }); + }; }, - [currentPeriod, onZoom, router, saveCurrentPeriod, saveOnZoom, usePageDate] + [onZoom, navigate, location, router, saveOnZoom, usePageDate] ); - /** - * Enable zoom immediately instead of having to toggle to zoom - */ - const handleChartReady = chart => { - onChartReady?.(chart); - }; - - /** - * Restores the chart to initial viewport/zoom level - * - * Updates URL state to reflect initial params - */ - const handleZoomRestore = (evt, chart) => { - if (!history.length) { - return; - } - - setPeriod(history[0]); - setHistory([]); - - onRestore?.(evt, chart); - }; - - const handleDataZoom = (evt, chart) => { - const model = chart.getModel(); - const {startValue, endValue} = model._payload.batch[0]; - - // if `rangeStart` and `rangeEnd` are null, then we are going back - if (startValue === null && endValue === null) { - const previousPeriod = history.pop(); - setHistory(history); - - if (!previousPeriod) { - return; + const handleDataZoom = useCallback( + evt => { + // @ts-expect-error weirdly evt.startValue and evt.endValue do not exist + const {startValue, endValue} = evt.batch[0] as { + endValue: number | null; + startValue: number | null; + }; + + // `startValue` and `endValue` are null when the chart zoom is restored, + // so only update the period when an actual range was selected + if (startValue && endValue) { + setPeriod({ + period: null, + start: startValue ? getUtcDateString(startValue) : null, + end: endValue ? getUtcDateString(endValue) : null, + }); + } - - setPeriod(previousPeriod); - } else { - setPeriod( - // Add a day so we go until the end of the day (e.g. next day at midnight) - {period: null, start: moment.utc(startValue), end: moment.utc(endValue)}, - true - ); - } - - onDataZoom?.(evt, chart); - }; + }, + [setPeriod] + ); /** * Chart event when *any* rendering+animation finishes @@ -246,14 +234,15 @@ export function useChartZoom({ * we can let the native zoom animation on the chart complete * before we update URL state and re-render */ - const handleChartFinished = (_props, chart) => { - if (typeof zooming === 'function') { - zooming(); - setZooming(null); + const handleChartFinished = useCallback((_props, chart) => { + if (typeof zooming.current === 'function') { + zooming.current(); + zooming.current = null; } // This attempts to activate the area zoom toolbox feature - const zoom = chart._componentsViews?.find(c => c._features?.dataZoom); + // @ts-expect-error _componentsViews is private + const zoom = chart._componentsViews?.find((c: any) => c._features?.dataZoom); if (zoom && !zoom._features.dataZoom._isZoomActive) { // Calling dispatchAction will re-trigger handleChartFinished chart.dispatchAction({ @@ -262,31 +251,19 @@ export function useChartZoom({ dataZoomSelectActive: true, }); } + }, []); - if (typeof onFinished === 'function') { - onFinished(_props, chart); - } - }; - - const startProp = start ? getUtcToLocalDateObject(start) : undefined; - const endProp = end ? getUtcToLocalDateObject(end) : undefined; - - const dataZoomProp = useMemo(() => { + const dataZoomProp = useMemo(() => { + const zoomInside = DataZoomInside({ + xAxisIndex, + ...(chartZoomOptions as InsideDataZoomComponentOption), + }); return showSlider - ? 
[ - ...DataZoomSlider({xAxisIndex, ...chartZoomOptions}), - ...DataZoomInside({ - xAxisIndex, - ...(chartZoomOptions as InsideDataZoomComponentOption), - }), - ] - : DataZoomInside({ - xAxisIndex, - ...(chartZoomOptions as InsideDataZoomComponentOption), - }); + ? [...DataZoomSlider({xAxisIndex, ...chartZoomOptions}), ...zoomInside] + : zoomInside; }, [chartZoomOptions, showSlider, xAxisIndex]); - const toolBox = useMemo( + const toolBox = useMemo( () => ToolBox( {}, @@ -307,20 +284,14 @@ export function useChartZoom({ [] ); - const renderProps = { + const renderProps: ZoomRenderProps = { // Zooming only works when grouped by date isGroupedByDate: true, - utc: utc ?? undefined, - start: startProp, - end: endProp, - xAxis, dataZoom: dataZoomProp, - showTimeInTooltip: true, toolBox, - onChartReady: handleChartReady, onDataZoom: handleDataZoom, onFinished: handleChartFinished, - onRestore: handleZoomRestore, + onChartReady: handleChartReady, }; return renderProps; diff --git a/static/app/components/charts/utils.tsx b/static/app/components/charts/utils.tsx index 73028c4a1f107..1cd65eee02086 100644 --- a/static/app/components/charts/utils.tsx +++ b/static/app/components/charts/utils.tsx @@ -8,7 +8,11 @@ import moment from 'moment-timezone'; import {DEFAULT_STATS_PERIOD} from 'sentry/constants'; import type {PageFilters} from 'sentry/types/core'; import type {ReactEchartsRef, Series} from 'sentry/types/echarts'; -import type {EventsStats, MultiSeriesEventsStats} from 'sentry/types/organization'; +import type { + EventsStats, + GroupedMultiSeriesEventsStats, + MultiSeriesEventsStats, +} from 'sentry/types/organization'; import {defined, escape} from 'sentry/utils'; import {getFormattedDate} from 'sentry/utils/dates'; import type {TableDataWithTitle} from 'sentry/utils/discover/discoverQuery'; @@ -216,7 +220,7 @@ export function getSeriesSelection( } function isSingleSeriesStats( - data: MultiSeriesEventsStats | EventsStats + data: MultiSeriesEventsStats | EventsStats | GroupedMultiSeriesEventsStats ): data is EventsStats { return ( (defined(data.data) || defined(data.totals)) && @@ -226,7 +230,12 @@ function isSingleSeriesStats( } export function isMultiSeriesStats( - data: MultiSeriesEventsStats | EventsStats | null | undefined, + data: + | MultiSeriesEventsStats + | EventsStats + | GroupedMultiSeriesEventsStats + | null + | undefined, isTopN?: boolean ): data is MultiSeriesEventsStats { return ( diff --git a/static/app/components/codeSnippet.tsx b/static/app/components/codeSnippet.tsx index c30c62ef8cdeb..0100efaeb7a05 100644 --- a/static/app/components/codeSnippet.tsx +++ b/static/app/components/codeSnippet.tsx @@ -225,8 +225,8 @@ const Header = styled('div')<{isSolid: boolean}>` ${p => p.isSolid ? 
` - margin: 0 ${space(0.5)}; - border-bottom: solid 1px var(--prism-highlight-accent); + padding: 0 ${space(0.5)}; + border-bottom: solid 1px ${p.theme.innerBorder}; ` : ` justify-content: flex-end; diff --git a/static/app/components/commitRow.tsx b/static/app/components/commitRow.tsx index b2380235e869d..f71c727f35d9e 100644 --- a/static/app/components/commitRow.tsx +++ b/static/app/components/commitRow.tsx @@ -17,12 +17,12 @@ import Version from 'sentry/components/version'; import VersionHoverCard from 'sentry/components/versionHoverCard'; import {IconQuestion, IconWarning} from 'sentry/icons'; import {t, tct} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import {space} from 'sentry/styles/space'; import type {Commit} from 'sentry/types/integrations'; import type {AvatarProject} from 'sentry/types/project'; import {trackAnalytics} from 'sentry/utils/analytics'; import useOrganization from 'sentry/utils/useOrganization'; +import {useUser} from 'sentry/utils/useUser'; import {Divider} from 'sentry/views/issueDetails/divider'; import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; @@ -49,6 +49,7 @@ function CommitRow({ onCommitClick, project, }: CommitRowProps) { + const user = useUser(); const hasStreamlinedUI = useHasStreamlinedUI(); const organization = useOrganization(); const handleInviteClick = useCallback(() => { @@ -74,7 +75,6 @@ function CommitRow({ }); }, [commit.author, organization]); - const user = ConfigStore.get('user'); const isUser = user?.id === commit.author?.id; const firstRelease = commit.releases?.[0]; diff --git a/static/app/components/compactSelect/control.tsx b/static/app/components/compactSelect/control.tsx index 8b63cb8128481..141d148d7a4f7 100644 --- a/static/app/components/compactSelect/control.tsx +++ b/static/app/components/compactSelect/control.tsx @@ -121,7 +121,7 @@ export interface ControlProps /** * Message to be displayed when all options have been filtered out (via search). */ - emptyMessage?: string; + emptyMessage?: React.ReactNode; /** * Whether to render a grid list rather than a list box. * @@ -215,6 +215,7 @@ export interface ControlProps */ export function Control({ // Control props + autoFocus, trigger, triggerLabel: triggerLabelProp, triggerProps, @@ -297,6 +298,11 @@ export function Control({ ?.focus(); } + // Prevent form submissions on Enter key press in search box + if (e.key === 'Enter') { + e.preventDefault(); + } + // Continue propagation, otherwise the overlay won't close on Esc key press e.continuePropagation(); }, @@ -395,6 +401,20 @@ export function Control({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [menuBody, hideOptions]); + const wasRefAvailable = useRef(false); + useEffect(() => { + // Trigger ref is set by a setState in useOverlay, so we need to wait for it to be available + // We also need to make sure we only focus once + if (!triggerRef.current || wasRefAvailable.current) { + return; + } + wasRefAvailable.current = true; + + if (autoFocus && !disabled) { + triggerRef.current.focus(); + } + }, [autoFocus, disabled, triggerRef]); + /** * The menu's full width, before any option has been filtered out. Used to maintain a * constant width while the user types into the search box. 
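The focus-once effect in Control above has to wait for a trigger ref that is only assigned after a state update inside useOverlay, and must never re-focus on later renders. A minimal standalone sketch of the same one-shot pattern, assuming only React; the hook name and its API are illustrative, not part of this change:

import {useEffect, useRef, type RefObject} from 'react';

// Focus an element exactly once, as soon as its ref is populated.
// Mirrors the `wasRefAvailable` guard used in Control above.
function useFocusOnce(ref: RefObject<HTMLElement>, enabled: boolean) {
  const hasFocused = useRef(false);

  // No dependency array: the ref may only be assigned during a later render
  // (e.g. via setState in a library hook), so re-check after every render
  // until it becomes available, then never focus again.
  useEffect(() => {
    if (enabled && ref.current && !hasFocused.current) {
      hasFocused.current = true;
      ref.current.focus();
    }
  });
}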
diff --git a/static/app/components/dataExport.tsx b/static/app/components/dataExport.tsx index 6ab45276d578a..5f30338ebd5b2 100644 --- a/static/app/components/dataExport.tsx +++ b/static/app/components/dataExport.tsx @@ -7,6 +7,8 @@ import Feature from 'sentry/components/acl/feature'; import {Button} from 'sentry/components/button'; import {t} from 'sentry/locale'; import type {Organization} from 'sentry/types/organization'; +import useApi from 'sentry/utils/useApi'; +import useOrganization from 'sentry/utils/useOrganization'; import withApi from 'sentry/utils/withApi'; import withOrganization from 'sentry/utils/withOrganization'; @@ -30,39 +32,20 @@ interface DataExportProps { icon?: React.ReactNode; } -function DataExport({ - api, - children, - disabled, - organization, +export function useDataExport({ payload, - icon, -}: DataExportProps): React.ReactElement { - const unmountedRef = useRef(false); - const [inProgress, setInProgress] = useState(false); - - // We clear the indicator if export props change so that the user - // can fire another export without having to wait for the previous one to finish. - useEffect(() => { - if (inProgress) { - setInProgress(false); - } - // We are skipping the inProgress dependency because it would have fired on each handleDataExport - // call and would have immediately turned off the value giving users no feedback on their click action. - // An alternative way to handle this would have probably been to key the component by payload/queryType, - // but that seems like it can be a complex object so tracking changes could result in very brittle behavior. - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [payload.queryType, payload.queryInfo]); - - // Tracking unmounting of the component to prevent setState call on unmounted component - useEffect(() => { - return () => { - unmountedRef.current = true; - }; - }, []); + inProgressCallback, + unmountedRef, +}: { + payload: DataExportPayload; + inProgressCallback?: (inProgress: boolean) => void; + unmountedRef?: React.RefObject<boolean>; +}) { + const organization = useOrganization(); + const api = useApi(); - const handleDataExport = useCallback(() => { - setInProgress(true); + return useCallback(() => { + inProgressCallback?.(true); // This is a fire-and-forget request. api @@ -76,7 +59,7 @@ function DataExport({ }) .then(([_data, _, response]) => { // If component has unmounted, don't do anything - if (unmountedRef.current) { + if (unmountedRef?.current) { return; } @@ -90,7 +73,7 @@ function DataExport({ }) .catch(err => { // If component has unmounted, don't do anything - if (unmountedRef.current) { + if (unmountedRef?.current) { return; } const message = @@ -100,9 +83,51 @@ function DataExport({ ); addErrorMessage(message); - setInProgress(false); + inProgressCallback?.(false); }); - }, [payload.queryInfo, payload.queryType, organization.slug, api]); + }, [ + payload.queryInfo, + payload.queryType, + organization.slug, + api, + inProgressCallback, + unmountedRef, + ]); +} + +function DataExport({ + children, + disabled, + payload, + icon, +}: DataExportProps): React.ReactElement { + const unmountedRef = useRef(false); + const [inProgress, setInProgress] = useState(false); + const handleDataExport = useDataExport({ + payload, + unmountedRef, + inProgressCallback: setInProgress, + }); + + // We clear the indicator if export props change so that the user + // can fire another export without having to wait for the previous one to finish. 
+ useEffect(() => { + if (inProgress) { + setInProgress(false); + } + // We are skipping the inProgress dependency because it would have fired on each handleDataExport + // call and would have immediately turned off the value giving users no feedback on their click action. + // An alternative way to handle this would have probably been to key the component by payload/queryType, + // but that seems like it can be a complex object so tracking changes could result in very brittle behavior. + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [payload.queryType, payload.queryInfo]); + + // Tracking unmounting of the component to prevent setState call on unmounted component + useEffect(() => { + return () => { + unmountedRef.current = true; + }; + }, []); return ( diff --git a/static/app/components/dateTime.tsx b/static/app/components/dateTime.tsx index 806ea26c90371..64f08aba4cf0e 100644 --- a/static/app/components/dateTime.tsx +++ b/static/app/components/dateTime.tsx @@ -1,7 +1,7 @@ import moment from 'moment-timezone'; -import ConfigStore from 'sentry/stores/configStore'; import {getFormat} from 'sentry/utils/dates'; +import {useUser} from 'sentry/utils/useUser'; export interface DateTimeProps extends React.HTMLAttributes { /** @@ -59,7 +59,7 @@ export function DateTime({ forcedTimezone, ...props }: DateTimeProps) { - const user = ConfigStore.get('user'); + const user = useUser(); const options = user?.options; const formatString = diff --git a/static/app/components/deprecatedAssigneeSelector.spec.tsx b/static/app/components/deprecatedAssigneeSelector.spec.tsx index e955edaddbd4e..c11f5d77679d8 100644 --- a/static/app/components/deprecatedAssigneeSelector.spec.tsx +++ b/static/app/components/deprecatedAssigneeSelector.spec.tsx @@ -1,5 +1,4 @@ import {GroupFixture} from 'sentry-fixture/group'; -import {MemberFixture} from 'sentry-fixture/member'; import {ProjectFixture} from 'sentry-fixture/project'; import {TeamFixture} from 'sentry-fixture/team'; import {UserFixture} from 'sentry-fixture/user'; @@ -15,19 +14,26 @@ import IndicatorStore from 'sentry/stores/indicatorStore'; import MemberListStore from 'sentry/stores/memberListStore'; import ProjectsStore from 'sentry/stores/projectsStore'; import TeamStore from 'sentry/stores/teamStore'; +import type {Group} from 'sentry/types/group'; +import type {Team} from 'sentry/types/organization'; +import type {Project} from 'sentry/types/project'; +import type {User} from 'sentry/types/user'; jest.mock('sentry/actionCreators/modal', () => ({ openInviteMembersModal: jest.fn(), })); describe('DeprecatedAssigneeSelector', () => { - let assignMock; - let assignGroup2Mock; - let USER_1, USER_2, USER_3, USER_4; - let TEAM_1; - let PROJECT_1; - let GROUP_1; - let GROUP_2; + let assignMock: jest.Mock; + let assignGroup2Mock: jest.Mock; + let USER_1: User; + let USER_2: User; + let USER_3: User; + let USER_4: User; + let TEAM_1: Team; + let PROJECT_1: Project; + let GROUP_1: Group; + let GROUP_2: Group; beforeEach(() => { USER_1 = UserFixture({ @@ -45,7 +51,7 @@ describe('DeprecatedAssigneeSelector', () => { name: 'J J', email: 'jj@example.com', }); - USER_4 = MemberFixture({ + USER_4 = UserFixture({ id: '4', name: 'Jane Doe', email: 'janedoe@example.com', diff --git a/static/app/components/discover/quickContextCommitRow.tsx b/static/app/components/discover/quickContextCommitRow.tsx index 65779b6a84e2e..4584885a08ab9 100644 --- a/static/app/components/discover/quickContextCommitRow.tsx +++ b/static/app/components/discover/quickContextCommitRow.tsx @@ 
-9,11 +9,11 @@ import PanelItem from 'sentry/components/panels/panelItem'; import TextOverflow from 'sentry/components/textOverflow'; import {Tooltip} from 'sentry/components/tooltip'; import {t, tct} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import {space} from 'sentry/styles/space'; +import {useUser} from 'sentry/utils/useUser'; function QuickContextCommitRow({commit}: CommitRowProps) { - const user = ConfigStore.get('user'); + const user = useUser(); const isUser = user?.id === commit.author?.id; const hasPullRequestURL = commit.pullRequest?.externalUrl; const commitMessage = formatCommitMessage(commit.message); diff --git a/static/app/components/draggableTabs/draggableTabList.tsx b/static/app/components/draggableTabs/draggableTabList.tsx index 9675cc3e8c031..9fb4551b4cc71 100644 --- a/static/app/components/draggableTabs/draggableTabList.tsx +++ b/static/app/components/draggableTabs/draggableTabList.tsx @@ -29,9 +29,9 @@ import {t} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import {defined} from 'sentry/utils'; import {trackAnalytics} from 'sentry/utils/analytics'; -import {browserHistory} from 'sentry/utils/browserHistory'; import {useDimensions} from 'sentry/utils/useDimensions'; import {useDimensionsMultiple} from 'sentry/utils/useDimensionsMultiple'; +import {useNavigate} from 'sentry/utils/useNavigate'; import useOrganization from 'sentry/utils/useOrganization'; import type {DraggableTabListItemProps} from './item'; @@ -125,6 +125,7 @@ function Tabs({ state, className, onReorder, + onReorderComplete, tabVariant, setTabRefs, tabs, @@ -132,6 +133,7 @@ function Tabs({ hoveringKey, setHoveringKey, tempTabActive, + editingTabKey, }: { ariaProps: AriaTabListOptions; hoveringKey: Key | 'addView' | null; @@ -145,7 +147,9 @@ function Tabs({ tempTabActive: boolean; className?: string; disabled?: boolean; + editingTabKey?: string; onChange?: (key: string | number) => void; + onReorderComplete?: () => void; tabVariant?: BaseTabProps['variant']; value?: string | number; }) { @@ -228,10 +232,14 @@ function Tabs({ dragConstraints={dragConstraints} // dragConstraints are the bounds that the tab can be dragged within dragElastic={0} // Prevents the tab from being dragged outside of the dragConstraints (w/o this you can drag it outside but it'll spring back) dragTransition={{bounceStiffness: 400, bounceDamping: 40}} // Recovers spring behavior that's lost when using dragElastic=0 - transition={{delay: -0.1}} // Skips the first few frames of the animation that make the tab appear to shrink before growing + transition={{duration: 0.1}} layout + drag={item.key !== editingTabKey} // Disable dragging if the tab is being edited onDrag={() => setIsDragging(true)} - onDragEnd={() => setIsDragging(false)} + onDragEnd={() => { + setIsDragging(false); + onReorderComplete?.(); + }} onHoverStart={() => setHoveringKey(item.key)} onHoverEnd={() => setHoveringKey(null)} initial={false} @@ -245,7 +253,12 @@ function Tabs({ variant={tabVariant} /> - + ))} @@ -258,10 +271,12 @@ function BaseDraggableTabList({ className, outerWrapStyles, onReorder, + onReorderComplete, onAddView, tabVariant = 'filled', ...props }: BaseDraggableTabListProps) { + const navigate = useNavigate(); const [hoveringKey, setHoveringKey] = useState(null); const {rootProps, setTabListState} = useContext(TabsContext); const organization = useOrganization(); @@ -292,7 +307,7 @@ function BaseDraggableTabList({ organization, }); - browserHistory.push(linkTo); + navigate(linkTo); }, isDisabled: 
disabled, keyboardActivation, @@ -324,6 +339,7 @@ function BaseDraggableTabList({ state={state} className={className} onReorder={onReorder} + onReorderComplete={onReorderComplete} tabVariant={tabVariant} setTabRefs={setTabElements} tabs={persistentTabs} @@ -331,6 +347,7 @@ function BaseDraggableTabList({ hoveringKey={hoveringKey} setHoveringKey={setHoveringKey} tempTabActive={!!tempTab} + editingTabKey={props.editingTabKey} /> { onReorder: (newOrder: Node[]) => void; className?: string; + editingTabKey?: string; hideBorder?: boolean; onAddView?: React.MouseEventHandler; + onReorderComplete?: () => void; outerWrapStyles?: React.CSSProperties; showTempTab?: boolean; tabVariant?: BaseTabProps['variant']; diff --git a/static/app/components/eventOrGroupExtraDetails.tsx b/static/app/components/eventOrGroupExtraDetails.tsx index 8a078df2c06ad..c4745583a7ac5 100644 --- a/static/app/components/eventOrGroupExtraDetails.tsx +++ b/static/app/components/eventOrGroupExtraDetails.tsx @@ -1,8 +1,11 @@ +import {Fragment} from 'react'; +import {css} from '@emotion/react'; import styled from '@emotion/styled'; +import ErrorLevel from 'sentry/components/events/errorLevel'; import EventAnnotation from 'sentry/components/events/eventAnnotation'; import GlobalSelectionLink from 'sentry/components/globalSelectionLink'; -import InboxShortId from 'sentry/components/group/inboxBadges/shortId'; +import ShortId from 'sentry/components/group/inboxBadges/shortId'; import TimesTag from 'sentry/components/group/inboxBadges/timesTag'; import UnhandledTag from 'sentry/components/group/inboxBadges/unhandledTag'; import IssueReplayCount from 'sentry/components/group/issueReplayCount'; @@ -16,6 +19,9 @@ import {space} from 'sentry/styles/space'; import type {Event} from 'sentry/types/event'; import type {Group} from 'sentry/types/group'; import type {Organization} from 'sentry/types/organization'; +import {defined} from 'sentry/utils'; +import {eventTypeHasLogLevel, getTitle} from 'sentry/utils/events'; +import useReplayCountForIssues from 'sentry/utils/replayCount/useReplayCountForIssues'; import {projectCanLinkToReplay} from 'sentry/utils/replays/projectSupportsReplay'; import withOrganization from 'sentry/utils/withOrganization'; @@ -23,9 +29,36 @@ type Props = { data: Event | Group; organization: Organization; showAssignee?: boolean; + showLifetime?: boolean; }; -function EventOrGroupExtraDetails({data, showAssignee, organization}: Props) { +function Lifetime({ + firstSeen, + lastSeen, + lifetime, +}: { + firstSeen: string; + lastSeen: string; + lifetime?: Group['lifetime']; +}) { + if (!lifetime && !firstSeen && !lastSeen) { + return ; + } + + return ( + + ); +} + +function EventOrGroupExtraDetails({ + data, + showAssignee, + organization, + showLifetime = true, +}: Props) { const { id, lastSeen, @@ -42,76 +75,94 @@ function EventOrGroupExtraDetails({data, showAssignee, organization}: Props) { } = data as Group; const issuesPath = `/organizations/${organization.slug}/issues/`; + const {getReplayCountForIssue} = useReplayCountForIssues(); const showReplayCount = organization.features.includes('session-replay') && - projectCanLinkToReplay(organization, project); + projectCanLinkToReplay(organization, project) && + data.issueCategory && + !!getReplayCountForIssue(data.id, data.issueCategory); - return ( - - {shortId && ( - - ) - } - /> - )} - {isUnhandled && } - {!lifetime && !firstSeen && !lastSeen ? ( - - ) : ( - : null, + shortId ? ( + + } + /> + ) : null, + isUnhandled ? : null, + showLifetime ? 
( + + ) : null, + hasNewLayout && subtitle ? {subtitle} : null, + numComments > 0 ? ( + + - )} - {/* Always display comment count on inbox */} - {numComments > 0 && ( - - - {numComments} - - )} - {showReplayCount && } - {logger && ( - - - {logger} - - - )} - {annotations?.map((annotation, key) => ( - - {annotation.displayName} - - ))} - - {showAssignee && assignedTo && ( -
{tct('Assigned to [name]', {name: assignedTo.name})}
- )} + {numComments} +
+ ) : null, + showReplayCount ? : null, + logger ? ( + + + {logger} + + + ) : null, + ...(annotations?.map((annotation, key) => ( + + {annotation.displayName} + + )) ?? []), + showAssignee && assignedTo ? ( +
{tct('Assigned to [name]', {name: assignedTo.name})}
+ ) : null, + ].filter(defined); + + return ( + + {items.map((item, i) => { + if (!item) { + return null; + } + + if (!hasNewLayout) { + return {item}; + } + + return ( + + {item} + {i < items.length - 1 ? : null} + + ); + })} ); } -const GroupExtra = styled('div')` +const GroupExtra = styled('div')<{hasNewLayout: boolean}>` display: inline-grid; grid-auto-flow: column dense; - gap: ${space(1.5)}; + gap: ${p => (p.hasNewLayout ? space(0.75) : space(1.5))}; justify-content: start; align-items: center; color: ${p => p.theme.textColor}; @@ -121,15 +172,27 @@ const GroupExtra = styled('div')` white-space: nowrap; line-height: 1.2; - a { - color: inherit; - } + ${p => + p.hasNewLayout && + css` + color: ${p.theme.subText}; + & > a { + color: ${p.theme.subText}; + } + `} @media (min-width: ${p => p.theme.breakpoints.xlarge}) { line-height: 1; } `; +const Separator = styled('div')` + height: 10px; + width: 1px; + background-color: ${p => p.theme.innerBorder}; + border-radius: 1px; +`; + const ShadowlessProjectBadge = styled(ProjectBadge)` * > img { box-shadow: none; @@ -144,17 +207,35 @@ const CommentsLink = styled(Link)` color: ${p => p.theme.textColor}; `; -const AnnotationNoMargin = styled(EventAnnotation)` +const AnnotationNoMargin = styled(EventAnnotation)<{hasNewLayout: boolean}>` margin-left: 0; padding-left: 0; border-left: none; - & > a { - color: ${p => p.theme.textColor}; - } + + ${p => + !p.hasNewLayout && + css` + & > a { + color: ${p.theme.textColor}; + } + `} + + ${p => + p.hasNewLayout && + css` + & > a:hover { + color: ${p.theme.linkHoverColor}; + } + `} `; const LoggerAnnotation = styled(AnnotationNoMargin)` color: ${p => p.theme.textColor}; `; +const Location = styled('div')` + font-size: ${p => p.theme.fontSizeSmall}; + color: ${p => p.theme.subText}; +`; + export default withOrganization(EventOrGroupExtraDetails); diff --git a/static/app/components/eventOrGroupHeader.tsx b/static/app/components/eventOrGroupHeader.tsx index 8e5a88f9aa417..bf2046d4ad527 100644 --- a/static/app/components/eventOrGroupHeader.tsx +++ b/static/app/components/eventOrGroupHeader.tsx @@ -15,6 +15,7 @@ import type {Organization} from 'sentry/types/organization'; import {getLocation, getMessage, isTombstone} from 'sentry/utils/events'; import {useLocation} from 'sentry/utils/useLocation'; import withOrganization from 'sentry/utils/withOrganization'; +import {createIssueLink} from 'sentry/views/issueList/utils'; import EventTitleError from './eventTitleError'; @@ -46,6 +47,8 @@ function EventOrGroupHeader({ }: EventOrGroupHeaderProps) { const location = useLocation(); + const hasNewLayout = organization.features.includes('issue-stream-table-layout'); + function getTitleChildren() { const {isBookmarked, hasSeen} = data as Group; return ( @@ -69,8 +72,7 @@ function EventOrGroupHeader({ } function getTitle() { - const {id, status} = data as Group; - const {eventID: latestEventId, groupID} = data as Event; + const {status} = data as Group; const commonEleProps = { 'data-test-id': status === 'resolved' ? 'resolved-issue' : null, @@ -82,32 +84,18 @@ function EventOrGroupHeader({ ); } - // If we have passed in a custom event ID, use it; otherwise use default - const finalEventId = eventId ?? latestEventId; - return ( {getTitleChildren()} @@ -120,13 +108,16 @@ function EventOrGroupHeader({ return (
{getTitle()} - {eventLocation && {eventLocation}} - + {eventLocation && !hasNewLayout ? {eventLocation} : null} + {!hasNewLayout ? ( + + ) : null}
); } @@ -140,6 +131,7 @@ const truncateStyles = css` const Title = styled('div')` margin-bottom: ${space(0.25)}; + font-size: ${p => p.theme.fontSizeLarge}; & em { font-size: ${p => p.theme.fontSizeMedium}; font-style: normal; @@ -174,6 +166,7 @@ function Location(props) { const StyledEventMessage = styled(EventMessage)` margin: 0 0 5px; gap: ${space(0.5)}; + font-size: inherit; `; const IconWrapper = styled('span')` diff --git a/static/app/components/eventOrGroupTitle.tsx b/static/app/components/eventOrGroupTitle.tsx index 22bb487b7098b..d3a0cc4ea74b0 100644 --- a/static/app/components/eventOrGroupTitle.tsx +++ b/static/app/components/eventOrGroupTitle.tsx @@ -3,7 +3,8 @@ import styled from '@emotion/styled'; import type {Event} from 'sentry/types/event'; import type {BaseGroup, GroupTombstoneHelper} from 'sentry/types/group'; -import {getTitle, isTombstone} from 'sentry/utils/events'; +import {getMessage, getTitle, isTombstone} from 'sentry/utils/events'; +import useOrganization from 'sentry/utils/useOrganization'; import GroupPreviewTooltip from './groupPreviewTooltip'; @@ -20,13 +21,19 @@ function EventOrGroupTitle({ className, query, }: EventOrGroupTitleProps) { + const organization = useOrganization({allowNull: true}); const {id, groupID} = data as Event; const {title, subtitle} = getTitle(data); const titleLabel = title ?? ''; + const hasNewLayout = + organization?.features.includes('issue-stream-table-layout') ?? false; + + const secondaryTitle = hasNewLayout ? getMessage(data) : subtitle; + return ( - + {!isTombstone(data) && withStackTracePreview ? ( - {titleLabel} + {hasNewLayout ? ( + {titleLabel} + ) : ( + titleLabel + )} ) : ( titleLabel )} - {subtitle && ( + {secondaryTitle && ( - {subtitle} + {hasNewLayout ? ( + {secondaryTitle} + ) : ( + {secondaryTitle} + )}
)} @@ -69,9 +84,23 @@ const Subtitle = styled('em')` height: 100%; `; -const Wrapper = styled('span')` - font-size: ${p => p.theme.fontSizeLarge}; +const Message = styled('span')` + ${p => p.theme.overflowEllipsis}; + display: inline-block; + height: 100%; + color: ${p => p.theme.textColor}; + font-weight: ${p => p.theme.fontWeightNormal}; +`; + +const Title = styled('span')` + ${p => p.theme.overflowEllipsis}; + display: inline-block; + color: ${p => p.theme.textColor}; +`; + +const Wrapper = styled('span')<{hasNewLayout: boolean}>` display: inline-grid; grid-template-columns: auto max-content 1fr max-content; - align-items: baseline; + + align-items: ${p => (p.hasNewLayout ? 'normal' : 'baseline')}; `; diff --git a/static/app/components/events/aiSuggestedSolution/useOpenAISuggestionLocalStorage.tsx b/static/app/components/events/aiSuggestedSolution/useOpenAISuggestionLocalStorage.tsx index 7118c8498d49f..96b7bc66486db 100644 --- a/static/app/components/events/aiSuggestedSolution/useOpenAISuggestionLocalStorage.tsx +++ b/static/app/components/events/aiSuggestedSolution/useOpenAISuggestionLocalStorage.tsx @@ -1,7 +1,7 @@ import {useCallback} from 'react'; -import ConfigStore from 'sentry/stores/configStore'; import {useLocalStorageState} from 'sentry/utils/useLocalStorageState'; +import {useUser} from 'sentry/utils/useUser'; type LocalState = { individualConsent: boolean; @@ -11,7 +11,7 @@ export function useOpenAISuggestionLocalStorage(): [ LocalState, (newState: Partial) => void, ] { - const user = ConfigStore.get('user'); + const user = useUser(); const [localStorageState, setLocalStorageState] = useLocalStorageState( `open-ai-suggestion:${user.id}`, diff --git a/static/app/components/events/autofix/autofixBanner.tsx b/static/app/components/events/autofix/autofixBanner.tsx index efb45193fbc74..3d1199e0b80de 100644 --- a/static/app/components/events/autofix/autofixBanner.tsx +++ b/static/app/components/events/autofix/autofixBanner.tsx @@ -41,7 +41,10 @@ function SuccessfulSetup({ // we don't reopen it immediately, and instead let the button handle this itself. 
shouldCloseOnInteractOutside: element => { const viewAllButton = openButtonRef.current; - if (viewAllButton?.contains(element)) { + if ( + viewAllButton?.contains(element) || + document.getElementById('sentry-feedback')?.contains(element) + ) { return false; } return true; diff --git a/static/app/components/events/autofix/autofixChanges.spec.tsx b/static/app/components/events/autofix/autofixChanges.spec.tsx index 26deda563c156..53c8689bc81a6 100644 --- a/static/app/components/events/autofix/autofixChanges.spec.tsx +++ b/static/app/components/events/autofix/autofixChanges.spec.tsx @@ -18,6 +18,7 @@ import { describe('AutofixChanges', function () { const defaultProps = { groupId: '1', + runId: '1', onRetry: jest.fn(), step: AutofixStepFixture({ type: AutofixStepType.CHANGES, diff --git a/static/app/components/events/autofix/autofixChanges.tsx b/static/app/components/events/autofix/autofixChanges.tsx index 1bce114ece127..e804438296065 100644 --- a/static/app/components/events/autofix/autofixChanges.tsx +++ b/static/app/components/events/autofix/autofixChanges.tsx @@ -29,6 +29,7 @@ import useApi from 'sentry/utils/useApi'; type AutofixChangesProps = { groupId: string; onRetry: () => void; + runId: string; step: AutofixChangesStep; }; @@ -168,9 +169,11 @@ function PullRequestLinkOrCreateButton({ function AutofixRepoChange({ change, groupId, + runId, }: { change: AutofixCodebaseChange; groupId: string; + runId: string; }) { return ( @@ -181,7 +184,12 @@ function AutofixRepoChange({ - + ); } @@ -193,7 +201,7 @@ const cardAnimationProps: AnimationProps = { transition: testableTransition({duration: 0.3}), }; -export function AutofixChanges({step, onRetry, groupId}: AutofixChangesProps) { +export function AutofixChanges({step, onRetry, groupId, runId}: AutofixChangesProps) { const data = useAutofixData({groupId}); if (step.status === 'ERROR' || data?.status === 'ERROR') { @@ -242,7 +250,7 @@ export function AutofixChanges({step, onRetry, groupId}: AutofixChangesProps) { {step.changes.map((change, i) => ( {i > 0 && } - + ))} @@ -267,7 +275,7 @@ const ChangesContainer = styled('div')` border: 1px solid ${p => p.theme.innerBorder}; border-radius: ${p => p.theme.borderRadius}; overflow: hidden; - box-shadow: ${p => p.theme.dropShadowHeavy}; + box-shadow: ${p => p.theme.dropShadowMedium}; padding-left: ${space(2)}; padding-right: ${space(2)}; padding-top: ${space(1)}; diff --git a/static/app/components/events/autofix/autofixDiff.spec.tsx b/static/app/components/events/autofix/autofixDiff.spec.tsx index 080525059d5d7..2d26c5da597d7 100644 --- a/static/app/components/events/autofix/autofixDiff.spec.tsx +++ b/static/app/components/events/autofix/autofixDiff.spec.tsx @@ -1,15 +1,31 @@ import {AutofixDiffFilePatch} from 'sentry-fixture/autofixDiffFilePatch'; -import {render, screen, userEvent, within} from 'sentry-test/reactTestingLibrary'; +import { + render, + screen, + userEvent, + waitFor, + within, +} from 'sentry-test/reactTestingLibrary'; import {textWithMarkupMatcher} from 'sentry-test/utils'; +import {addErrorMessage} from 'sentry/actionCreators/indicator'; import {AutofixDiff} from 'sentry/components/events/autofix/autofixDiff'; +jest.mock('sentry/actionCreators/indicator'); + describe('AutofixDiff', function () { const defaultProps = { diff: [AutofixDiffFilePatch()], + groupId: '1', + runId: '1', }; + beforeEach(() => { + MockApiClient.clearMockResponses(); + (addErrorMessage as jest.Mock).mockClear(); + }); + it('displays a modified file diff correctly', function () { render(); @@ -68,4 +84,71 @@ 
describe('AutofixDiff', function () { await userEvent.click(screen.getByRole('button', {name: 'Toggle file diff'})); expect(screen.getAllByTestId('line-context')).toHaveLength(6); }); + + it('can edit changes', async function () { + render(); + + await userEvent.click(screen.getByRole('button', {name: 'Edit changes'})); + + expect( + screen.getByText('Editing src/sentry/processing/backpressure/memory.py') + ).toBeInTheDocument(); + + const textarea = screen.getByRole('textbox'); + await userEvent.clear(textarea); + await userEvent.type(textarea, 'New content'); + + MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + }); + + await userEvent.click(screen.getByRole('button', {name: 'Save'})); + + await waitFor(() => { + expect( + screen.queryByText('Editing src/sentry/processing/backpressure/memory.py') + ).not.toBeInTheDocument(); + }); + }); + + it('can reject changes', async function () { + render(); + + MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + }); + + await userEvent.click(screen.getByRole('button', {name: 'Reject changes'})); + + await waitFor(() => { + expect(screen.queryByTestId('line-added')).not.toBeInTheDocument(); + expect(screen.queryByTestId('line-removed')).not.toBeInTheDocument(); + }); + }); + + it('shows error message on failed edit', async function () { + render(); + + await userEvent.click(screen.getByRole('button', {name: 'Edit changes'})); + + const textarea = screen.getByRole('textbox'); + await userEvent.clear(textarea); + await userEvent.type(textarea, 'New content'); + + MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + statusCode: 500, + }); + + await userEvent.click(screen.getByRole('button', {name: 'Save'})); + + await waitFor(() => { + expect(addErrorMessage).toHaveBeenCalledWith( + 'Something went wrong when updating changes.' 
+ ); + }); + }); }); diff --git a/static/app/components/events/autofix/autofixDiff.tsx b/static/app/components/events/autofix/autofixDiff.tsx index 2f1dd00ac6832..cddd855b4d805 100644 --- a/static/app/components/events/autofix/autofixDiff.tsx +++ b/static/app/components/events/autofix/autofixDiff.tsx @@ -1,20 +1,27 @@ -import {Fragment, useMemo, useState} from 'react'; +import {Fragment, useEffect, useMemo, useRef, useState} from 'react'; import styled from '@emotion/styled'; import {type Change, diffWords} from 'diff'; +import {addErrorMessage} from 'sentry/actionCreators/indicator'; import {Button} from 'sentry/components/button'; import { type DiffLine, DiffLineType, type FilePatch, } from 'sentry/components/events/autofix/types'; +import TextArea from 'sentry/components/forms/controls/textarea'; import InteractionStateLayer from 'sentry/components/interactionStateLayer'; -import {IconChevron} from 'sentry/icons'; +import {IconChevron, IconClose, IconDelete, IconEdit} from 'sentry/icons'; import {t} from 'sentry/locale'; import {space} from 'sentry/styles/space'; +import {useMutation} from 'sentry/utils/queryClient'; +import useApi from 'sentry/utils/useApi'; type AutofixDiffProps = { diff: FilePatch[]; + groupId: string; + runId: string; + repoId?: string; }; interface DiffLineWithChanges extends DiffLine { @@ -91,17 +98,288 @@ function HunkHeader({lines, sectionHeader}: {lines: DiffLine[]; sectionHeader: s ); } -function DiffHunkContent({lines, header}: {header: string; lines: DiffLine[]}) { - const linesWithChanges = useMemo(() => { - return addChangesToDiffLines(lines); +function useUpdateHunk({groupId, runId}: {groupId: string; runId: string}) { + const api = useApi({persistInFlight: true}); + + return useMutation({ + mutationFn: (params: { + fileName: string; + hunkIndex: number; + lines: DiffLine[]; + repoId?: string; + }) => { + return api.requestPromise(`/issues/${groupId}/autofix/update/`, { + method: 'POST', + data: { + run_id: runId, + payload: { + type: 'update_code_change', + repo_id: params.repoId ?? 
null, + hunk_index: params.hunkIndex, + lines: params.lines, + file_path: params.fileName, + }, + }, + }); + }, + onError: () => { + addErrorMessage(t('Something went wrong when updating changes.')); + }, + }); +} + +function DiffHunkContent({ + groupId, + runId, + repoId, + hunkIndex, + lines, + header, + fileName, +}: { + fileName: string; + groupId: string; + header: string; + hunkIndex: number; + lines: DiffLine[]; + runId: string; + repoId?: string; +}) { + const [linesWithChanges, setLinesWithChanges] = useState([]); + + useEffect(() => { + setLinesWithChanges(addChangesToDiffLines(lines)); }, [lines]); + const [editingGroup, setEditingGroup] = useState(null); + const [editedContent, setEditedContent] = useState(''); + const [editedLines, setEditedLines] = useState([]); + const overlayRef = useRef(null); + + useEffect(() => { + function handleClickOutside(event: MouseEvent) { + if (overlayRef.current && !overlayRef.current.contains(event.target as Node)) { + setEditingGroup(null); + setEditedContent(''); + } + } + + document.addEventListener('mousedown', handleClickOutside); + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, []); + + const lineGroups = useMemo(() => { + const groups: {end: number; start: number; type: 'change' | DiffLineType}[] = []; + let currentGroup: (typeof groups)[number] | null = null; + + linesWithChanges.forEach((line, index) => { + if (line.line_type !== DiffLineType.CONTEXT) { + if (!currentGroup) { + currentGroup = {start: index, end: index, type: 'change'}; + } else if (currentGroup.type === 'change') { + currentGroup.end = index; + } else { + groups.push(currentGroup); + currentGroup = {start: index, end: index, type: 'change'}; + } + } else if (currentGroup) { + groups.push(currentGroup); + currentGroup = null; + } + }); + + if (currentGroup) { + groups.push(currentGroup); + } + + return groups; + }, [linesWithChanges]); + + const handleEditClick = (index: number) => { + const group = lineGroups.find(g => g.start === index); + if (group) { + const content = linesWithChanges + .slice(group.start, group.end + 1) + .filter(line => line.line_type === DiffLineType.ADDED) + .map(line => line.value) + .join(''); + const splitLines = content.split('\n'); + if (splitLines[splitLines.length - 1] === '') { + splitLines.pop(); + } + setEditedLines(splitLines); + if (content === '\n') { + setEditedContent(''); + } else { + setEditedContent(content.endsWith('\n') ? content.slice(0, -1) : content); + } + setEditingGroup(index); + } + }; + + const handleTextAreaChange = (e: React.ChangeEvent) => { + const newContent = e.target.value; + setEditedContent(newContent); + setEditedLines(newContent.split('\n')); + }; + + const updateHunk = useUpdateHunk({groupId, runId}); + const handleSaveEdit = () => { + if (editingGroup === null) { + return; + } + const group = lineGroups.find(g => g.start === editingGroup); + if (!group) { + return; + } + + let lastSourceLineNo = 0; + let lastTargetLineNo = 0; + let lastDiffLineNo = 0; + + const updatedLines = linesWithChanges + .map((line, index) => { + if (index < group.start) { + lastSourceLineNo = line.source_line_no ?? lastSourceLineNo; + lastTargetLineNo = line.target_line_no ?? lastTargetLineNo; + lastDiffLineNo = line.diff_line_no ?? 
lastDiffLineNo; + } + if (index >= group.start && index <= group.end) { + if (line.line_type === DiffLineType.ADDED) { + return null; // Remove existing added lines + } + if (line.line_type === DiffLineType.REMOVED) { + lastSourceLineNo = line.source_line_no ?? lastSourceLineNo; + } + return line; // Keep other lines (removed and context) as is + } + return line; + }) + .filter((line): line is DiffLine => line !== null); + + // Insert new added lines + const newAddedLines: DiffLine[] = editedContent.split('\n').map((content, i) => { + lastDiffLineNo++; + lastTargetLineNo++; + return { + diff_line_no: lastDiffLineNo, + source_line_no: null, + target_line_no: lastTargetLineNo, + line_type: DiffLineType.ADDED, + value: content + (i === editedContent.split('\n').length - 1 ? '' : '\n'), + }; + }); + + // Find the insertion point (after the last removed line or at the start of the group) + const insertionIndex = updatedLines.findIndex( + (line, index) => index >= group.start && line.line_type !== DiffLineType.REMOVED + ); + + updatedLines.splice( + insertionIndex === -1 ? group.start : insertionIndex, + 0, + ...newAddedLines + ); + + // Update diff_line_no for all lines after the insertion + for (let i = insertionIndex + newAddedLines.length; i < updatedLines.length; i++) { + updatedLines[i].diff_line_no = ++lastDiffLineNo; + } + + updateHunk.mutate({hunkIndex, lines: updatedLines, repoId, fileName}); + setLinesWithChanges(addChangesToDiffLines(updatedLines)); + setEditingGroup(null); + setEditedContent(''); + }; + + const handleCancelEdit = () => { + setEditingGroup(null); + setEditedContent(''); + }; + + const rejectChanges = (index: number) => { + const group = lineGroups.find(g => g.start === index); + if (!group) { + return; + } + + const updatedLines = linesWithChanges + .map((line, i) => { + if (i >= group.start && i <= group.end) { + if (line.line_type === DiffLineType.ADDED) { + return null; // Remove added lines + } + if (line.line_type === DiffLineType.REMOVED) { + return {...line, line_type: DiffLineType.CONTEXT}; // Convert removed lines to context + } + } + return line; + }) + .filter((line): line is DiffLine => line !== null); + + updateHunk.mutate({hunkIndex, lines: updatedLines, repoId, fileName}); + setLinesWithChanges(addChangesToDiffLines(updatedLines)); + }; + + const getStartLineNumber = (index: number, lineType: DiffLineType) => { + const line = linesWithChanges[index]; + if (lineType === DiffLineType.REMOVED) { + return line.source_line_no; + } + if (lineType === DiffLineType.ADDED) { + // Find the first non-null target_line_no + for (let i = index; i < linesWithChanges.length; i++) { + if (linesWithChanges[i].target_line_no !== null) { + return linesWithChanges[i].target_line_no; + } + } + } + return null; + }; + + const handleClearChanges = () => { + setEditedContent(''); + setEditedLines([]); + }; + + const getDeletedLineTitle = (index: number) => { + return t( + '%s deleted line%s%s', + linesWithChanges + .slice(index, lineGroups.find(g => g.start === index)?.end! + 1) + .filter(l => l.line_type === DiffLineType.REMOVED).length, + linesWithChanges + .slice(index, lineGroups.find(g => g.start === index)?.end! + 1) + .filter(l => l.line_type === DiffLineType.REMOVED).length === 1 + ? '' + : 's', + linesWithChanges + .slice(index, lineGroups.find(g => g.start === index)?.end! + 1) + .filter(l => l.line_type === DiffLineType.REMOVED).length > 0 + ? 
t(' from line %s', getStartLineNumber(index, DiffLineType.REMOVED)) + : '' + ); + }; + + const getNewLineTitle = (index: number) => { + return t( + '%s new line%s%s', + editedLines.length, + editedLines.length === 1 ? '' : 's', + editedLines.length > 0 + ? t(' from line %s', getStartLineNumber(index, DiffLineType.ADDED)) + : '' + ); + }; + return ( - {linesWithChanges.map(line => ( - + {linesWithChanges.map((line, index) => ( + {line.source_line_no} {line.target_line_no} + {lineGroups.some(group => index === group.start) && ( + + } + aria-label={t('Edit changes')} + title={t('Edit')} + onClick={() => handleEditClick(index)} + /> + } + aria-label={t('Reject changes')} + title={t('Reject')} + onClick={() => rejectChanges(index)} + /> + + )} + {editingGroup === index && ( + + {t('Editing %s', fileName)} + {getDeletedLineTitle(index)} + {linesWithChanges + .slice(index, lineGroups.find(g => g.start === index)?.end! + 1) + .filter(l => l.line_type === DiffLineType.REMOVED).length > 0 ? ( + + {linesWithChanges + .slice(index, lineGroups.find(g => g.start === index)?.end! + 1) + .filter(l => l.line_type === DiffLineType.REMOVED) + .map((l, i) => ( + {l.value} + ))} + + ) : ( + {t('No lines are being deleted.')} + )} + {getNewLineTitle(index)} + + + } + title={t('Clear all new lines')} + /> + + + + + + + )} ))} @@ -116,7 +459,17 @@ function DiffHunkContent({lines, header}: {header: string; lines: DiffLine[]}) { ); } -function FileDiff({file}: {file: FilePatch}) { +function FileDiff({ + file, + groupId, + runId, + repoId, +}: { + file: FilePatch; + groupId: string; + runId: string; + repoId?: string; +}) { const [isExpanded, setIsExpanded] = useState(true); return ( @@ -138,9 +491,18 @@ function FileDiff({file}: {file: FilePatch}) { {isExpanded && ( - {file.hunks.map(({section_header, source_start, lines}) => { + {file.hunks.map(({section_header, source_start, lines}, index) => { return ( - + ); })} @@ -149,7 +511,7 @@ function FileDiff({file}: {file: FilePatch}) { ); } -export function AutofixDiff({diff}: AutofixDiffProps) { +export function AutofixDiff({diff, groupId, runId, repoId}: AutofixDiffProps) { if (!diff || !diff.length) { return null; } @@ -157,7 +519,13 @@ export function AutofixDiff({diff}: AutofixDiffProps) { return ( {diff.map(file => ( - + ))} ); @@ -248,7 +616,10 @@ const LineNumber = styled('div')<{lineType: DiffLineType}>` const DiffContent = styled('div')<{lineType: DiffLineType}>` position: relative; padding-left: ${space(4)}; + padding-right: ${space(4)}; white-space: pre-wrap; + word-break: break-all; + word-wrap: break-word; ${p => p.lineType === DiffLineType.ADDED && @@ -275,3 +646,101 @@ const CodeDiff = styled('span')<{added?: boolean; removed?: boolean}>` ${p => p.added && `background-color: ${p.theme.diff.added};`}; ${p => p.removed && `background-color: ${p.theme.diff.removed};`}; `; + +const ButtonGroup = styled('div')` + position: absolute; + top: 0; + right: ${space(0.25)}; + display: flex; + opacity: 0; + transition: opacity 0.1s ease-in-out; + + ${DiffContent}:hover & { + opacity: 1; + } +`; + +const ActionButton = styled(Button)` + margin-left: ${space(0.5)}; + font-family: ${p => p.theme.text.family}; +`; + +const EditOverlay = styled('div')` + position: fixed; + bottom: 200px; + right: ${space(2)}; + left: calc(50% + ${space(2)}); + background: ${p => p.theme.backgroundElevated}; + border: 1px solid ${p => p.theme.border}; + border-radius: ${p => p.theme.borderRadius}; + box-shadow: ${p => p.theme.dropShadowHeavy}; + padding: ${space(2)}; + z-index: 1; +`; + 
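The editing flow above rebuilds the hunk as plain data: handleSaveEdit strips the previous ADDED lines, splices in the retyped ones, and renumbers diff_line_no, while rejectChanges instead drops the additions and demotes REMOVED lines back to context. A sketch of that reject transformation in isolation, using the DiffLine types this file already imports (the helper name is illustrative, not part of this change):

import {
  type DiffLine,
  DiffLineType,
} from 'sentry/components/events/autofix/types';

function rejectHunkGroup(
  lines: DiffLine[],
  group: {end: number; start: number}
): DiffLine[] {
  return lines
    .map((line, i) => {
      // Lines outside the selected group are left untouched
      if (i < group.start || i > group.end) {
        return line;
      }
      // Drop the suggested additions entirely
      if (line.line_type === DiffLineType.ADDED) {
        return null;
      }
      // Restore deletions by demoting them back to unchanged context
      if (line.line_type === DiffLineType.REMOVED) {
        return {...line, line_type: DiffLineType.CONTEXT};
      }
      return line;
    })
    .filter((line): line is DiffLine => line !== null);
}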
+const OverlayButtonGroup = styled('div')` + display: flex; + justify-content: flex-end; + gap: ${space(1)}; + margin-top: ${space(1)}; + font-family: ${p => p.theme.text.family}; +`; + +const RemovedLines = styled('div')` + margin-bottom: ${space(1)}; + font-family: ${p => p.theme.text.familyMono}; + border-radius: ${p => p.theme.borderRadius}; + overflow: hidden; +`; + +const RemovedLine = styled('div')` + background-color: ${p => p.theme.diff.removedRow}; + color: ${p => p.theme.textColor}; + padding: ${space(0.25)} ${space(0.5)}; +`; + +const StyledTextArea = styled(TextArea)` + font-family: ${p => p.theme.text.familyMono}; + font-size: ${p => p.theme.fontSizeSmall}; + background-color: ${p => p.theme.diff.addedRow}; + border-color: ${p => p.theme.border}; + position: relative; + + &:focus { + border-color: ${p => p.theme.focusBorder}; + box-shadow: inset 0 0 0 1px ${p => p.theme.focusBorder}; + } +`; + +const ClearButton = styled(Button)` + position: absolute; + top: -${space(1)}; + right: -${space(1)}; + z-index: 1; +`; + +const TextAreaWrapper = styled('div')` + position: relative; +`; + +const SectionTitle = styled('p')` + margin: ${space(1)} 0; + font-size: ${p => p.theme.fontSizeMedium}; + font-weight: bold; + color: ${p => p.theme.textColor}; + font-family: ${p => p.theme.text.family}; +`; + +const NoChangesMessage = styled('p')` + margin: ${space(1)} 0; + color: ${p => p.theme.subText}; + font-family: ${p => p.theme.text.family}; +`; + +const OverlayTitle = styled('h3')` + margin: 0 0 ${space(2)} 0; + font-size: ${p => p.theme.fontSizeMedium}; + font-weight: bold; + color: ${p => p.theme.textColor}; + font-family: ${p => p.theme.text.family}; +`; diff --git a/static/app/components/events/autofix/autofixDrawer.spec.tsx b/static/app/components/events/autofix/autofixDrawer.spec.tsx index 9c00a410d1e5e..d6851c67dcef7 100644 --- a/static/app/components/events/autofix/autofixDrawer.spec.tsx +++ b/static/app/components/events/autofix/autofixDrawer.spec.tsx @@ -34,7 +34,7 @@ describe('AutofixDrawer', () => { expect(screen.getByRole('heading', {name: 'Autofix'})).toBeInTheDocument(); - expect(screen.getByText('Autofix is ready to start')).toBeInTheDocument(); + expect(screen.getByText('Ready to start')).toBeInTheDocument(); const startButton = screen.getByRole('button', {name: 'Start'}); expect(startButton).toBeInTheDocument(); @@ -88,7 +88,7 @@ describe('AutofixDrawer', () => { await userEvent.click(startOverButton); await waitFor(() => { - expect(screen.getByText('Autofix is ready to start')).toBeInTheDocument(); + expect(screen.getByText('Ready to start')).toBeInTheDocument(); expect(screen.getByRole('button', {name: 'Start'})).toBeInTheDocument(); }); }); diff --git a/static/app/components/events/autofix/autofixDrawer.tsx b/static/app/components/events/autofix/autofixDrawer.tsx index ecdb157262db8..e614146e8a36c 100644 --- a/static/app/components/events/autofix/autofixDrawer.tsx +++ b/static/app/components/events/autofix/autofixDrawer.tsx @@ -1,7 +1,7 @@ import {useState} from 'react'; import styled from '@emotion/styled'; -import bannerImage from 'sentry-images/spot/ai-suggestion-banner.svg'; +import bannerImage from 'sentry-images/insights/module-upsells/insights-module-upsell.svg'; import ProjectAvatar from 'sentry/components/avatar/projectAvatar'; import {Breadcrumbs as NavigationBreadcrumbs} from 'sentry/components/breadcrumbs'; @@ -34,26 +34,27 @@ function AutofixStartBox({onSend}: AutofixStartBoxProps) { return ( -
Autofix is ready to start
+ + + +
Ready to start

We'll begin by trying to figure out the root cause, analyzing the issue details and the codebase. If you have any other helpful context on the issue before we begin, you can share that below.

- setMessage(e.target.value)} - placeholder={'Provide any extra context here...'} - /> -
- - - - + + setMessage(e.target.value)} + placeholder={'Provide any extra context here...'} + /> + +
); } @@ -125,8 +126,13 @@ export function AutofixDrawer({group, project, event}: AutofixDrawerProps) { ); } +const Row = styled('div')` + display: flex; + gap: ${space(1)}; +`; + const IllustrationContainer = styled('div')` - padding-top: ${space(4)}; + padding: ${space(4)} 0 ${space(4)} 0; `; const Illustration = styled('img')` @@ -137,7 +143,6 @@ const StartBox = styled('div')` padding: ${space(2)}; display: flex; flex-direction: column; - justify-content: center; height: 100%; width: 100%; `; diff --git a/static/app/components/events/autofix/autofixFeedback.tsx b/static/app/components/events/autofix/autofixFeedback.tsx index fc6ceda7c048b..1d6010fed3b06 100644 --- a/static/app/components/events/autofix/autofixFeedback.tsx +++ b/static/app/components/events/autofix/autofixFeedback.tsx @@ -1,29 +1,33 @@ import {useRef} from 'react'; import {Button} from 'sentry/components/button'; -import useFeedbackWidget from 'sentry/components/feedback/widget/useFeedbackWidget'; import {IconMegaphone} from 'sentry/icons/iconMegaphone'; import {t} from 'sentry/locale'; +import {useFeedbackForm} from 'sentry/utils/useFeedbackForm'; function AutofixFeedback() { const buttonRef = useRef(null); - const feedback = useFeedbackWidget({ - buttonRef, - messagePlaceholder: t('How can we make Autofix better for you?'), - optionOverrides: { - tags: { - ['feedback.source']: 'issue_details_ai_autofix', - ['feedback.owner']: 'ml-ai', - }, - }, - }); + const openForm = useFeedbackForm(); - if (!feedback) { + if (!openForm) { return null; } return ( - ); diff --git a/static/app/components/events/autofix/autofixInsightCards.spec.tsx b/static/app/components/events/autofix/autofixInsightCards.spec.tsx index b3511f1cb3248..989f755ce0689 100644 --- a/static/app/components/events/autofix/autofixInsightCards.spec.tsx +++ b/static/app/components/events/autofix/autofixInsightCards.spec.tsx @@ -1,19 +1,55 @@ -import {render, screen, userEvent} from 'sentry-test/reactTestingLibrary'; +import {render, screen, userEvent, waitFor} from 'sentry-test/reactTestingLibrary'; +import {addErrorMessage, addSuccessMessage} from 'sentry/actionCreators/indicator'; import AutofixInsightCards from 'sentry/components/events/autofix/autofixInsightCards'; +import type {AutofixInsight} from 'sentry/components/events/autofix/types'; jest.mock('sentry/utils/marked', () => ({ singleLineRenderer: jest.fn(text => text), })); -const sampleInsights = [ +jest.mock('sentry/actionCreators/indicator'); + +const sampleInsights: AutofixInsight[] = [ { - breadcrumb_context: [], - codebase_context: [], + breadcrumb_context: [ + { + body: 'Breadcrumb body', + category: 'ui', + level: 'info', + data_as_json: '{"testData": "testValue"}', + type: 'info', + }, + ], + codebase_context: [ + { + snippet: 'console.log("Hello, World!");', + repo_name: 'sample-repo', + file_path: 'src/index.js', + }, + ], error_message_context: ['Error message 1'], insight: 'Sample insight 1', justification: 'Sample justification 1', + stacktrace_context: [ + { + code_snippet: 'function() { throw new Error("Test error"); }', + repo_name: 'sample-repo', + file_name: 'src/error.js', + vars_as_json: '{"testVar": "testValue"}', + col_no: 1, + line_no: 1, + function: 'testFunction', + }, + ], + }, + { + insight: 'User message', + justification: 'USER', + breadcrumb_context: [], stacktrace_context: [], + codebase_context: [], + error_message_context: [], }, ]; @@ -27,6 +63,12 @@ const sampleRepos = [ }, ]; +beforeEach(() => { + (addSuccessMessage as jest.Mock).mockClear(); + (addErrorMessage as 
jest.Mock).mockClear(); + MockApiClient.clearMockResponses(); +}); + describe('AutofixInsightCards', () => { const renderComponent = (props = {}) => { return render( @@ -35,6 +77,9 @@ describe('AutofixInsightCards', () => { repos={sampleRepos} hasStepAbove={false} hasStepBelow={false} + groupId="1" + runId="1" + stepIndex={0} {...props} /> ); @@ -43,13 +88,179 @@ describe('AutofixInsightCards', () => { it('renders insights correctly', () => { renderComponent(); expect(screen.getByText('Sample insight 1')).toBeInTheDocument(); + expect(screen.getByText('User message')).toBeInTheDocument(); }); - it('expands context when clicked', async () => { + it('renders breadcrumb context correctly', async () => { renderComponent(); const contextButton = screen.getByText('Context'); + await userEvent.click(contextButton); + expect(screen.getByText('Breadcrumb body')).toBeInTheDocument(); + expect(screen.getByText('info')).toBeInTheDocument(); + }); + + it('renders codebase context correctly', async () => { + renderComponent(); + const contextButton = screen.getByText('Context'); + await userEvent.click(contextButton); + expect(screen.getByText('console.log("Hello, World!");')).toBeInTheDocument(); + expect(screen.getByText('src/index.js')).toBeInTheDocument(); + }); + + it('renders stacktrace context correctly', async () => { + renderComponent(); + const contextButton = screen.getByText('Context'); + await userEvent.click(contextButton); + expect( + screen.getByText('function() { throw new Error("Test error"); }') + ).toBeInTheDocument(); + expect(screen.getByText('src/error.js')).toBeInTheDocument(); + expect(screen.getByText('testVar')).toBeInTheDocument(); + }); + + it('renders user messages differently', () => { + renderComponent(); + const userMessage = screen.getByText('User message'); + expect(userMessage.closest('div')).toHaveStyle('color: inherit'); + }); + + it('renders "No insights yet" message when there are no insights', () => { + renderComponent({insights: []}); + expect( + screen.getByText(/Autofix will share important conclusions here/) + ).toBeInTheDocument(); + }); + + it('toggles context expansion correctly', async () => { + renderComponent(); + const contextButton = screen.getByText('Context'); + await userEvent.click(contextButton); expect(screen.getByText('Sample justification 1')).toBeInTheDocument(); - expect(screen.getByText('`Error message 1`')).toBeInTheDocument(); + + await userEvent.click(contextButton); + expect(screen.queryByText('Sample justification 1')).not.toBeInTheDocument(); + }); + + it('renders multiple insights correctly', () => { + const multipleInsights = [ + ...sampleInsights, + { + insight: 'Another insight', + justification: 'Another justification', + error_message_context: ['Another error message'], + }, + ]; + renderComponent({insights: multipleInsights}); + expect(screen.getByText('Sample insight 1')).toBeInTheDocument(); + expect(screen.getByText('User message')).toBeInTheDocument(); + expect(screen.getByText('Another insight')).toBeInTheDocument(); + }); + + it('renders "Rethink from here" buttons', () => { + renderComponent(); + const rethinkButtons = screen.getAllByRole('button', {name: 'Rethink from here'}); + expect(rethinkButtons.length).toBeGreaterThan(0); + }); + + it('shows rethink input overlay when "Rethink from here" is clicked', async () => { + renderComponent(); + const rethinkButton = screen.getByRole('button', {name: 'Rethink from here'}); + await userEvent.click(rethinkButton); + expect(screen.getByPlaceholderText('Say 
something...')).toBeInTheDocument(); + }); + + it('hides rethink input overlay when clicked outside', async () => { + renderComponent(); + const rethinkButton = screen.getByRole('button', {name: 'Rethink from here'}); + await userEvent.click(rethinkButton); + expect(screen.getByPlaceholderText('Say something...')).toBeInTheDocument(); + + await userEvent.click(document.body); + expect(screen.queryByPlaceholderText('Say something...')).not.toBeInTheDocument(); + }); + + it('submits rethink request when form is submitted', async () => { + const mockApi = MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + }); + + renderComponent(); + const rethinkButton = screen.getByRole('button', {name: 'Rethink from here'}); + await userEvent.click(rethinkButton); + + const input = screen.getByPlaceholderText('Say something...'); + await userEvent.type(input, 'Rethink this part'); + + const submitButton = screen.getByLabelText( + 'Restart analysis from this point in the chain' + ); + await userEvent.click(submitButton); + + expect(mockApi).toHaveBeenCalledWith( + '/issues/1/autofix/update/', + expect.objectContaining({ + method: 'POST', + data: expect.objectContaining({ + run_id: '1', + payload: expect.objectContaining({ + type: 'restart_from_point_with_feedback', + message: 'Rethink this part', + step_index: 0, + retain_insight_card_index: 0, + }), + }), + }) + ); + }); + + it('shows success message after successful rethink submission', async () => { + MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + }); + + renderComponent(); + const rethinkButton = screen.getByRole('button', {name: 'Rethink from here'}); + await userEvent.click(rethinkButton); + + const input = screen.getByPlaceholderText('Say something...'); + await userEvent.type(input, 'Rethink this part'); + + const submitButton = screen.getByLabelText( + 'Restart analysis from this point in the chain' + ); + await userEvent.click(submitButton); + + await waitFor(() => { + expect(addSuccessMessage).toHaveBeenCalledWith("Thanks, I'll rethink this..."); + }); + }); + + it('shows error message after failed rethink submission', async () => { + MockApiClient.addMockResponse({ + url: '/issues/1/autofix/update/', + method: 'POST', + statusCode: 500, + }); + + renderComponent(); + const rethinkButton = screen.getByRole('button', {name: 'Rethink from here'}); + await userEvent.click(rethinkButton); + + const input = screen.getByPlaceholderText('Say something...'); + await userEvent.type(input, 'Rethink this part'); + + const submitButton = screen.getByLabelText( + 'Restart analysis from this point in the chain' + ); + await userEvent.click(submitButton); + + await waitFor(() => { + expect(addErrorMessage).toHaveBeenCalledWith( + 'Something went wrong when sending Autofix your message.' 
+ ); + }); }); }); diff --git a/static/app/components/events/autofix/autofixInsightCards.tsx b/static/app/components/events/autofix/autofixInsightCards.tsx index 65d191d17a223..3e5b3e213b2c7 100644 --- a/static/app/components/events/autofix/autofixInsightCards.tsx +++ b/static/app/components/events/autofix/autofixInsightCards.tsx @@ -1,9 +1,10 @@ -import {useState} from 'react'; +import {useEffect, useRef, useState} from 'react'; import styled from '@emotion/styled'; import {AnimatePresence, type AnimationProps, motion} from 'framer-motion'; -import bannerImage from 'sentry-images/spot/ai-suggestion-banner.svg'; +import bannerImage from 'sentry-images/insights/module-upsells/insights-module-upsell.svg'; +import {addErrorMessage, addSuccessMessage} from 'sentry/actionCreators/indicator'; import {Button} from 'sentry/components/button'; import { replaceHeadersWithBold, @@ -21,13 +22,25 @@ import { getBreadcrumbColorConfig, getBreadcrumbTitle, } from 'sentry/components/events/breadcrumbs/utils'; +import Input from 'sentry/components/input'; import StructuredEventData from 'sentry/components/structuredEventData'; import Timeline from 'sentry/components/timeline'; -import {IconArrow, IconChevron, IconCode, IconFire} from 'sentry/icons'; +import { + IconArrow, + IconChevron, + IconCode, + IconEdit, + IconFire, + IconRefresh, + IconUser, +} from 'sentry/icons'; +import {t} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import {BreadcrumbLevelType, BreadcrumbType} from 'sentry/types/breadcrumbs'; import {singleLineRenderer} from 'sentry/utils/marked'; +import {useMutation} from 'sentry/utils/queryClient'; import testableTransition from 'sentry/utils/testableTransition'; +import useApi from 'sentry/utils/useApi'; interface AutofixBreadcrumbSnippetProps { breadcrumb: BreadcrumbContext; @@ -70,9 +83,13 @@ function AutofixBreadcrumbSnippet({breadcrumb}: AutofixBreadcrumbSnippetProps) { export function ExpandableInsightContext({ children, title, + icon, + rounded, }: { children: React.ReactNode; title: string; + icon?: React.ReactNode; + rounded?: boolean; }) { const [expanded, setExpanded] = useState(false); @@ -81,10 +98,18 @@ export function ExpandableInsightContext({ }; return ( - - + + - {title} + + {icon} + {title} + @@ -101,10 +126,14 @@ const animationProps: AnimationProps = { }; interface AutofixInsightCardProps { + groupId: string; hasCardAbove: boolean; hasCardBelow: boolean; + index: number; insight: AutofixInsight; repos: AutofixRepository[]; + runId: string; + stepIndex: number; } function AutofixInsightCard({ @@ -112,111 +141,140 @@ function AutofixInsightCard({ hasCardBelow, hasCardAbove, repos, + index, + stepIndex, + groupId, + runId, }: AutofixInsightCardProps) { + const isUserMessage = insight.justification === 'USER'; + return ( {hasCardAbove && ( - - - - )} - - - -

+          {/* context sections below live inside an ExpandableInsightContext titled "Context" */}
+          {insight.error_message_context && insight.error_message_context.length > 0 && (
+            {insight.error_message_context
+              .map((message, i) => {
+                return (
+                  {/* error message card */}
+                );
+              })
+              .reverse()}
+          )}
+          {insight.stacktrace_context && insight.stacktrace_context.length > 0 && (
+            {insight.stacktrace_context
+              .map((stacktrace, i) => {
+                let vars: any = {};
+                try {
+                  vars = JSON.parse(stacktrace.vars_as_json);
+                } catch {
+                  vars = {vars: stacktrace.vars_as_json};
+                }
+                return (
+                  {/* stack frame snippet rendered with the parsed vars */}
+                );
+              })
+              .reverse()}
+          )}
+          {insight.breadcrumb_context && insight.breadcrumb_context.length > 0 && (
+            {insight.breadcrumb_context
+              .map((breadcrumb, i) => {
+                return (
+                  {/* AutofixBreadcrumbSnippet for each breadcrumb */}
+                );
+              })
+              .reverse()}
+          )}
+          {insight.codebase_context && insight.codebase_context.length > 0 && (
+            {insight.codebase_context
+              .map((code, i) => {
+                return (
+                  {/* codebase snippet card */}
+                );
+              })
+              .reverse()}
+          )}
+        )}
+      {isUserMessage && (
+        {/* user message row (IconUser plus plain text) */}
+      )}
       {hasCardBelow && (
+        {/* chain link to the card below */}
       )}
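// The stacktrace branch above guards JSON.parse so a malformed vars_as_json
// payload degrades to a raw-string display instead of crashing the render.
// A standalone sketch of that fallback; parseVarsAsJson is an illustrative
// name, not part of this patch:
function parseVarsAsJson(varsAsJson: string): Record<string, unknown> {
  try {
    // Happy path: well-formed JSON from the backend.
    return JSON.parse(varsAsJson);
  } catch {
    // Surface the unparseable payload under a `vars` key so the structured
    // data viewer still has something to show.
    return {vars: varsAsJson};
  }
}
// parseVarsAsJson('{"testVar": "testValue"}')  -> {testVar: 'testValue'}
// parseVarsAsJson('not json')                  -> {vars: 'not json'}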
@@ -225,10 +283,13 @@ function AutofixInsightCard({ } interface AutofixInsightCardsProps { + groupId: string; hasStepAbove: boolean; hasStepBelow: boolean; insights: AutofixInsight[]; repos: AutofixRepository[]; + runId: string; + stepIndex: number; } function AutofixInsightCards({ @@ -236,17 +297,12 @@ function AutofixInsightCards({ repos, hasStepBelow, hasStepAbove, + stepIndex, + groupId, + runId, }: AutofixInsightCardsProps) { return ( - {!hasStepAbove && ( -
-            Insights
- )} {insights.length > 0 ? ( insights.map((insight, index) => !insight ? null : ( @@ -256,6 +312,10 @@ function AutofixInsightCards({ hasCardBelow={index < insights.length - 1 || hasStepBelow} hasCardAbove={hasStepAbove && index === 0} repos={repos} + index={index} + stepIndex={stepIndex} + groupId={groupId} + runId={runId} /> ) ) @@ -274,6 +334,131 @@ function AutofixInsightCards({ ); } +export function useUpdateInsightCard({groupId, runId}: {groupId: string; runId: string}) { + const api = useApi({persistInFlight: true}); + + return useMutation({ + mutationFn: (params: { + message: string; + retain_insight_card_index: number | null; + step_index: number; + }) => { + return api.requestPromise(`/issues/${groupId}/autofix/update/`, { + method: 'POST', + data: { + run_id: runId, + payload: { + type: 'restart_from_point_with_feedback', + message: params.message, + step_index: params.step_index, + retain_insight_card_index: params.retain_insight_card_index, + }, + }, + }); + }, + onSuccess: _ => { + addSuccessMessage(t("Thanks, I'll rethink this...")); + }, + onError: () => { + addErrorMessage(t('Something went wrong when sending Autofix your message.')); + }, + }); +} + +function ChainLink({ + groupId, + runId, + stepIndex, + insightCardAboveIndex, +}: { + groupId: string; + insightCardAboveIndex: number | null; + runId: string; + stepIndex: number; +}) { + const [showOverlay, setShowOverlay] = useState(false); + const overlayRef = useRef(null); + const [comment, setComment] = useState(''); + const {mutate: send} = useUpdateInsightCard({groupId, runId}); + + const handleClickOutside = event => { + if (overlayRef.current && !overlayRef.current.contains(event.target)) { + setShowOverlay(false); + } + }; + + useEffect(() => { + if (showOverlay) { + document.addEventListener('mousedown', handleClickOutside); + } else { + document.removeEventListener('mousedown', handleClickOutside); + } + return () => { + document.removeEventListener('mousedown', handleClickOutside); + }; + }, [showOverlay]); + + return ( + + + } + size="zero" + className="rethink-button" + title={t('Rethink from here')} + aria-label={t('Rethink from here')} + onClick={() => setShowOverlay(true)} + /> + + {showOverlay && ( + +
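+            {/* overlay input row; the document-level mousedown listener registered above closes it on outside clicks */}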
{ + e.preventDefault(); + setShowOverlay(false); + setComment(''); + send({ + message: comment, + step_index: stepIndex, + retain_insight_card_index: insightCardAboveIndex, + }); + }} + className="row-form" + > + setComment(e.target.value)} + size="md" + autoFocus + /> +
- - - - - + + ))} @@ -481,7 +517,7 @@ const CausesContainer = styled('div')` border: 1px solid ${p => p.theme.innerBorder}; border-radius: ${p => p.theme.borderRadius}; overflow: hidden; - box-shadow: ${p => p.theme.dropShadowHeavy}; + box-shadow: ${p => p.theme.dropShadowMedium}; `; const PotentialCausesContainer = styled(CausesContainer)` @@ -502,6 +538,12 @@ const RootCauseOption = styled('div')<{selected: boolean}>` padding-right: ${space(2)}; `; +const RootCauseContextContainer = styled('div')` + display: flex; + flex-direction: column; + gap: ${space(0.5)}; +`; + const RootCauseOptionHeader = styled('div')` display: flex; justify-content: space-between; diff --git a/static/app/components/events/autofix/autofixSteps.spec.tsx b/static/app/components/events/autofix/autofixSteps.spec.tsx new file mode 100644 index 0000000000000..b8e54aa8ea42e --- /dev/null +++ b/static/app/components/events/autofix/autofixSteps.spec.tsx @@ -0,0 +1,197 @@ +import {AutofixCodebaseChangeData} from 'sentry-fixture/autofixCodebaseChangeData'; +import {AutofixDataFixture} from 'sentry-fixture/autofixData'; +import {AutofixProgressItemFixture} from 'sentry-fixture/autofixProgressItem'; +import {AutofixStepFixture} from 'sentry-fixture/autofixStep'; + +import {render, screen, userEvent, waitFor} from 'sentry-test/reactTestingLibrary'; + +import {addSuccessMessage} from 'sentry/actionCreators/indicator'; +import {AutofixSteps} from 'sentry/components/events/autofix/autofixSteps'; +import {type AutofixStep, AutofixStepType} from 'sentry/components/events/autofix/types'; + +jest.mock('sentry/actionCreators/indicator'); + +describe('AutofixSteps', () => { + beforeEach(() => { + (addSuccessMessage as jest.Mock).mockClear(); + MockApiClient.clearMockResponses(); + jest.clearAllMocks(); + }); + + const defaultProps = { + data: AutofixDataFixture({ + steps: [ + AutofixStepFixture({ + id: '1', + type: AutofixStepType.DEFAULT, + status: 'COMPLETED', + insights: [], + progress: [], + }), + AutofixStepFixture({ + id: '2', + type: AutofixStepType.ROOT_CAUSE_ANALYSIS, + status: 'COMPLETED', + causes: [ + { + id: 'cause1', + description: 'Root cause 1', + title: 'cause 1', + code_context: [], + }, + ], + selection: null, + progress: [], + }), + ], + repositories: [], + created_at: '2023-01-01T00:00:00Z', + run_id: '1', + status: 'PROCESSING', + }), + groupId: 'group1', + runId: 'run1', + onRetry: jest.fn(), + }; + + it('renders steps correctly', () => { + render(); + + expect(screen.getByText('Root cause 1')).toBeInTheDocument(); + expect( + screen.getByPlaceholderText('Provide any instructions for the fix...') + ).toBeInTheDocument(); + }); + + it('handles root cause selection', async () => { + MockApiClient.addMockResponse({ + url: '/issues/group1/autofix/update/', + method: 'POST', + body: {}, + }); + + render(); + + const input = screen.getByPlaceholderText('Provide any instructions for the fix...'); + await userEvent.type(input, 'Custom root cause'); + await userEvent.click(screen.getByRole('button', {name: 'Find a Fix'})); + + await waitFor(() => { + expect(addSuccessMessage).toHaveBeenCalledWith( + "Great, let's move forward with this root cause." 
+ ); + }); + }); + + it('selects default root cause when input is empty', async () => { + MockApiClient.addMockResponse({ + url: '/issues/group1/autofix/update/', + method: 'POST', + body: {}, + }); + + render(); + + await userEvent.click(screen.getByRole('button', {name: 'Find a Fix'})); + + await waitFor(() => { + expect(addSuccessMessage).toHaveBeenCalledWith( + "Great, let's move forward with this root cause." + ); + }); + }); + + it('renders AutofixMessageBox with correct props', () => { + render(); + + const messageBox = screen.getByPlaceholderText( + 'Provide any instructions for the fix...' + ); + expect(messageBox).toBeInTheDocument(); + + const sendButton = screen.getByRole('button', {name: 'Find a Fix'}); + expect(sendButton).toBeInTheDocument(); + expect(sendButton).toBeEnabled(); + }); + + it('updates message box based on last step', () => { + const propsWithProgress = { + ...defaultProps, + data: { + ...defaultProps.data, + steps: [ + ...(defaultProps.data.steps as AutofixStep[]), + AutofixStepFixture({ + id: '3', + type: AutofixStepType.DEFAULT, + status: 'PROCESSING', + progress: [ + AutofixProgressItemFixture({ + message: 'Log message', + timestamp: '2023-01-01T00:00:00Z', + }), + ], + insights: [], + }), + ], + }, + }; + + render(); + + expect(screen.getByText('Log message')).toBeInTheDocument(); + }); + + it('handles iterating on changes step', async () => { + MockApiClient.addMockResponse({ + url: '/issues/group1/autofix/setup/', + body: { + genAIConsent: {ok: true}, + codebaseIndexing: {ok: true}, + integration: {ok: true}, + githubWriteIntegration: { + repos: [], + }, + }, + }); + MockApiClient.addMockResponse({ + url: '/issues/group1/autofix/update/', + method: 'POST', + body: {}, + }); + + const propsWithChanges = { + ...defaultProps, + data: { + ...defaultProps.data, + steps: [ + AutofixStepFixture({ + id: '1', + type: AutofixStepType.DEFAULT, + status: 'COMPLETED', + insights: [], + progress: [], + index: 0, + }), + AutofixStepFixture({ + id: '2', + type: AutofixStepType.CHANGES, + status: 'COMPLETED', + progress: [], + changes: [AutofixCodebaseChangeData()], + }), + ], + }, + }; + + render(); + + const input = screen.getByPlaceholderText('Say something...'); + await userEvent.type(input, 'Feedback on changes'); + await userEvent.click(screen.getByRole('button', {name: 'Send'})); + + await waitFor(() => { + expect(addSuccessMessage).toHaveBeenCalledWith("Thanks, I'll rethink this..."); + }); + }); +}); diff --git a/static/app/components/events/autofix/autofixSteps.tsx b/static/app/components/events/autofix/autofixSteps.tsx index 47645575f8b2a..7c2310dafe4e9 100644 --- a/static/app/components/events/autofix/autofixSteps.tsx +++ b/static/app/components/events/autofix/autofixSteps.tsx @@ -3,7 +3,9 @@ import styled from '@emotion/styled'; import {AnimatePresence, type AnimationProps, motion} from 'framer-motion'; import {AutofixChanges} from 'sentry/components/events/autofix/autofixChanges'; -import AutofixInsightCards from 'sentry/components/events/autofix/autofixInsightCards'; +import AutofixInsightCards, { + useUpdateInsightCard, +} from 'sentry/components/events/autofix/autofixInsightCards'; import AutofixMessageBox from 'sentry/components/events/autofix/autofixMessageBox'; import { AutofixRootCause, @@ -68,10 +70,8 @@ export function Step({ hasStepAbove, hasErroredStepBefore, }: StepProps) { - const isActive = step.status !== 'PENDING' && step.status !== 'CANCELLED'; - return ( - + @@ -82,6 +82,9 @@ export function Step({ repos={repos} hasStepBelow={hasStepBelow} 
hasStepAbove={hasStepAbove} + stepIndex={step.index} + groupId={groupId} + runId={runId} /> )} {step.type === AutofixStepType.ROOT_CAUSE_ANALYSIS && ( @@ -94,7 +97,12 @@ export function Step({ /> )} {step.type === AutofixStepType.CHANGES && ( - + )} {hasErroredStepBefore && hasStepBelow && ( @@ -140,8 +148,8 @@ export function AutofixSteps({data, groupId, runId, onRetry}: AutofixStepsProps) const stepsRef = useRef<(HTMLDivElement | null)[]>([]); const {mutate: handleSelectFix} = useSelectCause({groupId, runId}); - const selectRootCause = (text: string) => { - if (text.length > 0) { + const selectRootCause = (text: string, isCustom?: boolean) => { + if (isCustom) { handleSelectFix({customRootCause: text}); } else { if (!steps) { @@ -153,8 +161,21 @@ export function AutofixSteps({data, groupId, runId, onRetry}: AutofixStepsProps) } const cause = step.causes[0]; const id = cause.id; - handleSelectFix({causeId: id}); + handleSelectFix({causeId: id, instruction: text}); + } + }; + + const {mutate: sendFeedbackOnChanges} = useUpdateInsightCard({groupId, runId}); + const iterateOnChangesStep = (text: string) => { + const planStep = steps?.[steps.length - 2]; + if (!planStep || planStep.type !== AutofixStepType.DEFAULT) { + return; } + sendFeedbackOnChanges({ + step_index: planStep.index, + retain_insight_card_index: planStep.insights.length - 1, + message: text, + }); }; const lastStepVisible = useInView( @@ -173,15 +194,9 @@ export function AutofixSteps({data, groupId, runId, onRetry}: AutofixStepsProps) const isRootCauseSelectionStep = lastStep.type === AutofixStepType.ROOT_CAUSE_ANALYSIS && lastStep.status === 'COMPLETED'; - const areCodeChangesShowing = - lastStep.type === AutofixStepType.CHANGES && lastStep.status === 'COMPLETED'; - const disabled = areCodeChangesShowing ? true : false; - const previousStep = steps.length > 2 ? steps[steps.length - 2] : null; - const previousStepErrored = - previousStep !== null && - previousStep?.type === lastStep.type && - previousStep.status === 'ERROR'; + const isChangesStep = + lastStep.type === AutofixStepType.CHANGES && lastStep.status === 'COMPLETED'; const scrollToMatchingStep = () => { const matchingStepIndex = steps.findIndex(step => step.type === lastStep.type); @@ -193,44 +208,54 @@ export function AutofixSteps({data, groupId, runId, onRetry}: AutofixStepsProps) return (
- {steps.map((step, index) => ( -
(stepsRef.current[index] = el)} key={step.id}> - 0} - groupId={groupId} - runId={runId} - onRetry={onRetry} - repos={repos} - hasErroredStepBefore={previousStepErrored} - /> -
- ))} + {steps.map((step, index) => { + const previousStep = index > 0 ? steps[index - 1] : null; + const previousStepErrored = + previousStep !== null && + previousStep?.type === step.type && + previousStep.status === 'ERROR'; + const nextStep = index + 1 < steps.length ? steps[index + 1] : null; + const twoInsightStepsInARow = + nextStep?.type === AutofixStepType.DEFAULT && + step.type === AutofixStepType.DEFAULT; + const twoNonDefaultStepsInARow = + nextStep?.type !== AutofixStepType.DEFAULT && + step.type !== AutofixStepType.DEFAULT; + return ( +
(stepsRef.current[index] = el)} key={step.id}> + 0} + groupId={groupId} + runId={runId} + onRetry={onRetry} + repos={repos} + hasErroredStepBefore={previousStepErrored} + /> + {twoNonDefaultStepsInARow && } +
+ ); + })}
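// The map above derives two layout hints from step adjacency. A pure-function
// sketch of the same logic (AutofixStep/AutofixStepType come from
// sentry/components/events/autofix/types; the helper name is illustrative):
import {
  type AutofixStep,
  AutofixStepType,
} from 'sentry/components/events/autofix/types';

function getStepAdjacency(steps: AutofixStep[], index: number) {
  const step = steps[index];
  const previousStep = index > 0 ? steps[index - 1] : null;
  const nextStep = index + 1 < steps.length ? steps[index + 1] : null;
  return {
    // A step counts as a retry of a failed step only when its predecessor has
    // the same type and ended in ERROR.
    previousStepErrored:
      previousStep !== null &&
      previousStep.type === step.type &&
      previousStep.status === 'ERROR',
    // Mirrors the check above: a separator follows this step unless it or the
    // step after it is a default (insight) step, so two insight steps in a row
    // flow together without one.
    twoNonDefaultStepsInARow:
      nextStep?.type !== AutofixStepType.DEFAULT &&
      step.type !== AutofixStepType.DEFAULT,
  };
}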
` - opacity: ${p => (p.active ? 1 : 0.6)}; +const StepCard = styled('div')` overflow: hidden; :last-child { @@ -278,3 +302,8 @@ const ContentWrapper = styled(motion.div)` `; const AnimationWrapper = styled(motion.div)``; + +const StepSeparator = styled('div')` + height: 1px; + margin: ${space(1)} 0; +`; diff --git a/static/app/components/events/autofix/types.ts b/static/app/components/events/autofix/types.ts index c5da8c4c5d06f..27f9dcb310bc1 100644 --- a/static/app/components/events/autofix/types.ts +++ b/static/app/components/events/autofix/types.ts @@ -80,7 +80,13 @@ interface BaseStep { id: string; index: number; progress: AutofixProgressItem[]; - status: 'PENDING' | 'PROCESSING' | 'COMPLETED' | 'ERROR' | 'CANCELLED'; + status: + | 'PENDING' + | 'PROCESSING' + | 'COMPLETED' + | 'ERROR' + | 'CANCELLED' + | 'WAITING_FOR_USER_RESPONSE'; title: string; type: AutofixStepType; completedMessage?: string; @@ -160,12 +166,19 @@ export type AutofixRootCauseCodeContext = { snippet?: CodeSnippetContext; }; +export type AutofixRootCauseUnitTest = { + description: string; + file_path: string; + snippet: string; +}; + export type AutofixRootCauseData = { code_context: AutofixRootCauseCodeContext[]; description: string; id: string; title: string; reproduction?: string; + unit_test?: AutofixRootCauseUnitTest; }; export type EventMetadataWithAutofix = EventMetadata & { diff --git a/static/app/components/events/autofix/useAutofix.tsx b/static/app/components/events/autofix/useAutofix.tsx index 4760ee7df2531..fd4c793a8b8c1 100644 --- a/static/app/components/events/autofix/useAutofix.tsx +++ b/static/app/components/events/autofix/useAutofix.tsx @@ -66,10 +66,7 @@ const makeErrorAutofixData = (errorMessage: string): AutofixResponse => { return data; }; -const isPolling = (autofixData?: AutofixData | null) => - autofixData?.status === 'PROCESSING' || - autofixData?.status === 'PENDING' || - autofixData?.status === 'NEED_MORE_INFORMATION'; +const isPolling = (autofixData?: AutofixData | null) => autofixData?.status !== 'PENDING'; export const useAutofixData = ({groupId}: {groupId: string}) => { const {data} = useApiQuery(makeAutofixQueryKey(groupId), { diff --git a/static/app/components/events/breadcrumbs/breadcrumbsDataSection.tsx b/static/app/components/events/breadcrumbs/breadcrumbsDataSection.tsx index e4c6e031e511a..9f3bc3a96b9f9 100644 --- a/static/app/components/events/breadcrumbs/breadcrumbsDataSection.tsx +++ b/static/app/components/events/breadcrumbs/breadcrumbsDataSection.tsx @@ -1,4 +1,5 @@ import {useCallback, useMemo, useRef, useState} from 'react'; +import {ClassNames} from '@emotion/react'; import styled from '@emotion/styled'; import GuideAnchor from 'sentry/components/assistant/guideAnchor'; @@ -151,7 +152,7 @@ export default function BreadcrumbsDataSection({ const hasViewAll = summaryCrumbs.length !== enhancedCrumbs.length; return ( - + - + ); } @@ -222,3 +223,18 @@ const VerticalEllipsis = styled(IconEllipsis)` const ViewAllButton = styled(Button)` padding: ${space(0.75)} ${space(1)}; `; + +function FullWidthGuideAnchor(props: React.ComponentProps) { + return ( + + {({css: classNamesCss}) => ( + + )} + + ); +} diff --git a/static/app/components/events/errorLevelText.tsx b/static/app/components/events/errorLevelText.tsx deleted file mode 100644 index f31ff0a4e8b89..0000000000000 --- a/static/app/components/events/errorLevelText.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import styled from '@emotion/styled'; - -import {IconClose, IconFatal, IconInfo, IconWarning} from 'sentry/icons'; -import {t} 
from 'sentry/locale'; -import {space} from 'sentry/styles/space'; -import type {Level} from 'sentry/types/event'; -import type {IconSize} from 'sentry/utils/theme'; - -const errorLevelMap: Readonly> = { - error: t('Error'), - fatal: t('Fatal'), - info: t('Info'), - warning: t('Warning'), - sample: t('Sample'), - unknown: t('Unknown'), -}; - -interface IconWithDefaultProps { - Component: React.ComponentType | null; - defaultProps: {isCircled?: boolean}; -} - -const errorLevelIconMap: Readonly> = { - error: {Component: IconClose, defaultProps: {isCircled: true}}, - fatal: {Component: IconFatal, defaultProps: {}}, - info: {Component: IconInfo, defaultProps: {}}, - warning: {Component: IconWarning, defaultProps: {}}, - sample: {Component: null, defaultProps: {}}, - unknown: {Component: null, defaultProps: {}}, -}; - -interface ErrorLevelTextProps { - level: Level; - iconSize?: IconSize; -} - -export function ErrorLevelText({level, iconSize = 'xs'}: ErrorLevelTextProps) { - const Icon = errorLevelIconMap[level]?.Component ?? null; - return ( - - {Icon && ( - - )} - {errorLevelMap[level]} - - ); -} - -const ErrorLevelTextWrapper = styled('div')` - display: flex; - align-items: center; - gap: ${space(0.5)}; -`; diff --git a/static/app/components/events/eventAttachmentActions.tsx b/static/app/components/events/eventAttachmentActions.tsx index 504e9eadb315d..9f04d1f360456 100644 --- a/static/app/components/events/eventAttachmentActions.tsx +++ b/static/app/components/events/eventAttachmentActions.tsx @@ -1,4 +1,4 @@ -import {Role} from 'sentry/components/acl/role'; +import {useRole} from 'sentry/components/acl/useRole'; import {Button, LinkButton} from 'sentry/components/button'; import ButtonBar from 'sentry/components/buttonBar'; import Confirm from 'sentry/components/confirm'; @@ -26,65 +26,62 @@ function EventAttachmentActions({ onDelete, }: Props) { const organization = useOrganization(); + const {hasRole: hasAttachmentRole} = useRole({role: 'attachmentsRole'}); const url = `/api/0/projects/${organization.slug}/${projectSlug}/events/${attachment.event_id}/attachments/${attachment.id}/`; const hasPreview = hasInlineAttachmentRenderer(attachment); return ( - - {({hasRole: hasAttachmentRole}) => ( - - {withPreviewButton && ( - - )} - } - href={hasAttachmentRole ? `${url}?download=1` : ''} - disabled={!hasAttachmentRole} - title={ - hasAttachmentRole - ? t('Download') - : t('Insufficient permissions to download attachments') - } - aria-label={t('Download')} - /> - - )} - + } + href={hasAttachmentRole ? `${url}?download=1` : ''} + disabled={!hasAttachmentRole} + title={ + hasAttachmentRole + ? t('Download') + : t('Insufficient permissions to download attachments') + } + aria-label={t('Download')} + /> + + + ); } @@ -113,9 +111,7 @@ function EventAttachmentsContent({event, project, group}: EventAttachmentsProps) type={SectionKey.ATTACHMENTS} title={title} actions={ - hasStreamlinedUI && project && group ? ( - - ) : null + hasStreamlinedUI && project && group ? 
: null } > {crashFileStripped && ( diff --git a/static/app/components/events/eventEntries.tsx b/static/app/components/events/eventEntries.tsx index 35aa2b8d69184..0e8bdb121ac6f 100644 --- a/static/app/components/events/eventEntries.tsx +++ b/static/app/components/events/eventEntries.tsx @@ -82,7 +82,7 @@ function EventEntries({ )} {!isShare && isNotSharedOrganization(organization) && ( { - const defaultUser = UserFixture(); + const group = GroupFixture(); beforeEach(() => { ConfigStore.init(); }); it('renders error message', () => { - render(); + render( + + ); expect(screen.getByText('Test message')).toBeInTheDocument(); }); + it('renders location (with issue-stream-table-layout)', () => { + const organization = OrganizationFixture({ + features: ['issue-stream-table-layout'], + }); + + render( + , + {organization} + ); + + expect( + screen.getByText('fetchData(app/components/group/suggestedOwners/suggestedOwners)') + ).toBeInTheDocument(); + }); + it('renders "No error message" when message is not provided', () => { - render(); + render(); expect(screen.getByText('(No error message)')).toBeInTheDocument(); }); it('renders error level indicator dot', () => { render( - + ); expect(screen.getByText('Level: Error')).toBeInTheDocument(); }); - it('renders error level indicator text', () => { - ConfigStore.set( - 'user', - UserFixture({ - ...defaultUser, - options: { - ...defaultUser.options, - prefersIssueDetailsStreamlinedUI: true, - }, - }) - ); - render( - - ); - expect(screen.getByText('Error')).toBeInTheDocument(); - }); - it('renders unhandled tag', () => { render( - + ); expect(screen.getByText('Unhandled')).toBeInTheDocument(); }); diff --git a/static/app/components/events/eventMessage.tsx b/static/app/components/events/eventMessage.tsx index e9a29cc97d9c7..589296db0392f 100644 --- a/static/app/components/events/eventMessage.tsx +++ b/static/app/components/events/eventMessage.tsx @@ -1,16 +1,18 @@ -import {Fragment} from 'react'; import styled from '@emotion/styled'; import ErrorLevel from 'sentry/components/events/errorLevel'; -import {ErrorLevelText} from 'sentry/components/events/errorLevelText'; import UnhandledTag from 'sentry/components/group/inboxBadges/unhandledTag'; import {t} from 'sentry/locale'; import {space} from 'sentry/styles/space'; -import {EventOrGroupType, type Level} from 'sentry/types/event'; +import type {Event, EventOrGroupType, Level} from 'sentry/types/event'; +import type {BaseGroup, GroupTombstoneHelper} from 'sentry/types/group'; +import {eventTypeHasLogLevel, getTitle} from 'sentry/utils/events'; +import useOrganization from 'sentry/utils/useOrganization'; import {Divider} from 'sentry/views/issueDetails/divider'; import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; type Props = { + data: Event | BaseGroup | GroupTombstoneHelper; message: React.ReactNode; type: EventOrGroupType; className?: string; @@ -18,21 +20,12 @@ type Props = { /** * Size of the level indicator. 
*/ - levelIndicatorSize?: '9px' | '11px'; + levelIndicatorSize?: '9px' | '10px' | '11px'; showUnhandled?: boolean; }; -const EVENT_TYPES_WITH_LOG_LEVEL = new Set([ - EventOrGroupType.ERROR, - EventOrGroupType.CSP, - EventOrGroupType.EXPECTCT, - EventOrGroupType.DEFAULT, - EventOrGroupType.EXPECTSTAPLE, - EventOrGroupType.HPKP, - EventOrGroupType.NEL, -]); - function EventMessage({ + data, className, level, levelIndicatorSize, @@ -40,24 +33,29 @@ function EventMessage({ type, showUnhandled = false, }: Props) { + const organization = useOrganization({allowNull: true}); const hasStreamlinedUI = useHasStreamlinedUI(); - const showEventLevel = level && EVENT_TYPES_WITH_LOG_LEVEL.has(type); + + // TODO(malwilley): When the new layout is GA'd, this component should be renamed + const hasNewIssueStreamTableLayout = organization?.features.includes( + 'issue-stream-table-layout' + ); + + const showEventLevel = level && eventTypeHasLogLevel(type); + const {subtitle} = getTitle(data); + const renderedMessage = message ? ( + {message} + ) : ( + ({t('No error message')}) + ); + return ( - {!hasStreamlinedUI ? : null} + {showEventLevel && } + {hasStreamlinedUI && showEventLevel ? : null} {showUnhandled ? : null} - {hasStreamlinedUI && showEventLevel ? ( - - {showUnhandled ? : null} - - - - ) : null} - {message ? ( - {message} - ) : ( - ({t('No error message')}) - )} + {hasStreamlinedUI && showUnhandled ? : null} + {hasNewIssueStreamTableLayout ? subtitle : renderedMessage} ); } diff --git a/static/app/components/events/eventReplay/replayClipSection.tsx b/static/app/components/events/eventReplay/replayClipSection.tsx index e1375a4f82bba..0a744f665047d 100644 --- a/static/app/components/events/eventReplay/replayClipSection.tsx +++ b/static/app/components/events/eventReplay/replayClipSection.tsx @@ -15,11 +15,11 @@ import type {Event} from 'sentry/types/event'; import type {Group} from 'sentry/types/group'; import {getAnalyticsDataForEvent, getAnalyticsDataForGroup} from 'sentry/utils/events'; import useReplayCountForIssues from 'sentry/utils/replayCount/useReplayCountForIssues'; -import normalizeUrl from 'sentry/utils/url/normalizeUrl'; import useOrganization from 'sentry/utils/useOrganization'; -import useRouter from 'sentry/utils/useRouter'; import {SectionKey} from 'sentry/views/issueDetails/streamline/context'; import {InterimSection} from 'sentry/views/issueDetails/streamline/interimSection'; +import {Tab, TabPaths} from 'sentry/views/issueDetails/types'; +import {useGroupDetailsRoute} from 'sentry/views/issueDetails/useGroupDetailsRoute'; import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; interface Props { @@ -38,43 +38,31 @@ const ReplayClipPreview = lazy(() => import('./replayClipPreview')); export function ReplayClipSection({event, group, replayId}: Props) { const organization = useOrganization(); const hasStreamlinedUI = useHasStreamlinedUI(); - const router = useRouter(); const {getReplayCountForIssue} = useReplayCountForIssues(); + const {baseUrl} = useGroupDetailsRoute(); const startTimestampMS = 'startTimestamp' in event ? event.startTimestamp * 1000 : undefined; const timeOfEvent = event.dateCreated ?? startTimestampMS ?? event.dateReceived; const eventTimestampMs = timeOfEvent ? Math.floor(new Date(timeOfEvent).getTime()) : 0; - // don't try to construct the url if we don't have a group - const eventIdFromRouter = router.params.eventId; - const baseUrl = group - ? eventIdFromRouter - ? 
normalizeUrl( - `/organizations/${organization.slug}/issues/${group.id}/events/${eventIdFromRouter}/` - ) - : normalizeUrl(`/organizations/${organization.slug}/issues/${group.id}/`) - : ''; - const replayUrl = baseUrl - ? location.search.length - ? `${baseUrl}replays/${location.search}/` - : `${baseUrl}replays/` - : ''; - - const seeAllReplaysButton = replayUrl ? ( + const allReplaysButton = ( {t('See All Replays')} - ) : undefined; + ); const replayCount = group ? getReplayCountForIssue(group.id, group.issueCategory) : -1; const overlayContent = - seeAllReplaysButton && replayCount && replayCount > 1 ? ( + replayCount && replayCount > 1 ? (
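+    {/* overlay content renders only when the issue has more than one replay */}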
{t( @@ -82,7 +70,7 @@ export function ReplayClipSection({event, group, replayId}: Props) { tn('%s replay', '%s replays', replayCount ?? 0) )}
- {seeAllReplaysButton} + {allReplaysButton}
) : undefined; @@ -115,7 +103,7 @@ export function ReplayClipSection({event, group, replayId}: Props) { return ( diff --git a/static/app/components/events/eventReplay/replayDrawer.tsx b/static/app/components/events/eventReplay/replayDrawer.tsx new file mode 100644 index 0000000000000..145834fe7c7b5 --- /dev/null +++ b/static/app/components/events/eventReplay/replayDrawer.tsx @@ -0,0 +1,54 @@ +import styled from '@emotion/styled'; + +import ProjectAvatar from 'sentry/components/avatar/projectAvatar'; +import { + CrumbContainer, + EventDrawerBody, + EventDrawerContainer, + EventDrawerHeader, + EventNavigator, + Header, + NavigationCrumbs, + ShortId, +} from 'sentry/components/events/eventDrawer'; +import {t} from 'sentry/locale'; +import type {Group} from 'sentry/types/group'; +import type {Project} from 'sentry/types/project'; +import GroupReplays, { + StyledLayoutPage, +} from 'sentry/views/issueDetails/groupReplays/groupReplays'; + +export function ReplayDrawer({group, project}: {group: Group; project: Project}) { + return ( + + + + + {group.shortId} + + ), + }, + {label: t('Replays')}, + ]} + /> + + +
+      <EventNavigator>
+        <Header>{t('Replays')}</Header>
+      </EventNavigator>
+      <ReplayBody>
+        <GroupReplays group={group} />
+      </ReplayBody>
+ ); +} + +const ReplayBody = styled(EventDrawerBody)` + ${StyledLayoutPage} { + box-shadow: unset; + padding: unset; + } +`; diff --git a/static/app/components/events/eventStatisticalDetector/eventThroughput.tsx b/static/app/components/events/eventStatisticalDetector/eventThroughput.tsx index e5dbc28f0cd55..40c86132577dc 100644 --- a/static/app/components/events/eventStatisticalDetector/eventThroughput.tsx +++ b/static/app/components/events/eventStatisticalDetector/eventThroughput.tsx @@ -28,7 +28,6 @@ import {useRelativeDateTime} from 'sentry/utils/profiling/hooks/useRelativeDateT import {MutableSearch} from 'sentry/utils/tokenizeSearch'; import {useLocation} from 'sentry/utils/useLocation'; import useOrganization from 'sentry/utils/useOrganization'; -import useRouter from 'sentry/utils/useRouter'; import transformEventStats from 'sentry/views/performance/trends/utils/transformEventStats'; const BUCKET_SIZE = 6 * 60 * 60; // 6 hours in seconds; @@ -54,7 +53,6 @@ export function EventThroughput({event, group}: EventThroughputProps) { function EventThroughputInner({event, group}: EventThroughputProps) { const theme = useTheme(); - const router = useRouter(); const evidenceData = event.occurrence!.evidenceData; const breakpoint = evidenceData.breakpoint; @@ -183,7 +181,7 @@ function EventThroughputInner({event, group}: EventThroughputProps) { ) : ( {'\u2014'} )} - + {zoomRenderProps => ( )} diff --git a/static/app/components/events/eventStatisticalDetector/lineChart.tsx b/static/app/components/events/eventStatisticalDetector/lineChart.tsx index 9cdbd5d97b4db..955571953409f 100644 --- a/static/app/components/events/eventStatisticalDetector/lineChart.tsx +++ b/static/app/components/events/eventStatisticalDetector/lineChart.tsx @@ -8,7 +8,6 @@ import {LineChart as EChartsLineChart} from 'sentry/components/charts/lineChart' import getBreakpointChartOptionsFromData from 'sentry/components/events/eventStatisticalDetector/breakpointChartOptions'; import type {PageFilters} from 'sentry/types/core'; import type {EventsStatsData} from 'sentry/types/organization'; -import useRouter from 'sentry/utils/useRouter'; import type {NormalizedTrendsTransaction} from 'sentry/views/performance/trends/types'; interface ChartProps { @@ -21,7 +20,6 @@ interface ChartProps { function LineChart({datetime, percentileData, evidenceData, chartType}: ChartProps) { const theme = useTheme(); - const router = useRouter(); const {series, chartOptions} = useMemo(() => { return getBreakpointChartOptionsFromData( @@ -32,7 +30,7 @@ function LineChart({datetime, percentileData, evidenceData, chartType}: ChartPro }, [percentileData, evidenceData, chartType, theme]); return ( - + {zoomRenderProps => ( )} diff --git a/static/app/components/events/eventTags/eventTagsTreeRow.tsx b/static/app/components/events/eventTags/eventTagsTreeRow.tsx index 158368da7708f..286ae659993d9 100644 --- a/static/app/components/events/eventTags/eventTagsTreeRow.tsx +++ b/static/app/components/events/eventTags/eventTagsTreeRow.tsx @@ -3,7 +3,6 @@ import styled from '@emotion/styled'; import * as qs from 'query-string'; import {openNavigateToExternalLinkModal} from 'sentry/actionCreators/modal'; -import {navigateTo} from 'sentry/actionCreators/navigation'; import {hasEveryAccess} from 'sentry/components/acl/access'; import {DropdownMenu} from 'sentry/components/dropdownMenu'; import type {TagTreeContent} from 'sentry/components/events/eventTags/eventTagsTree'; @@ -24,7 +23,6 @@ import {isUrl} from 'sentry/utils/string/isUrl'; import useCopyToClipboard 
from 'sentry/utils/useCopyToClipboard'; import useMutateProject from 'sentry/utils/useMutateProject'; import useOrganization from 'sentry/utils/useOrganization'; -import useRouter from 'sentry/utils/useRouter'; interface EventTagTreeRowConfig { // Omits the dropdown of actions applicable to this tag @@ -120,7 +118,6 @@ function EventTagsTreeRowDropdown({ project, }: Pick) { const organization = useOrganization(); - const router = useRouter(); const {onClick: handleCopy} = useCopyToClipboard({ text: content.value, }); @@ -140,7 +137,6 @@ function EventTagsTreeRowDropdown({ // Skip tags already highlighted highlightTagSet.has(originalTag.key); const query = generateQueryWithTag({referrer}, originalTag); - const searchQuery = `?${qs.stringify(query)}`; const isProjectAdmin = hasEveryAccess(['project:admin'], { organization, project, @@ -164,21 +160,17 @@ function EventTagsTreeRowDropdown({ key: 'view-events', label: t('View other events with this tag value'), hidden: !event.groupID, - onAction: () => { - navigateTo( - `/organizations/${organization.slug}/issues/${event.groupID}/events/${searchQuery}`, - router - ); + to: { + pathname: `/organizations/${organization.slug}/issues/${event.groupID}/events/`, + query, }, }, { key: 'view-issues', label: t('View issues with this tag value'), - onAction: () => { - navigateTo( - `/organizations/${organization.slug}/issues/${searchQuery}`, - router - ); + to: { + pathname: `/organizations/${organization.slug}/issues/`, + query, }, }, { @@ -200,42 +192,38 @@ function EventTagsTreeRowDropdown({ key: 'release', label: t('View this release'), hidden: originalTag.key !== 'release', - onAction: () => { - navigateTo( - `/organizations/${organization.slug}/releases/${encodeURIComponent( - content.value - )}/`, - router - ); - }, + to: + originalTag.key === 'release' + ? `/organizations/${organization.slug}/releases/${encodeURIComponent(content.value)}/` + : undefined, }, { key: 'transaction', label: t('View this transaction'), hidden: originalTag.key !== 'transaction', - onAction: () => { - const transactionQuery = qs.stringify({ - project: event.projectID, - transaction: content.value, - referrer, - }); - navigateTo( - `/organizations/${organization.slug}/performance/summary/?${transactionQuery}`, - router - ); - }, + to: + originalTag.key === 'transaction' + ? { + pathname: `/organizations/${organization.slug}/performance/summary/`, + query: { + project: event.projectID, + transaction: content.value, + referrer, + }, + } + : undefined, }, { key: 'replay', label: t('View this replay'), hidden: originalTag.key !== 'replay_id' && originalTag.key !== 'replayId', - onAction: () => { - const replayQuery = qs.stringify({referrer}); - navigateTo( - `/organizations/${organization.slug}/replays/${encodeURIComponent(content.value)}/?${replayQuery}`, - router - ); - }, + to: + originalTag.key === 'replay_id' || originalTag.key === 'replayId' + ? 
{ + pathname: `/organizations/${organization.slug}/replays/${encodeURIComponent(content.value)}/`, + query: {referrer}, + } + : undefined, }, { key: 'external-link', diff --git a/static/app/components/events/eventTagsAndScreenshot/screenshot/index.tsx b/static/app/components/events/eventTagsAndScreenshot/screenshot/index.tsx index 4c143fffd1afd..7114c85d72e72 100644 --- a/static/app/components/events/eventTagsAndScreenshot/screenshot/index.tsx +++ b/static/app/components/events/eventTagsAndScreenshot/screenshot/index.tsx @@ -2,7 +2,7 @@ import type {ReactEventHandler} from 'react'; import {Fragment, useState} from 'react'; import styled from '@emotion/styled'; -import {Role} from 'sentry/components/acl/role'; +import {useRole} from 'sentry/components/acl/useRole'; import {Button} from 'sentry/components/button'; import ButtonBar from 'sentry/components/buttonBar'; import {openConfirmModal} from 'sentry/components/confirm'; @@ -52,6 +52,7 @@ function Screenshot({ }: Props) { const orgSlug = organization.slug; const [loadingImage, setLoadingImage] = useState(true); + const {hasRole} = useRole({role: 'attachmentsRole'}); function handleDelete(screenshotAttachmentId: string) { trackAnalytics('issue_details.issue_tab.screenshot_dropdown_deleted', { @@ -164,17 +165,11 @@ function Screenshot({ ); } - return ( - - {({hasRole}) => { - if (!hasRole) { - return null; - } + if (!hasRole) { + return null; + } - return {renderContent(screenshot)}; - }} - - ); + return {renderContent(screenshot)}; } export default Screenshot; diff --git a/static/app/components/events/eventTagsAndScreenshot/tags.tsx b/static/app/components/events/eventTagsAndScreenshot/tags.tsx index 5f43dc3b0283e..26085a76239cd 100644 --- a/static/app/components/events/eventTagsAndScreenshot/tags.tsx +++ b/static/app/components/events/eventTagsAndScreenshot/tags.tsx @@ -1,7 +1,7 @@ -import {forwardRef, useCallback, useMemo, useRef, useState} from 'react'; +import {forwardRef, useCallback, useMemo, useState} from 'react'; import styled from '@emotion/styled'; -import {Button} from 'sentry/components/button'; +import {LinkButton} from 'sentry/components/button'; import ButtonBar from 'sentry/components/buttonBar'; import { getSentryDefaultTags, @@ -15,9 +15,10 @@ import {t, tct} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import type {Event} from 'sentry/types/event'; import type {Project} from 'sentry/types/project'; -import {useGroupTagsDrawer} from 'sentry/views/issueDetails/groupTags/useGroupTagsDrawer'; +import {useLocation} from 'sentry/utils/useLocation'; import {SectionKey} from 'sentry/views/issueDetails/streamline/context'; import {InterimSection} from 'sentry/views/issueDetails/streamline/interimSection'; +import {Tab, TabPaths} from 'sentry/views/issueDetails/types'; import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; import {EventTags} from '../eventTags'; @@ -31,12 +32,7 @@ export const EventTagsDataSection = forwardRef( function EventTagsDataSection({event, projectSlug}: Props, ref) { const sentryTags = getSentryDefaultTags(); const hasStreamlinedUI = useHasStreamlinedUI(); - const openButtonRef = useRef(null); - const {openTagsDrawer} = useGroupTagsDrawer({ - projectSlug: projectSlug, - groupId: event.groupID!, - openButtonRef: openButtonRef, - }); + const location = useLocation(); const [tagFilter, setTagFilter] = useState(TagFilter.ALL); const handleTagFilterChange = useCallback((value: TagFilter) => { @@ -61,10 +57,17 @@ export const EventTagsDataSection = forwardRef( const actions = 
( - {hasStreamlinedUI && ( - + )} ({ + x: 0, + y: 0, + width: 0, + height: 30, + left: 0, + top: 0, + right: 0, + bottom: 0, + toJSON: jest.fn(), +})); + +describe('EventFeatureFlagList', function () { + it('renders a list of feature flags with a button to view all', async function () { + render(); + + for (const {flag, result} of MOCK_FLAGS) { + if (result) { + expect(screen.getByText(flag)).toBeInTheDocument(); + } + } + + // When expanded, all should be visible + const viewAllButton = screen.getByRole('button', {name: 'View All'}); + await userEvent.click(viewAllButton); + const drawer = screen.getByRole('complementary', {name: 'Feature flags drawer'}); + expect(drawer).toBeInTheDocument(); + for (const {flag, result} of MOCK_FLAGS) { + if (result) { + expect(screen.getAllByText(flag)[0]).toBeInTheDocument(); + } + } + }); + + it('toggles the drawer when view all is clicked', async function () { + render(); + const viewAllButton = screen.getByRole('button', {name: 'View All'}); + await userEvent.click(viewAllButton); + const drawer = screen.getByRole('complementary', {name: 'Feature flags drawer'}); + expect(drawer).toBeInTheDocument(); + await userEvent.click(viewAllButton); + await waitForDrawerToHide('Feature flags drawer'); + expect(drawer).not.toBeInTheDocument(); + }); + + it('opens the drawer and focuses search when the search button is pressed', async function () { + render(); + + const control = screen.getByRole('button', {name: 'Open Feature Flag Search'}); + expect(control).toBeInTheDocument(); + await userEvent.click(control); + expect( + screen.getByRole('complementary', {name: 'Feature flags drawer'}) + ).toBeInTheDocument(); + const drawerControl = screen.getByRole('textbox', { + name: 'Search Flags', + }); + expect(drawerControl).toBeInTheDocument(); + expect(drawerControl).toHaveFocus(); + }); + + it('renders a flag granular sort dropdown with Newest as the default', async function () { + render(); + + const control = screen.getByRole('button', {name: 'Newest'}); + expect(control).toBeInTheDocument(); + await userEvent.click(control); + expect(screen.getByRole('option', {name: 'Oldest'})).toBeInTheDocument(); + }); + + it('renders a sort group dropdown with Evaluation Order as the default', async function () { + render(); + + const control = screen.getByRole('button', {name: 'Evaluation Order'}); + expect(control).toBeInTheDocument(); + await userEvent.click(control); + expect(screen.getByRole('option', {name: 'Evaluation Order'})).toBeInTheDocument(); + expect(screen.getByRole('option', {name: 'Alphabetical'})).toBeInTheDocument(); + }); + + it('renders a sort group dropdown which affects the granular sort dropdown', async function () { + render(); + + const control = screen.getByRole('button', {name: 'Evaluation Order'}); + expect(control).toBeInTheDocument(); + await userEvent.click(control); + await userEvent.click(screen.getByRole('option', {name: 'Alphabetical'})); + expect(screen.getByRole('button', {name: 'A-Z'})).toBeInTheDocument(); + }); + + it('allows sort dropdown to affect displayed flags', async function () { + render(); + + const [webVitalsFlag, enableReplay] = MOCK_FLAGS.filter(f => f.result === true); + + // the flags are reversed by default, so webVitalsFlag should be below enableReplay + expect( + screen + .getByText(webVitalsFlag.flag) + .compareDocumentPosition(screen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_FOLLOWING); + + // the sort should be reversed + const sortControl = screen.getByRole('button', { + name: 'Newest', + 
}); + await userEvent.click(sortControl); + await userEvent.click(screen.getByRole('option', {name: 'Oldest'})); + + expect( + screen + .getByText(webVitalsFlag.flag) + .compareDocumentPosition(screen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_PRECEDING); + + const sortGroupControl = screen.getByRole('button', { + name: 'Evaluation Order', + }); + await userEvent.click(sortGroupControl); + await userEvent.click(screen.getByRole('option', {name: 'Alphabetical'})); + await userEvent.click(sortControl); + await userEvent.click(screen.getByRole('option', {name: 'Z-A'})); + + // webVitalsFlag comes after enableReplay alphabetically + expect( + screen + .getByText(webVitalsFlag.flag) + .compareDocumentPosition(screen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_PRECEDING); + }); +}); diff --git a/static/app/components/events/featureFlags/eventFeatureFlagList.tsx b/static/app/components/events/featureFlags/eventFeatureFlagList.tsx index 9ce421ed1eba7..30c1e9977d9ae 100644 --- a/static/app/components/events/featureFlags/eventFeatureFlagList.tsx +++ b/static/app/components/events/featureFlags/eventFeatureFlagList.tsx @@ -6,12 +6,18 @@ import {CompactSelect} from 'sentry/components/compactSelect'; import DropdownButton from 'sentry/components/dropdownButton'; import ErrorBoundary from 'sentry/components/errorBoundary'; import { + ALPHA_OPTIONS, CardContainer, + EVAL_ORDER_OPTIONS, FeatureFlagDrawer, - FLAG_SORT_OPTIONS, FlagControlOptions, FlagSort, - getLabel, + getDefaultFlagSort, + getFlagSortLabel, + getSortGroupLabel, + SORT_GROUP_OPTIONS, + sortedFlags, + SortGroup, } from 'sentry/components/events/featureFlags/featureFlagDrawer'; import useDrawer from 'sentry/components/globalDrawer'; import KeyValueData, { @@ -25,6 +31,7 @@ import type {Project} from 'sentry/types/project'; import {trackAnalytics} from 'sentry/utils/analytics'; import {useFeedbackForm} from 'sentry/utils/useFeedbackForm'; import useOrganization from 'sentry/utils/useOrganization'; +import {SectionKey} from 'sentry/views/issueDetails/streamline/context'; import {InterimSection} from 'sentry/views/issueDetails/streamline/interimSection'; export function EventFeatureFlagList({ @@ -56,7 +63,8 @@ export function EventFeatureFlagList({ ) : null; - const [sortMethod, setSortMethod] = useState(FlagSort.NEWEST); + const [flagSort, setFlagSort] = useState(FlagSort.NEWEST); + const [sortGroup, setSortGroup] = useState(SortGroup.EVAL_ORDER); const {closeDrawer, isDrawerOpen, openDrawer} = useDrawer(); const viewAllButtonRef = useRef(null); const organization = useOrganization(); @@ -79,19 +87,6 @@ export function EventFeatureFlagList({ [event] ); - const handleSortAlphabetical = (flags: KeyValueDataContentProps[]) => { - return [...flags].sort((a, b) => { - return a.item.key.localeCompare(b.item.key); - }); - }; - - const sortedFlags = - sortMethod === FlagSort.ALPHA - ? handleSortAlphabetical(hydratedFlags) - : sortMethod === FlagSort.OLDEST - ? 
[...hydratedFlags].reverse() - : hydratedFlags; - const onViewAllFlags = useCallback( (focusControl?: FlagControlOptions) => { trackAnalytics('flags.view-all-clicked', { @@ -104,7 +99,8 @@ export function EventFeatureFlagList({ event={event} project={project} hydratedFlags={hydratedFlags} - initialSort={sortMethod} + initialSortGroup={sortGroup} + initialFlagSort={flagSort} focusControl={focusControl} /> ), @@ -123,7 +119,7 @@ export function EventFeatureFlagList({ } ); }, - [openDrawer, event, group, project, sortMethod, hydratedFlags, organization] + [openDrawer, event, group, project, hydratedFlags, organization, flagSort, sortGroup] ); if (!hydratedFlags.length) { @@ -152,13 +148,29 @@ export function EventFeatureFlagList({ {t('View All')} { + setFlagSort(getDefaultFlagSort(selection.value)); + setSortGroup(selection.value); + }} + trigger={triggerProps => ( + + {getSortGroupLabel(sortGroup)} + + )} + /> + { - setSortMethod(selection.value); + setFlagSort(selection.value); trackAnalytics('flags.sort-flags', { organization, sortMethod: selection.value, @@ -166,7 +178,7 @@ export function EventFeatureFlagList({ }} trigger={triggerProps => ( }> - {getLabel(sortMethod)} + {getFlagSortLabel(flagSort)} )} /> @@ -174,7 +186,7 @@ export function EventFeatureFlagList({ ); // Split the flags list into two columns for display - const truncatedItems = sortedFlags.slice(0, 20); + const truncatedItems = sortedFlags({flags: hydratedFlags, sort: flagSort}).slice(0, 20); const columnOne = truncatedItems.slice(0, 10); let columnTwo: typeof truncatedItems = []; if (truncatedItems.length > 10) { @@ -185,11 +197,11 @@ export function EventFeatureFlagList({ diff --git a/static/app/components/events/featureFlags/featureFlagDrawer.spec.tsx b/static/app/components/events/featureFlags/featureFlagDrawer.spec.tsx new file mode 100644 index 0000000000000..89a2fbbcd9d5b --- /dev/null +++ b/static/app/components/events/featureFlags/featureFlagDrawer.spec.tsx @@ -0,0 +1,113 @@ +import {render, screen, userEvent, within} from 'sentry-test/reactTestingLibrary'; + +import {EventFeatureFlagList} from 'sentry/components/events/featureFlags/eventFeatureFlagList'; +import { + MOCK_DATA_SECTION_PROPS, + MOCK_FLAGS, +} from 'sentry/components/events/featureFlags/testUtils'; + +async function renderFlagDrawer() { + // Needed to mock useVirtualizer lists. 
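+  // jsdom does no real layout, so getBoundingClientRect is stubbed to return a nonzero height for the rows.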
+ jest + .spyOn(window.Element.prototype, 'getBoundingClientRect') + .mockImplementation(() => ({ + x: 0, + y: 0, + width: 0, + height: 30, + left: 0, + top: 0, + right: 0, + bottom: 0, + toJSON: jest.fn(), + })); + render(); + await userEvent.click(screen.getByRole('button', {name: 'View All'})); + return within(screen.getByRole('complementary', {name: 'Feature flags drawer'})); +} + +describe('FeatureFlagDrawer', function () { + it('renders the drawer as expected', async function () { + const drawerScreen = await renderFlagDrawer(); + expect(drawerScreen.getByRole('button', {name: 'Close Drawer'})).toBeInTheDocument(); + + // Inner drawer flags + const {event, group} = MOCK_DATA_SECTION_PROPS; + expect(drawerScreen.getByText(group.shortId)).toBeInTheDocument(); + expect(drawerScreen.getByText(event.id.slice(0, 8))).toBeInTheDocument(); + expect( + drawerScreen.getByText('Feature Flags', {selector: 'span'}) + ).toBeInTheDocument(); + + // Header & Controls + expect(drawerScreen.getByText('Feature Flags', {selector: 'h3'})).toBeInTheDocument(); + expect(drawerScreen.getByRole('textbox', {name: 'Search Flags'})).toBeInTheDocument(); + expect(drawerScreen.getByRole('button', {name: 'Newest'})).toBeInTheDocument(); + expect( + drawerScreen.getByRole('button', {name: 'Evaluation Order'}) + ).toBeInTheDocument(); + + // Contents + for (const {flag, result} of MOCK_FLAGS) { + expect(drawerScreen.getByText(flag)).toBeInTheDocument(); + expect(drawerScreen.getAllByText(result.toString())[0]).toBeInTheDocument(); + } + }); + + it('allows search to affect displayed flags', async function () { + const drawerScreen = await renderFlagDrawer(); + + const [webVitalsFlag, enableReplay] = MOCK_FLAGS.filter(f => f.result === true); + expect(drawerScreen.getByText(webVitalsFlag.flag)).toBeInTheDocument(); + expect(drawerScreen.getByText(enableReplay.flag)).toBeInTheDocument(); + + const searchInput = drawerScreen.getByRole('textbox', { + name: 'Search Flags', + }); + await userEvent.type(searchInput, webVitalsFlag.flag); + + expect(drawerScreen.getByText(webVitalsFlag.flag)).toBeInTheDocument(); + expect(drawerScreen.queryByText(enableReplay.flag)).not.toBeInTheDocument(); + }); + + it('allows sort dropdown to affect displayed flags', async function () { + const drawerScreen = await renderFlagDrawer(); + + const [webVitalsFlag, enableReplay] = MOCK_FLAGS.filter(f => f.result === true); + + // the flags are reversed by default, so webVitalsFlag should be below enableReplay + expect( + drawerScreen + .getByText(webVitalsFlag.flag) + .compareDocumentPosition(drawerScreen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_FOLLOWING); + + // the sort should be reversed + const sortControl = drawerScreen.getByRole('button', { + name: 'Newest', + }); + await userEvent.click(sortControl); + await userEvent.click(drawerScreen.getByRole('option', {name: 'Oldest'})); + + expect( + drawerScreen + .getByText(webVitalsFlag.flag) + .compareDocumentPosition(drawerScreen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_PRECEDING); + + const sortGroupControl = drawerScreen.getByRole('button', { + name: 'Evaluation Order', + }); + await userEvent.click(sortGroupControl); + await userEvent.click(drawerScreen.getByRole('option', {name: 'Alphabetical'})); + await userEvent.click(sortControl); + await userEvent.click(drawerScreen.getByRole('option', {name: 'Z-A'})); + + // webVitalsFlag comes after enableReplay alphabetically + expect( + drawerScreen + .getByText(webVitalsFlag.flag) + 
.compareDocumentPosition(drawerScreen.getByText(enableReplay.flag)) + ).toBe(document.DOCUMENT_POSITION_PRECEDING); + }); +}); diff --git a/static/app/components/events/featureFlags/featureFlagDrawer.tsx b/static/app/components/events/featureFlags/featureFlagDrawer.tsx index b15d33186e1f6..3772bf580fa1f 100644 --- a/static/app/components/events/featureFlags/featureFlagDrawer.tsx +++ b/static/app/components/events/featureFlags/featureFlagDrawer.tsx @@ -34,33 +34,73 @@ import useOrganization from 'sentry/utils/useOrganization'; export enum FlagSort { NEWEST = 'newest', OLDEST = 'oldest', - ALPHA = 'alphabetical', + A_TO_Z = 'a-z', + Z_TO_A = 'z-a', } -export const getLabel = (sort: string) => { +export enum SortGroup { + EVAL_ORDER = 'eval', + ALPHABETICAL = 'alphabetical', +} + +export const getFlagSortLabel = (sort: string) => { switch (sort) { + case FlagSort.A_TO_Z: + return t('A-Z'); + case FlagSort.Z_TO_A: + return t('Z-A'); case FlagSort.OLDEST: - return t('Oldest First'); - case FlagSort.ALPHA: - return t('Alphabetical'); + return t('Oldest'); case FlagSort.NEWEST: default: - return t('Newest First'); + return t('Newest'); + } +}; + +export const getSortGroupLabel = (sort: string) => { + switch (sort) { + case SortGroup.ALPHABETICAL: + return t('Alphabetical'); + case SortGroup.EVAL_ORDER: + default: + return t('Evaluation Order'); } }; -export const FLAG_SORT_OPTIONS = [ +export const getDefaultFlagSort = (sortGroup: SortGroup) => { + return sortGroup === SortGroup.EVAL_ORDER ? FlagSort.NEWEST : FlagSort.A_TO_Z; +}; + +export const SORT_GROUP_OPTIONS = [ + { + label: getSortGroupLabel(SortGroup.EVAL_ORDER), + value: SortGroup.EVAL_ORDER, + }, + { + label: getSortGroupLabel(SortGroup.ALPHABETICAL), + value: SortGroup.ALPHABETICAL, + }, +]; + +export const EVAL_ORDER_OPTIONS = [ { - label: getLabel(FlagSort.NEWEST), + label: getFlagSortLabel(FlagSort.NEWEST), value: FlagSort.NEWEST, }, { - label: getLabel(FlagSort.OLDEST), + label: getFlagSortLabel(FlagSort.OLDEST), value: FlagSort.OLDEST, }, +]; + +export const ALPHA_OPTIONS = [ + { + label: getFlagSortLabel(FlagSort.A_TO_Z), + value: FlagSort.A_TO_Z, + }, { - label: getLabel(FlagSort.ALPHA), - value: FlagSort.ALPHA, + label: getFlagSortLabel(FlagSort.Z_TO_A), + value: FlagSort.Z_TO_A, }, ]; @@ -69,11 +109,37 @@ export const enum FlagControlOptions { SORT = 'sort', } +export const handleSortAlphabetical = (flags: KeyValueDataContentProps[]) => { + return [...flags].sort((a, b) => { + return a.item.key.localeCompare(b.item.key); + }); +}; + +export const sortedFlags = ({ + flags, + sort, +}: { + flags: KeyValueDataContentProps[]; + sort: FlagSort; +}): KeyValueDataContentProps[] => { + switch (sort) { + case FlagSort.A_TO_Z: + return handleSortAlphabetical(flags); + case FlagSort.Z_TO_A: + return [...handleSortAlphabetical(flags)].reverse(); + case FlagSort.OLDEST: + return [...flags].reverse(); + default: + return flags; + } +}; + interface FlagDrawerProps { event: Event; group: Group; hydratedFlags: KeyValueDataContentProps[]; - initialSort: FlagSort; + initialFlagSort: FlagSort; + initialSortGroup: SortGroup; project: Project; focusControl?: FlagControlOptions; } @@ -82,28 +148,20 @@ export function FeatureFlagDrawer({ group, event, project, - initialSort, + initialFlagSort, + initialSortGroup, hydratedFlags, focusControl: initialFocusControl, }: FlagDrawerProps) { - const [sortMethod, setSortMethod] = useState(initialSort); + const [sortGroup, setSortGroup] = useState(initialSortGroup); + const [flagSort, setFlagSort] = 
useState(initialFlagSort); const [search, setSearch] = useState(''); const organization = useOrganization(); const {getFocusProps} = useFocusControl(initialFocusControl); - const handleSortAlphabetical = (flags: KeyValueDataContentProps[]) => { - return [...flags].sort((a, b) => { - return a.item.key.localeCompare(b.item.key); - }); - }; - - const sortedFlags = - sortMethod === FlagSort.ALPHA - ? handleSortAlphabetical(hydratedFlags) - : sortMethod === FlagSort.OLDEST - ? [...hydratedFlags].reverse() - : hydratedFlags; - const searchResults = sortedFlags.filter(f => f.item.key.includes(search)); + const searchResults = sortedFlags({flags: hydratedFlags, sort: flagSort}).filter(f => + f.item.key.includes(search) + ); const actions = ( @@ -122,11 +180,29 @@ export function FeatureFlagDrawer({ { + setFlagSort(getDefaultFlagSort(selection.value)); + setSortGroup(selection.value); + }} + trigger={triggerProps => ( + + {getSortGroupLabel(sortGroup)} + + )} + /> + { - setSortMethod(selection.value); + setFlagSort(selection.value); trackAnalytics('flags.sort-flags', { organization, sortMethod: selection.value, @@ -134,11 +210,9 @@ export function FeatureFlagDrawer({ }} trigger={triggerProps => ( }> - {getLabel(sortMethod)} + {getFlagSortLabel(flagSort)} )} - value={sortMethod} - options={FLAG_SORT_OPTIONS} /> ); diff --git a/static/app/components/events/featureFlags/testUtils.tsx b/static/app/components/events/featureFlags/testUtils.tsx new file mode 100644 index 0000000000000..cdc932dab1f62 --- /dev/null +++ b/static/app/components/events/featureFlags/testUtils.tsx @@ -0,0 +1,33 @@ +import {EventFixture} from 'sentry-fixture/event'; +import {GroupFixture} from 'sentry-fixture/group'; +import {ProjectFixture} from 'sentry-fixture/project'; + +import type {FeatureFlag} from 'sentry/types/event'; + +export const MOCK_FLAGS: FeatureFlag[] = [ + { + flag: 'mobile-replay-ui', + result: false, + }, + { + flag: 'web-vitals-ui', + result: true, + }, + { + flag: 'enable-replay', + result: true, + }, + { + flag: 'secret-feature', + result: false, + }, +]; + +export const MOCK_DATA_SECTION_PROPS = { + event: EventFixture({ + id: 'abc123def456ghi789jkl', + contexts: {flags: {values: MOCK_FLAGS}}, + }), + project: ProjectFixture(), + group: GroupFixture(), +}; diff --git a/static/app/components/events/highlights/highlightsDataSection.spec.tsx b/static/app/components/events/highlights/highlightsDataSection.spec.tsx index 24e302f41d3ef..9620fb5336d70 100644 --- a/static/app/components/events/highlights/highlightsDataSection.spec.tsx +++ b/static/app/components/events/highlights/highlightsDataSection.spec.tsx @@ -1,5 +1,4 @@ import {EventFixture} from 'sentry-fixture/event'; -import {GroupFixture} from 'sentry-fixture/group'; import {OrganizationFixture} from 'sentry-fixture/organization'; import {ProjectFixture} from 'sentry-fixture/project'; @@ -18,7 +17,6 @@ import * as analytics from 'sentry/utils/analytics'; describe('HighlightsDataSection', function () { const organization = OrganizationFixture(); const project = ProjectFixture(); - const group = GroupFixture(); const event = EventFixture({ contexts: TEST_EVENT_CONTEXTS, tags: TEST_EVENT_TAGS, @@ -57,7 +55,6 @@ describe('HighlightsDataSection', function () { event={event} project={project} viewAllRef={{current: null}} - groupId={group.id} />, {organization} ); @@ -92,7 +89,7 @@ describe('HighlightsDataSection', function () { body: {}, }); - render(, { + render(, { organization, }); expect(screen.getByText('Event Highlights')).toBeInTheDocument(); diff --git 
a/static/app/components/events/highlights/highlightsDataSection.tsx b/static/app/components/events/highlights/highlightsDataSection.tsx index 36471ca5a93ef..f27866b247ae4 100644 --- a/static/app/components/events/highlights/highlightsDataSection.tsx +++ b/static/app/components/events/highlights/highlightsDataSection.tsx @@ -35,14 +35,12 @@ import theme from 'sentry/utils/theme'; import {useDetailedProject} from 'sentry/utils/useDetailedProject'; import {useLocation} from 'sentry/utils/useLocation'; import useOrganization from 'sentry/utils/useOrganization'; -import {useGroupTagsDrawer} from 'sentry/views/issueDetails/groupTags/useGroupTagsDrawer'; import {SectionKey} from 'sentry/views/issueDetails/streamline/context'; import {InterimSection} from 'sentry/views/issueDetails/streamline/interimSection'; import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; interface HighlightsDataSectionProps { event: Event; - groupId: string; project: Project; viewAllRef?: React.RefObject; } @@ -255,34 +253,23 @@ function HighlightsData({ export default function HighlightsDataSection({ viewAllRef, event, - groupId, project, }: HighlightsDataSectionProps) { const organization = useOrganization(); const hasStreamlinedUI = useHasStreamlinedUI(); - const openButtonRef = useRef(null); - const {openTagsDrawer} = useGroupTagsDrawer({ - groupId, - openButtonRef, - projectSlug: project.slug, - }); - const viewAllButton = hasStreamlinedUI ? ( - // Streamline details ui has "Jump to" feature, instead we'll show the drawer button - - ) : viewAllRef ? ( - - ) : null; + const viewAllButton = + !hasStreamlinedUI && viewAllRef ? ( + + ) : null; return ( ( {item.icon} - {item.title} - {item.subtitle} + + {item.title} + {item.subtitle && {item.subtitle}} + ))} @@ -61,32 +63,32 @@ export function HighlightsIconSummary({event}: HighlightsIconSummaryProps) { const IconBar = styled('div')` position: relative; - padding: ${space(2)} ${space(0.5)}; + padding: ${space(1)} ${space(0.5)}; `; const IconSummary = styled('div')` + display: flex; + align-items: center; + gap: ${space(1)}; flex: none; - display: grid; - grid-template: 1fr 1fr / auto 1fr; - grid-column-gap: ${space(1)}; - grid-row-gap: ${space(0.5)}; +`; + +const IconDescription = styled('div')` + display: flex; + flex-direction: column; + gap: ${space(0.5)}; `; const IconWrapper = styled('div')` - grid-area: 1 / 1 / 3 / 2; - align-self: center; + flex: none; `; const IconTitle = styled('div')` - grid-area: 1 / 2 / 2 / 3; - align-self: self-end; line-height: 1; `; const IconSubtitle = styled('div')` - grid-area: 2 / 2 / 3 / 3; color: ${p => p.theme.subText}; font-size: ${p => p.theme.fontSizeSmall}; line-height: 1; - align-self: self-start; `; diff --git a/static/app/components/events/interfaces/crashContent/exception/actionableItems.tsx b/static/app/components/events/interfaces/crashContent/exception/actionableItems.tsx index 55057f2f425df..0b27a0292ced1 100644 --- a/static/app/components/events/interfaces/crashContent/exception/actionableItems.tsx +++ b/static/app/components/events/interfaces/crashContent/exception/actionableItems.tsx @@ -114,6 +114,24 @@ export function getErrorMessage( meta: metaData, }, ]; + case NativeProcessingErrors.NATIVE_SYMBOLICATOR_FAILED: + return [ + { + title: t('Failed to process native stacktraces'), + desc: null, + data: errorData, + meta: metaData, + }, + ]; + case NativeProcessingErrors.NATIVE_INTERNAL_FAILURE: + return [ + { + title: t('Internal failure when attempting to symbolicate'), + desc: null, + data: errorData, + 
meta: metaData, + }, + ]; case JavascriptProcessingErrors.JS_MISSING_SOURCES_CONTENT: return [ { diff --git a/static/app/components/events/interfaces/crashContent/exception/actionableItemsUtils.tsx b/static/app/components/events/interfaces/crashContent/exception/actionableItemsUtils.tsx index 4e043cfd29dbb..5c425266bf457 100644 --- a/static/app/components/events/interfaces/crashContent/exception/actionableItemsUtils.tsx +++ b/static/app/components/events/interfaces/crashContent/exception/actionableItemsUtils.tsx @@ -35,6 +35,8 @@ export type ActionableItemTypes = export const ActionableItemWarning = [ ProguardProcessingErrors.PROGUARD_MISSING_LINENO, NativeProcessingErrors.NATIVE_MISSING_OPTIONALLY_BUNDLED_DSYM, + NativeProcessingErrors.NATIVE_SYMBOLICATOR_FAILED, + NativeProcessingErrors.NATIVE_INTERNAL_FAILURE, GenericSchemaErrors.FUTURE_TIMESTAMP, GenericSchemaErrors.CLOCK_DRIFT, GenericSchemaErrors.PAST_TIMESTAMP, @@ -66,6 +68,14 @@ interface NativeBadDSYMError extends BaseActionableItem { type: NativeProcessingErrors.NATIVE_BAD_DSYM; } +interface NativeSymbolicatorFailedError extends BaseActionableItem { + type: NativeProcessingErrors.NATIVE_SYMBOLICATOR_FAILED; +} + +interface NativeInternalFailureError extends BaseActionableItem { + type: NativeProcessingErrors.NATIVE_INTERNAL_FAILURE; +} + interface JSMissingSourcesContentError extends BaseActionableItem { type: JavascriptProcessingErrors.JS_MISSING_SOURCES_CONTENT; } @@ -109,6 +119,8 @@ export type ActionableItemErrors = | NativeMissingOptionalBundledDSYMError | NativeMissingDSYMError | NativeBadDSYMError + | NativeSymbolicatorFailedError + | NativeInternalFailureError | JSMissingSourcesContentError | FetchGenericError | RestrictedIpError diff --git a/static/app/components/events/interfaces/crashContent/stackTrace/content.tsx b/static/app/components/events/interfaces/crashContent/stackTrace/content.tsx index a73b37dc86a8c..48060ffd77a21 100644 --- a/static/app/components/events/interfaces/crashContent/stackTrace/content.tsx +++ b/static/app/components/events/interfaces/crashContent/stackTrace/content.tsx @@ -83,7 +83,7 @@ function Content({ } function setInitialFrameMap(): {[frameIndex: number]: boolean} { - const indexMap = {}; + const indexMap: Record = {}; (data.frames ?? []).forEach((frame, frameIdx) => { const nextFrame = (data.frames ?? [])[frameIdx + 1]; const repeatedFrame = isRepeatedFrame(frame, nextFrame); @@ -96,7 +96,7 @@ function Content({ function getInitialFrameCounts(): {[frameIndex: number]: number} { let count = 0; - const countMap = {}; + const countMap: Record = {}; (data.frames ?? []).forEach((frame, frameIdx) => { const nextFrame = (data.frames ?? 
[])[frameIdx + 1]; const repeatedFrame = isRepeatedFrame(frame, nextFrame); diff --git a/static/app/components/events/interfaces/crashContent/stackTrace/platformIcon.tsx b/static/app/components/events/interfaces/crashContent/stackTrace/platformIcon.tsx index 8ed1863a3adae..26bcfaef6af6e 100644 --- a/static/app/components/events/interfaces/crashContent/stackTrace/platformIcon.tsx +++ b/static/app/components/events/interfaces/crashContent/stackTrace/platformIcon.tsx @@ -1,15 +1,18 @@ import styled from '@emotion/styled'; import {PlatformIcon} from 'platformicons'; +import {useHasStreamlinedUI} from 'sentry/views/issueDetails/utils'; + type Props = { platform: string; }; function StacktracePlatformIcon({platform}: Props) { + const hasStreamlineUi = useHasStreamlinedUI(); return ( @@ -19,7 +22,7 @@ function StacktracePlatformIcon({platform}: Props) { const StyledPlatformIcon = styled(PlatformIcon)` position: absolute; top: 0; - left: -20px; + left: -${p => p.size}; border-radius: 3px 0 0 3px; @media (max-width: ${p => p.theme.breakpoints.medium}) { diff --git a/static/app/components/events/interfaces/debugMeta/debugImageDetails/candidate/actions.tsx b/static/app/components/events/interfaces/debugMeta/debugImageDetails/candidate/actions.tsx index e5eef7c7bac66..8ac2c2f4d623b 100644 --- a/static/app/components/events/interfaces/debugMeta/debugImageDetails/candidate/actions.tsx +++ b/static/app/components/events/interfaces/debugMeta/debugImageDetails/candidate/actions.tsx @@ -2,7 +2,7 @@ import {Fragment} from 'react'; import styled from '@emotion/styled'; import Access from 'sentry/components/acl/access'; -import {Role} from 'sentry/components/acl/role'; +import {useRole} from 'sentry/components/acl/useRole'; import MenuItemActionLink from 'sentry/components/actions/menuItemActionLink'; import {Button, LinkButton} from 'sentry/components/button'; import ButtonBar from 'sentry/components/buttonBar'; @@ -45,6 +45,7 @@ function Actions({ }: Props) { const {download, location: debugFileId} = candidate; const {status} = download; + const {hasRole} = useRole({role: 'debugFilesRole'}); if (!debugFileId || !isInternalSource) { return null; @@ -54,82 +55,78 @@ function Actions({ const downloadUrl = `${baseUrl}/projects/${organization.slug}/${projSlug}/files/dsyms/?id=${debugFileId}`; const actions = ( - - {({hasRole}) => ( - - {({hasAccess}) => ( - - } - /> - } - anchorRight + + {({hasAccess}) => ( + + } + /> + } + anchorRight + > + + } + href={downloadUrl} + onClick={event => { + if (deleted) { + event.preventDefault(); + } + }} + disabled={!hasRole || deleted} > - - } - href={downloadUrl} - onClick={event => { - if (deleted) { - event.preventDefault(); - } - }} - disabled={!hasRole || deleted} - > - {t('Download')} - - - - onDelete(debugFileId)} - message={debugFileDeleteConfirmationInfo} - disabled={!hasAccess || deleted} - shouldConfirm - > - {t('Delete')} - - - - - - } - href={downloadUrl} - disabled={!hasRole} - > - {t('Download')} - - - - onDelete(debugFileId)} - disabled={!hasAccess} - > -
{activeThread?.state && ( -
+ {t('Thread State')} - {getThreadStateIcon(threadStateDisplay)} - {threadStateDisplay} + + {threadStateDisplay} {threadStateDisplay && ( )} {getLockReason(activeThread?.heldLocks)} -
+ )} {!hideThreadTags && ( @@ -350,6 +355,19 @@ export function Threads({data, event, projectSlug, groupingCurrentLevel}: Props) )} {renderContent(childrenProps)} + {hasStreamlinedUI && group && ( + + + + )}
); }} @@ -373,22 +391,21 @@ export function Threads({data, event, projectSlug, groupingCurrentLevel}: Props) const Grid = styled('div')` display: grid; grid-template-columns: auto 1fr; + gap: ${space(2)}; +`; + +const TheadStateContainer = styled('div')` + ${p => p.theme.overflowEllipsis} `; const ThreadStateWrapper = styled('div')` display: flex; position: relative; flex-direction: row; - align-items: flex-start; + align-items: center; gap: ${space(0.5)}; `; -const ThreadState = styled(TextOverflow)` - max-width: 100%; - text-align: left; - font-weight: ${p => p.theme.fontWeightBold}; -`; - const LockReason = styled(TextOverflow)` font-weight: ${p => p.theme.fontWeightNormal}; color: ${p => p.theme.gray300}; diff --git a/static/app/components/events/meta/annotatedText/redaction.tsx b/static/app/components/events/meta/annotatedText/redaction.tsx index e07937d1a33b8..825fc050472aa 100644 --- a/static/app/components/events/meta/annotatedText/redaction.tsx +++ b/static/app/components/events/meta/annotatedText/redaction.tsx @@ -2,6 +2,5 @@ import styled from '@emotion/styled'; export const Redaction = styled('span')<{withoutBackground?: boolean}>` cursor: default; - vertical-align: middle; ${p => !p.withoutBackground && `background: rgba(255, 0, 0, 0.05);`} `; diff --git a/static/app/components/events/suspectCommits.spec.tsx b/static/app/components/events/suspectCommits.spec.tsx index 99bdea50a4104..19a7174fb34cf 100644 --- a/static/app/components/events/suspectCommits.spec.tsx +++ b/static/app/components/events/suspectCommits.spec.tsx @@ -68,12 +68,16 @@ describe('SuspectCommits', function () { committers, }, }); + MockApiClient.addMockResponse({ + url: `/organizations/${organization.slug}/projects/`, + body: [project], + }); }); it('Renders base commit row', async function () { render( ; eventId: string; - project: AvatarProject; + projectSlug: Project['slug']; group?: Group; } -export function SuspectCommits({group, eventId, project, commitRow: CommitRow}: Props) { +export function SuspectCommits({ + group, + eventId, + projectSlug, + commitRow: CommitRow, +}: Props) { const organization = useOrganization(); const [isExpanded, setIsExpanded] = useState(false); + const project = useProjectFromSlug({organization, projectSlug}); const {data} = useCommitters({ eventId, - projectSlug: project.slug, + projectSlug, }); const committers = data?.committers ?? []; @@ -66,7 +71,7 @@ export function SuspectCommits({group, eventId, project, commitRow: CommitRow}: const handlePullRequestClick = (commit: Commit, commitIndex: number) => { trackAnalytics('issue_details.suspect_commits.pull_request_clicked', { organization, - project_id: parseInt(project.id as string, 10), + project_id: parseInt(project?.id as string, 10), suspect_commit_calculation: commit.suspectCommitType ?? 'unknown', suspect_commit_index: commitIndex, ...getAnalyticsDataForGroup(group), @@ -76,7 +81,7 @@ export function SuspectCommits({group, eventId, project, commitRow: CommitRow}: const handleCommitClick = (commit: Commit, commitIndex: number) => { trackAnalytics('issue_details.suspect_commits.commit_clicked', { organization, - project_id: parseInt(project.id as string, 10), + project_id: parseInt(project?.id as string, 10), has_pull_request: commit.pullRequest?.id !== undefined, suspect_commit_calculation: commit.suspectCommitType ?? 'unknown', suspect_commit_index: commitIndex, @@ -88,7 +93,7 @@ export function SuspectCommits({group, eventId, project, commitRow: CommitRow}: return hasStreamlinedUI ? 
( - + {commits.slice(0, 100).map((commit, commitIndex) => ( {t('Suspect Commit')} @@ -101,9 +106,6 @@ export function SuspectCommits({group, eventId, project, commitRow: CommitRow}: project={project} /> - - - ))} @@ -166,27 +168,6 @@ const StreamlinedPanel = styled(Panel)` margin-bottom: 0; width: 100%; min-width: 85%; - &:last-child { - margin-right: ${space(1.5)}; - } - &:first-child { - margin-left: ${space(1.5)}; - } -`; - -const IllustrationContainer = styled('div')` - position: absolute; - top: 0px; - right: 50px; - - @media (max-width: ${p => p.theme.breakpoints.xlarge}) { - display: none; - pointer-events: none; - } -`; - -const Illustration = styled('img')` - height: 110px; `; const SuspectCommitWrapper = styled('div')` diff --git a/static/app/components/events/userFeedback/userFeedbackDrawer.tsx b/static/app/components/events/userFeedback/userFeedbackDrawer.tsx new file mode 100644 index 0000000000000..8723a8f44d149 --- /dev/null +++ b/static/app/components/events/userFeedback/userFeedbackDrawer.tsx @@ -0,0 +1,70 @@ +import styled from '@emotion/styled'; + +import ProjectAvatar from 'sentry/components/avatar/projectAvatar'; +import { + CrumbContainer, + EventDrawerBody, + EventDrawerContainer, + EventDrawerHeader, + EventNavigator, + Header, + NavigationCrumbs, + ShortId, +} from 'sentry/components/events/eventDrawer'; +import {Body} from 'sentry/components/layouts/thirds'; +import {t} from 'sentry/locale'; +import type {Group} from 'sentry/types/group'; +import type {Project} from 'sentry/types/project'; +import {useLocation} from 'sentry/utils/useLocation'; +import useOrganization from 'sentry/utils/useOrganization'; +import usePageFilters from 'sentry/utils/usePageFilters'; +import GroupUserFeedback from 'sentry/views/issueDetails/groupUserFeedback'; + +export function UserFeedbackDrawer({group, project}: {group: Group; project: Project}) { + const location = useLocation(); + const organization = useOrganization(); + const {selection} = usePageFilters(); + const {environments} = selection; + + return ( + + + + + {group.shortId} + + ), + }, + {label: t('User Feedback')}, + ]} + /> + + +
{t('User Feedback')}</Header>
+      </EventNavigator>
+      <UserFeedbackBody>
+        <GroupUserFeedback
+          group={group}
+          project={project}
+          location={location}
+          environments={environments}
+        />
+      </UserFeedbackBody>
+    </EventDrawerContainer>
+ ); +} + +/* Disable grid from Layout styles in drawer */ +const UserFeedbackBody = styled(EventDrawerBody)` + ${Body} { + grid-template-columns: unset; + } +`; diff --git a/static/app/components/featureFeedback/feedbackModal.tsx b/static/app/components/featureFeedback/feedbackModal.tsx index a6a09a5a22926..6e42a489b38c0 100644 --- a/static/app/components/featureFeedback/feedbackModal.tsx +++ b/static/app/components/featureFeedback/feedbackModal.tsx @@ -30,6 +30,7 @@ import {defined} from 'sentry/utils'; import {useLocation} from 'sentry/utils/useLocation'; import useMedia from 'sentry/utils/useMedia'; import useProjects from 'sentry/utils/useProjects'; +import {useUser} from 'sentry/utils/useUser'; export const feedbackClient = new BrowserClient({ // feedback project under Sentry organization @@ -95,7 +96,7 @@ export function FeedbackModal({ const location = useLocation(); const theme = useTheme(); - const user = ConfigStore.get('user'); + const user = useUser(); const isSelfHosted = ConfigStore.get('isSelfHosted'); const [state, setState] = useState( props.children === undefined diff --git a/static/app/components/feedback/feedbackItem/feedbackActions.tsx b/static/app/components/feedback/feedbackItem/feedbackActions.tsx index 4a5458aa07149..ccd15fddf4597 100644 --- a/static/app/components/feedback/feedbackItem/feedbackActions.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackActions.tsx @@ -48,8 +48,17 @@ export default function FeedbackActions({ } function LargeWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { - const {isResolved, onResolveClick, isSpam, onSpamClick, hasSeen, onMarkAsReadClick} = - useFeedbackActions({feedbackItem}); + const { + disableDelete, + hasDelete, + onDelete, + isResolved, + onResolveClick, + isSpam, + onSpamClick, + hasSeen, + onMarkAsReadClick, + } = useFeedbackActions({feedbackItem}); return ( @@ -66,13 +75,27 @@ function LargeWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { + {hasDelete && ( + + )} ); } function MediumWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { - const {isResolved, onResolveClick, isSpam, onSpamClick, hasSeen, onMarkAsReadClick} = - useFeedbackActions({feedbackItem}); + const { + disableDelete, + hasDelete, + onDelete, + isResolved, + onResolveClick, + isSpam, + onSpamClick, + hasSeen, + onMarkAsReadClick, + } = useFeedbackActions({feedbackItem}); return ( @@ -103,6 +126,14 @@ function MediumWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { label: hasSeen ? 
t('Mark Unread') : t('Mark Read'), onAction: onMarkAsReadClick, }, + { + key: 'delete', + priority: 'danger' as const, + label: t('Delete'), + hidden: !hasDelete, + disabled: disableDelete, + onAction: onDelete, + }, ].filter(defined)} /> @@ -110,8 +141,17 @@ function MediumWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { } function SmallWidth({feedbackItem}: {feedbackItem: FeedbackIssue}) { - const {isResolved, onResolveClick, isSpam, onSpamClick, hasSeen, onMarkAsReadClick} = - useFeedbackActions({feedbackItem}); + const { + disableDelete, + hasDelete, + onDelete, + isResolved, + onResolveClick, + isSpam, + onSpamClick, + hasSeen, + onMarkAsReadClick, + } = useFeedbackActions({feedbackItem}); return ( ); diff --git a/static/app/components/feedback/feedbackItem/feedbackAssignedTo.tsx b/static/app/components/feedback/feedbackItem/feedbackAssignedTo.tsx index d31c3c1a3f225..dbe4818d22342 100644 --- a/static/app/components/feedback/feedbackItem/feedbackAssignedTo.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackAssignedTo.tsx @@ -52,7 +52,7 @@ export default function FeedbackAssignedTo({ projectIds: [feedbackIssue.project.id], }); - const owners = getOwnerList([], eventOwners ?? null, feedbackIssue.assignedTo); + const owners = getOwnerList([], eventOwners, feedbackIssue.assignedTo); // A new `key` will make the component re-render when showActorName changes const key = showActorName ? 'showActor' : 'hideActor'; diff --git a/static/app/components/feedback/feedbackItem/feedbackItem.tsx b/static/app/components/feedback/feedbackItem/feedbackItem.tsx index d8d2ac4f034e5..e814ba315897f 100644 --- a/static/app/components/feedback/feedbackItem/feedbackItem.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackItem.tsx @@ -97,11 +97,9 @@ export default function FeedbackItem({feedbackItem, eventData, tags}: Props) { /> {eventData ? ( - + + + ) : null}
} title={t('Tags')}> diff --git a/static/app/components/feedback/feedbackItem/feedbackItemLoader.tsx b/static/app/components/feedback/feedbackItem/feedbackItemLoader.tsx index 30da57475098d..5b2cd41bd980f 100644 --- a/static/app/components/feedback/feedbackItem/feedbackItemLoader.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackItemLoader.tsx @@ -21,11 +21,17 @@ export default function FeedbackItemLoader() { const projectSlug = useCurrentFeedbackProject(); useSentryAppComponentsData({projectId: projectSlug}); + useEffect(() => { + if (issueResult.isError) { + trackAnalytics('feedback.feedback-item-not-found', {organization, feedbackId}); + } + }, [organization, issueResult.isError, feedbackId]); + useEffect(() => { if (issueData) { trackAnalytics('feedback.feedback-item-rendered', {organization}); } - }, [organization, issueData]); + }, [issueData, organization]); // There is a case where we are done loading, but we're fetching updates // This happens when the user has seen a feedback, clicks around a bit, then diff --git a/static/app/components/feedback/feedbackItem/feedbackShortId.tsx b/static/app/components/feedback/feedbackItem/feedbackShortId.tsx index c13274c767765..48cebcffe89f4 100644 --- a/static/app/components/feedback/feedbackItem/feedbackShortId.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackShortId.tsx @@ -64,6 +64,11 @@ export default function FeedbackShortId({className, feedbackItem, style}: Props) text: feedbackItem.shortId, }); + const {onClick: handleCopyMarkdown} = useCopyToClipboard({ + text: `[${feedbackItem.shortId}](${feedbackUrl})`, + successMessage: t('Copied Markdown Feedback Link to clipboard'), + }); + return ( diff --git a/static/app/components/feedback/feedbackItem/feedbackTimestampsTooltip.tsx b/static/app/components/feedback/feedbackItem/feedbackTimestampsTooltip.tsx index cb818606c7dd8..17c20ce953749 100644 --- a/static/app/components/feedback/feedbackItem/feedbackTimestampsTooltip.tsx +++ b/static/app/components/feedback/feedbackItem/feedbackTimestampsTooltip.tsx @@ -4,16 +4,16 @@ import moment from 'moment-timezone'; import AutoSelectText from 'sentry/components/autoSelectText'; import {t} from 'sentry/locale'; -import ConfigStore from 'sentry/stores/configStore'; import {space} from 'sentry/styles/space'; import type {FeedbackIssue} from 'sentry/utils/feedback/types'; +import {useUser} from 'sentry/utils/useUser'; type Props = { feedbackItem: FeedbackIssue; }; export default function FeedbackTimestampsTooltip({feedbackItem}: Props) { - const user = ConfigStore.get('user'); + const user = useUser(); const options = user?.options ?? {}; const format = options.clock24Hours ? 'HH:mm:ss z' : 'LTS z'; const dateFirstSeen = feedbackItem.firstSeen ? 
moment(feedbackItem.firstSeen) : null; diff --git a/static/app/components/feedback/feedbackItem/messageSection.tsx b/static/app/components/feedback/feedbackItem/messageSection.tsx index 4de03b30b47c4..ec46f71fb074c 100644 --- a/static/app/components/feedback/feedbackItem/messageSection.tsx +++ b/static/app/components/feedback/feedbackItem/messageSection.tsx @@ -1,14 +1,17 @@ import {Fragment} from 'react'; import styled from '@emotion/styled'; -import {Role} from 'sentry/components/acl/role'; +import {useRole} from 'sentry/components/acl/useRole'; +import Tag from 'sentry/components/badge/tag'; import {Flex} from 'sentry/components/container/flex'; import FeedbackItemUsername from 'sentry/components/feedback/feedbackItem/feedbackItemUsername'; import FeedbackTimestampsTooltip from 'sentry/components/feedback/feedbackItem/feedbackTimestampsTooltip'; import FeedbackViewers from 'sentry/components/feedback/feedbackItem/feedbackViewers'; import {ScreenshotSection} from 'sentry/components/feedback/feedbackItem/screenshotSection'; +import ExternalLink from 'sentry/components/links/externalLink'; import TimeSince from 'sentry/components/timeSince'; -import {t} from 'sentry/locale'; +import {Tooltip} from 'sentry/components/tooltip'; +import {t, tct} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import type {Event} from 'sentry/types/event'; import type {FeedbackIssue} from 'sentry/utils/feedback/types'; @@ -21,37 +24,53 @@ interface Props { export default function MessageSection({eventData, feedbackItem}: Props) { const organization = useOrganization(); + const {hasRole} = useRole({role: 'attachmentsRole'}); const project = feedbackItem.project; + const isSpam = eventData?.occurrence?.evidenceData.isSpam; + return ( - - - ) : undefined, - overlayStyle: {maxWidth: 300}, - }} - /> + + {isSpam ? ( + + + ), + } + )} + > + {t('spam')} + + + ) : null} + + ) : undefined, + overlayStyle: {maxWidth: 300}, + }} + /> +
{feedbackItem.metadata.message}
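+        {/* The feedback message body. When the occurrence's
+            evidenceData.isSpam is set, the inline "spam" Tag above renders
+            next to the username, with a tct tooltip linking out via
+            ExternalLink. */}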
- {eventData && project ? ( - - {({hasRole}) => - hasRole ? ( - - ) : null - } - + {eventData && project && hasRole ? ( + ) : null}
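Several files in this diff (candidate/actions.tsx above, messageSection.tsx here) replace the render-prop <Role> component with the useRole hook. A minimal sketch of the migration, assuming only the hook signature visible in this diff (useRole({role}) returning {hasRole}); the AttachmentsGate wrapper itself is hypothetical:

import {useRole} from 'sentry/components/acl/useRole';

// Before, the role check nested its consumer inside a render prop:
//   <Role role={organization.attachmentsRole}>
//     {({hasRole}) => (hasRole ? children : null)}
//   </Role>
// After, the hook reads organization context itself and returns a plain
// boolean, which flattens the JSX and composes with other conditions
// (e.g. `eventData && project && hasRole` in messageSection above).
function AttachmentsGate({children}: {children: React.ReactNode}) {
  const {hasRole} = useRole({role: 'attachmentsRole'});
  return hasRole ? <>{children}</> : null;
}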
diff --git a/static/app/components/feedback/feedbackItem/traceDataSection.tsx b/static/app/components/feedback/feedbackItem/traceDataSection.tsx index 478cce007a23c..e37fce4afe4fc 100644 --- a/static/app/components/feedback/feedbackItem/traceDataSection.tsx +++ b/static/app/components/feedback/feedbackItem/traceDataSection.tsx @@ -1,13 +1,17 @@ import {useEffect} from 'react'; import Section from 'sentry/components/feedback/feedbackItem/feedbackItemSection'; +import Placeholder from 'sentry/components/placeholder'; import {IconSpan} from 'sentry/icons'; import {t} from 'sentry/locale'; import type {Event} from 'sentry/types/event'; import {trackAnalytics} from 'sentry/utils/analytics'; import useOrganization from 'sentry/utils/useOrganization'; import {TraceDataSection as IssuesTraceDataSection} from 'sentry/views/issueDetails/traceDataSection'; -import {useTraceTimelineEvents} from 'sentry/views/issueDetails/traceTimeline/useTraceTimelineEvents'; +import { + type TimelineEvent, + useTraceTimelineEvents, +} from 'sentry/views/issueDetails/traceTimeline/useTraceTimelineEvents'; /** * Doesn't require a Section wrapper. Rendered conditionally if @@ -17,11 +21,9 @@ import {useTraceTimelineEvents} from 'sentry/views/issueDetails/traceTimeline/us export default function TraceDataSection({ eventData, crashReportId, - hasProject, }: { crashReportId: string | undefined; eventData: Event; - hasProject: boolean; }) { // If there's a linked error from a crash report and only one other issue, showing both could be redundant. // TODO: we could add a jest test .spec for this ^ @@ -29,11 +31,7 @@ export default function TraceDataSection({ const {oneOtherIssueEvent, traceEvents, isLoading, isError} = useTraceTimelineEvents({ event: eventData, }); - const show = - !isLoading && - !isError && - traceEvents.length > 1 && // traceEvents include the current event. - (!hasProject || !crashReportId || oneOtherIssueEvent?.id === crashReportId); + // Note traceEvents includes the current event (feedback). useEffect(() => { if (isError) { @@ -45,23 +43,36 @@ export default function TraceDataSection({ organization, }); } - if (hasProject && !!crashReportId && oneOtherIssueEvent?.id === crashReportId) { + if (eventIsCrashReportDup(oneOtherIssueEvent, crashReportId)) { trackAnalytics('feedback.trace-section.crash-report-dup', {organization}); } } }, [ crashReportId, - hasProject, isError, isLoading, - oneOtherIssueEvent?.id, + oneOtherIssueEvent, organization, traceEvents.length, ]); - return show && organization.features.includes('user-feedback-trace-section') ? ( + return organization.features.includes('user-feedback-trace-section') && + !isError && + traceEvents.length > 1 && + !eventIsCrashReportDup(oneOtherIssueEvent, crashReportId) ? (
} title={t('Data From The Same Trace')}> - + {isLoading ? ( + + ) : ( + + )}
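+        {/* Keeping the Section mounted and swapping in a Placeholder while
+            trace events load avoids layout shift; the old `show` flag above
+            unmounted the section entirely until loading finished. */}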
) : null; } + +function eventIsCrashReportDup( + event: TimelineEvent | undefined, + crashReportId: string | undefined +) { + return !!crashReportId && event?.id === crashReportId; +} diff --git a/static/app/components/feedback/feedbackItem/useFeedbackActions.ts b/static/app/components/feedback/feedbackItem/useFeedbackActions.ts index 55c423da66900..451deeeb02b98 100644 --- a/static/app/components/feedback/feedbackItem/useFeedbackActions.ts +++ b/static/app/components/feedback/feedbackItem/useFeedbackActions.ts @@ -5,6 +5,7 @@ import { addLoadingMessage, addSuccessMessage, } from 'sentry/actionCreators/indicator'; +import {useDeleteFeedback} from 'sentry/components/feedback/useDeleteFeedback'; import useMutateFeedback from 'sentry/components/feedback/useMutateFeedback'; import {t} from 'sentry/locale'; import {GroupStatus} from 'sentry/types/group'; @@ -27,12 +28,18 @@ const mutationOptions = { export default function useFeedbackActions({feedbackItem}: Props) { const organization = useOrganization(); + const projectId = feedbackItem.project?.id; const {markAsRead, resolve} = useMutateFeedback({ feedbackIds: [feedbackItem.id], organization, projectIds: feedbackItem.project ? [feedbackItem.project.id] : [], }); + const deleteFeedback = useDeleteFeedback([feedbackItem.id], projectId); + + const hasDelete = organization.features.includes('issue-platform-deletion-ui'); + const disableDelete = !organization.access.includes('event:admin'); + const onDelete = deleteFeedback; // reuse the issues ignored category for spam feedbacks const isResolved = feedbackItem.status === GroupStatus.RESOLVED; @@ -63,6 +70,9 @@ export default function useFeedbackActions({feedbackItem}: Props) { }, [hasSeen, markAsRead]); return { + disableDelete, + hasDelete, + onDelete, isResolved, onResolveClick, isSpam, diff --git a/static/app/components/feedback/feedbackSearch.tsx b/static/app/components/feedback/feedbackSearch.tsx index bf3f7f2087559..52e55f48d702f 100644 --- a/static/app/components/feedback/feedbackSearch.tsx +++ b/static/app/components/feedback/feedbackSearch.tsx @@ -1,15 +1,11 @@ -import type {CSSProperties} from 'react'; import {useCallback, useMemo} from 'react'; -import styled from '@emotion/styled'; import orderBy from 'lodash/orderBy'; import {fetchTagValues, useFetchOrganizationTags} from 'sentry/actionCreators/tags'; import {SearchQueryBuilder} from 'sentry/components/searchQueryBuilder'; import type {FilterKeySection} from 'sentry/components/searchQueryBuilder/types'; -import SmartSearchBar from 'sentry/components/smartSearchBar'; import {t} from 'sentry/locale'; import type {Tag, TagCollection, TagValue} from 'sentry/types/group'; -import type {Organization} from 'sentry/types/organization'; import {getUtcDateString} from 'sentry/utils/dates'; import {isAggregateField} from 'sentry/utils/discover/fields'; import { @@ -86,14 +82,7 @@ function getFeedbackFilterKeys(supportedTags: TagCollection) { return Object.fromEntries(keys.map(key => [key, allTags[key]])); } -const getFilterKeySections = ( - tags: TagCollection, - organization: Organization -): FilterKeySection[] => { - if (!organization.features.includes('search-query-builder-user-feedback')) { - return []; - } - +const getFilterKeySections = (tags: TagCollection): FilterKeySection[] => { const customTags: Tag[] = Object.values(tags).filter( tag => tag.kind === FieldKind.TAG && @@ -121,12 +110,7 @@ const getFilterKeySections = ( ]; }; -interface Props { - className?: string; - style?: CSSProperties; -} - -export default function 
FeedbackSearch({className, style}: Props) { +export default function FeedbackSearch() { const {selection: pageFilters} = usePageFilters(); const projectIds = pageFilters.projects; const {pathname, query: locationQuery} = useLocation(); @@ -168,8 +152,8 @@ export default function FeedbackSearch({className, style}: Props) { ); const filterKeySections = useMemo(() => { - return getFilterKeySections(issuePlatformTags, organization); - }, [issuePlatformTags, organization]); + return getFilterKeySections(issuePlatformTags); + }, [issuePlatformTags]); const getTagValues = useCallback( (tag: Tag, searchQuery: string): Promise => { @@ -218,42 +202,16 @@ export default function FeedbackSearch({className, style}: Props) { [navigate, pathname, locationQuery] ); - if (organization.features.includes('search-query-builder-user-feedback')) { - return ( - - ); - } - return ( - - - + ); } - -const SearchContainer = styled('div')` - display: grid; - width: 100%; -`; diff --git a/static/app/components/feedback/list/feedbackListBulkSelection.tsx b/static/app/components/feedback/list/feedbackListBulkSelection.tsx index 4a2780ff13dbc..ddb1c8d08e1f7 100644 --- a/static/app/components/feedback/list/feedbackListBulkSelection.tsx +++ b/static/app/components/feedback/list/feedbackListBulkSelection.tsx @@ -9,6 +9,7 @@ import {IconEllipsis} from 'sentry/icons/iconEllipsis'; import {t, tct} from 'sentry/locale'; import {space} from 'sentry/styles/space'; import {GroupStatus} from 'sentry/types/group'; +import useOrganization from 'sentry/utils/useOrganization'; interface Props extends Pick< @@ -24,7 +25,8 @@ export default function FeedbackListBulkSelection({ selectedIds, deselectAll, }: Props) { - const {onToggleResovled, onMarkAsRead, onMarkUnread} = useBulkEditFeedbacks({ + const organization = useOrganization(); + const {onDelete, onToggleResolved, onMarkAsRead, onMarkUnread} = useBulkEditFeedbacks({ selectedIds, deselectAll, }); @@ -36,6 +38,10 @@ export default function FeedbackListBulkSelection({ const newMailboxSpam = mailbox === 'ignored' ? GroupStatus.UNRESOLVED : GroupStatus.IGNORED; + const hasDelete = + organization.features.includes('issue-platform-deletion-ui') && selectedIds !== 'all'; + const disableDelete = !organization.access.includes('event:admin'); + return ( @@ -49,7 +55,7 @@ export default function FeedbackListBulkSelection({ @@ -58,7 +64,7 @@ export default function FeedbackListBulkSelection({ - - )} - - + {openForm && !isPending && ( + + + + + )} + + )} + ); } -const Wrapper = styled(Panel)` - display: flex; - flex-direction: column; - margin-bottom: 0; - background: linear-gradient( - 269.35deg, - ${p => p.theme.backgroundTertiary} 0.32%, - rgba(245, 243, 247, 0) 99.69% - ); - padding: ${space(1.5)} ${space(2)}; +const Body = styled('div')` + padding: 0 ${space(4)} ${space(1.5)} ${space(4)}; +`; + +const HeadlinePreview = styled('span')` + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + margin-right: ${space(0.5)}; + flex-shrink: 0; + max-width: 92%; +`; + +const SummaryPreview = styled('span')` + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + flex-grow: 1; + color: ${p => p.theme.subText}; +`; + +const Wrapper = styled(Panel)<{isStreamlined: boolean}>` + margin-bottom: ${p => (p.isStreamlined ? 
0 : space(1))}; + padding: ${space(0.5)}; `; const StyledTitleRow = styled('div')` display: flex; - align-items: center; + align-items: flex-start; justify-content: space-between; + padding: ${space(1)} ${space(1)} ${space(1)} ${space(1)}; + + &:hover { + cursor: pointer; + background: ${p => p.theme.backgroundSecondary}; + } `; -const StyledTitle = styled('div')` - margin: 0; - color: ${p => p.theme.text}; - font-size: ${p => p.theme.fontSizeMedium}; - font-weight: 600; - align-items: center; +const CollapsedRow = styled('div')` display: flex; + width: 100%; + align-items: flex-start; + overflow: hidden; `; -const StyledFeatureBadge = styled(FeatureBadge)` - margin-top: -1px; -`; +const StyledFeatureBadge = styled(FeatureBadge)``; -const SummaryContent = styled('div')` +const HeadlineContent = styled('span')` overflow-wrap: break-word; p { margin: 0; @@ -219,14 +221,17 @@ const SummaryContent = styled('div')` code { word-break: break-all; } + width: 100%; `; -const StyledLoadingIndicator = styled(LoadingIndicator)` - display: flex; - align-items: center; - justify-content: center; - width: 16px; - max-height: 16px; +const SummaryContent = styled('div')` + overflow-wrap: break-word; + p { + margin: 0; + } + code { + word-break: break-all; + } `; const ImpactContent = styled('div')` @@ -242,12 +247,20 @@ const Content = styled('div')` const ButtonContainer = styled('div')` margin-top: ${space(1.5)}; - margin-bottom: ${space(0.5)}; + align-items: center; + display: flex; `; -const SummaryHeaderContainer = styled('div')<{isStreamlined: boolean}>` - display: flex; - align-items: center; - margin-top: ${space(1)}; - color: ${p => (p.isStreamlined ? p.theme.subText : p.theme.text)}; +const IconContainer = styled('div')` + flex-shrink: 0; + margin-right: ${space(1)}; + margin-top: ${space(0.25)}; + max-height: ${space(2)}; +`; + +const IconContainerRight = styled('div')` + flex-shrink: 0; + margin-left: ${space(1)}; + margin-top: ${space(0.25)}; + max-height: ${space(2)}; `; diff --git a/static/app/components/group/inboxBadges/shortId.tsx b/static/app/components/group/inboxBadges/shortId.tsx index 04e77743da72f..88f5873beead9 100644 --- a/static/app/components/group/inboxBadges/shortId.tsx +++ b/static/app/components/group/inboxBadges/shortId.tsx @@ -37,5 +37,4 @@ const IdWrapper = styled('div')` overflow: hidden; text-overflow: ellipsis; white-space: nowrap; - margin-top: 1px; `; diff --git a/static/app/components/group/issueReplayCount.tsx b/static/app/components/group/issueReplayCount.tsx index 5c343a07d53a7..fd99e28b6e179 100644 --- a/static/app/components/group/issueReplayCount.tsx +++ b/static/app/components/group/issueReplayCount.tsx @@ -54,6 +54,10 @@ const ReplayCountLink = styled(Link)` color: ${p => p.theme.gray400}; font-size: ${p => p.theme.fontSizeSmall}; gap: 0 ${space(0.5)}; + + &:hover { + color: ${p => p.theme.linkHoverColor}; + } `; export default IssueReplayCount; diff --git a/static/app/components/group/streamlinedParticipantList.spec.tsx b/static/app/components/group/streamlinedParticipantList.spec.tsx index 62ba12a621f79..915f31e17a660 100644 --- a/static/app/components/group/streamlinedParticipantList.spec.tsx +++ b/static/app/components/group/streamlinedParticipantList.spec.tsx @@ -19,22 +19,32 @@ describe('ParticipantList', () => { it('expands and collapses the list when clicked', async () => { render(); expect(screen.queryByText('#team-1')).not.toBeInTheDocument(); - await userEvent.click(screen.getByText('JD')); + await userEvent.click(screen.getByText('JD'), 
{skipHover: true}); expect(await screen.findByText('#team-1')).toBeInTheDocument(); expect(await screen.findByText('Bob Alice')).toBeInTheDocument(); expect(screen.getByText('Teams (2)')).toBeInTheDocument(); expect(screen.getByText('Individuals (2)')).toBeInTheDocument(); - await userEvent.click(screen.getAllByText('JD')[0]); + await userEvent.click(screen.getAllByText('JD')[0], {skipHover: true}); expect(screen.queryByText('Bob Alice')).not.toBeInTheDocument(); }); it('does not display section headers when there is only users or teams', async () => { render(); - await userEvent.click(screen.getByText('JD')); + await userEvent.click(screen.getByText('JD'), {skipHover: true}); expect(await screen.findByText('Bob Alice')).toBeInTheDocument(); expect(screen.queryByText('Teams')).not.toBeInTheDocument(); }); + + it('skips duplicate information between name and email', async () => { + const duplicateInfoUsers = [ + UserFixture({id: '1', name: 'john.doe@example.com', email: 'john.doe@example.com'}), + ]; + render(); + await userEvent.click(screen.getByText('J'), {skipHover: true}); + // Would find two elements if it was duplicated + expect(await screen.findByText('john.doe@example.com')).toBeInTheDocument(); + }); }); diff --git a/static/app/components/group/streamlinedParticipantList.tsx b/static/app/components/group/streamlinedParticipantList.tsx index 30763a9902f05..b64409f0f81fd 100644 --- a/static/app/components/group/streamlinedParticipantList.tsx +++ b/static/app/components/group/streamlinedParticipantList.tsx @@ -49,7 +49,9 @@ export default function ParticipantList({users, teams}: DropdownListProps) {
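  // Teams render as "#slug" plus a member count; user rows show the name and
  // append the email only when it differs from the name, matching the new
  // "skips duplicate information" spec above.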
            {`#${team.slug}`}
-            <SubText>{tn('%s member', '%s members', team.memberCount)}</SubText>
+            <SmallText>
+              {tn('%s member', '%s members', team.memberCount)}
+            </SmallText>
))} @@ -59,10 +61,12 @@ {users.map(user => (
-            <div>
-              {user.name}
-              <SubText>{user.email}</SubText>
-            </div>
+            <NameWrapper>
+              <div>{user.name}</div>
+              {user.email !== user.name ? (
+                <SmallText>{user.email}</SmallText>
+              ) : null}
+            </NameWrapper>
))} @@ -82,6 +86,7 @@ const ParticipantListWrapper = styled('div')` max-height: 325px; overflow-y: auto; border-radius: ${p => p.theme.borderRadius}; + color: ${p => p.theme.textColor}; & > div:not(:last-child) { border-bottom: 1px solid ${p => p.theme.border}; @@ -106,10 +111,16 @@ const UserRow = styled('div')` gap: ${space(1)}; line-height: 1.2; font-size: ${p => p.theme.fontSizeSmall}; + min-height: 45px; +`; + +const NameWrapper = styled('div')` + & > div:only-child { + margin-top: ${space(0.25)}; + } `; -const SubText = styled('div')` - color: ${p => p.theme.subText}; +const SmallText = styled('div')` font-size: ${p => p.theme.fontSizeExtraSmall}; `; diff --git a/static/app/components/issues/groupList.tsx b/static/app/components/issues/groupList.tsx index 667e57d0a805f..7992c02d8b856 100644 --- a/static/app/components/issues/groupList.tsx +++ b/static/app/components/issues/groupList.tsx @@ -47,7 +47,9 @@ export type GroupListColumn = | 'users' | 'priority' | 'assignee' - | 'lastTriggered'; + | 'lastTriggered' + | 'lastSeen' + | 'firstSeen'; type Props = WithRouterProps & { api: Client; diff --git a/static/app/components/keyValueTable.tsx b/static/app/components/keyValueTable.tsx index e099864d2788e..c1c4b23f206a7 100644 --- a/static/app/components/keyValueTable.tsx +++ b/static/app/components/keyValueTable.tsx @@ -1,5 +1,5 @@ import {Fragment} from 'react'; -import type {Theme} from '@emotion/react'; +import {css, type Theme} from '@emotion/react'; import styled from '@emotion/styled'; import {space} from 'sentry/styles/space'; @@ -25,20 +25,18 @@ export function KeyValueTableRow({keyName, value, type}: Props) { ); } -const commonStyles = ({theme, type}: {type: Props['type']} & {theme: Theme}) => ` +const commonStyles = ({theme, type}: {type: Props['type']} & {theme: Theme}) => css` font-size: ${theme.fontSizeMedium}; padding: ${space(0.5)} ${space(1)}; - font-weight: ${p => p.theme.fontWeightNormal}; + font-weight: ${theme.fontWeightNormal}; line-height: inherit; - ${p => p.theme.overflowEllipsis}; - - background-color: ${ - type === 'error' - ? theme.red100 + ' !important' - : type === 'warning' - ? 'var(--background-warning-default, rgba(245, 176, 0, 0.09)) !important' - : 'inherit' - }; + ${theme.overflowEllipsis}; + + background-color: ${type === 'error' + ? theme.red100 + ' !important' + : type === 'warning' + ? 
'var(--background-warning-default, rgba(245, 176, 0, 0.09)) !important' + : 'inherit'}; &:nth-of-type(2n-1) { background-color: ${theme.backgroundSecondary}; } diff --git a/static/app/components/lazyLoad.spec.tsx b/static/app/components/lazyLoad.spec.tsx index 6915d87d407f3..d7eca30ff2908 100644 --- a/static/app/components/lazyLoad.spec.tsx +++ b/static/app/components/lazyLoad.spec.tsx @@ -51,21 +51,14 @@ describe('LazyLoad', function () { it('renders with error message when promise is rejected', async function () { // eslint-disable-next-line no-console jest.spyOn(console, 'error').mockImplementation(jest.fn()); - const getComponent = jest.fn( - () => - new Promise((_resolve, reject) => - reject(new Error('Could not load component')) - ) - ); - - try { - render(); - } catch (err) { - // ignore - } + const getComponent = () => Promise.reject(new Error('Could not load component')); + + render(); expect( - await screen.findByText('There was an error loading a component.') + await screen.findByText('There was an error loading a component.', undefined, { + timeout: 5000, + }) ).toBeInTheDocument(); // eslint-disable-next-line no-console diff --git a/static/app/components/links/link.tsx b/static/app/components/links/link.tsx index 4a2908ef0c267..c54d15f4e8a7d 100644 --- a/static/app/components/links/link.tsx +++ b/static/app/components/links/link.tsx @@ -1,7 +1,8 @@ import {forwardRef} from 'react'; -// biome-ignore lint/nursery/noRestrictedImports: Will be removed with react router 6 -import {Link as RouterLink} from 'react-router'; -import {Link as Router6Link} from 'react-router-dom'; +import { + Link as RouterLink, + type LinkProps as ReactRouterLinkProps, +} from 'react-router-dom'; import styled from '@emotion/styled'; import type {LocationDescriptor} from 'history'; @@ -38,6 +39,7 @@ export interface LinkProps * Forwarded ref */ forwardedRef?: React.Ref; + state?: ReactRouterLinkProps['state']; } /** @@ -49,17 +51,9 @@ function BaseLink({disabled, to, forwardedRef, ...props}: LinkProps): React.Reac to = normalizeUrl(to, location); if (!disabled && location) { - if (window.__SENTRY_USING_REACT_ROUTER_SIX) { - return ( - - ); - } - - return ; + return ( + + ); } return
; diff --git a/static/app/components/links/listLink.tsx b/static/app/components/links/listLink.tsx index af7475b24b675..36680d62bcaff 100644 --- a/static/app/components/links/listLink.tsx +++ b/static/app/components/links/listLink.tsx @@ -1,5 +1,3 @@ -// biome-ignore lint/nursery/noRestrictedImports: Will be removed with react router 6 -import {Link as RouterLink} from 'react-router'; import {NavLink} from 'react-router-dom'; import styled from '@emotion/styled'; import classNames from 'classnames'; @@ -8,7 +6,6 @@ import type {LocationDescriptor} from 'history'; import {locationDescriptorToTo} from 'sentry/utils/reactRouter6Compat/location'; import normalizeUrl from 'sentry/utils/url/normalizeUrl'; import {useLocation} from 'sentry/utils/useLocation'; -import useRouter from 'sentry/utils/useRouter'; interface ListLinkProps extends Omit< @@ -36,31 +33,20 @@ function ListLink({ disabled = false, ...props }: ListLinkProps) { - const router = useRouter(); const location = useLocation(); const target = normalizeUrl(to); const active = isActive?.(target, index) ?? - // XXX(epurkhiser): our shim for router.isActive will throw an error in - // react-router 6. Fallback to manually checking if the path is active - (window.__SENTRY_USING_REACT_ROUTER_SIX - ? location.pathname === (typeof target === 'string' ? target : target.pathname) - : router.isActive(target, index)); - - const link = window.__SENTRY_USING_REACT_ROUTER_SIX ? ( - - {children} - - ) : ( - - {children} - - ); + // XXX(epurkhiser): This is carry over from the react-router 3 days. + // There's probably a a better way to detect active + location.pathname === (typeof target === 'string' ? target : target.pathname); return ( - {link} + + {children} + ); } diff --git a/static/app/components/links/styles.tsx b/static/app/components/links/styles.tsx index 3893f4a6f26c9..8f85e7f462b6c 100644 --- a/static/app/components/links/styles.tsx +++ b/static/app/components/links/styles.tsx @@ -1,6 +1,6 @@ -import type {Theme} from '@emotion/react'; +import {css, type Theme} from '@emotion/react'; -export const linkStyles = ({disabled, theme}: {theme: Theme; disabled?: boolean}) => ` +export const linkStyles = ({disabled, theme}: {theme: Theme; disabled?: boolean}) => css` border-radius: ${theme.linkBorderRadius}; &:focus-visible { @@ -9,14 +9,12 @@ export const linkStyles = ({disabled, theme}: {theme: Theme; disabled?: boolean} outline: none; } - ${ - disabled && - ` - color:${theme.disabled}; - pointer-events: none; - :hover { - color: ${theme.disabled}; - } - ` - } + ${disabled && + css` + color: ${theme.disabled}; + pointer-events: none; + :hover { + color: ${theme.disabled}; + } + `} `; diff --git a/static/app/components/loadingIndicator.stories.tsx b/static/app/components/loadingIndicator.stories.tsx new file mode 100644 index 0000000000000..d397d48e596e3 --- /dev/null +++ b/static/app/components/loadingIndicator.stories.tsx @@ -0,0 +1,10 @@ +import LoadingIndicator from 'sentry/components/loadingIndicator'; +import storyBook from 'sentry/stories/storyBook'; + +export default storyBook(LoadingIndicator, story => { + story('Default', () => ); + + story('Mini', () => ); + + story('With Message', () => Loading...); +}); diff --git a/static/app/components/loadingIndicator.tsx b/static/app/components/loadingIndicator.tsx index 6147119f3fe49..c7ce58f5fb80f 100644 --- a/static/app/components/loadingIndicator.tsx +++ b/static/app/components/loadingIndicator.tsx @@ -39,7 +39,7 @@ function LoadingIndicator(props: Props) { 'loading-indicator': true, }); - 
let loadingStyle = {}; + let loadingStyle: React.CSSProperties = {}; if (size) { loadingStyle = { width: size, diff --git a/static/app/components/metrics/metricSearchBar.spec.tsx b/static/app/components/metrics/metricSearchBar.spec.tsx index 748b25d951a03..ec8b3083e27c6 100644 --- a/static/app/components/metrics/metricSearchBar.spec.tsx +++ b/static/app/components/metrics/metricSearchBar.spec.tsx @@ -14,11 +14,6 @@ describe('metricSearchBar', function () { url: '/organizations/org-slug/metrics/tags/', body: [], }); - MockApiClient.addMockResponse({ - method: 'POST', - url: '/organizations/org-slug/recent-searches/', - body: [], - }); MockApiClient.addMockResponse({ method: 'GET', url: '/organizations/org-slug/recent-searches/', @@ -36,57 +31,10 @@ describe('metricSearchBar', function () { }); }); - describe('using SmartSearchBar', function () { - it('does not allow illegal filters', async function () { - render( - - ); - await screen.findByPlaceholderText('Filter by tags'); - await userEvent.type(screen.getByPlaceholderText('Filter by tags'), 'potato:db'); - expect(screen.getByTestId('search-autocomplete-item')).toHaveTextContent( - "The field potato isn't supported here." - ); - await userEvent.keyboard('{enter}'); - expect(onChange).not.toHaveBeenCalled(); - }); - it('does not allow insights filters when not using an insights mri', async function () { - render( - - ); - await screen.findByPlaceholderText('Filter by tags'); - await userEvent.type( - screen.getByPlaceholderText('Filter by tags'), - 'span.module:db' - ); - expect(screen.getByTestId('search-autocomplete-item')).toHaveTextContent( - "The field span.module isn't supported here." - ); - await userEvent.keyboard('{enter}'); - expect(onChange).not.toHaveBeenCalled(); - }); - it('allows insights specific filters when using an insights mri', async function () { - render( - - ); - await screen.findByPlaceholderText('Filter by tags'); - await userEvent.type( - screen.getByPlaceholderText('Filter by tags'), - 'span.module:db' - ); - expect(screen.queryByTestId('search-autocomplete-item')).not.toBeInTheDocument(); - await userEvent.keyboard('{enter}'); - expect(onChange).toHaveBeenCalledWith('span.module:"db"'); - }); - }); - describe('using SearchQueryBuilder', function () { - const organization = {features: ['search-query-builder-metrics']}; it('does not allow illegal filters', async function () { render( - , - { - organization, - } + ); await screen.findByPlaceholderText('Filter by tags'); await userEvent.type(screen.getByPlaceholderText('Filter by tags'), 'potato:db'); @@ -96,10 +44,7 @@ describe('metricSearchBar', function () { }); it('does not allow insights filters when not using an insights mri', async function () { render( - , - { - organization, - } + ); await screen.findByPlaceholderText('Filter by tags'); await userEvent.type( @@ -112,10 +57,7 @@ describe('metricSearchBar', function () { }); it('allows insights specific filters when using an insights mri', async function () { render( - , - { - organization, - } + ); await screen.findByPlaceholderText('Filter by tags'); await userEvent.type( diff --git a/static/app/components/metrics/metricSearchBar.tsx b/static/app/components/metrics/metricSearchBar.tsx index 482b4e405de31..f3d8b58a7375c 100644 --- a/static/app/components/metrics/metricSearchBar.tsx +++ b/static/app/components/metrics/metricSearchBar.tsx @@ -1,6 +1,5 @@ import {useCallback, useMemo} from 'react'; import {css, type SerializedStyles} from '@emotion/react'; -import {useId} from '@react-aria/utils'; import 
{QueryFieldGroup} from 'sentry/components/metrics/queryFieldGroup'; import { @@ -8,14 +7,10 @@ import { type SearchQueryBuilderProps, } from 'sentry/components/searchQueryBuilder'; import type {SmartSearchBarProps} from 'sentry/components/smartSearchBar'; -import SmartSearchBar from 'sentry/components/smartSearchBar'; import {t} from 'sentry/locale'; import {SavedSearchType, type TagCollection} from 'sentry/types/group'; import type {MRI} from 'sentry/types/metrics'; -import { - hasMetricsNewInputs, - hasMetricsNewSearchQueryBuilder, -} from 'sentry/utils/metrics/features'; +import {hasMetricsNewInputs} from 'sentry/utils/metrics/features'; import {getUseCaseFromMRI} from 'sentry/utils/metrics/mri'; import type {MetricTag} from 'sentry/utils/metrics/types'; import {useMetricsTags} from 'sentry/utils/metrics/useMetricsTags'; @@ -64,14 +59,12 @@ export function MetricSearchBar({ onChange, query, projectIds, - id: idProp, ...props }: MetricSearchBarProps) { const organization = useOrganization(); const api = useApi(); const {selection} = usePageFilters(); const selectedProjects = useSelectedProjects(); - const id = useId(idProp); const projectIdNumbers = useMemo( () => projectIds?.map(projectId => parseInt(projectId, 10)), [projectIds] @@ -172,37 +165,11 @@ export function MetricSearchBar({ css: wideSearchBarCss(disabled), }; - const smartSearchProps: Partial & {css: SerializedStyles} = { - id, - disabled, - maxMenuHeight: 220, - organization, - onGetTagValues: getTagValues, - // don't highlight tags while loading as we don't know yet if they are supported - highlightUnsupportedTags: !isPending, - onClose: handleChange, - onSearch: handleChange, - placeholder: t('Filter by tags'), - query, - savedSearchType: SavedSearchType.METRIC, - css: wideSearchBarCss(disabled), - ...props, - ...searchConfig, - }; - if (hasMetricsNewInputs(organization)) { - if (hasMetricsNewSearchQueryBuilder(organization)) { - return ; - } - - return ; - } - - if (hasMetricsNewSearchQueryBuilder(organization)) { - return ; + return ; } - return ; + return ; } function wideSearchBarCss(disabled?: boolean) { diff --git a/static/app/components/modals/inviteMembersModal/index.tsx b/static/app/components/modals/inviteMembersModal/index.tsx index a4f4753780e98..88c3128b848b4 100644 --- a/static/app/components/modals/inviteMembersModal/index.tsx +++ b/static/app/components/modals/inviteMembersModal/index.tsx @@ -1,14 +1,24 @@ import {css} from '@emotion/react'; +import styled from '@emotion/styled'; import type {ModalRenderProps} from 'sentry/actionCreators/modal'; import ErrorBoundary from 'sentry/components/errorBoundary'; import LoadingError from 'sentry/components/loadingError'; import LoadingIndicator from 'sentry/components/loadingIndicator'; +import { + ErrorAlert, + InviteMessage, +} from 'sentry/components/modals/inviteMembersModal/inviteHeaderMessages'; +import {InviteMembersContext} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import InviteMembersFooter from 'sentry/components/modals/inviteMembersModal/inviteMembersFooter'; import InviteMembersModalView from 'sentry/components/modals/inviteMembersModal/inviteMembersModalview'; +import InviteRowControl from 'sentry/components/modals/inviteMembersModal/inviteRowControlNew'; import type {InviteRow} from 'sentry/components/modals/inviteMembersModal/types'; import useInviteModal from 'sentry/components/modals/inviteMembersModal/useInviteModal'; import {InviteModalHook} from 'sentry/components/modals/memberInviteModalCustomization'; +import 
{ORG_ROLES} from 'sentry/constants'; import {t} from 'sentry/locale'; +import {space} from 'sentry/styles/space'; import {trackAnalytics} from 'sentry/utils/analytics'; import {isActiveSuperuser} from 'sentry/utils/isActiveSuperuser'; import useOrganization from 'sentry/utils/useOrganization'; @@ -19,6 +29,8 @@ interface InviteMembersModalProps extends ModalRenderProps { } function InviteMembersModal({ + Header, + Body, closeModal, initialData, source, @@ -37,6 +49,7 @@ function InviteMembersModal({ setEmails, setRole, setTeams, + setInviteStatus, willInvite, complete, inviteStatus, @@ -70,7 +83,41 @@ function InviteMembersModal({ onSendInvites={sendInvites} > {({sendInvites: _sendInvites, canSend, headerInfo}) => { - return ( + return organization.features.includes('invite-members-new-modal') ? ( + +
+        <InviteMembersContext.Provider
+          value={{
+            complete,
+            inviteStatus,
+            invites,
+            pendingInvites,
+            reset,
+            sendInvites,
+            sendingInvites,
+            setEmails,
+            setInviteStatus,
+            setRole,
+            setTeams,
+            willInvite,
+            error,
+          }}
+        >
+          <Header closeButton>
+            <ErrorAlert />
+            {t('Invite New Members')}
+          </Header>
+          <Body>
+            <InviteMessage />
+            {headerInfo}
+            <StyledInviteRow
+              roleOptions={ORG_ROLES}
+              roleDisabledUnallowed={willInvite}
+            />
+            <InviteMembersFooter canSend={canSend} />
+          </Body>
+        </InviteMembersContext.Provider>
+ ) : ( p.theme.fontWeightNormal}; + font-size: ${p => p.theme.headerFontSize}; + margin-top: 0; + margin-bottom: ${space(0.75)}; +`; + +const StyledInviteRow = styled(InviteRowControl)` + margin-bottom: ${space(1.5)}; +`; + export default InviteMembersModal; diff --git a/static/app/components/modals/inviteMembersModal/inviteHeaderMessages.tsx b/static/app/components/modals/inviteMembersModal/inviteHeaderMessages.tsx new file mode 100644 index 0000000000000..a08daeb972a89 --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteHeaderMessages.tsx @@ -0,0 +1,33 @@ +import styled from '@emotion/styled'; + +import Alert from 'sentry/components/alert'; +import {useInviteMembersContext} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import {t} from 'sentry/locale'; +import {space} from 'sentry/styles/space'; + +export function ErrorAlert() { + const {error} = useInviteMembersContext(); + return error ? ( + + {error} + + ) : null; +} + +export function InviteMessage() { + const {willInvite} = useInviteMembersContext(); + return willInvite ? ( + {t('Invite unlimited new members to join your organization.')} + ) : ( + + {t( + 'You can’t invite users directly, but we’ll forward your request to an org owner or manager for approval.' + )} + + ); +} + +const Subtext = styled('p')` + color: ${p => p.theme.subText}; + margin-bottom: ${space(3)}; +`; diff --git a/static/app/components/modals/inviteMembersModal/inviteMembersContext.tsx b/static/app/components/modals/inviteMembersModal/inviteMembersContext.tsx new file mode 100644 index 0000000000000..208b0002fd943 --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteMembersContext.tsx @@ -0,0 +1,56 @@ +import {createContext, useContext} from 'react'; + +import type { + InviteRow, + InviteStatus, + NormalizedInvite, +} from 'sentry/components/modals/inviteMembersModal/types'; + +export type InviteMembersContextValue = { + complete: boolean; + inviteStatus: InviteStatus; + invites: NormalizedInvite[]; + pendingInvites: InviteRow; + reset: () => void; + sendInvites: () => void; + sendingInvites: boolean; + setEmails: (emails: string[], index: number) => void; + setInviteStatus: (inviteStatus: InviteStatus) => void; + setRole: (role: string, index: number) => void; + setTeams: (teams: string[], index: number) => void; + willInvite: boolean; + error?: string; +}; + +export const defaultInviteProps = { + complete: false, + inviteStatus: {}, + invites: [], + pendingInvites: { + emails: new Set(), + role: '', + teams: new Set(), + }, + reset: () => {}, + sendInvites: () => {}, + sendingInvites: false, + setEmails: () => {}, + setRole: () => {}, + setTeams: () => {}, + setInviteStatus: () => {}, + willInvite: false, +}; + +export const InviteMembersContext = createContext(null); + +export function useInviteMembersContext(): InviteMembersContextValue { + const context = useContext(InviteMembersContext); + + if (!context) { + throw new Error( + 'useInviteMembersContext must be used within a InviteMembersContext.Provider' + ); + } + + return context; +} diff --git a/static/app/components/modals/inviteMembersModal/inviteMembersFooter.spec.tsx b/static/app/components/modals/inviteMembersModal/inviteMembersFooter.spec.tsx new file mode 100644 index 0000000000000..8cfc649a7c9f1 --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteMembersFooter.spec.tsx @@ -0,0 +1,88 @@ +import {OrganizationFixture} from 'sentry-fixture/organization'; + +import {render, screen, userEvent} from 
'sentry-test/reactTestingLibrary'; + +import { + defaultInviteProps, + InviteMembersContext, +} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import InviteMembersFooter from 'sentry/components/modals/inviteMembersModal/inviteMembersFooter'; + +describe('InviteRowControlNew', function () { + const renderComponent = props => { + render( + + + , + {organization: OrganizationFixture({features: ['invite-members-new-modal']})} + ); + }; + + it('disables send button when there are no emails', function () { + renderComponent({}); + + const sendButton = screen.getByLabelText(/send invite/i); + expect(sendButton).toBeDisabled(); + }); + + it('enables send button when there are emails', async function () { + const mockSetInviteStatus = jest.fn(); + const mockSendInvites = jest.fn(); + renderComponent({ + invites: [ + { + email: 'moo-deng@email.com', + role: 'member', + teams: new Set(['moo-deng']), + }, + ], + setInviteStatus: mockSetInviteStatus, + sendInvites: mockSendInvites, + }); + + const sendButton = screen.getByLabelText(/send invite/i); + expect(sendButton).toBeEnabled(); + await userEvent.click(sendButton); + expect(mockSetInviteStatus).toHaveBeenCalled(); + expect(mockSendInvites).toHaveBeenCalled(); + }); + + it('displays correct status message for sent invites', function () { + renderComponent({ + complete: true, + inviteStatus: { + 'moo-deng': {sent: true}, + 'moo-waan': {sent: true}, + }, + willInvite: true, + }); + expect(screen.getByTestId('sent-invites')).toHaveTextContent(/2/i); + expect(screen.queryByTestId('failed-invites')).not.toBeInTheDocument(); + }); + + it('displays correct status message for failed invites', function () { + renderComponent({ + complete: true, + inviteStatus: { + 'moo-deng': {sent: false, error: 'Error'}, + 'moo-waan': {sent: false, error: 'Error'}, + }, + willInvite: true, + }); + expect(screen.getByText(/2/i)).toBeInTheDocument(); + }); + + it('displays correct status message for sent and failed invites', function () { + renderComponent({ + complete: true, + inviteStatus: { + 'moo-deng': {sent: true}, + 'moo-waan': {sent: true}, + 'moo-toon': {sent: false, error: 'Error'}, + }, + willInvite: true, + }); + expect(screen.getByTestId('sent-invites')).toHaveTextContent(/2/i); + expect(screen.getByTestId('failed-invites')).toHaveTextContent(/1/i); + }); +}); diff --git a/static/app/components/modals/inviteMembersModal/inviteMembersFooter.tsx b/static/app/components/modals/inviteMembersModal/inviteMembersFooter.tsx new file mode 100644 index 0000000000000..a130cc89a194e --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteMembersFooter.tsx @@ -0,0 +1,80 @@ +import {Fragment} from 'react'; +import styled from '@emotion/styled'; + +import ButtonBar from 'sentry/components/buttonBar'; +import InviteButton from 'sentry/components/modals/inviteMembersModal/inviteButton'; +import {useInviteMembersContext} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import InviteStatusMessage from 'sentry/components/modals/inviteMembersModal/inviteStatusMessage'; +import {space} from 'sentry/styles/space'; +import useOrganization from 'sentry/utils/useOrganization'; + +interface Props { + canSend: boolean; +} + +export default function InviteMembersFooter({canSend}: Props) { + const organization = useOrganization(); + const { + complete, + inviteStatus, + setInviteStatus, + invites, + pendingInvites, + sendInvites, + sendingInvites, + willInvite, + } = useInviteMembersContext(); + const isValidInvites = 
invites.length > 0; + + const removeSentInvites = () => { + const emails = Object.keys(inviteStatus); + let newInviteStatus = {}; + emails.forEach(email => { + if (pendingInvites.emails.has(email)) { + newInviteStatus = {...newInviteStatus, [email]: inviteStatus[email]}; + } + }); + setInviteStatus(newInviteStatus); + }; + + return ( + +
+      <FooterContent>
+        <div>
+          {/* TODO(mia): remove these props and use InviteMembersContext once old modal is removed */}
+          <InviteStatusMessage
+            complete={complete}
+            hasDuplicateEmails={false}
+            inviteStatus={inviteStatus}
+            sendingInvites={sendingInvites}
+            willInvite={willInvite}
+          />
+        </div>
+        <ButtonBar>
+          <InviteButton
+            invites={invites}
+            willInvite={willInvite}
+            disabled={!canSend || !isValidInvites}
+            onClick={() => {
+              organization.features.includes('invite-members-new-modal') &&
+                removeSentInvites();
+              sendInvites();
+            }}
+          />
+        </ButtonBar>
+      </FooterContent>
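`removeSentInvites` above rebuilds the status map with a mutable accumulator. Assuming the same `InviteStatus` shape used in this module, the filter can be expressed more directly (a sketch, not the PR's code):

```tsx
type InviteStatus = Record<string, {sent: boolean; error?: string}>;

// Keep only entries whose email is still pending; statuses for addresses
// that are no longer in the pending set are dropped.
function pendingOnly(
  inviteStatus: InviteStatus,
  pendingEmails: Set<string>
): InviteStatus {
  return Object.fromEntries(
    Object.entries(inviteStatus).filter(([email]) => pendingEmails.has(email))
  );
}
```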
+ ); +} + +const FooterContent = styled('div')` + display: flex; + gap: ${space(1)}; + align-items: center; + justify-content: space-between; + flex: 1; +`; diff --git a/static/app/components/modals/inviteMembersModal/inviteRowControlNew.spec.tsx b/static/app/components/modals/inviteMembersModal/inviteRowControlNew.spec.tsx new file mode 100644 index 0000000000000..7d5b3eb03792b --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteRowControlNew.spec.tsx @@ -0,0 +1,138 @@ +import {TeamFixture} from 'sentry-fixture/team'; + +import {render, screen, userEvent} from 'sentry-test/reactTestingLibrary'; + +import { + defaultInviteProps, + InviteMembersContext, +} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import InviteRowControlNew from 'sentry/components/modals/inviteMembersModal/inviteRowControlNew'; +import TeamStore from 'sentry/stores/teamStore'; + +describe('InviteRowControlNew', function () { + const teamData = [ + { + id: '1', + slug: 'moo-deng', + name: "Moo Deng's Team", + }, + { + id: '2', + slug: 'moo-waan', + name: "Moo Waan's Team", + }, + ]; + const teams = teamData.map(data => TeamFixture(data)); + + const getComponent = props => ( + + + + ); + + beforeEach(function () { + TeamStore.loadInitialData(teams); + }); + + it('renders', function () { + render(getComponent(defaultInviteProps)); + + expect(screen.getByText('Email addresses')).toBeInTheDocument(); + expect(screen.getByText('Role')).toBeInTheDocument(); + expect(screen.getByText('Add to team')).toBeInTheDocument(); + }); + + describe.each([ + {email: 'test-space@example.com', delimiter: ' '}, + {email: 'test-comma@example.com', delimiter: ','}, + {email: 'test-newline@example.com', delimiter: '{enter}'}, + ])('updates email addresses when new emails are inputted', ({email, delimiter}) => { + it(`invokes the mock correctly with one using delimiter "${delimiter}"`, async () => { + const mockSetEmails = jest.fn(); + render(getComponent({...defaultInviteProps, setEmails: mockSetEmails})); + const emailInput = screen.getByLabelText('Email Addresses'); + await userEvent.type(emailInput, `${email}${delimiter}`); + expect(mockSetEmails).toHaveBeenCalled(); + }); + + it(`invokes the mock correctly with many using delimiter "${delimiter}"`, async () => { + const mockSetEmails = jest.fn(); + render(getComponent({...defaultInviteProps, setEmails: mockSetEmails})); + const emailInput = screen.getByLabelText('Email Addresses'); + await userEvent.type(emailInput, `${email}${delimiter}`); + await userEvent.type(emailInput, `${email}${delimiter}`); + await userEvent.type(emailInput, `${email}${delimiter}`); + expect(mockSetEmails).toHaveBeenCalledTimes(3); + }); + }); + + it('updates email addresses when new emails are inputted and input is unfocussed', async function () { + const mockSetEmails = jest.fn(); + render(getComponent({...defaultInviteProps, setEmails: mockSetEmails})); + const emailInput = screen.getByLabelText('Email Addresses'); + await userEvent.type(emailInput, 'test-unfocus@example.com'); + await userEvent.tab(); + expect(mockSetEmails).toHaveBeenCalled(); + }); + + it('updates role value when new role is selected', async function () { + const mockSetRole = jest.fn(); + render(getComponent({...defaultInviteProps, setRole: mockSetRole})); + const roleInput = screen.getByLabelText('Role'); + await userEvent.click(roleInput); + await userEvent.click(screen.getByText('Billing')); + expect(mockSetRole).toHaveBeenCalled(); + }); + + it('disables team selection when team roles are 
not allowed', function () { + render( + getComponent({ + ...defaultInviteProps, + pendingInvites: { + ...defaultInviteProps.pendingInvites, + role: 'billing', + }, + }) + ); + const teamInput = screen.getByLabelText('Add to Team'); + expect(teamInput).toBeDisabled(); + }); + + it('enables team selection when team roles are allowed', async function () { + const mockSetTeams = jest.fn(); + render( + getComponent({ + ...defaultInviteProps, + pendingInvites: { + ...defaultInviteProps.pendingInvites, + role: 'member', + }, + setTeams: mockSetTeams, + }) + ); + const teamInput = screen.getByLabelText('Add to Team'); + expect(teamInput).toBeEnabled(); + await userEvent.click(teamInput); + await userEvent.click(screen.getByText('#moo-deng')); + await userEvent.click(screen.getByText('#moo-waan')); + expect(mockSetTeams).toHaveBeenCalledTimes(2); + }); +}); diff --git a/static/app/components/modals/inviteMembersModal/inviteRowControlNew.tsx b/static/app/components/modals/inviteMembersModal/inviteRowControlNew.tsx new file mode 100644 index 0000000000000..11965e0c0f930 --- /dev/null +++ b/static/app/components/modals/inviteMembersModal/inviteRowControlNew.tsx @@ -0,0 +1,219 @@ +import {useCallback, useState} from 'react'; +import type {MultiValueProps} from 'react-select'; +import type {Theme} from '@emotion/react'; +import {useTheme} from '@emotion/react'; +import styled from '@emotion/styled'; + +import type {StylesConfig} from 'sentry/components/forms/controls/selectControl'; +import SelectControl from 'sentry/components/forms/controls/selectControl'; +import {useInviteMembersContext} from 'sentry/components/modals/inviteMembersModal/inviteMembersContext'; +import RoleSelectControl from 'sentry/components/roleSelectControl'; +import TeamSelector from 'sentry/components/teamSelector'; +import {t} from 'sentry/locale'; +import {space} from 'sentry/styles/space'; +import type {SelectValue} from 'sentry/types/core'; +import type {OrgRole} from 'sentry/types/organization'; + +import renderEmailValue from './renderEmailValue'; +import type {InviteStatus} from './types'; + +type SelectOption = SelectValue; + +type Props = { + roleDisabledUnallowed: boolean; + roleOptions: OrgRole[]; +}; + +function ValueComponent( + props: MultiValueProps, + inviteStatus: InviteStatus +) { + return renderEmailValue(inviteStatus[props.data.value], props); +} + +function mapToOptions(values: string[]): SelectOption[] { + return values.map(value => ({value, label: value})); +} + +function InviteRowControl({roleDisabledUnallowed, roleOptions}: Props) { + const {inviteStatus, pendingInvites, setEmails, setRole, setTeams, reset} = + useInviteMembersContext(); + const emails = [...(pendingInvites.emails ?? [])]; + const role = pendingInvites.role ?? ''; + const teams = [...(pendingInvites.teams ?? [])]; + + const onChangeEmails = (opts: SelectOption[]) => { + setEmails(opts?.map(v => v.value) ?? [], 0); + }; + const onChangeRole = (value: SelectOption) => setRole(value?.value, 0); + const onChangeTeams = (opts: SelectOption[]) => + setTeams(opts ? opts.map(v => v.value) : [], 0); + + const [inputValue, setInputValue] = useState(''); + + const theme = useTheme(); + + const isTeamRolesAllowedForRole = useCallback<(roleId: string) => boolean>( + roleId => { + const roleOptionsMap = roleOptions.reduce( + (rolesMap, roleOption) => ({...rolesMap, [roleOption.id]: roleOption}), + {} + ); + return roleOptionsMap[roleId]?.isTeamRolesAllowed ?? 
true; + }, + [roleOptions] + ); + const isTeamRolesAllowed = isTeamRolesAllowedForRole(role); + + const handleKeyDown = (e: React.KeyboardEvent) => { + switch (e.key) { + case 'Enter': + case ',': + case ' ': + e.preventDefault(); + handleInput(inputValue); + setInputValue(''); + break; + default: + // do nothing. + } + }; + + const handleInput = input => { + const newEmails = input.trim() ? input.trim().split(/[\s,]+/) : []; + if (newEmails.length > 0) { + onChangeEmails([ + ...mapToOptions(emails), + ...newEmails.map(email => ({label: email, value: email})), + ]); + } + }; + + return ( + +
+    <RowWrapper>
+      <div>
+        <Heading>Email addresses</Heading>
+        <SelectControl
+          aria-label={t('Email Addresses')}
+          inputValue={inputValue}
+          value={mapToOptions(emails)}
+          components={{
+            MultiValue: (props: MultiValueProps<SelectOption>) =>
+              ValueComponent(props, inviteStatus),
+            DropdownIndicator: () => null,
+          }}
+          options={mapToOptions(emails)}
+          onBlur={(e: React.ChangeEvent<HTMLInputElement>) => {
+            handleInput(e.target.value);
+          }}
+          styles={getStyles(theme, inviteStatus)}
+          onInputChange={setInputValue}
+          onKeyDown={handleKeyDown}
+          onChange={onChangeEmails}
+          multiple
+          creatable
+          clearable
+          onClear={reset}
+          menuIsOpen={false}
+        />
+      </div>
+      <RoleTeamWrapper>
+        <div>
+          <Heading>Role</Heading>
+          <RoleSelectControl
+            aria-label={t('Role')}
+            roles={roleOptions}
+            disableUnallowed={roleDisabledUnallowed}
+            value={role}
+            onChange={roleOption => {
+              onChangeRole(roleOption);
+              if (!isTeamRolesAllowedForRole(roleOption.value)) {
+                onChangeTeams([]);
+              }
+            }}
+          />
+        </div>
+        <div>
+          <Heading>Add to team</Heading>
+          <TeamSelector
+            aria-label={t('Add to Team')}
+            disabled={!isTeamRolesAllowed}
+            value={teams}
+            onChange={onChangeTeams}
+            multiple
+            clearable
+          />
+        </div>
+      </RoleTeamWrapper>
+    </RowWrapper>
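Note the design choice in the email field above: with `menuIsOpen={false}`, a nulled-out `DropdownIndicator`, and `creatable`, the select degenerates into a plain token ("chip") input. A minimal equivalent using react-select directly, with illustrative names:

```tsx
import CreatableSelect from 'react-select/creatable';

type Option = {label: string; value: string};

// A multi-value "chip" input: typing creates options, no menu ever opens.
function EmailTokenInput(props: {
  onChange: (selected: readonly Option[]) => void;
  value: readonly Option[];
}) {
  return (
    <CreatableSelect
      isMulti
      isClearable
      menuIsOpen={false} // suppress the dropdown entirely
      components={{DropdownIndicator: () => null}}
      value={props.value}
      onChange={props.onChange}
    />
  );
}
```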
+ ); +} + +/** + * The email select control has custom selected item states as items + * show their delivery status after the form is submitted. + */ +function getStyles(theme: Theme, inviteStatus: InviteStatus): StylesConfig { + return { + multiValue: (provided, {data}: MultiValueProps) => { + const status = inviteStatus[data.value]; + return { + ...provided, + ...(status?.error + ? { + color: theme.red400, + border: `1px solid ${theme.red300}`, + backgroundColor: theme.red100, + } + : {}), + }; + }, + multiValueLabel: (provided, {data}: MultiValueProps) => { + const status = inviteStatus[data.value]; + return { + ...provided, + pointerEvents: 'all', + ...(status?.error ? {color: theme.red400} : {}), + }; + }, + multiValueRemove: (provided, {data}: MultiValueProps) => { + const status = inviteStatus[data.value]; + return { + ...provided, + ...(status?.error + ? { + borderLeft: `1px solid ${theme.red300}`, + ':hover': {backgroundColor: theme.red100, color: theme.red400}, + } + : {}), + }; + }, + }; +} + +const Heading = styled('div')` + margin-bottom: ${space(1)}; + font-weight: ${p => p.theme.fontWeightBold}; + text-transform: uppercase; + font-size: ${p => p.theme.fontSizeSmall}; +`; + +const RowWrapper = styled('div')` + display: flex; + flex-direction: column; + gap: ${space(1.5)}; +`; + +const RoleTeamWrapper = styled('div')` + display: grid; + gap: ${space(1.5)}; + grid-template-columns: 1fr 1fr; + align-items: start; +`; + +export default InviteRowControl; diff --git a/static/app/components/modals/inviteMembersModal/inviteStatusMessage.tsx b/static/app/components/modals/inviteMembersModal/inviteStatusMessage.tsx index b43b1aec77788..f8df7ec568875 100644 --- a/static/app/components/modals/inviteMembersModal/inviteStatusMessage.tsx +++ b/static/app/components/modals/inviteMembersModal/inviteStatusMessage.tsx @@ -4,10 +4,63 @@ import LoadingIndicator from 'sentry/components/loadingIndicator'; import {IconCheckmark, IconWarning} from 'sentry/icons'; import {t, tct, tn} from 'sentry/locale'; import {space} from 'sentry/styles/space'; +import useOrganization from 'sentry/utils/useOrganization'; import type {InviteStatus} from './types'; -interface Props { +interface InviteCountProps { + count: number; + label: string; + isRequest?: boolean; +} + +function InviteCount({count, label, isRequest}: InviteCountProps) { + return ( + + {isRequest + ? tn('%s invite request', '%s invite requests', count) + : tn('%s invite', '%s invites', count)} + + ); +} + +interface CountMessageProps { + errorCount: number; + sentCount: number; + isRequest?: boolean; +} + +function CountMessage({sentCount, errorCount, isRequest}: CountMessageProps) { + const invites = ( + + ); + const failedInvites = ( + + ); + const tctComponents = { + invites, + failed: errorCount, + failedInvites, + }; + return ( +
+      {sentCount > 0 && (
+        <StatusMessage status="success" isNewInviteModal>
+          <IconCheckmark size="sm" />
+          {tct('[invites] sent.', tctComponents)}
+        </StatusMessage>
+      )}
+      {errorCount > 0 && (
+        <StatusMessage status="error" isNewInviteModal>
+          <IconWarning size="sm" />
+          {tct('[failedInvites] failed to send.', tctComponents)}
+        </StatusMessage>
+      )}
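For reference, the `tn`/`tct` pair driving these messages: `tn` picks a plural form by count, while `tct` interpolates React nodes into a translated template. A minimal usage sketch against `sentry/locale`:

```tsx
import {tct, tn} from 'sentry/locale';

// tn: '1 invite' vs. '3 invites'; tct: splice the result into a template.
function sentSummary(sentCount: number) {
  const invites = tn('%s invite', '%s invites', sentCount);
  return tct('[invites] sent.', {invites});
}
```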
+ ); +} + +interface InviteStatusMessageProps { complete: boolean; hasDuplicateEmails: boolean; inviteStatus: InviteStatus; @@ -21,7 +74,10 @@ export default function InviteStatusMessage({ inviteStatus, sendingInvites, willInvite, -}: Props) { +}: InviteStatusMessageProps) { + const organization = useOrganization(); + const isNewInviteModal = organization.features.includes('invite-members-new-modal'); + if (sendingInvites) { return ( @@ -38,8 +94,29 @@ export default function InviteStatusMessage({ const sentCount = statuses.filter(i => i.sent).length; const errorCount = statuses.filter(i => i.error).length; + const statusIndicator = + hasDuplicateEmails || errorCount > 0 ? ( + + ) : ( + + ); + + if (isNewInviteModal) { + return ( + + ); + } + if (willInvite) { - const invites = {tn('%s invite', '%s invites', sentCount)}; + const invites = ( + + {tn('%s invite', '%s invites', sentCount)} + + ); const tctComponents = { invites, failed: errorCount, @@ -47,7 +124,7 @@ export default function InviteStatusMessage({ return ( - + {statusIndicator} {errorCount > 0 ? tct('Sent [invites], [failed] failed to send.', tctComponents) @@ -57,15 +134,18 @@ export default function InviteStatusMessage({ ); } const inviteRequests = ( - {tn('%s invite request', '%s invite requests', sentCount)} + + {tn('%s invite request', '%s invite requests', sentCount)} + ); const tctComponents = { inviteRequests, failed: errorCount, }; + return ( - + {statusIndicator} {errorCount > 0 ? tct( '[inviteRequests] pending approval, [failed] failed to send.', @@ -76,10 +156,11 @@ export default function InviteStatusMessage({ ); } + // TODO(mia): remove once old modal is removed if (hasDuplicateEmails) { return ( - + {t('Duplicate emails between invite rows.')} ); @@ -88,14 +169,19 @@ export default function InviteStatusMessage({ return null; } -export const StatusMessage = styled('div')<{status?: 'success' | 'error'}>` +export const StatusMessage = styled('div')<{ + isNewInviteModal?: boolean; + status?: 'success' | 'error'; +}>` display: flex; gap: ${space(1)}; align-items: center; font-size: ${p => p.theme.fontSizeMedium}; - color: ${p => (p.status === 'error' ? p.theme.errorText : p.theme.textColor)}; + color: ${p => + p.status === 'error' && !p.isNewInviteModal ? p.theme.errorText : p.theme.textColor}; +`; - > :first-child { - ${p => p.status === 'success' && `color: ${p.theme.successText}`}; - } +export const BoldCount = styled('div')` + display: inline; + font-weight: bold; `; diff --git a/static/app/components/modals/inviteMembersModal/renderEmailValue.tsx b/static/app/components/modals/inviteMembersModal/renderEmailValue.tsx index deb02ff34f764..a883838f0a4cd 100644 --- a/static/app/components/modals/inviteMembersModal/renderEmailValue.tsx +++ b/static/app/components/modals/inviteMembersModal/renderEmailValue.tsx @@ -6,6 +6,7 @@ import LoadingIndicator from 'sentry/components/loadingIndicator'; import {Tooltip} from 'sentry/components/tooltip'; import {IconCheckmark, IconWarning} from 'sentry/icons'; import {space} from 'sentry/styles/space'; +import useOrganization from 'sentry/utils/useOrganization'; import type {InviteStatus} from './types'; @@ -13,6 +14,7 @@ function renderEmailValue