diff --git a/.github/DISCUSSION_TEMPLATE/ideas.yml b/.github/DISCUSSION_TEMPLATE/ideas.yml new file mode 100644 index 000000000000..7d90b3525344 --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/ideas.yml @@ -0,0 +1,55 @@ +title: "[Suggestion] " +body: + - type: markdown + attributes: + value: | + ## Before We Start + + Please provide reasonably detailed responses to the question below to help the Core Team and maintainers + to understand how you run RabbitMQ and why you'd like to see the suggested changes. + - type: markdown + attributes: + value: | + ## Relevant Details + - type: dropdown + id: rabbitmq_series + attributes: + label: RabbitMQ series + options: + - 3.13.x + - 4.0.x + - 4.1.x + validations: + required: true + - type: input + id: os + attributes: + label: Operating system (distribution) used + description: What OS or distribution do you run RabbitMQ on? + validations: + required: true + - type: dropdown + id: deployment_type + attributes: + label: How is RabbitMQ deployed? + options: + - Community Docker image + - Debian package + - RPM package + - Generic binary package + - Kubernetes Operator(s) from Team RabbitMQ + - Bitnami Helm chart + - Chocolatey package + - Windows installer + - Windows binary package + - RabbitMQ-as-a-Service from a public cloud provider + - Other + validations: + required: true + - type: textarea + id: details + attributes: + label: What would you like to suggest for a future version of RabbitMQ? 
+ description: Please take the time to explain how you use RabbitMQ and why this change is important + validations: + required: true diff --git a/.github/DISCUSSION_TEMPLATE/other.yml b/.github/DISCUSSION_TEMPLATE/other.yml new file mode 100644 index 000000000000..60cd0beaf16a --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/other.yml @@ -0,0 +1,56 @@ +title: "[Other] " +body: + - type: markdown + attributes: + value: | + ## Before We Start + + This category exists for free-form questions where deployment details are less relevant, e.g. application and topology + advice kind of questions. Please provide a reasonably detailed description of how you use RabbitMQ. + - type: checkboxes + attributes: + label: Community Support Policy + description: + options: + - label: I have read [RabbitMQ's Community Support Policy](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) + required: true + - type: markdown + attributes: + value: | + ## Relevant Details + - type: dropdown + id: rabbitmq_version + attributes: + label: RabbitMQ version used + options: + - 4.0.2 + - 3.13.7 + - 3.13.6 + - 3.12.x or older + validations: + required: true + - type: dropdown + id: deployment_type + attributes: + label: How is RabbitMQ deployed? + options: + - Community Docker image + - Debian package + - RPM package + - Generic binary package + - Kubernetes Operator(s) from Team RabbitMQ + - Bitnami Helm chart + - Chocolatey package + - Windows installer + - Windows binary package + - RabbitMQ-as-a-Service from a public cloud provider + - Other + validations: + required: true + - type: textarea + id: details + attributes: + label: Steps to reproduce the behavior in question + description: What specific steps need to be performed in order to reproduce this behavior? Why? 
+ validations: + required: true diff --git a/.github/DISCUSSION_TEMPLATE/questions.yml b/.github/DISCUSSION_TEMPLATE/questions.yml new file mode 100644 index 000000000000..b15d2f4a737f --- /dev/null +++ b/.github/DISCUSSION_TEMPLATE/questions.yml @@ -0,0 +1,202 @@ +title: "[Questions] " +body: + - type: markdown + attributes: + value: | + ## Before We Start + + Please provide reasonably detailed responses to the question below to help others help you. + + If you omit relevant information, those trying to reproduce what you are about to report will have to guess. + Guessing is a very time consuming, and therefore expensive, approach to troubleshooting distributed messaging infrastructure. + - type: checkboxes + attributes: + label: Community Support Policy + description: + options: + - label: I have read [RabbitMQ's Community Support Policy](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) + required: true + - label: I agree to provide all relevant information (versions, logs, rabbitmq-diagnostics output, detailed reproduction steps) + required: true + - type: markdown + attributes: + value: | + ## Relevant Details + - type: dropdown + id: rabbitmq_version + attributes: + label: RabbitMQ version used + options: + - 4.0.2 + - 3.13.7 + - 3.13.6 + - 3.12.x or older + validations: + required: true + - type: dropdown + id: erlang_version + attributes: + label: Erlang version used + options: + - 26.2.x + - 26.1.x + - 26.0.x + validations: + required: true + - type: input + id: os + attributes: + label: Operating system (distribution) used + description: What OS or distribution do you run RabbitMQ on? + validations: + required: true + - type: dropdown + id: deployment_type + attributes: + label: How is RabbitMQ deployed? 
+ options: + - Community Docker image + - Debian package + - RPM package + - Generic binary package + - Kubernetes Operator(s) from Team RabbitMQ + - Bitnami Helm chart + - Chocolatey package + - Windows installer + - Windows binary package + - RabbitMQ-as-a-Service from a public cloud provider + - Other + validations: + required: true + - type: textarea + id: diagnostics_status + attributes: + label: rabbitmq-diagnostics status output + value: | + See https://www.rabbitmq.com/docs/cli to learn how to use rabbitmq-diagnostics +
+ + ``` + # PASTE OUTPUT HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: true + - type: textarea + id: rabbitmq_logs + attributes: + label: Logs from node 1 (with sensitive values edited out) + description: Relevant RabbitMQ logs with sensitive values edited out + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: true + - type: textarea + id: logs_node_2 + attributes: + label: Logs from node 2 (if applicable, with sensitive values edited out) + description: Relevant RabbitMQ logs with sensitive values edited out + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: false + - type: textarea + id: logs_node_3 + attributes: + label: Logs from node 3 (if applicable, with sensitive values edited out) + description: Relevant RabbitMQ logs with sensitive values edited out + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: false + - type: textarea + id: rabbitmq_conf + attributes: + label: rabbitmq.conf + description: rabbitmq.conf contents + value: | + See https://www.rabbitmq.com/docs/configure#config-location to learn how to find rabbitmq.conf file location +
+ + ``` + # PASTE rabbitmq.conf HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: true + - type: textarea + id: deployment_steps + attributes: + label: Steps to deploy RabbitMQ cluster + description: How would you explain how you deploy RabbitMQ to a new colleague? + validations: + required: true + - type: textarea + id: reproduction_steps + attributes: + label: Steps to reproduce the behavior in question + description: What specific steps need to be performed in order to reproduce this behavior? Why? + validations: + required: true + - type: textarea + id: advanced_config + attributes: + label: advanced.config + description: advanced.config contents (if applicable) + value: | + See https://www.rabbitmq.com/docs/configure#config-location to learn how to find advanced.config file location +
+ + ``` + # PASTE advanced.config HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: false + - type: textarea + id: app_code + attributes: + label: Application code + description: Relevant messaging-related parts of application code + value: | +
+ + ```python + # PASTE CODE HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: false + - type: textarea + id: k8s_deployment + attributes: + label: Kubernetes deployment file + description: Kubernetes deployment YAML that demonstrates how RabbitMQ is deployed (if applicable) + value: | +
+ + ```yaml + # Relevant parts of K8S deployment that demonstrate how RabbitMQ is deployed + # PASTE YAML HERE, BETWEEN BACKTICKS + ``` +
+ validations: + required: false diff --git a/.github/workflows/gazelle-scheduled.yaml b/.github/workflows/gazelle-scheduled.yaml index 122a120eadf1..69536463c99d 100644 --- a/.github/workflows/gazelle-scheduled.yaml +++ b/.github/workflows/gazelle-scheduled.yaml @@ -30,7 +30,7 @@ jobs: run: | bazel run gazelle - name: CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v6.1.0 + uses: peter-evans/create-pull-request@v7.0.5 with: token: ${{ secrets.REPO_SCOPED_TOKEN }} committer: GitHub diff --git a/.github/workflows/gazelle.yaml b/.github/workflows/gazelle.yaml index 5bb10f3ee206..d5eba7c39a58 100644 --- a/.github/workflows/gazelle.yaml +++ b/.github/workflows/gazelle.yaml @@ -25,7 +25,7 @@ jobs: run: | bazel run gazelle - name: CREATE PULL REQUEST - uses: peter-evans/create-pull-request@v6.1.0 + uses: peter-evans/create-pull-request@v7.0.5 with: token: ${{ secrets.REPO_SCOPED_TOKEN }} committer: GitHub diff --git a/.github/workflows/rabbitmq_peer_discovery_aws.yaml b/.github/workflows/rabbitmq_peer_discovery_aws.yaml index 35063adf120d..4550510131f0 100644 --- a/.github/workflows/rabbitmq_peer_discovery_aws.yaml +++ b/.github/workflows/rabbitmq_peer_discovery_aws.yaml @@ -66,7 +66,7 @@ jobs: ecs-cli --version - name: AUTHENTICATE TO GOOGLE CLOUD if: steps.authorized.outputs.authorized == 'true' - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL diff --git a/.github/workflows/templates/test-mixed-versions.template.yaml b/.github/workflows/templates/test-mixed-versions.template.yaml index ece427df4ce2..02135223e45b 100644 --- a/.github/workflows/templates/test-mixed-versions.template.yaml +++ b/.github/workflows/templates/test-mixed-versions.template.yaml @@ -23,11 +23,8 @@ on: push: branches: - main - - v3.12.x - - v3.11.x - - v3.10.x - - v3.9.x - - v3.8.x + - v4.0.x + - v3.13.x - bump-otp-* - bump-elixir-* - bump-rbe-* @@ -99,7 
+96,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: BUILD SECONDARY UMBRELLA ARCHIVE diff --git a/.github/workflows/templates/test.template.yaml b/.github/workflows/templates/test.template.yaml index eeda4286f20c..4f7234af3285 100644 --- a/.github/workflows/templates/test.template.yaml +++ b/.github/workflows/templates/test.template.yaml @@ -22,7 +22,8 @@ name: Test on: push: branches: - - main +#! - main + - v4.0.x - v3.13.x - v3.12.x - v3.11.x @@ -41,7 +42,7 @@ on: - '*.bzl' - '*.bazel' - .github/workflows/test.yaml - pull_request: +#! pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -72,7 +73,7 @@ jobs: run: | echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: REPO CACHE diff --git a/.github/workflows/test-authnz.yaml b/.github/workflows/test-authnz.yaml index e21ce54614ec..2b0342b03823 100644 --- a/.github/workflows/test-authnz.yaml +++ b/.github/workflows/test-authnz.yaml @@ -42,7 +42,7 @@ jobs: - erlang_version: "26.2" elixir_version: 1.15.7 env: - SELENIUM_DIR: deps/rabbitmq_management/selenium + SELENIUM_DIR: selenium DOCKER_NETWORK: rabbitmq_net steps: - name: Checkout @@ -58,7 +58,7 @@ jobs: https://cdn.jsdelivr.net/hex - name: Authenticate To Google Cloud - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} @@ -91,7 +91,8 @@ jobs: - name: Run Suites run: | - RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 
${SELENIUM_DIR}/run-suites.sh full-suite-authnz-messaging + RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 \ + ${SELENIUM_DIR}/run-suites.sh full-suite-authnz-messaging - name: Upload Test Artifacts if: always() diff --git a/.github/workflows/test-make-target.yaml b/.github/workflows/test-make-target.yaml new file mode 100644 index 000000000000..7d08bca09b2c --- /dev/null +++ b/.github/workflows/test-make-target.yaml @@ -0,0 +1,81 @@ +name: Test target (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string + metadata_store: + required: true + type: string + make_target: + required: true + type: string + plugin: + required: true + type: string +jobs: + test: + name: ${{ inputs.plugin }} (${{ inputs.make_target }}) + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + + - name: FETCH TAGS + run: git fetch --tags + + - name: SETUP OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ inputs.erlang_version }} + elixir-version: ${{ inputs.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + # This currently only applies to Elixir; and can be safely + # restricted to the build jobs to avoid duplication in output. 
+ disable_problem_matchers: true + + - name: SETUP DOTNET (rabbit) + uses: actions/setup-dotnet@v4 + if: inputs.plugin == 'rabbit' + with: + dotnet-version: '3.1.x' + + - name: SETUP SLAPD (rabbitmq_auth_backend_ldap) + if: inputs.plugin == 'rabbitmq_auth_backend_ldap' + run: | + sudo apt-get update && \ + sudo apt-get install -y \ + apparmor-utils \ + ldap-utils \ + slapd + + sudo aa-complain `which slapd` + + - name: RUN TESTS + if: inputs.plugin != 'rabbitmq_cli' + run: | + make -C deps/${{ inputs.plugin }} ${{ inputs.make_target }} RABBITMQ_METADATA_STORE=${{ inputs.metadata_store }} + + # rabbitmq_cli needs a correct broker version for two of its tests. + # But setting PROJECT_VERSION makes other plugins fail. + - name: RUN TESTS (rabbitmq_cli) + if: inputs.plugin == 'rabbitmq_cli' + run: | + make -C deps/${{ inputs.plugin }} ${{ inputs.make_target }} RABBITMQ_METADATA_STORE=${{ inputs.metadata_store }} PROJECT_VERSION="4.1.0" + + - name: UPLOAD TEST LOGS + if: always() + uses: actions/upload-artifact@v4 + with: + name: CT logs (${{ inputs.plugin }} ${{ inputs.make_target }} OTP-${{ inputs.erlang_version }} ${{ inputs.metadata_store }}) + path: | + logs/ + !logs/**/log_private + if-no-files-found: ignore diff --git a/.github/workflows/test-make-tests.yaml b/.github/workflows/test-make-tests.yaml new file mode 100644 index 000000000000..a0142656815d --- /dev/null +++ b/.github/workflows/test-make-tests.yaml @@ -0,0 +1,114 @@ +name: Run tests (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string + metadata_store: + required: true + type: string +jobs: + test-rabbit: + name: Test rabbit + strategy: + fail-fast: false + matrix: + make_target: + - parallel-ct-set-1 + - parallel-ct-set-2 + - parallel-ct-set-3 + - parallel-ct-set-4 + - ct-clustering_management + - eunit ct-dead_lettering + - ct-feature_flags + - ct-metadata_store_clustering + - ct-quorum_queue + - 
ct-rabbit_stream_queue + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + make_target: ${{ matrix.make_target }} + plugin: rabbit + + test-rabbitmq-mqtt: + name: Test rabbitmq_mqtt + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + make_target: parallel-ct-set-1 + plugin: rabbitmq_mqtt + + # The integration_SUITE requires secrets and + # is therefore run from a separate workflow. + test-rabbitmq-peer-discovery-aws: + name: Test rabbitmq_peer_discovery_aws (partially) + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + make_target: ct-config_schema ct-unit + plugin: rabbitmq_peer_discovery_aws + + test-plugin: + name: Test plugins + strategy: + fail-fast: false + matrix: + plugin: + - amqp10_client + - amqp10_common + - amqp_client + - oauth2_client + - rabbit_common + - rabbitmq_amqp_client + - rabbitmq_auth_backend_cache + - rabbitmq_auth_backend_http + - rabbitmq_auth_backend_ldap + - rabbitmq_auth_backend_oauth2 + - rabbitmq_auth_mechanism_ssl + - rabbitmq_aws + - rabbitmq_cli + - rabbitmq_consistent_hash_exchange + - rabbitmq_event_exchange + - rabbitmq_federation + - rabbitmq_federation_management + - rabbitmq_federation_prometheus + - rabbitmq_jms_topic_exchange + - rabbitmq_management + - rabbitmq_management_agent + - rabbitmq_peer_discovery_common + - rabbitmq_peer_discovery_consul + - rabbitmq_peer_discovery_etcd + - rabbitmq_peer_discovery_k8s + - rabbitmq_prelaunch + - rabbitmq_prometheus + - rabbitmq_recent_history_exchange + - rabbitmq_sharding + - rabbitmq_shovel + - rabbitmq_shovel_management + - 
rabbitmq_shovel_prometheus + - rabbitmq_stomp + - rabbitmq_stream + - rabbitmq_stream_common + - rabbitmq_stream_management + - rabbitmq_tracing + - rabbitmq_trust_store + - rabbitmq_web_dispatch + - rabbitmq_web_mqtt + - rabbitmq_web_stomp + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + make_target: tests + plugin: ${{ matrix.plugin }} diff --git a/.github/workflows/test-make-type-check.yaml b/.github/workflows/test-make-type-check.yaml new file mode 100644 index 000000000000..bf977874aff9 --- /dev/null +++ b/.github/workflows/test-make-type-check.yaml @@ -0,0 +1,82 @@ +name: Type check (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string +jobs: + type-check-plugin: + name: Type check plugins + strategy: + fail-fast: false + matrix: + plugin: + # These are using plugin-specific test jobs. + - rabbit + - rabbitmq_mqtt + - rabbitmq_peer_discovery_aws + # These are from the test-plugin test job. + - amqp10_client + - amqp10_common + - amqp_client + - oauth2_client + - rabbit_common + - rabbitmq_amqp_client + - rabbitmq_auth_backend_cache + - rabbitmq_auth_backend_http + - rabbitmq_auth_backend_ldap + - rabbitmq_auth_backend_oauth2 + - rabbitmq_auth_mechanism_ssl + - rabbitmq_aws + - rabbitmq_consistent_hash_exchange + - rabbitmq_event_exchange + - rabbitmq_federation + - rabbitmq_federation_management + - rabbitmq_federation_prometheus + - rabbitmq_jms_topic_exchange + - rabbitmq_management + - rabbitmq_management_agent + - rabbitmq_peer_discovery_common + - rabbitmq_peer_discovery_consul + # @todo We are getting errors because of wrong types + # in the eetcd dep. But upgrading requires using gun 2.0, + # which we can't because another app's dep, emqtt, requires + # gun 1.3.x. So for now we don't type check this plugin. 
+ #- rabbitmq_peer_discovery_etcd + - rabbitmq_peer_discovery_k8s + - rabbitmq_prelaunch + - rabbitmq_prometheus + - rabbitmq_recent_history_exchange + - rabbitmq_sharding + - rabbitmq_shovel + - rabbitmq_shovel_management + - rabbitmq_shovel_prometheus + - rabbitmq_stomp + - rabbitmq_stream + - rabbitmq_stream_common + - rabbitmq_stream_management + - rabbitmq_tracing + - rabbitmq_trust_store + - rabbitmq_web_dispatch + - rabbitmq_web_mqtt + - rabbitmq_web_stomp + # This one we do not want to run tests so no corresponding test job. + - rabbitmq_ct_helpers + # These do not have tests at this time so no corresponding test job. + - rabbitmq_ct_client_helpers + - rabbitmq_random_exchange + - rabbitmq_top + - rabbitmq_web_mqtt_examples + - rabbitmq_web_stomp_examples + - trust_store_http + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: khepri # Not actually used. + make_target: dialyze + plugin: ${{ matrix.plugin }} diff --git a/.github/workflows/test-make.yaml b/.github/workflows/test-make.yaml new file mode 100644 index 000000000000..85e04fea086c --- /dev/null +++ b/.github/workflows/test-make.yaml @@ -0,0 +1,84 @@ +name: Test (make) +on: + push: + branches: + - main + paths: + - deps/** + - scripts/** + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .github/workflows/test-make.yaml + pull_request: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + build-and-xref: + name: Build and Xref + strategy: + fail-fast: false + matrix: + erlang_version: + - '26' + - '27' + elixir_version: + - '1.17' + # @todo Add macOS and Windows. 
+ runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + + - name: FETCH TAGS + run: git fetch --tags + + - name: SETUP OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ matrix.erlang_version }} + elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + + - name: BUILD + run: make + + - name: XREF + run: make xref + + test: + name: Test + strategy: + fail-fast: false + matrix: + erlang_version: + - '26' + - '27' + elixir_version: + - '1.17' + metadata_store: + - mnesia + - khepri + uses: ./.github/workflows/test-make-tests.yaml + with: + erlang_version: ${{ matrix.erlang_version }} + elixir_version: ${{ matrix.elixir_version }} + metadata_store: ${{ matrix.metadata_store }} + + type-check: + name: Type check + strategy: + fail-fast: false + matrix: + erlang_version: # Latest OTP + - '27' + elixir_version: # Latest Elixir + - '1.17' + uses: ./.github/workflows/test-make-type-check.yaml + with: + erlang_version: ${{ matrix.erlang_version }} + elixir_version: ${{ matrix.elixir_version }} diff --git a/.github/workflows/test-management-ui-for-pr.yaml b/.github/workflows/test-management-ui-for-pr.yaml new file mode 100644 index 000000000000..98ec573b739d --- /dev/null +++ b/.github/workflows/test-management-ui-for-pr.yaml @@ -0,0 +1,99 @@ +name: Test Management UI with Selenium for PRs +on: + pull_request: + paths: + - 'deps/**' + - 'selenium/**' + - .github/workflows/test-management-ui-for-pr.yaml +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + selenium: + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + erlang_version: + - "26.2" + browser: + - chrome + include: + - erlang_version: "26.2" + elixir_version: 1.15.7 + env: + SELENIUM_DIR: selenium + DOCKER_NETWORK: rabbitmq_net + steps: + - name: Checkout + uses: 
actions/checkout@v4 + + - name: Configure OTP & Elixir + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ matrix.erlang_version }} + elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + + - name: Authenticate To Google Cloud + uses: google-github-actions/auth@v2.1.6 + with: + credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} + + - name: Configure Bazel + run: | + if [ -n "${{ secrets.REMOTE_CACHE_BUCKET_NAME }}" ]; then + cat << EOF >> user.bazelrc + build --remote_cache=https://storage.googleapis.com/${{ secrets.REMOTE_CACHE_BUCKET_NAME }} + build --google_default_credentials + + build --remote_download_toplevel + EOF + fi + cat << EOF >> user.bazelrc + build --color=yes + EOF + + - name: Build & Load RabbitMQ OCI + run: | + bazelisk run packaging/docker-image:rabbitmq-amd64 + + - name: Configure Docker Network + run: | + docker network create ${DOCKER_NETWORK} + + - name: Build Test Runner Image + run: | + cd ${SELENIUM_DIR} + docker build -t mocha-test --target test . 
+ + - name: Run full ui suites on a standalone rabbitmq server + run: | + RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 \ + ${SELENIUM_DIR}/run-suites.sh + mkdir -p /tmp/full-suite + mv /tmp/selenium/* /tmp/full-suite + mkdir -p /tmp/full-suite/logs + mv ${SELENIUM_DIR}/logs/* /tmp/full-suite/logs + mkdir -p /tmp/full-suite/screens + mv ${SELENIUM_DIR}/screens/* /tmp/full-suite/screens + + - name: Upload Test Artifacts + if: always() + uses: actions/upload-artifact@v4.3.2 + with: + name: test-artifacts-${{ matrix.browser }}-${{ matrix.erlang_version }} + path: | + /tmp/full-suite + /tmp/short-suite + + summary-selenium: + needs: + - selenium + runs-on: ubuntu-latest + steps: + - name: SUMMARY + run: | + echo "SUCCESS" diff --git a/.github/workflows/test-selenium.yaml b/.github/workflows/test-management-ui.yaml similarity index 78% rename from .github/workflows/test-selenium.yaml rename to .github/workflows/test-management-ui.yaml index 4e0bed652833..b05a80cb4e91 100644 --- a/.github/workflows/test-selenium.yaml +++ b/.github/workflows/test-management-ui.yaml @@ -16,11 +16,9 @@ on: - BUILD.* - '*.bzl' - '*.bazel' - - .github/workflows/test-selenium.yaml - pull_request: - paths: - - 'deps/rabbitmq_management/**' - - .github/workflows/test-selenium-for-pull-requests.yaml + - 'selenium/**' + - .github/workflows/test-management-ui.yaml + concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -38,7 +36,7 @@ jobs: - erlang_version: "26.2" elixir_version: 1.15.7 env: - SELENIUM_DIR: deps/rabbitmq_management/selenium + SELENIUM_DIR: selenium DOCKER_NETWORK: rabbitmq_net steps: - name: Checkout @@ -54,7 +52,7 @@ jobs: https://cdn.jsdelivr.net/hex - name: Authenticate To Google Cloud - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} @@ -85,9 +83,16 @@ jobs: cd ${SELENIUM_DIR} docker 
build -t mocha-test --target test . - - name: Run Suites + - name: Run short ui suite on a 3-node rabbitmq cluster run: | - RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 ${SELENIUM_DIR}/run-suites.sh + RABBITMQ_DOCKER_IMAGE=bazel/packaging/docker-image:rabbitmq-amd64 \ + ADDON_PROFILES=cluster ${SELENIUM_DIR}/run-suites.sh short-suite-management-ui + mkdir -p /tmp/short-suite + mv /tmp/selenium/* /tmp/short-suite + mkdir -p /tmp/short-suite/logs + mv ${SELENIUM_DIR}/logs/* /tmp/short-suite/logs + mkdir -p /tmp/short-suite/screens + mv ${SELENIUM_DIR}/screens/* /tmp/short-suite/screens - name: Upload Test Artifacts if: always() @@ -95,9 +100,8 @@ jobs: with: name: test-artifacts-${{ matrix.browser }}-${{ matrix.erlang_version }} path: | - logs/* - screens/* - /tmp/selenium/* + /tmp/full-suite + /tmp/short-suite summary-selenium: needs: diff --git a/.github/workflows/test-mixed-versions.yaml b/.github/workflows/test-mixed-versions.yaml index 4594ffadc26d..f79c4bce8833 100644 --- a/.github/workflows/test-mixed-versions.yaml +++ b/.github/workflows/test-mixed-versions.yaml @@ -3,11 +3,8 @@ on: push: branches: - main - - v3.12.x - - v3.11.x - - v3.10.x - - v3.9.x - - v3.8.x + - v4.0.x + - v3.13.x - bump-otp-* - bump-elixir-* - bump-rbe-* @@ -77,7 +74,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: BUILD SECONDARY UMBRELLA ARCHIVE @@ -617,6 +614,24 @@ jobs: repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} plugin: rabbitmq_federation_management secrets: inherit + test-rabbitmq_federation_prometheus-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - 
test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_prometheus + secrets: inherit test-rabbitmq_jms_topic_exchange-mixed: needs: - check-workflow @@ -905,6 +920,24 @@ jobs: repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} plugin: rabbitmq_shovel_management secrets: inherit + test-rabbitmq_shovel_prometheus-mixed: + needs: + - check-workflow + - test-rabbit-0-mixed + - test-rabbit-1-mixed + - test-rabbit-2-mixed + - test-rabbit-3-mixed + - test-rabbit-4-mixed + - test-rabbit-5-mixed + - test-rabbit-6-mixed + - test-rabbit-7-mixed + - test-rabbit-8-mixed + - test-rabbit-9-mixed + uses: ./.github/workflows/test-plugin-mixed.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_prometheus + secrets: inherit test-rabbitmq_stomp-mixed: needs: - check-workflow @@ -1126,6 +1159,7 @@ jobs: - test-rabbitmq_event_exchange-mixed - test-rabbitmq_federation-mixed - test-rabbitmq_federation_management-mixed + - test-rabbitmq_federation_prometheus-mixed - test-rabbitmq_jms_topic_exchange-mixed - test-rabbitmq_management-mixed - test-rabbitmq_management_agent-mixed @@ -1142,6 +1176,7 @@ jobs: - test-rabbitmq_sharding-mixed - test-rabbitmq_shovel-mixed - test-rabbitmq_shovel_management-mixed + - test-rabbitmq_shovel_prometheus-mixed - test-rabbitmq_stomp-mixed - test-rabbitmq_stream-mixed - test-rabbitmq_stream_management-mixed diff --git a/.github/workflows/test-plugin-mixed.yaml b/.github/workflows/test-plugin-mixed.yaml index baf08ec8c0c5..edffefaaeea7 100644 --- a/.github/workflows/test-plugin-mixed.yaml +++ b/.github/workflows/test-plugin-mixed.yaml @@ -29,10 +29,7 @@ jobs: - 26 metadata_store: - mnesia - # Khepri is currently skipped because Khepri is an unstable feature: we don't guarantee upgrability. 
- # Mixed-version tests currently fail with Khepri because of a new machine version introduced in - # Khepri v0.14.0. - # - khepri + - khepri include: - erlang_version: 26 elixir_version: 1.15 @@ -54,7 +51,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL @@ -78,7 +75,7 @@ jobs: - uses: actions/setup-dotnet@v4 if: inputs.plugin == 'rabbit' with: - dotnet-version: '3.1.x' + dotnet-version: '8.0' - name: deps/amqp10_client SETUP if: inputs.plugin == 'amqp10_client' run: | diff --git a/.github/workflows/test-plugin.yaml b/.github/workflows/test-plugin.yaml index a2ddafa1f561..3998013c03eb 100644 --- a/.github/workflows/test-plugin.yaml +++ b/.github/workflows/test-plugin.yaml @@ -51,7 +51,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 3a77957396b5..d4b0802441c8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,7 +2,7 @@ name: Test on: push: branches: - - main + - v4.0.x - v3.13.x - v3.12.x - v3.11.x @@ -21,7 +21,6 @@ on: - '*.bzl' - '*.bazel' - .github/workflows/test.yaml - pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true @@ -52,7 +51,7 @@ jobs: run: | echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.3 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - 
name: REPO CACHE @@ -554,6 +553,24 @@ jobs: repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} plugin: rabbitmq_federation_management secrets: inherit + test-rabbitmq_federation_prometheus: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_federation_prometheus + secrets: inherit test-rabbitmq_jms_topic_exchange: needs: - check-workflow @@ -842,6 +859,24 @@ jobs: repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} plugin: rabbitmq_shovel_management secrets: inherit + test-rabbitmq_shovel_prometheus: + needs: + - check-workflow + - test-rabbit-0 + - test-rabbit-1 + - test-rabbit-2 + - test-rabbit-3 + - test-rabbit-4 + - test-rabbit-5 + - test-rabbit-6 + - test-rabbit-7 + - test-rabbit-8 + - test-rabbit-9 + uses: ./.github/workflows/test-plugin.yaml + with: + repo_cache_key: ${{ needs.check-workflow.outputs.repo_cache_key }} + plugin: rabbitmq_shovel_prometheus + secrets: inherit test-rabbitmq_stomp: needs: - check-workflow @@ -1063,6 +1098,7 @@ jobs: - test-rabbitmq_event_exchange - test-rabbitmq_federation - test-rabbitmq_federation_management + - test-rabbitmq_federation_prometheus - test-rabbitmq_jms_topic_exchange - test-rabbitmq_management - test-rabbitmq_management_agent @@ -1079,6 +1115,7 @@ jobs: - test-rabbitmq_sharding - test-rabbitmq_shovel - test-rabbitmq_shovel_management + - test-rabbitmq_shovel_prometheus - test-rabbitmq_stomp - test-rabbitmq_stream - test-rabbitmq_stream_management diff --git a/.gitignore b/.gitignore index f5c68fc329d8..1bc1578cb1d2 100644 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ elvis !/deps/rabbitmq_event_exchange/ !/deps/rabbitmq_federation/ !/deps/rabbitmq_federation_management/ 
+!/deps/rabbitmq_federation_prometheus/ !/deps/rabbitmq_jms_topic_exchange/ !/deps/rabbitmq_management/ !/deps/rabbitmq_management_agent/ @@ -64,6 +65,7 @@ elvis !/deps/rabbitmq_sharding/ !/deps/rabbitmq_shovel/ !/deps/rabbitmq_shovel_management/ +!/deps/rabbitmq_shovel_prometheus/ !/deps/rabbitmq_stomp/ !/deps/rabbitmq_stream/ !/deps/rabbitmq_stream_common/ diff --git a/MODULE.bazel b/MODULE.bazel index 19d7af1155af..c231fed571e0 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -56,7 +56,7 @@ bazel_dep( bazel_dep( name = "rabbitmq_osiris", - version = "1.8.2", + version = "1.8.3", repo_name = "osiris", ) @@ -147,8 +147,8 @@ erlang_package.hex_package( erlang_package.hex_package( name = "cuttlefish", build_file = "@rabbitmq-server//bazel:BUILD.cuttlefish", - sha256 = "d3ef90bd2f5923477ab772fbda5cd5ad088438e4fd56801b455b87ada9f46fa3", - version = "3.1.0", + sha256 = "43cadd7f34b3dbbab52a7f4110d1df276a13cff5e11afe0f5a774f69f012b76b", + version = "3.4.0", ) erlang_package.hex_package( @@ -210,15 +210,15 @@ erlang_package.hex_package( erlang_package.hex_package( name = "khepri", build_file = "@rabbitmq-server//bazel:BUILD.khepri", - sha256 = "dccfaeb3583a04722e2258911f7f906ce67f8efac80504be4923aaafae6d4e21", - version = "0.14.0", + sha256 = "feee8a0a1f3f78dd9f8860feacba63cc165c81af1b351600903e34a20676d5f6", + version = "0.16.0", ) erlang_package.hex_package( name = "khepri_mnesia_migration", build_file = "@rabbitmq-server//bazel:BUILD.khepri_mnesia_migration", - sha256 = "f56d277ca7876371615cef9c5674c78854f31cf9f26ce97fd3f4b5a65573ccc4", - version = "0.5.0", + sha256 = "950e46306f8e9a91a5dbf1f7e465dc251bdbc7737809ebf2c493f4058983d87c", + version = "0.7.0", ) erlang_package.hex_package( @@ -231,8 +231,8 @@ erlang_package.hex_package( erlang_package.hex_package( name = "observer_cli", build_file = "@rabbitmq-server//bazel:BUILD.observer_cli", - sha256 = "a41b6d3e11a3444e063e09cc225f7f3e631ce14019e5fbcaebfda89b1bd788ea", - version = "1.7.3", + sha256 = 
"872cf8e833a3a71ebd05420692678ec8aaede8fd96c805a4687398f6b23a3014", + version = "1.7.5", ) erlang_package.hex_package( @@ -253,8 +253,8 @@ erlang_package.hex_package( name = "ra", build_file = "@rabbitmq-server//bazel:BUILD.ra", pkg = "ra", - sha256 = "264def8b2ba20599f87b37e12f1d5d557911d2201a41749ce16158f98365d599", - version = "2.13.5", + sha256 = "1d553dd971a0b398b7af0fa8c8458dda575715ff71c65c972e9500b24039b240", + version = "2.14.0", ) erlang_package.git_package( @@ -428,7 +428,7 @@ secondary_umbrella = use_extension( use_repo( secondary_umbrella, - "rabbitmq-server-generic-unix-3.13", + "rabbitmq-server-generic-unix-4.0", ) hex = use_extension( diff --git a/Makefile b/Makefile index d5409a22ed27..f0a62971d91c 100644 --- a/Makefile +++ b/Makefile @@ -24,8 +24,7 @@ ADDITIONAL_PLUGINS ?= DEPS = rabbit_common rabbit $(PLUGINS) $(ADDITIONAL_PLUGINS) DEP_PLUGINS = rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-tools.mk + rabbit_common/mk/rabbitmq-run.mk DISABLE_DISTCLEAN = 1 @@ -61,6 +60,20 @@ include rabbitmq-components.mk # multiple times (including for release file names and whatnot). PROJECT_VERSION := $(PROJECT_VERSION) +# Fetch/build community plugins. +# +# To include community plugins in commands, use +# `make COMMUNITY_PLUGINS=1` or export the variable. +# They are not included otherwise. Note that only +# the top-level Makefile can do this. +# +# Note that the community plugins will be fetched using +# SSH and therefore may be subject to GH authentication. 
+ +ifdef COMMUNITY_PLUGINS +DEPS += $(RABBITMQ_COMMUNITY) +endif + include erlang.mk include mk/github-actions.mk include mk/bazel.mk @@ -594,6 +607,7 @@ TIER1_PLUGINS := \ rabbitmq_event_exchange \ rabbitmq_federation \ rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ rabbitmq_jms_topic_exchange \ rabbitmq_management \ rabbitmq_management_agent \ @@ -610,6 +624,7 @@ TIER1_PLUGINS := \ rabbitmq_sharding \ rabbitmq_shovel \ rabbitmq_shovel_management \ + rabbitmq_shovel_prometheus \ rabbitmq_stomp \ rabbitmq_stream \ rabbitmq_stream_management \ diff --git a/bazel/bzlmod/secondary_umbrella.bzl b/bazel/bzlmod/secondary_umbrella.bzl index adfb76cff4e2..7c8b9b9cb7b0 100644 --- a/bazel/bzlmod/secondary_umbrella.bzl +++ b/bazel/bzlmod/secondary_umbrella.bzl @@ -25,12 +25,12 @@ EOF def secondary_umbrella(): http_archive( - name = "rabbitmq-server-generic-unix-3.13", + name = "rabbitmq-server-generic-unix-4.0", build_file = "@//:BUILD.package_generic_unix", patch_cmds = [ADD_PLUGINS_DIR_BUILD_FILE], - strip_prefix = "rabbitmq_server-3.13.1", + strip_prefix = "rabbitmq_server-4.0.0", # This file is produced just in time by the test-mixed-versions.yaml GitHub Actions workflow. 
urls = [ - "https://rabbitmq-github-actions.s3.eu-west-1.amazonaws.com/secondary-umbrellas/26.1/package-generic-unix-for-mixed-version-testing-v3.13.1.tar.xz", + "https://rabbitmq-github-actions.s3.eu-west-1.amazonaws.com/secondary-umbrellas/26.1/package-generic-unix-for-mixed-version-testing-v4.0.2.tar.xz", ], ) diff --git a/deps/amqp10_client/Makefile b/deps/amqp10_client/Makefile index 36c117c78ea1..ceb96f382525 100644 --- a/deps/amqp10_client/Makefile +++ b/deps/amqp10_client/Makefile @@ -33,13 +33,11 @@ DEPS = amqp10_common credentials_obfuscation TEST_DEPS = rabbit rabbitmq_ct_helpers LOCAL_DEPS = ssl inets crypto public_key -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk DEP_PLUGINS += elvis_mk dep_elvis_mk = git https://github.com/inaka/elvis.mk.git master diff --git a/deps/amqp10_client/src/amqp10_client.erl b/deps/amqp10_client/src/amqp10_client.erl index 68cac2622265..c5ebc7ba123f 100644 --- a/deps/amqp10_client/src/amqp10_client.erl +++ b/deps/amqp10_client/src/amqp10_client.erl @@ -106,9 +106,7 @@ open_connection(ConnectionConfig0) -> notify_when_opened => NotifyWhenOpened, notify_when_closed => NotifyWhenClosed }, - Sasl = maps:get(sasl, ConnectionConfig1), - ConnectionConfig2 = ConnectionConfig1#{sasl => amqp10_client_connection:encrypt_sasl(Sasl)}, - ConnectionConfig = merge_default_tls_options(ConnectionConfig2), + ConnectionConfig = merge_default_tls_options(ConnectionConfig1), amqp10_client_connection:open(ConnectionConfig). 
%% @doc Closes a connection. @@ -431,8 +429,8 @@ parse_result(Map) -> throw(plain_sasl_missing_userinfo); _ -> case UserInfo of - [] -> none; - undefined -> none; + [] -> anon; + undefined -> anon; U -> parse_usertoken(U) end end, @@ -458,11 +456,6 @@ parse_result(Map) -> Ret0#{tls_opts => {secure_port, TlsOpts}} end. - -parse_usertoken(undefined) -> - none; -parse_usertoken("") -> - none; parse_usertoken(U) -> [User, Pass] = string:tokens(U, ":"), {plain, @@ -534,7 +527,7 @@ parse_uri_test_() -> [?_assertEqual({ok, #{address => "my_host", port => 9876, hostname => <<"my_host">>, - sasl => none}}, parse_uri("amqp://my_host:9876")), + sasl => anon}}, parse_uri("amqp://my_host:9876")), %% port defaults ?_assertMatch({ok, #{port := 5671}}, parse_uri("amqps://my_host")), ?_assertMatch({ok, #{port := 5672}}, parse_uri("amqp://my_host")), diff --git a/deps/amqp10_client/src/amqp10_client_connection.erl b/deps/amqp10_client/src/amqp10_client_connection.erl index 4a9c738eac98..df0548aa9ef1 100644 --- a/deps/amqp10_client/src/amqp10_client_connection.erl +++ b/deps/amqp10_client/src/amqp10_client_connection.erl @@ -22,9 +22,7 @@ socket_ready/2, protocol_header_received/5, begin_session/1, - heartbeat/1, - encrypt_sasl/1, - decrypt_sasl/1]). + heartbeat/1]). %% gen_statem callbacks -export([init/1, @@ -52,7 +50,8 @@ -type address() :: inet:socket_address() | inet:hostname(). -type encrypted_sasl() :: {plaintext, binary()} | {encrypted, binary()}. --type decrypted_sasl() :: none | anon | {plain, User :: binary(), Pwd :: binary()}. +-type decrypted_sasl() :: none | anon | external | {plain, User :: binary(), Pwd :: binary()}. +-type sasl() :: encrypted_sasl() | decrypted_sasl(). 
-type connection_config() :: #{container_id => binary(), % AMQP container id @@ -72,14 +71,12 @@ % set to a negative value to allow a sender to "overshoot" the flow % control by this margin transfer_limit_margin => 0 | neg_integer(), - %% These credentials_obfuscation-wrapped values have the type of - %% decrypted_sasl/0 - sasl => encrypted_sasl() | decrypted_sasl(), + sasl => sasl(), properties => amqp10_client_types:properties() }. -record(state, - {next_channel = 1 :: pos_integer(), + {next_channel = 0 :: non_neg_integer(), connection_sup :: pid(), reader_m_ref :: reference() | undefined, sessions_sup :: pid() | undefined, @@ -92,16 +89,15 @@ }). -export_type([connection_config/0, - amqp10_socket/0, - encrypted_sasl/0, - decrypted_sasl/0]). + amqp10_socket/0]). %% ------------------------------------------------------------------- %% Public API. %% ------------------------------------------------------------------- -spec open(connection_config()) -> supervisor:startchild_ret(). -open(Config) -> +open(Config0) -> + Config = maps:update_with(sasl, fun maybe_encrypt_sasl/1, Config0), %% Start the supervision tree dedicated to that connection. It %% starts at least a connection process (the PID we want to return) %% and a reader process (responsible for opening and reading the @@ -127,17 +123,23 @@ open(Config) -> close(Pid, Reason) -> gen_statem:cast(Pid, {close, Reason}). --spec encrypt_sasl(decrypted_sasl()) -> encrypted_sasl(). -encrypt_sasl(none) -> - credentials_obfuscation:encrypt(none); -encrypt_sasl(DecryptedSasl) -> - credentials_obfuscation:encrypt(term_to_binary(DecryptedSasl)). - --spec decrypt_sasl(encrypted_sasl()) -> decrypted_sasl(). -decrypt_sasl(none) -> - credentials_obfuscation:decrypt(none); -decrypt_sasl(EncryptedSasl) -> - binary_to_term(credentials_obfuscation:decrypt(EncryptedSasl)). +-spec maybe_encrypt_sasl(decrypted_sasl()) -> sasl(). 
+maybe_encrypt_sasl(Sasl) + when Sasl =:= none orelse + Sasl =:= anon orelse + Sasl =:= external -> + Sasl; +maybe_encrypt_sasl(Plain = {plain, _User, _Passwd}) -> + credentials_obfuscation:encrypt(term_to_binary(Plain)). + +-spec maybe_decrypt_sasl(sasl()) -> decrypted_sasl(). +maybe_decrypt_sasl(Sasl) + when Sasl =:= none orelse + Sasl =:= anon orelse + Sasl =:= external -> + Sasl; +maybe_decrypt_sasl(Encrypted) -> + binary_to_term(credentials_obfuscation:decrypt(Encrypted)). %% ------------------------------------------------------------------- %% Private API. @@ -207,13 +209,11 @@ sasl_hdr_sent({call, From}, begin_session, {keep_state, State1}. sasl_hdr_rcvds(_EvtType, #'v1_0.sasl_mechanisms'{ - sasl_server_mechanisms = {array, symbol, Mechs}}, - State = #state{config = #{sasl := EncryptedSasl}}) -> - DecryptedSasl = decrypt_sasl(EncryptedSasl), - SaslBin = {symbol, decrypted_sasl_to_bin(DecryptedSasl)}, - case lists:any(fun(S) when S =:= SaslBin -> true; - (_) -> false - end, Mechs) of + sasl_server_mechanisms = {array, symbol, AvailableMechs}}, + State = #state{config = #{sasl := Sasl}}) -> + DecryptedSasl = maybe_decrypt_sasl(Sasl), + OurMech = {symbol, decrypted_sasl_to_mechanism(DecryptedSasl)}, + case lists:member(OurMech, AvailableMechs) of true -> ok = send_sasl_init(State, DecryptedSasl), {next_state, sasl_init_sent, State}; @@ -454,6 +454,15 @@ send_close(#state{socket = Socket}, _Reason) -> send_sasl_init(State, anon) -> Frame = #'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>}}, send(Frame, 1, State); +send_sasl_init(State, external) -> + Frame = #'v1_0.sasl_init'{ + mechanism = {symbol, <<"EXTERNAL">>}, + %% "This response is empty when the client is requesting to act + %% as the identity the server associated with its authentication + %% credentials." 
+ %% https://datatracker.ietf.org/doc/html/rfc4422#appendix-A.1 + initial_response = {binary, <<>>}}, + send(Frame, 1, State); send_sasl_init(State, {plain, User, Pass}) -> Response = <<0:8, User/binary, 0:8, Pass/binary>>, Frame = #'v1_0.sasl_init'{mechanism = {symbol, <<"PLAIN">>}, @@ -546,9 +555,12 @@ translate_err(#'v1_0.error'{condition = Cond, description = Desc}) -> amqp10_event(Evt) -> {amqp10_event, {connection, self(), Evt}}. -decrypted_sasl_to_bin({plain, _, _}) -> <<"PLAIN">>; -decrypted_sasl_to_bin(anon) -> <<"ANONYMOUS">>; -decrypted_sasl_to_bin(none) -> <<"ANONYMOUS">>. +decrypted_sasl_to_mechanism(anon) -> + <<"ANONYMOUS">>; +decrypted_sasl_to_mechanism(external) -> + <<"EXTERNAL">>; +decrypted_sasl_to_mechanism({plain, _, _}) -> + <<"PLAIN">>. config_defaults() -> #{sasl => none, diff --git a/deps/amqp10_client/src/amqp10_client_session.erl b/deps/amqp10_client/src/amqp10_client_session.erl index b66308a826b2..c1e5eb46214f 100644 --- a/deps/amqp10_client/src/amqp10_client_session.erl +++ b/deps/amqp10_client/src/amqp10_client_session.erl @@ -69,9 +69,6 @@ %% "The remotely chosen handle is referred to as the input handle." [2.6.2] -type input_handle() :: link_handle(). --type snd_settle_mode() :: unsettled | settled | mixed. --type rcv_settle_mode() :: first | second. - -type terminus_durability() :: none | configuration | unsettled_state. -type target_def() :: #{address => link_address(), @@ -964,7 +961,8 @@ rcv_settle_mode(_) -> undefined. 
% TODO: work out if we can assume accepted translate_delivery_state(undefined) -> undefined; translate_delivery_state(#'v1_0.accepted'{}) -> accepted; -translate_delivery_state(#'v1_0.rejected'{}) -> rejected; +translate_delivery_state(#'v1_0.rejected'{error = undefined}) -> rejected; +translate_delivery_state(#'v1_0.rejected'{error = Error}) -> {rejected, Error}; translate_delivery_state(#'v1_0.modified'{}) -> modified; translate_delivery_state(#'v1_0.released'{}) -> released; translate_delivery_state(#'v1_0.received'{}) -> received; @@ -1171,16 +1169,13 @@ make_link_ref(Role, Session, Handle) -> #link_ref{role = Role, session = Session, link_handle = Handle}. translate_message_annotations(MA) - when is_map(MA) andalso - map_size(MA) > 0 -> - Content = maps:fold(fun (K, V, Acc) -> - [{sym(K), wrap_map_value(V)} | Acc] - end, [], MA), - #'v1_0.message_annotations'{content = Content}; + when map_size(MA) > 0 -> + {map, maps:fold(fun(K, V, Acc) -> + [{sym(K), wrap_map_value(V)} | Acc] + end, [], MA)}; translate_message_annotations(_MA) -> undefined. - wrap_map_value(true) -> {boolean, true}; wrap_map_value(false) -> diff --git a/deps/amqp10_client/src/amqp10_msg.erl b/deps/amqp10_client/src/amqp10_msg.erl index 91a7efebe329..fa046cc60657 100644 --- a/deps/amqp10_client/src/amqp10_msg.erl +++ b/deps/amqp10_client/src/amqp10_msg.erl @@ -193,7 +193,8 @@ header(first_acquirer = K, header(delivery_count = K, #amqp10_msg{header = #'v1_0.header'{delivery_count = D}}) -> header_value(K, D); -header(K, #amqp10_msg{header = undefined}) -> header_value(K, undefined). +header(K, #amqp10_msg{header = undefined}) -> + header_value(K, undefined). -spec delivery_annotations(amqp10_msg()) -> #{annotations_key() => any()}. 
delivery_annotations(#amqp10_msg{delivery_annotations = undefined}) -> diff --git a/deps/amqp10_client/test/system_SUITE.erl b/deps/amqp10_client/test/system_SUITE.erl index 302754d4fad3..7a64425c7583 100644 --- a/deps/amqp10_client/test/system_SUITE.erl +++ b/deps/amqp10_client/test/system_SUITE.erl @@ -103,8 +103,7 @@ stop_amqp10_client_app(Config) -> %% ------------------------------------------------------------------- init_per_group(rabbitmq, Config0) -> - Config = rabbit_ct_helpers:set_config(Config0, - {sasl, {plain, <<"guest">>, <<"guest">>}}), + Config = rabbit_ct_helpers:set_config(Config0, {sasl, anon}), Config1 = rabbit_ct_helpers:merge_app_env(Config, [{rabbit, [{max_message_size, 134217728}]}]), @@ -115,7 +114,7 @@ init_per_group(rabbitmq_strict, Config0) -> {sasl, {plain, <<"guest">>, <<"guest">>}}), Config1 = rabbit_ct_helpers:merge_app_env(Config, [{rabbit, - [{amqp1_0_default_user, none}, + [{anonymous_login_user, none}, {max_message_size, 134217728}]}]), rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()); @@ -349,8 +348,8 @@ roundtrip(OpenConf, Body) -> Msg0 = amqp10_msg:new(<<"my-tag">>, Body, true), Msg1 = amqp10_msg:set_application_properties(#{"a_key" => "a_value"}, Msg0), Msg2 = amqp10_msg:set_properties(Props, Msg1), - Msg = amqp10_msg:set_message_annotations(#{<<"x-key">> => "x-value", - <<"x_key">> => "x_value"}, Msg2), + Msg = amqp10_msg:set_message_annotations(#{<<"x-key 1">> => "value 1", + <<"x-key 2">> => "value 2"}, Msg2), ok = amqp10_client:send_msg(Sender, Msg), ok = amqp10_client:detach_link(Sender), await_link(Sender, {detached, normal}, link_detach_timeout), @@ -365,8 +364,8 @@ roundtrip(OpenConf, Body) -> % ct:pal(?LOW_IMPORTANCE, "roundtrip message Out: ~tp~nIn: ~tp~n", [OutMsg, Msg]), ?assertMatch(Props, amqp10_msg:properties(OutMsg)), ?assertEqual(#{<<"a_key">> => <<"a_value">>}, amqp10_msg:application_properties(OutMsg)), - ?assertMatch(#{<<"x-key">> := <<"x-value">>, - <<"x_key">> := 
<<"x_value">>}, amqp10_msg:message_annotations(OutMsg)), + ?assertMatch(#{<<"x-key 1">> := <<"value 1">>, + <<"x-key 2">> := <<"value 2">>}, amqp10_msg:message_annotations(OutMsg)), ?assertEqual([Body], amqp10_msg:body(OutMsg)), ok. @@ -720,14 +719,14 @@ insufficient_credit(Config) -> OpenStep = fun({0 = Ch, #'v1_0.open'{}, _Pay}) -> {Ch, [#'v1_0.open'{container_id = {utf8, <<"mock">>}}]} end, - BeginStep = fun({1 = Ch, #'v1_0.begin'{}, _Pay}) -> - {Ch, [#'v1_0.begin'{remote_channel = {ushort, 1}, + BeginStep = fun({0 = Ch, #'v1_0.begin'{}, _Pay}) -> + {Ch, [#'v1_0.begin'{remote_channel = {ushort, Ch}, next_outgoing_id = {uint, 1}, incoming_window = {uint, 1000}, outgoing_window = {uint, 1000}} ]} end, - AttachStep = fun({1 = Ch, #'v1_0.attach'{role = false, + AttachStep = fun({0 = Ch, #'v1_0.attach'{role = false, name = Name}, <<>>}) -> {Ch, [#'v1_0.attach'{name = Name, handle = {uint, 99}, @@ -760,14 +759,14 @@ multi_transfer_without_delivery_id(Config) -> OpenStep = fun({0 = Ch, #'v1_0.open'{}, _Pay}) -> {Ch, [#'v1_0.open'{container_id = {utf8, <<"mock">>}}]} end, - BeginStep = fun({1 = Ch, #'v1_0.begin'{}, _Pay}) -> - {Ch, [#'v1_0.begin'{remote_channel = {ushort, 1}, + BeginStep = fun({0 = Ch, #'v1_0.begin'{}, _Pay}) -> + {Ch, [#'v1_0.begin'{remote_channel = {ushort, Ch}, next_outgoing_id = {uint, 1}, incoming_window = {uint, 1000}, outgoing_window = {uint, 1000}} ]} end, - AttachStep = fun({1 = Ch, #'v1_0.attach'{role = true, + AttachStep = fun({0 = Ch, #'v1_0.attach'{role = true, name = Name}, <<>>}) -> {Ch, [#'v1_0.attach'{name = Name, handle = {uint, 99}, @@ -776,7 +775,7 @@ multi_transfer_without_delivery_id(Config) -> ]} end, - LinkCreditStep = fun({1 = Ch, #'v1_0.flow'{}, <<>>}) -> + LinkCreditStep = fun({0 = Ch, #'v1_0.flow'{}, <<>>}) -> {Ch, {multi, [[#'v1_0.transfer'{handle = {uint, 99}, delivery_id = {uint, 12}, more = true}, diff --git a/deps/amqp10_common/Makefile b/deps/amqp10_common/Makefile index 6d1b124b817b..6208fecad785 100644 --- 
a/deps/amqp10_common/Makefile +++ b/deps/amqp10_common/Makefile @@ -24,7 +24,7 @@ define HEX_TARBALL_EXTRA_METADATA } endef -DIALYZER_OPTS += --src -r test -DTEST +#DIALYZER_OPTS += --src -r test -DTEST BUILD_DEPS = rabbit_common TEST_DEPS = rabbitmq_ct_helpers proper @@ -38,12 +38,10 @@ TEST_DEPS = rabbitmq_ct_helpers proper -include development.pre.mk -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker. DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk + rabbit_common/mk/rabbitmq-hexpm.mk PLT_APPS = eunit diff --git a/deps/amqp10_common/include/amqp10_types.hrl b/deps/amqp10_common/include/amqp10_types.hrl index 3068f6efb4f5..ad29b86d9c14 100644 --- a/deps/amqp10_common/include/amqp10_types.hrl +++ b/deps/amqp10_common/include/amqp10_types.hrl @@ -15,5 +15,10 @@ -define(AMQP_ROLE_SENDER, false). -define(AMQP_ROLE_RECEIVER, true). +% [2.8.2] +-type snd_settle_mode() :: unsettled | settled | mixed. +% [2.8.3] +-type rcv_settle_mode() :: first | second. + % [3.2.16] -define(MESSAGE_FORMAT, 0). 
diff --git a/deps/amqp10_common/src/amqp10_framing.erl b/deps/amqp10_common/src/amqp10_framing.erl index 4742a639766a..39f32f962208 100644 --- a/deps/amqp10_common/src/amqp10_framing.erl +++ b/deps/amqp10_common/src/amqp10_framing.erl @@ -122,11 +122,11 @@ decode({described, Descriptor, {map, Fields} = Type}) -> #'v1_0.application_properties'{} -> #'v1_0.application_properties'{content = decode_map(Fields)}; #'v1_0.delivery_annotations'{} -> - #'v1_0.delivery_annotations'{content = decode_map(Fields)}; + #'v1_0.delivery_annotations'{content = decode_annotations(Fields)}; #'v1_0.message_annotations'{} -> - #'v1_0.message_annotations'{content = decode_map(Fields)}; + #'v1_0.message_annotations'{content = decode_annotations(Fields)}; #'v1_0.footer'{} -> - #'v1_0.footer'{content = decode_map(Fields)}; + #'v1_0.footer'{content = decode_annotations(Fields)}; #'v1_0.amqp_value'{} -> #'v1_0.amqp_value'{content = Type}; Else -> @@ -149,6 +149,16 @@ decode(Other) -> decode_map(Fields) -> [{decode(K), decode(V)} || {K, V} <- Fields]. +%% "The annotations type is a map where the keys are restricted to be of type symbol +%% or of type ulong. All ulong keys, and all symbolic keys except those beginning +%% with "x-" are reserved." [3.2.10] +%% Since we already parse annotations here and neither the client nor server uses +%% reserved keys, we perform strict validation and crash if any reserved keys are used. +decode_annotations(Fields) -> + lists:map(fun({{symbol, <<"x-", _/binary>>} = K, V}) -> + {K, decode(V)} + end, Fields). + -spec encode_described(list | map | binary | annotations | '*', non_neg_integer(), amqp10_frame()) -> @@ -216,7 +226,7 @@ pprint(Other) -> Other. -include_lib("eunit/include/eunit.hrl"). 
encode_decode_test_() -> - Data = [{{utf8, <<"k">>}, {binary, <<"v">>}}], + Data = [{{symbol, <<"x-my key">>}, {binary, <<"my value">>}}], Test = fun(M) -> [M] = decode_bin(iolist_to_binary(encode_bin(M))) end, [ fun() -> Test(#'v1_0.application_properties'{content = Data}) end, diff --git a/deps/amqp10_common/test/prop_SUITE.erl b/deps/amqp10_common/test/prop_SUITE.erl index 4cb04f594f37..37ffaead77bf 100644 --- a/deps/amqp10_common/test/prop_SUITE.erl +++ b/deps/amqp10_common/test/prop_SUITE.erl @@ -412,14 +412,21 @@ footer_section() -> annotations() -> ?LET(KvList, - list({oneof([amqp_symbol(), - amqp_ulong()]), + list({non_reserved_annotation_key(), prefer_simple_type()}), begin KvList1 = lists:uniq(fun({K, _V}) -> K end, KvList), lists:filter(fun({_K, V}) -> V =/= null end, KvList1) end). +non_reserved_annotation_key() -> + {symbol, ?LET(L, + ?SIZED(Size, resize(Size * 10, list(ascii_char()))), + begin + Bin = list_to_binary(L) , + <<"x-", Bin/binary>> + end)}. + sequence_no() -> amqp_uint(). diff --git a/deps/amqp_client/Makefile b/deps/amqp_client/Makefile index c873f300e553..43dbb62901ad 100644 --- a/deps/amqp_client/Makefile +++ b/deps/amqp_client/Makefile @@ -43,13 +43,11 @@ LOCAL_DEPS = xmerl ssl public_key DEPS = rabbit_common credentials_obfuscation TEST_DEPS = rabbitmq_ct_helpers rabbit meck -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk PLT_APPS = ssl public_key diff --git a/deps/amqp_client/test/system_SUITE.erl b/deps/amqp_client/test/system_SUITE.erl index fe8309ce473a..2ff03e8d20a5 100644 --- a/deps/amqp_client/test/system_SUITE.erl +++ b/deps/amqp_client/test/system_SUITE.erl @@ -335,14 +335,16 @@ safe_call_timeouts_test(Params = #amqp_params_network{}) -> meck:unload(amqp_network_connection); safe_call_timeouts_test(Params = #amqp_params_direct{}) -> + %% We must mock net_kernel:get_net_ticktime/0 as changing + %% the tick time directly could lead to nodes disconnecting. + meck:new(net_kernel, [unstick, passthrough]), + TestCallTimeout = 30000, - NetTicktime0 = net_kernel:get_net_ticktime(), amqp_util:update_call_timeout(TestCallTimeout), %% 1. NetTicktime >= DIRECT_OPERATION_TIMEOUT (120s) NetTicktime1 = 140, - net_kernel:set_net_ticktime(NetTicktime1, 1), - wait_until_net_ticktime(NetTicktime1), + meck:expect(net_kernel, get_net_ticktime, fun() -> NetTicktime1 end), {ok, Connection1} = amqp_connection:start(Params), ?assertEqual((NetTicktime1 * 1000) + ?CALL_TIMEOUT_DEVIATION, @@ -356,15 +358,12 @@ safe_call_timeouts_test(Params = #amqp_params_direct{}) -> %% 2. Transitioning NetTicktime >= DIRECT_OPERATION_TIMEOUT (120s) NetTicktime2 = 120, - net_kernel:set_net_ticktime(NetTicktime2, 1), - ?assertEqual({ongoing_change_to, NetTicktime2}, net_kernel:get_net_ticktime()), + meck:expect(net_kernel, get_net_ticktime, fun() -> {ongoing_change_to, NetTicktime2} end), {ok, Connection2} = amqp_connection:start(Params), ?assertEqual((NetTicktime2 * 1000) + ?CALL_TIMEOUT_DEVIATION, amqp_util:call_timeout()), - wait_until_net_ticktime(NetTicktime2), - ?assertEqual(ok, amqp_connection:close(Connection2)), wait_for_death(Connection2), @@ -373,15 +372,14 @@ safe_call_timeouts_test(Params = #amqp_params_direct{}) -> %% 3. 
NetTicktime < DIRECT_OPERATION_TIMEOUT (120s) NetTicktime3 = 60, - net_kernel:set_net_ticktime(NetTicktime3, 1), - wait_until_net_ticktime(NetTicktime3), + meck:expect(net_kernel, get_net_ticktime, fun() -> NetTicktime3 end), {ok, Connection3} = amqp_connection:start(Params), ?assertEqual((?DIRECT_OPERATION_TIMEOUT + ?CALL_TIMEOUT_DEVIATION), amqp_util:call_timeout()), - net_kernel:set_net_ticktime(NetTicktime0, 1), - wait_until_net_ticktime(NetTicktime0), + meck:unload(net_kernel), + ?assertEqual(ok, amqp_connection:close(Connection3)), wait_for_death(Connection3), @@ -1578,16 +1576,6 @@ assert_down_with_error(MonitorRef, CodeAtom) -> exit(did_not_die) end. -wait_until_net_ticktime(NetTicktime) -> - case net_kernel:get_net_ticktime() of - NetTicktime -> ok; - {ongoing_change_to, NetTicktime} -> - timer:sleep(1000), - wait_until_net_ticktime(NetTicktime); - _ -> - throw({error, {net_ticktime_not_set, NetTicktime}}) - end. - set_resource_alarm(Resource, Config) when Resource =:= memory orelse Resource =:= disk -> SrcDir = ?config(amqp_client_srcdir, Config), diff --git a/deps/oauth2_client/BUILD.bazel b/deps/oauth2_client/BUILD.bazel index be565ee245d8..491ea1e4da3c 100644 --- a/deps/oauth2_client/BUILD.bazel +++ b/deps/oauth2_client/BUILD.bazel @@ -108,6 +108,7 @@ rabbitmq_integration_suite( size = "small", additional_beam = [ "test/oauth_http_mock.beam", + "test/oauth2_client_test_util.beam", ], runtime_deps = [ "@cowboy//:erlang_app", diff --git a/deps/oauth2_client/Makefile b/deps/oauth2_client/Makefile index 2acf3a7c2d0d..6dcf2cbaf7c6 100644 --- a/deps/oauth2_client/Makefile +++ b/deps/oauth2_client/Makefile @@ -9,13 +9,8 @@ LOCAL_DEPS = ssl inets crypto public_key PLT_APPS = rabbit -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - 
rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include rabbitmq-components.mk include erlang.mk diff --git a/deps/oauth2_client/include/oauth2_client.hrl b/deps/oauth2_client/include/oauth2_client.hrl index 745eeec33a53..b7f93104f167 100644 --- a/deps/oauth2_client/include/oauth2_client.hrl +++ b/deps/oauth2_client/include/oauth2_client.hrl @@ -48,7 +48,19 @@ %% The closest we have to a type import in Erlang -type option(T) :: rabbit_types:option(T). +-type oauth_provider_id() :: root | binary(). + +-record(openid_configuration, { + issuer :: option(uri_string:uri_string()), + token_endpoint :: option(uri_string:uri_string()), + authorization_endpoint :: option(uri_string:uri_string()), + end_session_endpoint :: option(uri_string:uri_string()), + jwks_uri :: option(uri_string:uri_string()) + }). +-type openid_configuration() :: #openid_configuration{}. + -record(oauth_provider, { + id :: oauth_provider_id(), issuer :: option(uri_string:uri_string()), token_endpoint :: option(uri_string:uri_string()), authorization_endpoint :: option(uri_string:uri_string()), @@ -58,7 +70,6 @@ }). -type oauth_provider() :: #oauth_provider{}. --type oauth_provider_id() :: binary(). -record(access_token_request, { client_id :: string() | binary(), diff --git a/deps/oauth2_client/src/oauth2_client.erl b/deps/oauth2_client/src/oauth2_client.erl index cb667ee72615..335bcfdfba1b 100644 --- a/deps/oauth2_client/src/oauth2_client.erl +++ b/deps/oauth2_client/src/oauth2_client.erl @@ -7,7 +7,10 @@ -module(oauth2_client). -export([get_access_token/2, get_expiration_time/1, refresh_access_token/2, - get_oauth_provider/1, get_oauth_provider/2, + get_oauth_provider/1, get_oauth_provider/2, + get_openid_configuration/2, get_openid_configuration/3, + merge_openid_configuration/2, + merge_oauth_provider/2, extract_ssl_options_as_list/1 ]). 
@@ -43,7 +46,8 @@ refresh_access_token(OAuthProvider, Request) -> append_paths(Path1, Path2) -> erlang:iolist_to_binary([Path1, Path2]). --spec get_openid_configuration(uri_string:uri_string(), erlang:iodata() | <<>>, ssl:tls_option() | []) -> {ok, oauth_provider()} | {error, term()}. +-spec get_openid_configuration(uri_string:uri_string(), erlang:iodata() | <<>>, + ssl:tls_option() | []) -> {ok, openid_configuration()} | {error, term()}. get_openid_configuration(IssuerURI, OpenIdConfigurationPath, TLSOptions) -> URLMap = uri_string:parse(IssuerURI), Path = case maps:get(path, URLMap) of @@ -52,24 +56,106 @@ get_openid_configuration(IssuerURI, OpenIdConfigurationPath, TLSOptions) -> P -> append_paths(P, OpenIdConfigurationPath) end, URL = uri_string:resolve(Path, IssuerURI), - rabbit_log:debug("get_openid_configuration issuer URL ~p (~p)", [URL, TLSOptions]), + rabbit_log:debug("get_openid_configuration issuer URL ~p (~p)", [URL, + format_ssl_options(TLSOptions)]), Options = [], Response = httpc:request(get, {URL, []}, TLSOptions, Options), - enrich_oauth_provider(parse_openid_configuration_response(Response), TLSOptions). + parse_openid_configuration_response(Response). --spec get_openid_configuration(uri_string:uri_string(), ssl:tls_option() | []) -> {ok, oauth_provider()} | {error, term()}. +-spec get_openid_configuration(uri_string:uri_string(), ssl:tls_option() | []) -> + {ok, openid_configuration()} | {error, term()}. get_openid_configuration(IssuerURI, TLSOptions) -> get_openid_configuration(IssuerURI, ?DEFAULT_OPENID_CONFIGURATION_PATH, TLSOptions). +% Returns {ok, with_modidified_oauth_provider} or {ok} if oauth_provider was +% not modified +-spec merge_openid_configuration(openid_configuration(), oauth_provider()) -> + oauth_provider(). 
+merge_openid_configuration(OpendIdConfiguration, OAuthProvider) -> + OAuthProvider0 = case OpendIdConfiguration#openid_configuration.issuer of + undefined -> OAuthProvider; + Issuer -> + OAuthProvider#oauth_provider{issuer = Issuer} + end, + OAuthProvider1 = case OpendIdConfiguration#openid_configuration.token_endpoint of + undefined -> OAuthProvider0; + TokenEndpoint -> + OAuthProvider0#oauth_provider{token_endpoint = TokenEndpoint} + end, + OAuthProvider2 = case OpendIdConfiguration#openid_configuration.authorization_endpoint of + undefined -> OAuthProvider1; + AuthorizationEndpoint -> + OAuthProvider1#oauth_provider{authorization_endpoint = AuthorizationEndpoint} + end, + OAuthProvider3 = case OpendIdConfiguration#openid_configuration.end_session_endpoint of + undefined -> OAuthProvider2; + EndSessionEndpoint -> + OAuthProvider2#oauth_provider{end_session_endpoint = EndSessionEndpoint} + end, + case OpendIdConfiguration#openid_configuration.jwks_uri of + undefined -> OAuthProvider3; + JwksUri -> + OAuthProvider3#oauth_provider{jwks_uri = JwksUri} + end. + +-spec merge_oauth_provider(oauth_provider(), proplists:proplist()) -> + proplists:proplist(). 
+merge_oauth_provider(OAuthProvider, Proplist) -> + Proplist0 = case OAuthProvider#oauth_provider.token_endpoint of + undefined -> Proplist; + TokenEndpoint -> [{token_endpoint, TokenEndpoint} | + proplists:delete(token_endpoint, Proplist)] + end, + Proplist1 = case OAuthProvider#oauth_provider.authorization_endpoint of + undefined -> Proplist0; + AuthzEndpoint -> [{authorization_endpoint, AuthzEndpoint} | + proplists:delete(authorization_endpoint, Proplist0)] + end, + Proplist2 = case OAuthProvider#oauth_provider.end_session_endpoint of + undefined -> Proplist1; + EndSessionEndpoint -> [{end_session_endpoint, EndSessionEndpoint} | + proplists:delete(end_session_endpoint, Proplist1)] + end, + case OAuthProvider#oauth_provider.jwks_uri of + undefined -> Proplist2; + JwksEndPoint -> [{jwks_uri, JwksEndPoint} | + proplists:delete(jwks_uri, Proplist2)] + end. + +parse_openid_configuration_response({error, Reason}) -> + {error, Reason}; +parse_openid_configuration_response({ok,{{_,Code,Reason}, Headers, Body}}) -> + map_response_to_openid_configuration(Code, Reason, Headers, Body). +map_response_to_openid_configuration(Code, Reason, Headers, Body) -> + case decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body) of + {error, {error, InternalError}} -> + {error, InternalError}; + {error, _} = Error -> + Error; + Value -> + case Code of + 200 -> {ok, map_to_openid_configuration(Value)}; + 201 -> {ok, map_to_openid_configuration(Value)}; + _ -> {error, Reason} + end + end. +map_to_openid_configuration(Map) -> + #openid_configuration{ + issuer = maps:get(?RESPONSE_ISSUER, Map), + token_endpoint = maps:get(?RESPONSE_TOKEN_ENDPOINT, Map, undefined), + authorization_endpoint = maps:get(?RESPONSE_AUTHORIZATION_ENDPOINT, Map, undefined), + end_session_endpoint = maps:get(?RESPONSE_END_SESSION_ENDPOINT, Map, undefined), + jwks_uri = maps:get(?RESPONSE_JWKS_URI, Map, undefined) + }. 
--spec get_expiration_time(successful_access_token_response()) -> +-spec get_expiration_time(successful_access_token_response()) -> {ok, [{expires_in, integer() }| {exp, integer() }]} | {error, missing_exp_field}. get_expiration_time(#successful_access_token_response{expires_in = ExpiresInSec, access_token = AccessToken}) -> case ExpiresInSec of - undefined -> - case jwt_helper:get_expiration_time(jwt_helper:decode(AccessToken)) of + undefined -> + case jwt_helper:get_expiration_time(jwt_helper:decode(AccessToken)) of {ok, Exp} -> {ok, [{exp, Exp}]}; - {error, _} = Error -> Error + {error, _} = Error -> Error end; _ -> {ok, [{expires_in, ExpiresInSec}]} end. @@ -112,34 +198,19 @@ do_update_oauth_provider_endpoints_configuration(OAuthProvider) -> List = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), ModifiedList = case OAuthProvider#oauth_provider.jwks_uri of undefined -> List; - JwksEndPoint -> [{jwks_url, JwksEndPoint} | List] + JwksEndPoint -> [{jwks_url, JwksEndPoint} | proplists:delete(jwks_url, List)] end, application:set_env(rabbitmq_auth_backend_oauth2, key_config, ModifiedList), - rabbit_log:debug("Updated oauth_provider details: ~p ", [ OAuthProvider]), + rabbit_log:debug("Updated oauth_provider details: ~p ", [ format_oauth_provider(OAuthProvider)]), OAuthProvider. 
do_update_oauth_provider_endpoints_configuration(OAuthProviderId, OAuthProvider) -> OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), - LookupProviderPropList = maps:get(OAuthProviderId, OAuthProviders), - ModifiedList0 = case OAuthProvider#oauth_provider.token_endpoint of - undefined -> LookupProviderPropList; - TokenEndpoint -> [{token_endpoint, TokenEndpoint} | LookupProviderPropList] - end, - ModifiedList1 = case OAuthProvider#oauth_provider.authorization_endpoint of - undefined -> ModifiedList0; - AuthzEndpoint -> [{authorization_endpoint, AuthzEndpoint} | ModifiedList0] - end, - ModifiedList2 = case OAuthProvider#oauth_provider.end_session_endpoint of - undefined -> ModifiedList1; - EndSessionEndpoint -> [{end_session_endpoint, EndSessionEndpoint} | ModifiedList1] - end, - ModifiedList3 = case OAuthProvider#oauth_provider.jwks_uri of - undefined -> ModifiedList2; - JwksEndPoint -> [{jwks_uri, JwksEndPoint} | ModifiedList2] - end, - ModifiedOAuthProviders = maps:put(OAuthProviderId, ModifiedList3, OAuthProviders), + Proplist = maps:get(OAuthProviderId, OAuthProviders), + ModifiedOAuthProviders = maps:put(OAuthProviderId, + merge_oauth_provider(OAuthProvider, Proplist), OAuthProviders), application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, ModifiedOAuthProviders), - rabbit_log:debug("Replacing oauth_providers ~p", [ ModifiedOAuthProviders]), + rabbit_log:debug("Replaced oauth_providers "), OAuthProvider. 
use_global_locks_on_all_nodes() -> @@ -176,25 +247,27 @@ unlock(LockId) -> get_oauth_provider(ListOfRequiredAttributes) -> case application:get_env(rabbitmq_auth_backend_oauth2, default_oauth_provider) of undefined -> get_oauth_provider_from_keyconfig(ListOfRequiredAttributes); - {ok, DefaultOauthProvider} -> - rabbit_log:debug("Using default_oauth_provider ~p", [DefaultOauthProvider]), - get_oauth_provider(DefaultOauthProvider, ListOfRequiredAttributes) + {ok, DefaultOauthProviderId} -> + rabbit_log:debug("Using default_oauth_provider ~p", [DefaultOauthProviderId]), + get_oauth_provider(DefaultOauthProviderId, ListOfRequiredAttributes) end. get_oauth_provider_from_keyconfig(ListOfRequiredAttributes) -> OAuthProvider = lookup_oauth_provider_from_keyconfig(), - rabbit_log:debug("Using oauth_provider ~p from keyconfig", [OAuthProvider]), + rabbit_log:debug("Using oauth_provider ~s from keyconfig", [format_oauth_provider(OAuthProvider)]), case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of [] -> {ok, OAuthProvider}; - _ -> + _ = MissingAttributes -> + rabbit_log:debug("OauthProvider has following missing attributes ~p", [MissingAttributes]), Result2 = case OAuthProvider#oauth_provider.issuer of undefined -> {error, {missing_oauth_provider_attributes, [issuer]}}; Issuer -> rabbit_log:debug("Downloading oauth_provider using issuer ~p", [Issuer]), case get_openid_configuration(Issuer, get_ssl_options_if_any(OAuthProvider)) of - {ok, OauthProvider} -> - {ok, update_oauth_provider_endpoints_configuration(OauthProvider)}; + {ok, OpenIdConfiguration} -> + {ok, update_oauth_provider_endpoints_configuration( + merge_openid_configuration(OpenIdConfiguration, OAuthProvider))}; {error, _} = Error2 -> Error2 end end, @@ -202,7 +275,7 @@ get_oauth_provider_from_keyconfig(ListOfRequiredAttributes) -> {ok, OAuthProvider2} -> case find_missing_attributes(OAuthProvider2, ListOfRequiredAttributes) of [] -> - rabbit_log:debug("Resolved oauth_provider ~p", 
[OAuthProvider]), + rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]), {ok, OAuthProvider2}; _ = Attrs-> {error, {missing_oauth_provider_attributes, Attrs}} @@ -213,35 +286,37 @@ get_oauth_provider_from_keyconfig(ListOfRequiredAttributes) -> -spec get_oauth_provider(oauth_provider_id(), list()) -> {ok, oauth_provider()} | {error, any()}. +get_oauth_provider(root, ListOfRequiredAttributes) -> + get_oauth_provider(ListOfRequiredAttributes); + get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes) when is_list(OAuth2ProviderId) -> get_oauth_provider(list_to_binary(OAuth2ProviderId), ListOfRequiredAttributes); -get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes) when is_binary(OAuth2ProviderId) -> - rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p", [OAuth2ProviderId, ListOfRequiredAttributes]), - case lookup_oauth_provider_config(OAuth2ProviderId) of +get_oauth_provider(OAuthProviderId, ListOfRequiredAttributes) when is_binary(OAuthProviderId) -> + rabbit_log:debug("get_oauth_provider ~p with at least these attributes: ~p", [OAuthProviderId, ListOfRequiredAttributes]), + case lookup_oauth_provider_config(OAuthProviderId) of {error, _} = Error0 -> rabbit_log:debug("Failed to find oauth_provider ~p configuration due to ~p", - [OAuth2ProviderId, Error0]), + [OAuthProviderId, Error0]), Error0; Config -> rabbit_log:debug("Found oauth_provider configuration ~p", [Config]), - OAuthProvider = case Config of - {error,_} = Error -> Error; - _ -> map_to_oauth_provider(Config) - end, - rabbit_log:debug("Resolved oauth_provider ~p", [OAuthProvider]), + OAuthProvider = map_to_oauth_provider(Config), + rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]), case find_missing_attributes(OAuthProvider, ListOfRequiredAttributes) of [] -> {ok, OAuthProvider}; - _ -> + _ = MissingAttributes -> + rabbit_log:debug("OauthProvider has following missing attributes ~p", 
[MissingAttributes]), Result2 = case OAuthProvider#oauth_provider.issuer of undefined -> {error, {missing_oauth_provider_attributes, [issuer]}}; Issuer -> rabbit_log:debug("Downloading oauth_provider ~p using issuer ~p", - [OAuth2ProviderId, Issuer]), + [OAuthProviderId, Issuer]), case get_openid_configuration(Issuer, get_ssl_options_if_any(OAuthProvider)) of - {ok, OauthProvider} -> - {ok, update_oauth_provider_endpoints_configuration(OAuth2ProviderId, OauthProvider)}; + {ok, OpenIdConfiguration} -> + {ok, update_oauth_provider_endpoints_configuration(OAuthProviderId, + merge_openid_configuration(OpenIdConfiguration, OAuthProvider))}; {error, _} = Error2 -> Error2 end end, @@ -249,7 +324,7 @@ get_oauth_provider(OAuth2ProviderId, ListOfRequiredAttributes) when is_binary(OA {ok, OAuthProvider2} -> case find_missing_attributes(OAuthProvider2, ListOfRequiredAttributes) of [] -> - rabbit_log:debug("Resolved oauth_provider ~p", [OAuthProvider]), + rabbit_log:debug("Resolved oauth_provider ~p", [format_oauth_provider(OAuthProvider)]), {ok, OAuthProvider2}; _ = Attrs-> {error, {missing_oauth_provider_attributes, Attrs}} @@ -289,6 +364,7 @@ lookup_oauth_provider_from_keyconfig() -> EndSessionEndpoint = application:get_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, undefined), Map = maps:from_list(application:get_env(rabbitmq_auth_backend_oauth2, key_config, [])), #oauth_provider{ + id = root, issuer = Issuer, jwks_uri = maps:get(jwks_url, Map, undefined), %% jwks_url not uri . _url is the legacy name token_endpoint = TokenEndpoint, @@ -297,8 +373,6 @@ lookup_oauth_provider_from_keyconfig() -> ssl_options = extract_ssl_options_as_list(Map) }. - - -spec extract_ssl_options_as_list(#{atom() => any()}) -> proplists:proplist(). 
extract_ssl_options_as_list(Map) -> {Verify, CaCerts, CaCertFile} = case get_verify_or_peer_verification(Map, verify_peer) of @@ -313,7 +387,6 @@ extract_ssl_options_as_list(Map) -> end; verify_none -> {verify_none, undefined, undefined} end, - [ {verify, Verify} ] ++ case Verify of @@ -363,10 +436,16 @@ lookup_oauth_provider_config(OAuth2ProviderId) -> case maps:get(OAuth2ProviderId, MapOfProviders, undefined) of undefined -> {error, {oauth_provider_not_found, OAuth2ProviderId}}; - Value -> Value + OAuthProvider -> + ensure_oauth_provider_has_id_property(OAuth2ProviderId, OAuthProvider) end; _ -> {error, invalid_oauth_provider_configuration} end. +ensure_oauth_provider_has_id_property(OAuth2ProviderId, OAuth2Provider) -> + case proplists:is_defined(id, OAuth2Provider) of + true -> OAuth2Provider; + false -> OAuth2Provider ++ [{id, OAuth2ProviderId}] + end. build_access_token_request_body(Request) -> uri_string:compose_query([ @@ -429,8 +508,6 @@ decode_body(MimeType, Body) -> true -> decode_body(?CONTENT_JSON, Body); false -> {error, mime_type_is_not_json} end. - - map_to_successful_access_token_response(Map) -> #successful_access_token_response{ access_token = maps:get(?RESPONSE_ACCESS_TOKEN, Map), @@ -438,25 +515,14 @@ map_to_successful_access_token_response(Map) -> refresh_token = maps:get(?RESPONSE_REFRESH_TOKEN, Map, undefined), expires_in = maps:get(?RESPONSE_EXPIRES_IN, Map, undefined) }. - map_to_unsuccessful_access_token_response(Map) -> #unsuccessful_access_token_response{ error = maps:get(?RESPONSE_ERROR, Map), error_description = maps:get(?RESPONSE_ERROR_DESCRIPTION, Map, undefined) }. 
- - -map_to_oauth_provider(Map) when is_map(Map) -> - #oauth_provider{ - issuer = maps:get(?RESPONSE_ISSUER, Map), - token_endpoint = maps:get(?RESPONSE_TOKEN_ENDPOINT, Map, undefined), - authorization_endpoint = maps:get(?RESPONSE_AUTHORIZATION_ENDPOINT, Map, undefined), - end_session_endpoint = maps:get(?RESPONSE_END_SESSION_ENDPOINT, Map, undefined), - jwks_uri = maps:get(?RESPONSE_JWKS_URI, Map, undefined) - }; - -map_to_oauth_provider(PropList) when is_list(PropList) -> +map_to_oauth_provider(PropList) when is_list(PropList) -> #oauth_provider{ + id = proplists:get_value(id, PropList), issuer = proplists:get_value(issuer, PropList), token_endpoint = proplists:get_value(token_endpoint, PropList), authorization_endpoint = proplists:get_value(authorization_endpoint, PropList, undefined), @@ -464,13 +530,6 @@ map_to_oauth_provider(PropList) when is_list(PropList) -> jwks_uri = proplists:get_value(jwks_uri, PropList, undefined), ssl_options = extract_ssl_options_as_list(maps:from_list(proplists:get_value(https, PropList, []))) }. - - -enrich_oauth_provider({ok, OAuthProvider}, TLSOptions) -> - {ok, OAuthProvider#oauth_provider{ssl_options=TLSOptions}}; -enrich_oauth_provider(Response, _) -> - Response. - map_to_access_token_response(Code, Reason, Headers, Body) -> case decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body) of {error, {error, InternalError}} -> @@ -487,28 +546,38 @@ map_to_access_token_response(Code, Reason, Headers, Body) -> _ -> {error, Reason} end end. - -map_response_to_oauth_provider(Code, Reason, Headers, Body) -> - case decode_body(proplists:get_value("content-type", Headers, ?CONTENT_JSON), Body) of - {error, {error, InternalError}} -> - {error, InternalError}; - {error, _} = Error -> - Error; - Value -> - case Code of - 200 -> {ok, map_to_oauth_provider(Value)}; - 201 -> {ok, map_to_oauth_provider(Value)}; - _ -> {error, Reason} - end - end. 
- - parse_access_token_response({error, Reason}) -> {error, Reason}; parse_access_token_response({ok,{{_,Code,Reason}, Headers, Body}}) -> map_to_access_token_response(Code, Reason, Headers, Body). -parse_openid_configuration_response({error, Reason}) -> - {error, Reason}; -parse_openid_configuration_response({ok,{{_,Code,Reason}, Headers, Body}}) -> - map_response_to_oauth_provider(Code, Reason, Headers, Body). +-spec format_ssl_options([ssl:tls_client_option()]) -> string(). +format_ssl_options(TlsOptions) -> + CaCertsCount = case proplists:get_value(cacerts, TlsOptions, []) of + [] -> 0; + Certs -> length(Certs) + end, + io_lib:format("{verify: ~p, fail_if_no_peer_cert: ~p, crl_check: ~p, " ++ + "depth: ~p, cacertfile: ~p, cacerts(count): ~p }", [ + proplists:get_value(verify, TlsOptions), + proplists:get_value(fail_if_no_peer_cert, TlsOptions), + proplists:get_value(crl_check, TlsOptions), + proplists:get_value(depth, TlsOptions), + proplists:get_value(cacertfile, TlsOptions), + CaCertsCount]). + +format_oauth_provider_id(root) -> ""; +format_oauth_provider_id(Id) -> binary_to_list(Id). + +-spec format_oauth_provider(oauth_provider()) -> string(). +format_oauth_provider(OAuthProvider) -> + io_lib:format("{id: ~p, issuer: ~p, token_endpoint: ~p, " ++ + "authorization_endpoint: ~p, end_session_endpoint: ~p, " ++ + "jwks_uri: ~p, ssl_options: ~s }", [ + format_oauth_provider_id(OAuthProvider#oauth_provider.id), + OAuthProvider#oauth_provider.issuer, + OAuthProvider#oauth_provider.token_endpoint, + OAuthProvider#oauth_provider.authorization_endpoint, + OAuthProvider#oauth_provider.end_session_endpoint, + OAuthProvider#oauth_provider.jwks_uri, + format_ssl_options(OAuthProvider#oauth_provider.ssl_options)]). 
diff --git a/deps/oauth2_client/test/system_SUITE.erl b/deps/oauth2_client/test/system_SUITE.erl index 1be0acc72815..a0be9dd3976d 100644 --- a/deps/oauth2_client/test/system_SUITE.erl +++ b/deps/oauth2_client/test/system_SUITE.erl @@ -16,189 +16,76 @@ -define(MOCK_TOKEN_ENDPOINT, <<"/token">>). -define(AUTH_PORT, 8000). --define(GRANT_ACCESS_TOKEN, -#{request => - #{ - method => <<"POST">>, - path => ?MOCK_TOKEN_ENDPOINT, - parameters => [ - {?REQUEST_CLIENT_ID, <<"guest">>}, - {?REQUEST_CLIENT_SECRET, <<"password">>} - ] - }, - response => [ - {code, 200}, - {content_type, ?CONTENT_JSON}, - {payload, [ - {access_token, <<"some access token">>}, - {token_type, <<"Bearer">>} - ]} - ] -}). --define(DENIES_ACCESS_TOKEN, -#{request => - #{ - method => <<"POST">>, - path => ?MOCK_TOKEN_ENDPOINT, - parameters => [ - {?REQUEST_CLIENT_ID, <<"invalid_client">>}, - {?REQUEST_CLIENT_SECRET, <<"password">>} - ] - }, - response => [ - {code, 400}, - {content_type, ?CONTENT_JSON}, - {payload, [ - {error, <<"invalid_client">>}, - {error_description, <<"invalid client found">>} - ]} - ] -}). - --define(AUTH_SERVER_ERROR, -#{request => - #{ - method => <<"POST">>, - path => ?MOCK_TOKEN_ENDPOINT, - parameters => [ - {?REQUEST_CLIENT_ID, <<"guest">>}, - {?REQUEST_CLIENT_SECRET, <<"password">>} - ] - }, - response => [ - {code, 500} - ] -}). - --define(NON_JSON_PAYLOAD, -#{request => - #{ - method => <<"POST">>, - path => ?MOCK_TOKEN_ENDPOINT, - parameters => [ - {?REQUEST_CLIENT_ID, <<"guest">>}, - {?REQUEST_CLIENT_SECRET, <<"password">>} - ] - }, - response => [ - {code, 400}, - {content_type, ?CONTENT_JSON}, - {payload, <<"{ some illegal json}">>} - ] -}). 
- --define(GET_OPENID_CONFIGURATION, -#{request => - #{ - method => <<"GET">>, - path => ?DEFAULT_OPENID_CONFIGURATION_PATH - }, - response => [ - {code, 200}, - {content_type, ?CONTENT_JSON}, - {payload, [ - {issuer, build_issuer("http") }, - {authorization_endpoint, <<"http://localhost:8000/authorize">>}, - {token_endpoint, build_token_endpoint_uri("http")}, - {end_session_endpoint, <<"http://localhost:8000/logout">>}, - {jwks_uri, build_jwks_uri("http")} - ]} - ] -}). --define(GET_OPENID_CONFIGURATION_WITH_SSL, -#{request => - #{ - method => <<"GET">>, - path => ?DEFAULT_OPENID_CONFIGURATION_PATH - }, - response => [ - {code, 200}, - {content_type, ?CONTENT_JSON}, - {payload, [ - {issuer, build_issuer("https") }, - {authorization_endpoint, <<"https://localhost:8000/authorize">>}, - {token_endpoint, build_token_endpoint_uri("https")}, - {end_session_endpoint, <<"http://localhost:8000/logout">>}, - {jwks_uri, build_jwks_uri("https")} - ]} - ] -}). --define(GRANTS_REFRESH_TOKEN, - #{request => #{ - method => <<"POST">>, - path => ?MOCK_TOKEN_ENDPOINT, - parameters => [ - {?REQUEST_CLIENT_ID, <<"guest">>}, - {?REQUEST_CLIENT_SECRET, <<"password">>}, - {?REQUEST_REFRESH_TOKEN, <<"some refresh token">>} - ] - }, - response => [ - {code, 200}, - {content_type, ?CONTENT_JSON}, - {payload, [ - {access_token, <<"some refreshed access token">>}, - {token_type, <<"Bearer">>} - ]} - ] -}). +-define(ISSUER_PATH, "/somepath"). +-define(CUSTOM_OPENID_CONFIGURATION_ENDPOINT, "/somepath"). +-define(UTIL_MOD, oauth2_client_test_util). +-define(EXPIRES_IN_SECONDS, 10000). all() -> [ - {group, http_up}, - {group, http_down}, - {group, https} + {group, https_down}, + {group, https}, + {group, with_all_oauth_provider_settings} + ]. 
groups() -> [ - {http_up, [], [ - {group, verify_access_token}, - {group, with_all_oauth_provider_settings}, - {group, without_all_oauth_providers_settings} - ]}, {with_all_oauth_provider_settings, [], [ {group, verify_get_oauth_provider} ]}, {without_all_oauth_providers_settings, [], [ {group, verify_get_oauth_provider} ]}, + {verify_openid_configuration, [], [ + get_openid_configuration, + get_openid_configuration_returns_partial_payload, + get_openid_configuration_using_path, + get_openid_configuration_using_path_and_custom_endpoint, + get_openid_configuration_using_custom_endpoint + ]}, {verify_access_token, [], [ grants_access_token, denies_access_token, auth_server_error, non_json_payload, - grants_refresh_token + grants_refresh_token, + expiration_time_in_response_payload, + expiration_time_in_token ]}, {verify_get_oauth_provider, [], [ get_oauth_provider, + {with_default_oauth_provider, [], [ + get_oauth_provider + ]}, get_oauth_provider_given_oauth_provider_id ]}, - {http_down, [], [ + {https_down, [], [ connection_error ]}, {https, [], [ + {group, verify_openid_configuration}, grants_access_token, grants_refresh_token, ssl_connection_error, - {group, with_all_oauth_provider_settings}, {group, without_all_oauth_providers_settings} ]} ]. init_per_suite(Config) -> [ - {denies_access_token, [ {token_endpoint, ?DENIES_ACCESS_TOKEN} ]}, - {auth_server_error, [ {token_endpoint, ?AUTH_SERVER_ERROR} ]}, - {non_json_payload, [ {token_endpoint, ?NON_JSON_PAYLOAD} ]}, - {grants_refresh_token, [ {token_endpoint, ?GRANTS_REFRESH_TOKEN} ]} + {denies_access_token, [ {token_endpoint, denies_access_token_expectation()} ]}, + {auth_server_error, [ {token_endpoint, auth_server_error_when_access_token_request_expectation()} ]}, + {non_json_payload, [ {token_endpoint, non_json_payload_when_access_token_request_expectation()} ]}, + {grants_refresh_token, [ {token_endpoint, grants_refresh_token_expectation()} ]} | Config]. end_per_suite(Config) -> Config. 
init_per_group(https, Config) -> + {ok, _} = application:ensure_all_started(inets), {ok, _} = application:ensure_all_started(ssl), application:ensure_all_started(cowboy), Config0 = rabbit_ct_helpers:run_setup_steps(Config), @@ -207,31 +94,51 @@ init_per_group(https, Config) -> WrongCaCertFile = filename:join([CertsDir, "server", "server.pem"]), [{group, https}, {oauth_provider_id, <<"uaa">>}, - {oauth_provider, build_https_oauth_provider(CaCertFile)}, - {oauth_provider_with_issuer, keep_only_issuer_and_ssl_options(build_https_oauth_provider(CaCertFile))}, + {oauth_provider, build_https_oauth_provider(<<"uaa">>, CaCertFile)}, + {oauth_provider_with_issuer, keep_only_issuer_and_ssl_options( + build_https_oauth_provider(<<"uaa">>, CaCertFile))}, {issuer, build_issuer("https")}, - {oauth_provider_with_wrong_ca, build_https_oauth_provider(WrongCaCertFile)} | + {oauth_provider_with_wrong_ca, build_https_oauth_provider(<<"uaa">>, WrongCaCertFile)} | Config0]; -init_per_group(http_up, Config) -> +init_per_group(https_down, Config) -> {ok, _} = application:ensure_all_started(inets), - application:ensure_all_started(cowboy), - [{group, http_up}, - {oauth_provider_id, <<"uaa">>}, - {issuer, build_issuer("http")}, - {oauth_provider_with_issuer, keep_only_issuer_and_ssl_options(build_http_oauth_provider())}, - {oauth_provider, build_http_oauth_provider()} | Config]; + {ok, _} = application:ensure_all_started(ssl), + Config0 = rabbit_ct_helpers:run_setup_steps(Config), + CertsDir = ?config(rmq_certsdir, Config0), + CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), -init_per_group(http_down, Config) -> - [{issuer, build_issuer("http")}, + [{issuer, build_issuer("https")}, {oauth_provider_id, <<"uaa">>}, - {oauth_provider, build_http_oauth_provider()} | Config]; + {oauth_provider, build_https_oauth_provider(<<"uaa">>, CaCertFile)} | Config]; + +init_per_group(openid_configuration_with_path, Config) -> + [{use_openid_configuration_with_path, true} | Config]; 
init_per_group(with_all_oauth_provider_settings, Config) -> - [{with_all_oauth_provider_settings, true} | Config]; + Config0 = rabbit_ct_helpers:run_setup_steps(Config), + CertsDir = ?config(rmq_certsdir, Config0), + CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), + + [{with_all_oauth_provider_settings, true}, + {oauth_provider_id, <<"uaa">>}, + {oauth_provider, build_https_oauth_provider(<<"uaa">>, CaCertFile)} | Config]; init_per_group(without_all_oauth_providers_settings, Config) -> - [{with_all_oauth_provider_settings, false} | Config]; + Config0 = rabbit_ct_helpers:run_setup_steps(Config), + CertsDir = ?config(rmq_certsdir, Config0), + CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), + + [{with_all_oauth_provider_settings, false}, + {oauth_provider_id, <<"uaa">>}, + {oauth_provider, keep_only_issuer_and_ssl_options( + build_https_oauth_provider(<<"uaa">>, CaCertFile))} | Config]; + +init_per_group(with_default_oauth_provider, Config) -> + OAuthProvider = ?config(oauth_provider, Config), + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, + OAuthProvider#oauth_provider.id), + Config; init_per_group(_, Config) -> Config. 
@@ -239,54 +146,87 @@ init_per_group(_, Config) -> get_http_oauth_server_expectations(TestCase, Config) -> case ?config(TestCase, Config) of - undefined -> - case ?config(group, Config) of - https -> [ - {token_endpoint, ?GRANT_ACCESS_TOKEN}, - {get_openid_configuration, ?GET_OPENID_CONFIGURATION_WITH_SSL } - ]; - _ -> [ - {token_endpoint, ?GRANT_ACCESS_TOKEN}, - {get_openid_configuration, ?GET_OPENID_CONFIGURATION } - ] - end; - Expectations -> Expectations + undefined -> + ct:log("get_openid_configuration_http_expectation : ~p", [get_openid_configuration_http_expectation(TestCase)]), + [ {token_endpoint, build_http_mock_behaviour(build_http_access_token_request(), + build_http_200_access_token_response())}, + {get_openid_configuration, get_openid_configuration_http_expectation(TestCase)} + ]; + Expectations -> + Expectations end. +get_openid_configuration_http_expectation(TestCaseAtom) -> + TestCase = binary_to_list(atom_to_binary(TestCaseAtom)), + Payload = case string:find(TestCase, "returns_partial_payload") of + nomatch -> + build_http_get_openid_configuration_payload(); + _ -> + List0 = proplists:delete(authorization_endpoint, + build_http_get_openid_configuration_payload()), + proplists:delete(end_session_endpoint, List0) + end, + Path = case string:find(TestCase, "path") of + nomatch -> ""; + _ -> ?ISSUER_PATH + end, + Endpoint = case string:find(TestCase, "custom_endpoint") of + nomatch -> ?DEFAULT_OPENID_CONFIGURATION_PATH; + _ -> ?CUSTOM_OPENID_CONFIGURATION_ENDPOINT + end, + build_http_mock_behaviour(build_http_get_openid_configuration_request(Endpoint, Path), + build_http_200_json_response(Payload)). lookup_expectation(Endpoint, Config) -> proplists:get_value(Endpoint, ?config(oauth_server_expectations, Config)). 
+ + configure_all_oauth_provider_settings(Config) -> OAuthProvider = ?config(oauth_provider, Config), - OAuthProviders = #{ ?config(oauth_provider_id, Config) => oauth_provider_to_proplist(OAuthProvider) }, - - application:set_env(rabbitmq_auth_backend_oauth2, issuer, OAuthProvider#oauth_provider.issuer), - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders), - application:set_env(rabbitmq_auth_backend_oauth2, token_endpoint, OAuthProvider#oauth_provider.token_endpoint), - application:set_env(rabbitmq_auth_backend_oauth2, end_sessione_endpoint, OAuthProvider#oauth_provider.end_session_endpoint), - application:set_env(rabbitmq_auth_backend_oauth2, authorization_endpoint, OAuthProvider#oauth_provider.authorization_endpoint), + OAuthProviders = #{ ?config(oauth_provider_id, Config) => + oauth_provider_to_proplist(OAuthProvider) }, + + application:set_env(rabbitmq_auth_backend_oauth2, issuer, + OAuthProvider#oauth_provider.issuer), + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + OAuthProviders), + application:set_env(rabbitmq_auth_backend_oauth2, token_endpoint, + OAuthProvider#oauth_provider.token_endpoint), + application:set_env(rabbitmq_auth_backend_oauth2, end_session_endpoint, + OAuthProvider#oauth_provider.end_session_endpoint), + application:set_env(rabbitmq_auth_backend_oauth2, authorization_endpoint, + OAuthProvider#oauth_provider.authorization_endpoint), KeyConfig = [ { jwks_url, OAuthProvider#oauth_provider.jwks_uri } ] ++ case OAuthProvider#oauth_provider.ssl_options of - undefined -> + undefined -> []; - _ -> - [ {peer_verification, proplists:get_value(verify, OAuthProvider#oauth_provider.ssl_options) }, - {cacertfile, proplists:get_value(cacertfile, OAuthProvider#oauth_provider.ssl_options) } ] + _ -> + [ {peer_verification, proplists:get_value(verify, + OAuthProvider#oauth_provider.ssl_options) }, + {cacertfile, proplists:get_value(cacertfile, + OAuthProvider#oauth_provider.ssl_options) } + ] end, 
application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig). configure_minimum_oauth_provider_settings(Config) -> OAuthProvider = ?config(oauth_provider_with_issuer, Config), - OAuthProviders = #{ ?config(oauth_provider_id, Config) => oauth_provider_to_proplist(OAuthProvider) }, - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders), - application:set_env(rabbitmq_auth_backend_oauth2, issuer, OAuthProvider#oauth_provider.issuer), + OAuthProviders = #{ ?config(oauth_provider_id, Config) => + oauth_provider_to_proplist(OAuthProvider) }, + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + OAuthProviders), + application:set_env(rabbitmq_auth_backend_oauth2, issuer, + OAuthProvider#oauth_provider.issuer), KeyConfig = case OAuthProvider#oauth_provider.ssl_options of - undefined -> + undefined -> []; - _ -> - [ {peer_verification, proplists:get_value(verify, OAuthProvider#oauth_provider.ssl_options) }, - {cacertfile, proplists:get_value(cacertfile, OAuthProvider#oauth_provider.ssl_options) } ] + _ -> + [{peer_verification, proplists:get_value(verify, + OAuthProvider#oauth_provider.ssl_options) }, + {cacertfile, proplists:get_value(cacertfile, + OAuthProvider#oauth_provider.ssl_options) } + ] end, application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig). @@ -303,14 +243,14 @@ init_per_testcase(TestCase, Config) -> ListOfExpectations = maps:values(proplists:to_map(HttpOauthServerExpectations)), case ?config(group, Config) of - http_up -> - start_http_oauth_server(?AUTH_PORT, ListOfExpectations); https -> - start_https_oauth_server(?AUTH_PORT, ?config(rmq_certsdir, Config), ListOfExpectations); - _ -> - ok + ct:log("Start https with expectations ~p", [ListOfExpectations]), + start_https_oauth_server(?AUTH_PORT, ?config(rmq_certsdir, Config), + ListOfExpectations); + _ -> + do_nothing end, - [{oauth_server_expectations, HttpOauthServerExpectations} | Config ]. 
+ [{oauth_server_expectations, HttpOauthServerExpectations} | Config ]. end_per_testcase(_, Config) -> application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), @@ -320,27 +260,126 @@ end_per_testcase(_, Config) -> application:unset_env(rabbitmq_auth_backend_oauth2, end_session_endpoint), application:unset_env(rabbitmq_auth_backend_oauth2, key_config), case ?config(group, Config) of - http_up -> - stop_http_auth_server(); https -> - stop_http_auth_server(); - _ -> - ok + stop_https_auth_server(); + _ -> + do_nothing end, Config. end_per_group(https_and_rabbitmq_node, Config) -> rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); +end_per_group(with_default_oauth_provider, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; + end_per_group(_, Config) -> Config. +get_openid_configuration(Config) -> + ExpectedOAuthProvider = ?config(oauth_provider, Config), + SslOptions = [{ssl, ExpectedOAuthProvider#oauth_provider.ssl_options}], + {ok, ActualOpenId} = oauth2_client:get_openid_configuration( + build_issuer("https"), + SslOptions), + ExpectedOpenId = map_oauth_provider_to_openid_configuration(ExpectedOAuthProvider), + assertOpenIdConfiguration(ExpectedOpenId, ActualOpenId). + +map_oauth_provider_to_openid_configuration(OAuthProvider) -> + #openid_configuration{ + issuer = OAuthProvider#oauth_provider.issuer, + token_endpoint = OAuthProvider#oauth_provider.token_endpoint, + end_session_endpoint = OAuthProvider#oauth_provider.end_session_endpoint, + jwks_uri = OAuthProvider#oauth_provider.jwks_uri, + authorization_endpoint = OAuthProvider#oauth_provider.authorization_endpoint + }. 
+get_openid_configuration_returns_partial_payload(Config) -> + ExpectedOAuthProvider0 = ?config(oauth_provider, Config), + ExpectedOAuthProvider = #oauth_provider{ + issuer = ExpectedOAuthProvider0#oauth_provider.issuer, + token_endpoint = ExpectedOAuthProvider0#oauth_provider.token_endpoint, + jwks_uri = ExpectedOAuthProvider0#oauth_provider.jwks_uri}, + + SslOptions = [{ssl, ExpectedOAuthProvider0#oauth_provider.ssl_options}], + {ok, Actual} = oauth2_client:get_openid_configuration( + build_issuer("https"), + SslOptions), + ExpectedOpenId = map_oauth_provider_to_openid_configuration(ExpectedOAuthProvider), + assertOpenIdConfiguration(ExpectedOpenId, Actual). + +get_openid_configuration_using_path(Config) -> + ExpectedOAuthProvider = ?config(oauth_provider, Config), + SslOptions = [{ssl, ExpectedOAuthProvider#oauth_provider.ssl_options}], + {ok, Actual} = oauth2_client:get_openid_configuration( + build_issuer("https", ?ISSUER_PATH), + SslOptions), + ExpectedOpenId = map_oauth_provider_to_openid_configuration(ExpectedOAuthProvider), + assertOpenIdConfiguration(ExpectedOpenId,Actual). +get_openid_configuration_using_path_and_custom_endpoint(Config) -> + ExpectedOAuthProvider = ?config(oauth_provider, Config), + SslOptions = [{ssl, ExpectedOAuthProvider#oauth_provider.ssl_options}], + {ok, Actual} = oauth2_client:get_openid_configuration( + build_issuer("https", ?ISSUER_PATH), + ?CUSTOM_OPENID_CONFIGURATION_ENDPOINT, + SslOptions), + ExpectedOpenId = map_oauth_provider_to_openid_configuration(ExpectedOAuthProvider), + assertOpenIdConfiguration(ExpectedOpenId, Actual). 
+get_openid_configuration_using_custom_endpoint(Config) -> + ExpectedOAuthProvider = ?config(oauth_provider, Config), + SslOptions = [{ssl, ExpectedOAuthProvider#oauth_provider.ssl_options}], + {ok, Actual} = oauth2_client:get_openid_configuration( + build_issuer("https"), + ?CUSTOM_OPENID_CONFIGURATION_ENDPOINT, + SslOptions), + ExpectedOpenId = map_oauth_provider_to_openid_configuration(ExpectedOAuthProvider), + assertOpenIdConfiguration(ExpectedOpenId, Actual). + + +assertOpenIdConfiguration(ExpectedOpenIdProvider, ActualOpenIdProvider) -> + ?assertEqual(ExpectedOpenIdProvider#openid_configuration.issuer, + ActualOpenIdProvider#openid_configuration.issuer), + ?assertEqual(ExpectedOpenIdProvider#openid_configuration.jwks_uri, + ActualOpenIdProvider#openid_configuration.jwks_uri), + ?assertEqual(ExpectedOpenIdProvider#openid_configuration.end_session_endpoint, + ActualOpenIdProvider#openid_configuration.end_session_endpoint), + ?assertEqual(ExpectedOpenIdProvider#openid_configuration.token_endpoint, + ActualOpenIdProvider#openid_configuration.token_endpoint), + ?assertEqual(ExpectedOpenIdProvider#openid_configuration.authorization_endpoint, + ActualOpenIdProvider#openid_configuration.authorization_endpoint). + +expiration_time_in_response_payload(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 200}, {content_type, _CT}, {payload, _JsonPayload}] } = + lookup_expectation(token_endpoint, Config), + + {ok, #successful_access_token_response{} = Response } = + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)), + + {ok, [{expires_in, 10000}]} = oauth2_client:get_expiration_time( + Response#successful_access_token_response{expires_in = 10000}). 
+ +expiration_time_in_token(Config) -> + #{request := #{parameters := Parameters}, + response := [ {code, 200}, {content_type, _CT}, {payload, _JsonPayload}] } = + lookup_expectation(token_endpoint, Config), + + {ok, #successful_access_token_response{} = Response } = + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)), + + {ok, [{exp, ?EXPIRES_IN_SECONDS}]} = oauth2_client:get_expiration_time(Response). + grants_access_token_dynamically_resolving_oauth_provider(Config) -> #{request := #{parameters := Parameters}, - response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } = lookup_expectation(token_endpoint, Config), + response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } = + lookup_expectation(token_endpoint, Config), - {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = - oauth2_client:get_access_token(?config(oauth_provider_id, Config), build_access_token_request(Parameters)), + {ok, #successful_access_token_response{access_token = AccessToken, + token_type = TokenType} } = + oauth2_client:get_access_token(?config(oauth_provider_id, Config), + build_access_token_request(Parameters)), ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). 
@@ -350,8 +389,10 @@ grants_access_token(Config) -> response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } = lookup_expectation(token_endpoint, Config), - {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = - oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)), + {ok, #successful_access_token_response{access_token = AccessToken, + token_type = TokenType} } = + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)), ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). @@ -360,8 +401,10 @@ grants_refresh_token(Config) -> response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } = lookup_expectation(token_endpoint, Config), - {ok, #successful_access_token_response{access_token = AccessToken, token_type = TokenType} } = - oauth2_client:refresh_access_token(?config(oauth_provider, Config), build_refresh_token_request(Parameters)), + {ok, #successful_access_token_response{access_token = AccessToken, + token_type = TokenType} } = + oauth2_client:refresh_access_token(?config(oauth_provider, Config), + build_refresh_token_request(Parameters)), ?assertEqual(proplists:get_value(token_type, JsonPayload), TokenType), ?assertEqual(proplists:get_value(access_token, JsonPayload), AccessToken). 
@@ -369,8 +412,10 @@ denies_access_token(Config) -> #{request := #{parameters := Parameters}, response := [ {code, 400}, {content_type, _CT}, {payload, JsonPayload}] } = lookup_expectation(token_endpoint, Config), - {error, #unsuccessful_access_token_response{error = Error, error_description = ErrorDescription} } = - oauth2_client:get_access_token(?config(oauth_provider, Config),build_access_token_request(Parameters)), + {error, #unsuccessful_access_token_response{error = Error, + error_description = ErrorDescription} } = + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)), ?assertEqual(proplists:get_value(error, JsonPayload), Error), ?assertEqual(proplists:get_value(error_description, JsonPayload), ErrorDescription). @@ -378,12 +423,14 @@ auth_server_error(Config) -> #{request := #{parameters := Parameters}, response := [ {code, 500} ] } = lookup_expectation(token_endpoint, Config), {error, "Internal Server Error"} = - oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)). + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)). non_json_payload(Config) -> #{request := #{parameters := Parameters}} = lookup_expectation(token_endpoint, Config), {error, {failed_to_decode_json, _ErrorArgs}} = - oauth2_client:get_access_token(?config(oauth_provider, Config), build_access_token_request(Parameters)). + oauth2_client:get_access_token(?config(oauth_provider, Config), + build_access_token_request(Parameters)). connection_error(Config) -> #{request := #{parameters := Parameters}} = lookup_expectation(token_endpoint, Config), @@ -397,44 +444,116 @@ ssl_connection_error(Config) -> {error, {failed_connect, _} } = oauth2_client:get_access_token( ?config(oauth_provider_with_wrong_ca, Config), build_access_token_request(Parameters)). 
-get_oauth_provider(Config) -> - #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } - = lookup_expectation(get_openid_configuration, Config), - - {ok, #oauth_provider{issuer = Issuer, token_endpoint = TokenEndPoint, jwks_uri = Jwks_uri}} = +verify_get_oauth_provider_returns_oauth_provider_from_key_config() -> + {ok, #oauth_provider{id = Id, + issuer = Issuer, + token_endpoint = TokenEndPoint, + jwks_uri = Jwks_uri}} = oauth2_client:get_oauth_provider([issuer, token_endpoint, jwks_uri]), + ExpectedIssuer = application:get_env(rabbitmq_auth_backend_oauth2, issuer, undefined), + ExpectedTokenEndPoint = application:get_env(rabbitmq_auth_backend_oauth2, token_endpoint, undefined), + ExpectedJwks_uri = proplists:get_value(jwks_url, + application:get_env(rabbitmq_auth_backend_oauth2, key_config, [])), + ?assertEqual(root, Id), + ?assertEqual(ExpectedIssuer, Issuer), + ?assertEqual(ExpectedTokenEndPoint, TokenEndPoint), + ?assertEqual(ExpectedJwks_uri, Jwks_uri). + +verify_get_oauth_provider_returns_default_oauth_provider(DefaultOAuthProviderId) -> + {ok, OAuthProvider1} = + oauth2_client:get_oauth_provider([issuer, token_endpoint, jwks_uri]), + {ok, OAuthProvider2} = + oauth2_client:get_oauth_provider(DefaultOAuthProviderId, + [issuer, token_endpoint, jwks_uri]), + ct:log("verify_get_oauth_provider_returns_default_oauth_provider ~p vs ~p", [OAuthProvider1, OAuthProvider2]), + ?assertEqual(OAuthProvider1, OAuthProvider2). - ?assertEqual(proplists:get_value(issuer, JsonPayload), Issuer), - ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), TokenEndPoint), - ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), Jwks_uri). 
+get_oauth_provider(Config) -> + case ?config(with_all_oauth_provider_settings, Config) of + true -> + case application:get_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, undefined) of + undefined -> + verify_get_oauth_provider_returns_oauth_provider_from_key_config(); + DefaultOAuthProviderId -> + verify_get_oauth_provider_returns_default_oauth_provider(DefaultOAuthProviderId) + end; + false -> + #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(get_openid_configuration, Config), + {ok, #oauth_provider{issuer = Issuer, + token_endpoint = TokenEndPoint, + jwks_uri = Jwks_uri} + } = oauth2_client:get_oauth_provider([issuer, token_endpoint, jwks_uri]), + + ?assertEqual(proplists:get_value(issuer, JsonPayload), Issuer), + ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), TokenEndPoint), + ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), Jwks_uri) + end. get_oauth_provider_given_oauth_provider_id(Config) -> - #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } - = lookup_expectation(get_openid_configuration, Config), - - ct:log("get_oauth_provider ~p", [?config(oauth_provider_id, Config)]), - {ok, #oauth_provider{ - issuer = Issuer, - token_endpoint = TokenEndPoint, - authorization_endpoint = AuthorizationEndpoint, - end_session_endpoint = EndSessionEndpoint, - jwks_uri = Jwks_uri}} = - oauth2_client:get_oauth_provider(?config(oauth_provider_id, Config), - [issuer, token_endpoint, jwks_uri, authorization_endpoint, end_session_endpoint]), - - ?assertEqual(proplists:get_value(issuer, JsonPayload), Issuer), - ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), TokenEndPoint), - ?assertEqual(proplists:get_value(authorization_endpoint, JsonPayload), AuthorizationEndpoint), - ?assertEqual(proplists:get_value(end_session_endpoint, JsonPayload), EndSessionEndpoint), - ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), Jwks_uri). 
+ case ?config(with_all_oauth_provider_settings, Config) of + true -> + {ok, #oauth_provider{ + id = Id, + issuer = Issuer, + token_endpoint = TokenEndPoint, + authorization_endpoint = AuthorizationEndpoint, + end_session_endpoint = EndSessionEndpoint, + jwks_uri = Jwks_uri}} = + oauth2_client:get_oauth_provider(?config(oauth_provider_id, Config), + [issuer, token_endpoint, jwks_uri, authorization_endpoint, + end_session_endpoint]), + + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, + oauth_providers, #{}), + ExpectedProvider = maps:get(Id, OAuthProviders, []), + ?assertEqual(proplists:get_value(issuer, ExpectedProvider), + Issuer), + ?assertEqual(proplists:get_value(token_endpoint, ExpectedProvider), + TokenEndPoint), + ?assertEqual(proplists:get_value(authorization_endpoint, ExpectedProvider), + AuthorizationEndpoint), + ?assertEqual(proplists:get_value(end_session_endpoint, ExpectedProvider), + EndSessionEndpoint), + ?assertEqual(proplists:get_value(jwks_uri, ExpectedProvider), + Jwks_uri); + false -> + #{response := [ {code, 200}, {content_type, _CT}, {payload, JsonPayload}] } + = lookup_expectation(get_openid_configuration, Config), + + {ok, #oauth_provider{ + issuer = Issuer, + token_endpoint = TokenEndPoint, + authorization_endpoint = AuthorizationEndpoint, + end_session_endpoint = EndSessionEndpoint, + jwks_uri = Jwks_uri}} = + oauth2_client:get_oauth_provider(?config(oauth_provider_id, Config), + [issuer, token_endpoint, jwks_uri, authorization_endpoint, + end_session_endpoint]), + + ?assertEqual(proplists:get_value(issuer, JsonPayload), + Issuer), + ?assertEqual(proplists:get_value(token_endpoint, JsonPayload), + TokenEndPoint), + ?assertEqual(proplists:get_value(authorization_endpoint, JsonPayload), + AuthorizationEndpoint), + ?assertEqual(proplists:get_value(end_session_endpoint, JsonPayload), + EndSessionEndpoint), + ?assertEqual(proplists:get_value(jwks_uri, JsonPayload), + Jwks_uri) + end. 
+ %%% HELPERS build_issuer(Scheme) -> + build_issuer(Scheme, ""). +build_issuer(Scheme, Path) -> uri_string:recompose(#{scheme => Scheme, host => "localhost", port => rabbit_data_coercion:to_integer(?AUTH_PORT), - path => ""}). + path => Path}). + build_token_endpoint_uri(Scheme) -> uri_string:recompose(#{scheme => Scheme, @@ -459,60 +578,48 @@ build_refresh_token_request(Request) -> client_secret = proplists:get_value(?REQUEST_CLIENT_SECRET, Request), refresh_token = proplists:get_value(?REQUEST_REFRESH_TOKEN, Request) }. -build_http_oauth_provider() -> - #oauth_provider { - issuer = build_issuer("http"), - token_endpoint = build_token_endpoint_uri("http"), - jwks_uri = build_jwks_uri("http") - }. keep_only_issuer_and_ssl_options(OauthProvider) -> #oauth_provider { + id = OauthProvider#oauth_provider.id, issuer = OauthProvider#oauth_provider.issuer, ssl_options = OauthProvider#oauth_provider.ssl_options }. -build_https_oauth_provider(CaCertFile) -> +build_https_oauth_provider(Id, CaCertFile) -> #oauth_provider { + id = Id, issuer = build_issuer("https"), + authorization_endpoint = "https://localhost:8000/authorize", + end_session_endpoint = "https://localhost:8000/logout", token_endpoint = build_token_endpoint_uri("https"), jwks_uri = build_jwks_uri("https"), ssl_options = ssl_options(verify_peer, false, CaCertFile) }. -oauth_provider_to_proplist(#oauth_provider{ issuer = Issuer, token_endpoint = TokenEndpoint, - ssl_options = SslOptions, jwks_uri = Jwks_url}) -> +oauth_provider_to_proplist(#oauth_provider{ + issuer = Issuer, + token_endpoint = TokenEndpoint, + end_session_endpoint = EndSessionEndpoint, + authorization_endpoint = AuthorizationEndpoint, + ssl_options = SslOptions, + jwks_uri = Jwks_uri}) -> [ { issuer, Issuer}, {token_endpoint, TokenEndpoint}, + {end_session_endpoint, EndSessionEndpoint}, + {authorization_endpoint, AuthorizationEndpoint}, { https, case SslOptions of undefined -> []; Value -> Value - end}, - {jwks_url, Jwks_url} ]. 
- -start_http_oauth_server(Port, Expectations) when is_list(Expectations) -> - Dispatch = cowboy_router:compile([ - {'_', [{Path, oauth_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} - ]), - ct:log("start_http_oauth_server with expectation list : ~p -> dispatch: ~p", [Expectations, Dispatch]), - {ok, _} = cowboy:start_clear(mock_http_auth_listener,[ {port, Port} ], - #{env => #{dispatch => Dispatch}}); - -start_http_oauth_server(Port, #{request := #{path := Path}} = Expected) -> - Dispatch = cowboy_router:compile([ - {'_', [{Path, oauth_http_mock, Expected}]} - ]), - ct:log("start_http_oauth_server with expectation : ~p -> dispatch: ~p ", [Expected, Dispatch]), - {ok, _} = cowboy:start_clear( - mock_http_auth_listener, - [{port, Port} - ], - #{env => #{dispatch => Dispatch}}). + end}, + {jwks_uri, Jwks_uri} ]. start_https_oauth_server(Port, CertsDir, Expectations) when is_list(Expectations) -> Dispatch = cowboy_router:compile([ - {'_', [{Path, oauth_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} + {'_', [{Path, oauth_http_mock, Expected} || #{request := #{path := Path}} + = Expected <- Expectations ]} ]), - ct:log("start_https_oauth_server with expectation list : ~p -> dispatch: ~p", [Expectations, Expectations]), + ct:log("start_https_oauth_server with expectation list : ~p -> dispatch: ~p", + [Expectations, Dispatch]), {ok, _} = cowboy:start_tls( mock_http_auth_listener, [{port, Port}, @@ -523,7 +630,8 @@ start_https_oauth_server(Port, CertsDir, Expectations) when is_list(Expectations start_https_oauth_server(Port, CertsDir, #{request := #{path := Path}} = Expected) -> Dispatch = cowboy_router:compile([{'_', [{Path, oauth_http_mock, Expected}]}]), - ct:log("start_https_oauth_server with expectation : ~p -> dispatch: ~p", [Expected, Dispatch]), + ct:log("start_https_oauth_server with expectation : ~p -> dispatch: ~p", + [Expected, Dispatch]), {ok, _} = cowboy:start_tls( 
mock_http_auth_listener, [{port, Port}, @@ -532,7 +640,7 @@ start_https_oauth_server(Port, CertsDir, #{request := #{path := Path}} = Expecte ], #{env => #{dispatch => Dispatch}}). -stop_http_auth_server() -> +stop_https_auth_server() -> cowboy:stop_listener(mock_http_auth_listener). -spec ssl_options(ssl:verify_type(), boolean(), file:filename()) -> list(). @@ -543,3 +651,130 @@ ssl_options(PeerVerification, FailIfNoPeerCert, CaCertFile) -> {crl_check, false}, {crl_cache, {ssl_crl_cache, {internal, [{http, 10000}]}}}, {cacertfile, CaCertFile}]. + +token(ExpiresIn) -> + Jwk = ?UTIL_MOD:fixture_jwk(), + AccessToken = ?UTIL_MOD:expirable_token_with_expiration_time(ExpiresIn), + {_, EncodedToken} = ?UTIL_MOD:sign_token_hs(AccessToken, Jwk), + EncodedToken. + + + +build_http_mock_behaviour(Request, Response) -> + #{request => Request, response => Response}. +build_http_get_request(Path) -> + build_http_get_request(Path, undefined). +build_http_get_request(Path, Parameters) -> + build_http_request(<<"GET">>, Path, Parameters). +build_http_request(Method, Path, Parameters) when is_binary(Path) -> + #{ + method => Method, + path => Path, + parameters => Parameters + }; +build_http_request(Method, Path, Parameters) -> + Request = #{ + method => Method, + path => list_to_binary(Path) + }, + case Parameters of + [] -> Request; + undefined -> Request; + _ -> maps:put(parameters, Parameters, Request) + end. + +build_http_get_openid_configuration_request() -> + build_http_get_openid_configuration_request(?DEFAULT_OPENID_CONFIGURATION_PATH). +build_http_get_openid_configuration_request(Endpoint) -> + build_http_get_openid_configuration_request(Endpoint, ""). +build_http_get_openid_configuration_request(Endpoint, Path) -> + build_http_get_request(Path ++ Endpoint). + + +build_http_200_json_response(Payload) -> + build_http_response(200, ?CONTENT_JSON, Payload). 
+ +build_http_response(Code, ContentType, Payload) -> + [ + {code, Code}, + {content_type, ContentType}, + {payload, Payload} + ]. +build_http_get_openid_configuration_payload() -> + Scheme = "https", + [ + {issuer, build_issuer(Scheme) }, + {authorization_endpoint, Scheme ++ "://localhost:8000/authorize"}, + {token_endpoint, build_token_endpoint_uri(Scheme)}, + {end_session_endpoint, Scheme ++ "://localhost:8000/logout"}, + {jwks_uri, build_jwks_uri(Scheme)} + ]. + +build_http_access_token_request() -> + build_http_request( + <<"POST">>, + ?MOCK_TOKEN_ENDPOINT, + [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ]). +build_http_200_access_token_response() -> + [ + {code, 200}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {access_token, token(?EXPIRES_IN_SECONDS)}, + {token_type, <<"Bearer">>} + ]} + ]. +build_http_400_access_token_response() -> + [ + {code, 400}, + {content_type, ?CONTENT_JSON}, + {payload, [ + {error, <<"invalid_client">>}, + {error_description, <<"invalid client found">>} + ]} + ]. +denies_access_token_expectation() -> + build_http_mock_behaviour(build_http_request( + <<"POST">>, + ?MOCK_TOKEN_ENDPOINT, + [ + {?REQUEST_CLIENT_ID, <<"invalid_client">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ]), build_http_400_access_token_response() + ). +auth_server_error_when_access_token_request_expectation() -> + build_http_mock_behaviour(build_http_request( + <<"POST">>, + ?MOCK_TOKEN_ENDPOINT, + [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ]), [{code, 500}] + ). +non_json_payload_when_access_token_request_expectation() -> + build_http_mock_behaviour(build_http_request( + <<"POST">>, + ?MOCK_TOKEN_ENDPOINT, + [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>} + ]), [ + {code, 400}, + {content_type, ?CONTENT_JSON}, + {payload, <<"{ some illegal json}">>} + ] + ). 
+ +grants_refresh_token_expectation() -> + build_http_mock_behaviour(build_http_request( + <<"POST">>, + ?MOCK_TOKEN_ENDPOINT, + [ + {?REQUEST_CLIENT_ID, <<"guest">>}, + {?REQUEST_CLIENT_SECRET, <<"password">>}, + {?REQUEST_REFRESH_TOKEN, <<"some refresh token">>} + ]), build_http_200_access_token_response() + ). diff --git a/deps/oauth2_client/test/unit_SUITE.erl b/deps/oauth2_client/test/unit_SUITE.erl index 0ffa6304ad14..ab632ceedc68 100644 --- a/deps/oauth2_client/test/unit_SUITE.erl +++ b/deps/oauth2_client/test/unit_SUITE.erl @@ -20,6 +20,7 @@ all() -> [ {group, ssl_options}, + {group, merge}, {group, get_expiration_time} ]. @@ -37,9 +38,96 @@ groups() -> access_token_response_without_expiration_time, access_token_response_with_expires_in, access_token_response_with_exp_in_access_token + ]}, + {merge, [], [ + merge_openid_configuration, + merge_oauth_provider ]} ]. +merge_oauth_provider(_) -> + OAuthProvider = #oauth_provider{id = "some_id", ssl_options = [ {verify, verify_none} ]}, + Proplist = [], + Proplist1 = oauth2_client:merge_oauth_provider(OAuthProvider, Proplist), + ?assertEqual([], Proplist), + + OAuthProvider1 = OAuthProvider#oauth_provider{jwks_uri = "https://jwks_uri"}, + Proplist2 = oauth2_client:merge_oauth_provider(OAuthProvider1, Proplist1), + ?assertEqual([{jwks_uri, OAuthProvider1#oauth_provider.jwks_uri}], Proplist2), + + OAuthProvider2 = OAuthProvider1#oauth_provider{end_session_endpoint = "https://end_session_endpoint"}, + Proplist3 = oauth2_client:merge_oauth_provider(OAuthProvider2, Proplist2), + ?assertEqual([{jwks_uri, OAuthProvider2#oauth_provider.jwks_uri}, + {end_session_endpoint, OAuthProvider2#oauth_provider.end_session_endpoint}], + Proplist3), + + OAuthProvider3 = OAuthProvider2#oauth_provider{authorization_endpoint = "https://authorization_endpoint"}, + Proplist4 = oauth2_client:merge_oauth_provider(OAuthProvider3, Proplist3), + ?assertEqual([{jwks_uri, OAuthProvider3#oauth_provider.jwks_uri}, + {end_session_endpoint, 
OAuthProvider3#oauth_provider.end_session_endpoint}, + {authorization_endpoint, OAuthProvider3#oauth_provider.authorization_endpoint}], + Proplist4), + + OAuthProvider4 = OAuthProvider3#oauth_provider{token_endpoint = "https://token_endpoint"}, + Proplist5 = oauth2_client:merge_oauth_provider(OAuthProvider4, Proplist4), + ?assertEqual([{jwks_uri, OAuthProvider4#oauth_provider.jwks_uri}, + {end_session_endpoint, OAuthProvider4#oauth_provider.end_session_endpoint}, + {authorization_endpoint, OAuthProvider4#oauth_provider.authorization_endpoint}, + {token_endpoint, OAuthProvider4#oauth_provider.token_endpoint}], + Proplist5). + +merge_openid_configuration(_) -> + OpenIdConfiguration = #openid_configuration{}, + OAuthProvider = #oauth_provider{id = "some_id", ssl_options = [ {verify, verify_none} ]}, + OAuthProvider1 = oauth2_client:merge_openid_configuration( + OpenIdConfiguration, OAuthProvider), + ?assertEqual(OAuthProvider#oauth_provider.id, OAuthProvider1#oauth_provider.id), + ?assertEqual([{verify, verify_none}], OAuthProvider1#oauth_provider.ssl_options), + ?assertEqual(undefined, OAuthProvider1#oauth_provider.jwks_uri), + ?assertEqual(undefined, OAuthProvider1#oauth_provider.token_endpoint), + ?assertEqual(undefined, OAuthProvider1#oauth_provider.authorization_endpoint), + ?assertEqual(undefined, OAuthProvider1#oauth_provider.end_session_endpoint), + + OpenIdConfiguration1 = #openid_configuration{jwks_uri = "https://jwks_uri"}, + OAuthProvider2 = oauth2_client:merge_openid_configuration( + OpenIdConfiguration1, OAuthProvider1), + ?assertEqual(OpenIdConfiguration1#openid_configuration.jwks_uri, + OAuthProvider2#oauth_provider.jwks_uri), + ?assertEqual(undefined, OAuthProvider2#oauth_provider.token_endpoint), + ?assertEqual(undefined, OAuthProvider2#oauth_provider.authorization_endpoint), + ?assertEqual(undefined, OAuthProvider2#oauth_provider.end_session_endpoint), + + OpenIdConfiguration2 = #openid_configuration{end_session_endpoint = 
"https://end_session_endpoint"}, + OAuthProvider3 = oauth2_client:merge_openid_configuration( + OpenIdConfiguration2, OAuthProvider2), + ?assertEqual(OpenIdConfiguration2#openid_configuration.end_session_endpoint, + OAuthProvider3#oauth_provider.end_session_endpoint), + ?assertEqual(undefined, OAuthProvider3#oauth_provider.authorization_endpoint), + ?assertEqual(undefined, OAuthProvider3#oauth_provider.token_endpoint), + + OpenIdConfiguration3 = #openid_configuration{authorization_endpoint = "https://authorization_endpoint"}, + OAuthProvider4 = oauth2_client:merge_openid_configuration( + OpenIdConfiguration3, OAuthProvider3), + ?assertEqual(OpenIdConfiguration3#openid_configuration.authorization_endpoint, + OAuthProvider4#oauth_provider.authorization_endpoint), + ?assertEqual(undefined, OAuthProvider4#oauth_provider.token_endpoint), + + OpenIdConfiguration4 = #openid_configuration{token_endpoint = "https://token_endpoint"}, + OAuthProvider5 = oauth2_client:merge_openid_configuration( + OpenIdConfiguration4, OAuthProvider4), + ?assertEqual(OpenIdConfiguration4#openid_configuration.token_endpoint, + OAuthProvider5#oauth_provider.token_endpoint), + + ?assertEqual(OpenIdConfiguration2#openid_configuration.end_session_endpoint, + OAuthProvider5#oauth_provider.end_session_endpoint), + ?assertEqual(OpenIdConfiguration3#openid_configuration.authorization_endpoint, + OAuthProvider5#oauth_provider.authorization_endpoint), + ?assertEqual(OpenIdConfiguration2#openid_configuration.end_session_endpoint, + OAuthProvider5#oauth_provider.end_session_endpoint), + ?assertEqual(OpenIdConfiguration1#openid_configuration.jwks_uri, + OAuthProvider5#oauth_provider.jwks_uri). + + no_ssl_options_triggers_verify_peer(_) -> ?assertMatch([ {verify, verify_peer}, @@ -83,7 +171,7 @@ peer_verification_set_to_verify_none(_) -> ?assertEqual(Expected2, oauth2_client:extract_ssl_options_as_list(#{ peer_verification => verify_none, cacertfile => "/tmp" - })). + })). 
peer_verification_set_to_verify_peer_with_cacertfile(_) -> @@ -144,4 +232,3 @@ access_token_response_without_expiration_time(_) -> }, ct:log("AccessTokenResponse ~p", [AccessTokenResponse]), ?assertEqual({error, missing_exp_field}, oauth2_client:get_expiration_time(AccessTokenResponse)). - diff --git a/deps/rabbit/.gitignore b/deps/rabbit/.gitignore index 7f6246dc7b9e..9e124a080135 100644 --- a/deps/rabbit/.gitignore +++ b/deps/rabbit/.gitignore @@ -2,7 +2,5 @@ /etc/ /test/config_schema_SUITE_data/schema/** -rabbit-rabbitmq-deps.mk - [Bb]in/ [Oo]bj/ diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 7df4bb179377..9bebe9be3ed5 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -26,7 +26,7 @@ exports_files(glob([ ]) + ["INSTALL"]) _APP_ENV = """[ - %% See https://www.rabbitmq.com/consumers.html#acknowledgement-timeout + %% See https://www.rabbitmq.com/docs/consumers#acknowledgement-timeout %% 30 minutes {consumer_timeout, 1800000}, {tcp_listeners, [5672]}, @@ -34,10 +34,8 @@ _APP_ENV = """[ {ssl_listeners, []}, {num_ssl_acceptors, 10}, {ssl_options, []}, - {vm_memory_high_watermark, 0.4}, - {vm_memory_high_watermark_paging_ratio, 0.5}, + {vm_memory_high_watermark, 0.6}, {vm_memory_calculation_strategy, rss}, - {memory_monitor_interval, 2500}, {disk_free_limit, 50000000}, %% 50MB {backing_queue_module, rabbit_variable_queue}, %% 0 ("no limit") would make a better default, but that @@ -45,6 +43,8 @@ _APP_ENV = """[ {frame_max, 131072}, %% see rabbitmq-server#1593 {channel_max, 2047}, + {session_max_per_connection, 64}, + {link_max_per_session, 256}, {ranch_connection_max, infinity}, {heartbeat, 60}, {msg_store_file_size_limit, 16777216}, @@ -58,8 +58,6 @@ _APP_ENV = """[ {default_user_tags, [administrator]}, {default_vhost, <<"/">>}, {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, - {amqp1_0_default_user, <<"guest">>}, - {amqp1_0_default_vhost, <<"/">>}, {loopback_users, [<<"guest">>]}, {password_hashing_module, 
rabbit_password_hashing_sha256}, {server_properties, []}, @@ -67,7 +65,9 @@ _APP_ENV = """[ {collect_statistics_interval, 5000}, {mnesia_table_loading_retry_timeout, 30000}, {mnesia_table_loading_retry_limit, 10}, - {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + {anonymous_login_user, <<"guest">>}, + {anonymous_login_pass, <<"guest">>}, + {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, {trace_vhosts, []}, @@ -84,10 +84,7 @@ _APP_ENV = """[ {linger, {true, 0}}, {exit_on_close, false} ]}, - {halt_on_upgrade_failure, true}, {ssl_apps, [asn1, crypto, public_key, ssl]}, - %% classic queue storage implementation version - {classic_queue_default_version, 2}, %% see rabbitmq-server#227 and related tickets. %% msg_store_credit_disc_bound only takes effect when %% messages are persisted to the message store. If messages @@ -106,10 +103,6 @@ _APP_ENV = """[ %% and rabbitmq-server#667 {channel_operation_timeout, 15000}, - %% see rabbitmq-server#486 - {autocluster, - [{peer_discovery_backend, rabbit_peer_discovery_classic_config}] - }, %% used by rabbit_peer_discovery_classic_config {cluster_nodes, {[], disc}}, @@ -139,13 +132,10 @@ _APP_ENV = """[ {credentials_obfuscation_fallback_secret, <<"nocookie">>}, {dead_letter_worker_consumer_prefetch, 32}, {dead_letter_worker_publisher_confirm_timeout, 180000}, - - %% EOL date for the current release series, if known/announced - {release_series_eol_date, none}, - {vhost_process_reconciliation_run_interval, 30}, %% for testing - {vhost_process_reconciliation_enabled, true} + {vhost_process_reconciliation_enabled, true}, + {license_line, "Licensed under the MPL 2.0. 
Website: https://rabbitmq.com"} ] """ @@ -330,10 +320,10 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "clustering_events_SUITE", + size = "medium", additional_beam = [ ":test_event_recorder_beam", ], - size = "medium", ) rabbitmq_integration_suite( @@ -470,18 +460,9 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "feature_flags_with_unpriveleged_user_SUITE", - size = "large", - additional_beam = [ - ":feature_flags_SUITE_beam_files", - ], - flaky = True, - shard_count = 2, - # The enabling_* tests chmod files and then expect writes to be blocked. - # This probably doesn't work because we are root in the remote docker image. - tags = ["no-remote-exec"], + name = "msg_size_metrics_SUITE", runtime_deps = [ - "//deps/rabbit/test/feature_flags_SUITE_data/my_plugin:erlang_app", + "//deps/rabbitmq_amqp_client:erlang_app", ], ) @@ -705,8 +686,12 @@ rabbitmq_suite( rabbitmq_suite( name = "rabbit_fifo_int_SUITE", size = "medium", + additional_beam = [ + ":test_test_util_beam", + ], deps = [ "//deps/rabbit_common:erlang_app", + "//deps/rabbitmq_ct_helpers:erlang_app", "@aten//:erlang_app", "@gen_batch_server//:erlang_app", "@meck//:erlang_app", @@ -722,6 +707,7 @@ rabbitmq_suite( ], deps = [ "//deps/rabbit_common:erlang_app", + "@meck//:erlang_app", "@proper//:erlang_app", "@ra//:erlang_app", ], @@ -735,6 +721,15 @@ rabbitmq_suite( ], ) +rabbitmq_suite( + name = "rabbit_fifo_q_SUITE", + size = "small", + deps = [ + "//deps/rabbit_common:erlang_app", + "@proper//:erlang_app", + ], +) + rabbitmq_integration_suite( name = "rabbit_fifo_dlx_integration_SUITE", size = "medium", @@ -818,7 +813,7 @@ rabbitmq_integration_suite( additional_beam = [ ":test_queue_utils_beam", ], - shard_count = 19, + shard_count = 20, deps = [ "@proper//:erlang_app", ], @@ -986,6 +981,11 @@ rabbitmq_integration_suite( size = "medium", ) +rabbitmq_suite( + name = "unit_msg_size_metrics_SUITE", + size = "small", +) + rabbitmq_suite( name = 
"unit_operator_policy_SUITE", size = "small", @@ -1157,7 +1157,7 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "metadata_store_clustering_SUITE", size = "large", - shard_count = 18, + shard_count = 19, sharding_method = "case", ) @@ -1195,6 +1195,14 @@ rabbitmq_integration_suite( ], ) +rabbitmq_integration_suite( + name = "cluster_upgrade_SUITE", + size = "medium", + additional_beam = [ + ":test_queue_utils_beam", + ], +) + rabbitmq_integration_suite( name = "amqp_client_SUITE", size = "large", @@ -1349,6 +1357,7 @@ eunit( ":test_test_rabbit_event_handler_beam", ":test_clustering_utils_beam", ":test_event_recorder_beam", + ":test_rabbit_ct_hook_beam", ], target = ":test_erlang_app", test_env = { diff --git a/deps/rabbit/INSTALL b/deps/rabbit/INSTALL index d105eb549833..14da76dbce1d 100644 --- a/deps/rabbit/INSTALL +++ b/deps/rabbit/INSTALL @@ -1,2 +1,2 @@ -Please see https://www.rabbitmq.com/download.html for installation +Please see https://www.rabbitmq.com/docs/download for installation guides. 
diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index 92d2b27aa80f..f47d655be09b 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -14,10 +14,8 @@ define PROJECT_ENV {ssl_listeners, []}, {num_ssl_acceptors, 10}, {ssl_options, []}, - {vm_memory_high_watermark, 0.4}, - {vm_memory_high_watermark_paging_ratio, 0.5}, + {vm_memory_high_watermark, 0.6}, {vm_memory_calculation_strategy, rss}, - {memory_monitor_interval, 2500}, {disk_free_limit, 50000000}, %% 50MB {backing_queue_module, rabbit_variable_queue}, %% 0 ("no limit") would make a better default, but that @@ -25,6 +23,8 @@ define PROJECT_ENV {frame_max, 131072}, %% see rabbitmq-server#1593 {channel_max, 2047}, + {session_max_per_connection, 64}, + {link_max_per_session, 256}, {ranch_connection_max, infinity}, {heartbeat, 60}, {msg_store_file_size_limit, 16777216}, @@ -38,8 +38,6 @@ define PROJECT_ENV {default_user_tags, [administrator]}, {default_vhost, <<"/">>}, {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, - {amqp1_0_default_user, <<"guest">>}, - {amqp1_0_default_vhost, <<"/">>}, {loopback_users, [<<"guest">>]}, {password_hashing_module, rabbit_password_hashing_sha256}, {server_properties, []}, @@ -47,7 +45,12 @@ define PROJECT_ENV {collect_statistics_interval, 5000}, {mnesia_table_loading_retry_timeout, 30000}, {mnesia_table_loading_retry_limit, 10}, - {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, + %% The identity to act as for anonymous logins. + {anonymous_login_user, <<"guest">>}, + {anonymous_login_pass, <<"guest">>}, + %% "The server mechanisms are ordered in decreasing level of preference." 
+ %% AMQP §5.3.3.1 + {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {auth_backends, [rabbit_auth_backend_internal]}, {delegate_count, 16}, {trace_vhosts, []}, @@ -64,10 +67,7 @@ define PROJECT_ENV {linger, {true, 0}}, {exit_on_close, false} ]}, - {halt_on_upgrade_failure, true}, {ssl_apps, [asn1, crypto, public_key, ssl]}, - %% classic queue storage implementation version - {classic_queue_default_version, 2}, %% see rabbitmq-server#227 and related tickets. %% msg_store_credit_disc_bound only takes effect when %% messages are persisted to the message store. If messages @@ -85,14 +85,10 @@ define PROJECT_ENV %% see rabbitmq-server#248 %% and rabbitmq-server#667 {channel_operation_timeout, 15000}, - %% See https://www.rabbitmq.com/consumers.html#acknowledgement-timeout + %% See https://www.rabbitmq.com/docs/consumers#acknowledgement-timeout %% 30 minutes {consumer_timeout, 1800000}, - %% see rabbitmq-server#486 - {autocluster, - [{peer_discovery_backend, rabbit_peer_discovery_classic_config}] - }, %% used by rabbit_peer_discovery_classic_config {cluster_nodes, {[], disc}}, @@ -122,11 +118,10 @@ define PROJECT_ENV {credentials_obfuscation_fallback_secret, <<"nocookie">>}, {dead_letter_worker_consumer_prefetch, 32}, {dead_letter_worker_publisher_confirm_timeout, 180000}, - %% EOL date for the current release series, if known/announced - {release_series_eol_date, none}, {vhost_process_reconciliation_run_interval, 30}, %% for testing - {vhost_process_reconciliation_enabled, true} + {vhost_process_reconciliation_enabled, true}, + {license_line, "Licensed under the MPL 2.0. 
Website: https://rabbitmq.com"} ] endef @@ -139,8 +134,6 @@ TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers meck proper amqp_clie PLT_APPS += mnesia runtime_tools dep_syslog = git https://github.com/schlagert/syslog 4.0.0 -dep_osiris = git https://github.com/rabbitmq/osiris v1.8.2 -dep_systemd = hex 0.6.1 define usage_xml_to_erl $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1)))) @@ -151,12 +144,8 @@ MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9]) WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES)) MD_MANPAGES = $(patsubst %,%.md,$(MANPAGES)) -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include ../../erlang.mk @@ -219,8 +208,114 @@ SLOW_CT_SUITES := backing_queue \ vhost FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES)) -ct-fast: CT_SUITES = $(FAST_CT_SUITES) -ct-slow: CT_SUITES = $(SLOW_CT_SUITES) +ct-fast: + $(MAKE) ct CT_SUITES='$(FAST_CT_SUITES)' + +ct-slow: + $(MAKE) ct CT_SUITES='$(SLOW_CT_SUITES)' + +CT_OPTS += -ct_hooks rabbit_ct_hook [] + +# Parallel CT. 
+# +# @todo We must ensure that the CT_OPTS also apply to ct-master +# @todo We should probably refactor ct_master.erl to have node init in a separate .erl + +define ct_master.erl + StartOpts = #{ + host => "localhost", + connection => standard_io, + args => ["-hidden"] + }, + {ok, Pid1, _} = peer:start(StartOpts#{name => "rabbit_shard1"}), + {ok, Pid2, _} = peer:start(StartOpts#{name => "rabbit_shard2"}), + {ok, Pid3, _} = peer:start(StartOpts#{name => "rabbit_shard3"}), + {ok, Pid4, _} = peer:start(StartOpts#{name => "rabbit_shard4"}), + peer:call(Pid1, net_kernel, set_net_ticktime, [5]), + peer:call(Pid2, net_kernel, set_net_ticktime, [5]), + peer:call(Pid3, net_kernel, set_net_ticktime, [5]), + peer:call(Pid4, net_kernel, set_net_ticktime, [5]), + peer:call(Pid1, persistent_term, put, [rabbit_ct_tcp_port_base, 23000]), + peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), + peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), + peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), + ct_master:run("$1"), + peer:stop(Pid4), + peer:stop(Pid3), + peer:stop(Pid2), + peer:stop(Pid1), + halt() +endef + +PARALLEL_CT_SET_1_A = amqp_client unit_cluster_formation_locking_mocks unit_cluster_formation_sort_nodes unit_collections unit_config_value_encryption unit_connection_tracking +PARALLEL_CT_SET_1_B = amqp_address amqp_auth amqp_credit_api_v2 amqp_system signal_handling single_active_consumer unit_access_control_authn_authz_context_propagation unit_access_control_credential_validation unit_amqp091_content_framing unit_amqp091_server_properties unit_app_management +PARALLEL_CT_SET_1_C = amqp_proxy_protocol amqpl_consumer_ack amqpl_direct_reply_to backing_queue bindings rabbit_db_maintenance rabbit_db_msup rabbit_db_policy rabbit_db_queue rabbit_db_topic_exchange rabbit_direct_reply_to_prop cluster_limit cluster_minority term_to_binary_compat_prop topic_permission transactions unicode unit_access_control 
+PARALLEL_CT_SET_1_D = amqqueue_backward_compatibility channel_interceptor channel_operation_timeout classic_queue classic_queue_prop config_schema peer_discovery_dns peer_discovery_tmp_hidden_node per_node_limit per_user_connection_channel_limit + +PARALLEL_CT_SET_2_A = cluster confirms_rejects consumer_timeout rabbit_access_control rabbit_confirms rabbit_core_metrics_gc rabbit_cuttlefish rabbit_db_binding rabbit_db_exchange +PARALLEL_CT_SET_2_B = clustering_recovery crashing_queues deprecated_features direct_exchange_routing_v2 disconnect_detected_during_alarm exchanges unit_gen_server2 +PARALLEL_CT_SET_2_C = disk_monitor dynamic_qq unit_disk_monitor unit_file_handle_cache unit_log_management unit_operator_policy +PARALLEL_CT_SET_2_D = queue_length_limits queue_parallel quorum_queue_member_reconciliation rabbit_fifo rabbit_fifo_dlx rabbit_stream_coordinator + +PARALLEL_CT_SET_3_A = definition_import per_user_connection_channel_limit_partitions per_vhost_connection_limit_partitions policy priority_queue_recovery rabbit_fifo_prop rabbit_fifo_v0 rabbit_stream_sac_coordinator unit_credit_flow unit_queue_consumers unit_queue_location unit_quorum_queue +PARALLEL_CT_SET_3_B = cluster_upgrade list_consumers_sanity_check list_queues_online_and_offline logging lqueue maintenance_mode rabbit_fifo_q +PARALLEL_CT_SET_3_C = cli_forget_cluster_node feature_flags_v2 mc_unit message_containers_deaths_v2 message_size_limit metadata_store_migration +PARALLEL_CT_SET_3_D = metadata_store_phase1 metrics mirrored_supervisor msg_store peer_discovery_classic_config proxy_protocol runtime_parameters unit_stats_and_metrics unit_supervisor2 unit_vm_memory_monitor + +PARALLEL_CT_SET_4_A = clustering_events rabbit_local_random_exchange rabbit_message_interceptor rabbitmq_4_0_deprecations unit_pg_local unit_plugin_directories unit_plugin_versioning unit_policy_validators unit_priority_queue +PARALLEL_CT_SET_4_B = per_user_connection_tracking per_vhost_connection_limit 
rabbit_fifo_dlx_integration rabbit_fifo_int +PARALLEL_CT_SET_4_C = msg_size_metrics unit_msg_size_metrics per_vhost_msg_store per_vhost_queue_limit priority_queue upgrade_preparation vhost +PARALLEL_CT_SET_4_D = per_user_connection_channel_tracking product_info publisher_confirms_parallel queue_type rabbitmq_queues_cli_integration rabbitmqctl_integration rabbitmqctl_shutdown routing + +PARALLEL_CT_SET_1 = $(sort $(PARALLEL_CT_SET_1_A) $(PARALLEL_CT_SET_1_B) $(PARALLEL_CT_SET_1_C) $(PARALLEL_CT_SET_1_D)) +PARALLEL_CT_SET_2 = $(sort $(PARALLEL_CT_SET_2_A) $(PARALLEL_CT_SET_2_B) $(PARALLEL_CT_SET_2_C) $(PARALLEL_CT_SET_2_D)) +PARALLEL_CT_SET_3 = $(sort $(PARALLEL_CT_SET_3_A) $(PARALLEL_CT_SET_3_B) $(PARALLEL_CT_SET_3_C) $(PARALLEL_CT_SET_3_D)) +PARALLEL_CT_SET_4 = $(sort $(PARALLEL_CT_SET_4_A) $(PARALLEL_CT_SET_4_B) $(PARALLEL_CT_SET_4_C) $(PARALLEL_CT_SET_4_D)) + +SEQUENTIAL_CT_SUITES = clustering_management dead_lettering feature_flags metadata_store_clustering quorum_queue rabbit_stream_queue +PARALLEL_CT_SUITES = $(PARALLEL_CT_SET_1) $(PARALLEL_CT_SET_2) $(PARALLEL_CT_SET_3) $(PARALLEL_CT_SET_4) + +ifneq ($(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES)),) +$(error Some test suites in CT_SUITES but not configured for CI: $(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES))) +endif + +define tpl_parallel_ct_test_spec +{logdir, "$(CT_LOGS_DIR)"}. +{logdir, master, "$(CT_LOGS_DIR)"}. +{create_priv_dir, all_nodes, auto_per_run}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. +{node, shard4, 'rabbit_shard4@localhost'}. + +{define, 'Set1', [$(call comma_list,$(addsuffix _SUITE,$1))]}. +{define, 'Set2', [$(call comma_list,$(addsuffix _SUITE,$2))]}. +{define, 'Set3', [$(call comma_list,$(addsuffix _SUITE,$3))]}. +{define, 'Set4', [$(call comma_list,$(addsuffix _SUITE,$4))]}. + +{suites, shard1, "test/", 'Set1'}. 
+{suites, shard2, "test/", 'Set2'}. +{suites, shard3, "test/", 'Set3'}. +{suites, shard4, "test/", 'Set4'}. +endef + +define parallel_ct_set_target +tpl_parallel_ct_test_spec_set_$1 = $$(call tpl_parallel_ct_test_spec,$(PARALLEL_CT_SET_$(1)_A),$(PARALLEL_CT_SET_$(1)_B),$(PARALLEL_CT_SET_$(1)_C),$(PARALLEL_CT_SET_$(1)_D)) + +parallel-ct-set-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(verbose) $$(call core_render,tpl_parallel_ct_test_spec_set_$(1),ct.set-$(1).spec) + $$(call erlang,$$(call ct_master.erl,ct.set-$(1).spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) +endef + +$(foreach set,1 2 3 4,$(eval $(call parallel_ct_set_target,$(set)))) + +# @todo Generate ct.test.spec from Makefile variables instead of hardcoded for ct-master + +parallel-ct: test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(call erlang,$(call ct_master.erl,ct.test.spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) # -------------------------------------------------------------------- # Compilation. @@ -280,6 +375,7 @@ web-manpages: $(WEB_MANPAGES) $(MD_MANPAGES) gsub(/class="D1"/, "class=\"D1 lang-bash\"", line); \ gsub(/class="Bd Bd-indent"/, "class=\"Bd Bd-indent lang-bash\"", line); \ gsub(/&#[xX]201[cCdD];/, "\\"", line); \ + gsub(/\.html/, "", line); \ print line; \ } } \ ' > "$@" diff --git a/deps/rabbit/README.md b/deps/rabbit/README.md index 3424377e3cad..2e2e7e2ccdbf 100644 --- a/deps/rabbit/README.md +++ b/deps/rabbit/README.md @@ -1,6 +1,6 @@ # RabbitMQ Server -[RabbitMQ](https://rabbitmq.com) is a [feature rich](https://rabbitmq.com/documentation.html), multi-protocol messaging broker. It supports: +[RabbitMQ](https://www.rabbitmq.com) is a [feature rich](https://www.rabbitmq.com/docs), multi-protocol messaging broker. 
It supports: * AMQP 0-9-1 * AMQP 1.0 @@ -10,23 +10,22 @@ ## Installation - * [Installation guides](https://rabbitmq.com/download.html) for various platforms - * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview.html) - * [Changelog](https://www.rabbitmq.com/changelog.html) + * [Installation guides](https://www.rabbitmq.com/docs/download) for various platforms + * [Kubernetes Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview) * [Releases](https://github.com/rabbitmq/rabbitmq-server/releases) on GitHub - * [Supported and unsupported series](https://www.rabbitmq.com/versions.html) - * [Supported Erlang versions](https://www.rabbitmq.com/which-erlang.html) + * [Supported and unsupported series](https://www.rabbitmq.com/release-information) + * [Supported Erlang versions](https://www.rabbitmq.com/docs/which-erlang) ## Tutorials & Documentation - * [RabbitMQ tutorials](https://rabbitmq.com/getstarted.html) - * [All documentation guides](https://rabbitmq.com/documentation.html) - * [CLI tools guide](https://rabbitmq.com/cli.html) - * [Configuration guide](https://rabbitmq.com/configure.html) - * [Client libraries and tools](https://rabbitmq.com/devtools.html) - * [Monitoring guide](https://rabbitmq.com/monitoring.html) - * [Production checklist](https://rabbitmq.com/production-checklist.html) + * [RabbitMQ tutorials](https://www.rabbitmq.com/tutorials) + * [All documentation guides](https://www.rabbitmq.com/docs) + * [CLI tools guide](https://www.rabbitmq.com/docs/cli) + * [Configuration guide](https://www.rabbitmq.com/docs/configure) + * [Client libraries and tools](https://www.rabbitmq.com/client-libraries/devtools) + * [Monitoring guide](https://www.rabbitmq.com/docs/monitoring) + * [Production checklist](https://www.rabbitmq.com/docs/production-checklist) * [Runnable tutorials](https://github.com/rabbitmq/rabbitmq-tutorials/) * [Documentation 
source](https://github.com/rabbitmq/rabbitmq-website/) @@ -34,14 +33,15 @@ ## Getting Help * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users) - * [Commercial support](https://rabbitmq.com/services.html) from [Pivotal](https://pivotal.io) for open source RabbitMQ - * [Community Slack](https://rabbitmq-slack.herokuapp.com/) + * [Commercial support](https://tanzu.vmware.com/rabbitmq/oss) from [Broadcom](https://tanzu.vmware.com) for open source RabbitMQ + * [Community Discord](https://www.rabbitmq.com/discord) + * [Community Slack](https://www.rabbitmq.com/slack) * `#rabbitmq` on Freenode ## Contributing -See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://rabbitmq.com/github.html). +See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](https://www.rabbitmq.com/github). Questions about contributing, internals and so on are very welcome on the [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users). @@ -53,8 +53,8 @@ RabbitMQ server is [licensed under the MPL 2.0](LICENSE-MPL-RabbitMQ). ## Building From Source and Packaging - * [Building RabbitMQ from Source](https://rabbitmq.com/build-server.html) - * [Building RabbitMQ Distribution Packages](https://rabbitmq.com/build-server.html) + * [Building RabbitMQ from Source](https://www.rabbitmq.com/docs/build-server) + * [Building RabbitMQ Distribution Packages](https://www.rabbitmq.com/docs/build-server) ## Copyright diff --git a/deps/rabbit/SECURITY.md b/deps/rabbit/SECURITY.md index 30c5c73da7b5..d4b36312c5ca 100644 --- a/deps/rabbit/SECURITY.md +++ b/deps/rabbit/SECURITY.md @@ -2,7 +2,7 @@ ## Supported Versions -See [RabbitMQ Release Series](https://www.rabbitmq.com/versions.html) for a list of currently supported +See [RabbitMQ Release Series](https://www.rabbitmq.com/release-information) for a list of currently supported versions. Vulnerabilities reported for versions out of support will not be investigated. 
diff --git a/deps/rabbit/app.bzl b/deps/rabbit/app.bzl index 44095b8a7d13..4832861d9782 100644 --- a/deps/rabbit/app.bzl +++ b/deps/rabbit/app.bzl @@ -58,6 +58,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", "src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -146,8 +147,10 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", "src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", @@ -166,6 +169,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_metrics.erl", "src/rabbit_mirror_queue_misc.erl", "src/rabbit_mnesia.erl", + "src/rabbit_msg_size_metrics.erl", "src/rabbit_msg_store.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", @@ -311,6 +315,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", "src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -399,8 +404,10 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", "src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", @@ -419,6 +426,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_metrics.erl", "src/rabbit_mirror_queue_misc.erl", "src/rabbit_mnesia.erl", + 
"src/rabbit_msg_size_metrics.erl", "src/rabbit_msg_store.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", @@ -519,6 +527,7 @@ def all_srcs(name = "all_srcs"): "include/amqqueue.hrl", "include/amqqueue_v2.hrl", "include/internal_user.hrl", + "include/khepri.hrl", "include/mc.hrl", "include/rabbit_amqp.hrl", "include/rabbit_global_counters.hrl", @@ -541,6 +550,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_fifo_dlx.hrl", "src/rabbit_fifo_v0.hrl", "src/rabbit_fifo_v1.hrl", + "src/rabbit_fifo_v3.hrl", "src/rabbit_stream_coordinator.hrl", "src/rabbit_stream_sac_coordinator.hrl", ], @@ -581,6 +591,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_amqqueue_sup_sup.erl", "src/rabbit_auth_backend_internal.erl", "src/rabbit_auth_mechanism_amqplain.erl", + "src/rabbit_auth_mechanism_anonymous.erl", "src/rabbit_auth_mechanism_cr_demo.erl", "src/rabbit_auth_mechanism_plain.erl", "src/rabbit_autoheal.erl", @@ -672,8 +683,10 @@ def all_srcs(name = "all_srcs"): "src/rabbit_fifo_dlx_sup.erl", "src/rabbit_fifo_dlx_worker.erl", "src/rabbit_fifo_index.erl", + "src/rabbit_fifo_q.erl", "src/rabbit_fifo_v0.erl", "src/rabbit_fifo_v1.erl", + "src/rabbit_fifo_v3.erl", "src/rabbit_file.erl", "src/rabbit_global_counters.erl", "src/rabbit_guid.erl", @@ -692,6 +705,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_metrics.erl", "src/rabbit_mirror_queue_misc.erl", "src/rabbit_mnesia.erl", + "src/rabbit_msg_size_metrics.erl", "src/rabbit_msg_store.erl", "src/rabbit_msg_store_gc.erl", "src/rabbit_networking.erl", @@ -980,14 +994,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "feature_flags_with_unpriveleged_user_SUITE_beam_files", - testonly = True, - srcs = ["test/feature_flags_with_unpriveleged_user_SUITE.erl"], - outs = ["test/feature_flags_with_unpriveleged_user_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - ) erlang_bytecode( name = 
"list_consumers_sanity_check_SUITE_beam_files", testonly = True, @@ -1288,7 +1294,10 @@ def test_suite_beam_files(name = "test_suite_beam_files"): testonly = True, srcs = ["test/rabbit_fifo_SUITE.erl"], outs = ["test/rabbit_fifo_SUITE.beam"], - hdrs = ["src/rabbit_fifo.hrl"], + hdrs = [ + "src/rabbit_fifo.hrl", + "src/rabbit_fifo_dlx.hrl", + ], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", deps = ["//deps/rabbit_common:erlang_app"], @@ -1700,6 +1709,14 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) + erlang_bytecode( + name = "unit_msg_size_metrics_SUITE_beam_files", + testonly = True, + srcs = ["test/unit_msg_size_metrics_SUITE.erl"], + outs = ["test/unit_msg_size_metrics_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + ) erlang_bytecode( name = "unit_operator_policy_SUITE_beam_files", testonly = True, @@ -1969,7 +1986,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): outs = ["test/metadata_store_clustering_SUITE.beam"], app_name = "rabbit", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app"], + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], ) erlang_bytecode( name = "metadata_store_migration_SUITE_beam_files", @@ -2142,3 +2159,39 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) + erlang_bytecode( + name = "rabbit_fifo_q_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_fifo_q_SUITE.erl"], + outs = ["test/rabbit_fifo_q_SUITE.beam"], + hdrs = ["src/rabbit_fifo.hrl"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["@proper//:erlang_app"], + ) + erlang_bytecode( + name = "cluster_upgrade_SUITE_beam_files", + testonly = True, + srcs = ["test/cluster_upgrade_SUITE.erl"], + outs = 
["test/cluster_upgrade_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) + erlang_bytecode( + name = "test_rabbit_ct_hook_beam", + testonly = True, + srcs = ["test/rabbit_ct_hook.erl"], + outs = ["test/rabbit_ct_hook.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "msg_size_metrics_SUITE_beam_files", + testonly = True, + srcs = ["test/msg_size_metrics_SUITE.erl"], + outs = ["test/msg_size_metrics_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app"], + ) diff --git a/deps/rabbit/ct.test.spec b/deps/rabbit/ct.test.spec new file mode 100644 index 000000000000..e1027d06105f --- /dev/null +++ b/deps/rabbit/ct.test.spec @@ -0,0 +1,187 @@ +{logdir, "logs/"}. +{logdir, master, "logs/"}. +{create_priv_dir, all_nodes, auto_per_run}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. +{node, shard4, 'rabbit_shard4@localhost'}. + +%% +%% Sets of test suites that take around the same time to complete. +%% + +{define, 'Set1', [ + amqp_address_SUITE +, amqp_auth_SUITE +, amqp_client_SUITE +, amqp_credit_api_v2_SUITE +, amqp_proxy_protocol_SUITE +, amqp_system_SUITE +, amqpl_consumer_ack_SUITE +, amqpl_direct_reply_to_SUITE +, amqqueue_backward_compatibility_SUITE +, backing_queue_SUITE +, bindings_SUITE +, channel_interceptor_SUITE +, channel_operation_timeout_SUITE +, classic_queue_SUITE +, classic_queue_prop_SUITE +]}. + +{define, 'Set2', [ + cluster_SUITE +, config_schema_SUITE +, confirms_rejects_SUITE +, consumer_timeout_SUITE +, crashing_queues_SUITE +, deprecated_features_SUITE +, direct_exchange_routing_v2_SUITE +, disconnect_detected_during_alarm_SUITE +, disk_monitor_SUITE +, dynamic_qq_SUITE +, exchanges_SUITE +, rabbit_stream_queue_SUITE +]}. 
+ +{define, 'Set3', [ + cli_forget_cluster_node_SUITE +, feature_flags_SUITE +, feature_flags_v2_SUITE +, list_consumers_sanity_check_SUITE +, list_queues_online_and_offline_SUITE +, logging_SUITE +, lqueue_SUITE +, maintenance_mode_SUITE +, mc_unit_SUITE +, message_containers_deaths_v2_SUITE +, message_size_limit_SUITE +, metadata_store_migration_SUITE +, metadata_store_phase1_SUITE +, metrics_SUITE +, mirrored_supervisor_SUITE +, msg_store_SUITE +, peer_discovery_classic_config_SUITE +]}. + +{define, 'Set4', [ + msg_size_metrics_SUITE +, peer_discovery_dns_SUITE +, peer_discovery_tmp_hidden_node_SUITE +, per_node_limit_SUITE +, per_user_connection_channel_limit_SUITE +, per_user_connection_channel_tracking_SUITE +, per_user_connection_tracking_SUITE +, per_vhost_connection_limit_SUITE +, per_vhost_msg_store_SUITE +, per_vhost_queue_limit_SUITE +, policy_SUITE +, priority_queue_SUITE +, priority_queue_recovery_SUITE +, product_info_SUITE +, proxy_protocol_SUITE +, publisher_confirms_parallel_SUITE +, unit_msg_size_metrics_SUITE +]}. + +{define, 'Set5', [ + clustering_recovery_SUITE +, metadata_store_clustering_SUITE +, queue_length_limits_SUITE +, queue_parallel_SUITE +, quorum_queue_SUITE +, rabbit_access_control_SUITE +, rabbit_confirms_SUITE +, rabbit_core_metrics_gc_SUITE +, rabbit_cuttlefish_SUITE +, rabbit_db_binding_SUITE +, rabbit_db_exchange_SUITE +, rabbit_db_maintenance_SUITE +, rabbit_db_msup_SUITE +, rabbit_db_policy_SUITE +, rabbit_db_queue_SUITE +, rabbit_db_topic_exchange_SUITE +, rabbit_direct_reply_to_prop_SUITE +]}. 
+ +{define, 'Set6', [ + queue_type_SUITE +, quorum_queue_member_reconciliation_SUITE +, rabbit_fifo_SUITE +, rabbit_fifo_dlx_SUITE +, rabbit_fifo_dlx_integration_SUITE +, rabbit_fifo_int_SUITE +, rabbit_fifo_prop_SUITE +, rabbit_fifo_v0_SUITE +, rabbit_local_random_exchange_SUITE +, rabbit_message_interceptor_SUITE +, rabbit_stream_coordinator_SUITE +, rabbit_stream_sac_coordinator_SUITE +, rabbitmq_4_0_deprecations_SUITE +, rabbitmq_queues_cli_integration_SUITE +, rabbitmqctl_integration_SUITE +, rabbitmqctl_shutdown_SUITE +, routing_SUITE +, runtime_parameters_SUITE +]}. + +{define, 'Set7', [ + cluster_limit_SUITE +, cluster_minority_SUITE +, clustering_management_SUITE +, signal_handling_SUITE +, single_active_consumer_SUITE +, term_to_binary_compat_prop_SUITE +, topic_permission_SUITE +, transactions_SUITE +, unicode_SUITE +, unit_access_control_SUITE +, unit_access_control_authn_authz_context_propagation_SUITE +, unit_access_control_credential_validation_SUITE +, unit_amqp091_content_framing_SUITE +, unit_amqp091_server_properties_SUITE +, unit_app_management_SUITE +, unit_cluster_formation_locking_mocks_SUITE +, unit_cluster_formation_sort_nodes_SUITE +, unit_collections_SUITE +, unit_config_value_encryption_SUITE +, unit_connection_tracking_SUITE +]}. + +{define, 'Set8', [ + dead_lettering_SUITE +, definition_import_SUITE +, per_user_connection_channel_limit_partitions_SUITE +, per_vhost_connection_limit_partitions_SUITE +, unit_credit_flow_SUITE +, unit_disk_monitor_SUITE +, unit_file_handle_cache_SUITE +, unit_gen_server2_SUITE +, unit_log_management_SUITE +, unit_operator_policy_SUITE +, unit_pg_local_SUITE +, unit_plugin_directories_SUITE +, unit_plugin_versioning_SUITE +, unit_policy_validators_SUITE +, unit_priority_queue_SUITE +, unit_queue_consumers_SUITE +, unit_queue_location_SUITE +, unit_quorum_queue_SUITE +, unit_stats_and_metrics_SUITE +, unit_supervisor2_SUITE +, unit_vm_memory_monitor_SUITE +, upgrade_preparation_SUITE +, vhost_SUITE +]}. 
+ +{suites, shard1, "test/", 'Set1'}. +{suites, shard1, "test/", 'Set2'}. + +{suites, shard2, "test/", 'Set3'}. +{suites, shard2, "test/", 'Set4'}. + +{suites, shard3, "test/", 'Set5'}. +{suites, shard3, "test/", 'Set6'}. + +{suites, shard4, "test/", 'Set7'}. +{suites, shard4, "test/", 'Set8'}. diff --git a/deps/rabbit/docs/.gitignore b/deps/rabbit/docs/.gitignore new file mode 100644 index 000000000000..1342b3e396fc --- /dev/null +++ b/deps/rabbit/docs/.gitignore @@ -0,0 +1,3 @@ +*.html +*.md + diff --git a/deps/rabbit/docs/README-for-packages b/deps/rabbit/docs/README-for-packages index f507a74054fa..eb5f8287f1b2 100644 --- a/deps/rabbit/docs/README-for-packages +++ b/deps/rabbit/docs/README-for-packages @@ -2,13 +2,9 @@ This is rabbitmq-server, a message broker implementing AMQP 0-9-1, AMQP 1.0, STOMP and MQTT. Most of the documentation for RabbitMQ is provided on the RabbitMQ web -site. You can see documentation for the current version at +site. You can see documentation for the current and previous versions at -https://www.rabbitmq.com/documentation.html - -and for previous versions at - -https://www.rabbitmq.com/previous.html +https://www.rabbitmq.com/docs Man pages are installed with this package. Of particular interest are rabbitmqctl(8), rabbitmq-diagnostics(8), rabbitmq-queues(8). @@ -16,14 +12,14 @@ They interact with a running node. rabbitmq-plugins(8) is used to manage plugin All of these should be run as the superuser. Learn more about CLI tools at -https://www.rabbitmq.com/cli.html +https://www.rabbitmq.com/docs/cli An example configuration file is provided in the same directory as this README. Copy it to /etc/rabbitmq/rabbitmq.conf to use it. The RabbitMQ server must be restarted after changing the configuration file. Learn more about configuration at -https://www.rabbitmq.com/configure.html +https://www.rabbitmq.com/docs/configure An example policy file for HA queues is provided in the same directory as this README. 
Copy and chmod +x it to diff --git a/deps/rabbit/docs/README.md b/deps/rabbit/docs/README.md index df2a126466b8..b47fc721dd2c 100644 --- a/deps/rabbit/docs/README.md +++ b/deps/rabbit/docs/README.md @@ -1,31 +1,63 @@ # Manual Pages and Documentation Extras -This directory contains [CLI tool](https://rabbitmq.com/cli.html) man page sources as well as a few documentation extras: +This directory contains [CLI tools](https://rabbitmq.com/docs/cli/) man page sources as well as a few documentation extras: - * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats)) - * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/configure.html#advanced-config-file)) + * An [annotated rabbitmq.conf example](./rabbitmq.conf.example) (see [new style configuration format](https://www.rabbitmq.com/docs/configure#config-file-formats)) + * An [annotated advanced.config example](./advanced.config.example) (see [The advanced.config file](https://www.rabbitmq.com/docs/configure#advanced-config-file)) * A [systemd unit file example](./rabbitmq-server.service.example) -Please [see rabbitmq.com](https://rabbitmq.com/documentation.html) for documentation guides. +Please [see rabbitmq.com](https://rabbitmq.com/docs/) for documentation guides. -## Classic Config File Format Example +## man Pages -Feeling nostalgic and looking for the [classic configuration file example](https://github.com/rabbitmq/rabbitmq-server/blob/v3.7.x/docs/rabbitmq.config.example)? -Now that's old school! Keep in mind that classic configuration file **should be considered deprecated**. -Prefer `rabbitmq.conf` (see [new style configuration format](https://www.rabbitmq.com/configure.html#config-file-formats)) -with an `advanced.config` to complement it as needed. +### Dependencies + * `man` + * [`tidy5`](https://binaries.html-tidy.org/) (a.k.a. 
`tidy-html5`) -## man Pages +On macOS, `tidy5` can be installed with Homebrew: + +``` shell +brew install tidy-html5 +``` + +and then be found under the `bin` directory of the Homebrew cellar: + +``` shell +/opt/homebrew/bin/tidy --help +``` ### Source Files -This directory contains man pages that are converted to HTML using `mandoc`: +This directory contains man pages in troff, the man page format. + +To inspect a local version, use `man`: + +``` shell +man docs/rabbitmq-diagnostics.8 + +man docs/rabbitmq-queues.8 +``` + +To convert all man pages to HTML using `mandoc`: + +``` shell +gmake web-manpages +``` - gmake web-manpages +The result then must be post-processed and copied to the website repository: -The result is then copied to the [website repository](https://github.com/rabbitmq/rabbitmq-website/tree/live/site/man) +``` shell +# cd deps/rabbit/docs +# +# clear all generated HTML and Markdown files +rm *.html *.md +# export tidy5 path +export TIDY5_BIN=/opt/homebrew/bin/tidy; +# run the post-processing script, in this case it updates the 3.13.x version of the docs +./postprocess_man_html.sh . /path/to/rabbitmq-website.git/versioned_docs/version-3.13/man/ +``` ### Contributions diff --git a/deps/rabbit/docs/advanced.config.example b/deps/rabbit/docs/advanced.config.example index dc5ab8fc0c51..1b7c30005a24 100644 --- a/deps/rabbit/docs/advanced.config.example +++ b/deps/rabbit/docs/advanced.config.example @@ -4,17 +4,17 @@ %% ---------------------------------------------------------------------------- %% Advanced Erlang Networking/Clustering Options. %% - %% See https://www.rabbitmq.com/clustering.html for details + %% See https://www.rabbitmq.com/docs/clustering for details %% ---------------------------------------------------------------------------- %% Sets the net_kernel tick time. %% Please see http://erlang.org/doc/man/kernel_app.html and - %% https://www.rabbitmq.com/nettick.html for further details. 
+ %% https://www.rabbitmq.com/docs/nettick for further details. %% %% {kernel, [{net_ticktime, 60}]}, %% ---------------------------------------------------------------------------- %% RabbitMQ Shovel Plugin %% - %% See https://www.rabbitmq.com/shovel.html for details + %% See https://www.rabbitmq.com/docs/shovel for details %% ---------------------------------------------------------------------------- {rabbitmq_shovel, @@ -87,7 +87,7 @@ %% The LDAP plugin can perform a variety of queries against your %% LDAP server to determine questions of authorisation. See - %% https://www.rabbitmq.com/ldap.html#authorisation for more + %% https://www.rabbitmq.com/docs/ldap#authorisation for more %% information. %% Set the query to use when determining vhost access diff --git a/deps/rabbit/docs/postprocess_man_html.sh b/deps/rabbit/docs/postprocess_man_html.sh new file mode 100755 index 000000000000..82c4e622ee09 --- /dev/null +++ b/deps/rabbit/docs/postprocess_man_html.sh @@ -0,0 +1,92 @@ +#!/bin/sh + +set -e + +srcdir="$1" +destdir="$2" + +tidy_bin=${TIDY5_BIN:-"tidy5"} + +for src in "$srcdir"/*.html; do + name=$(basename "$src" .html) + dest="$destdir/$name.md" + echo "src=$src" "dest=$dest" "name=$name" + + cat < "$dest" +--- +title: $name +--- +EOF + +$tidy_bin -i --wrap 0 \ + --asxhtml \ + --show-body-only yes \ + --drop-empty-elements yes \ + --drop-empty-paras yes \ + --enclose-block-text yes \ + --enclose-text yes "$src" \ + | \ + awk ' + / */, "", title); + + print level, title, "{#" id "}"; + next; + } + /dt id="/ { + id = $0; + sub(/.*(id|name)="/, "", id); + sub(/".*/, "", id); + + line = $0; + sub(/id="[^"]*"/, "", line); + print line; + + next; + } + /a class="permalink"/ { + title = $0; + sub(/ *]*>/, "", title); + sub(/<\/a>/, "", title); + sub(/]*>/, "", title); + gsub(/>\*\\*<", title); + + print level "#", title, "{#" id "}"; + next; + } + { + line = $0; + gsub(/{/, "\\{", line); + gsub(/
  • /, "
  • \n", line); + gsub(/<\/li>/, "\n
  • ", line); + gsub(/<\/ul>/, "\n", line); + gsub(/]*>/, "", line); + gsub(/<\/div>]/, "<\/div>\n]", line); + gsub(/style="[^"]*"/, "", line); + print line; + next; + } + ' > "$dest" +done \ No newline at end of file diff --git a/deps/rabbit/docs/rabbitmq-diagnostics.8 b/deps/rabbit/docs/rabbitmq-diagnostics.8 index c4862b488d49..56f2405bdc36 100644 --- a/deps/rabbit/docs/rabbitmq-diagnostics.8 +++ b/deps/rabbit/docs/rabbitmq-diagnostics.8 @@ -29,7 +29,7 @@ is a command line tool that provides commands used for diagnostics, monitoring and health checks of RabbitMQ nodes. See the -.Lk https://rabbitmq.com/documentation.html "RabbitMQ documentation guides" +.Lk https://www.rabbitmq.com/docs "RabbitMQ documentation guides" to learn more about RabbitMQ diagnostics, monitoring and health checks. .Nm @@ -40,7 +40,7 @@ health checks are available to be used interactively and by monitoring tools. By default if it is not possible to connect to and authenticate with the target node (for example if it is stopped), the operation will fail. To learn more, see the -.Lk https://rabbitmq.com/monitoring.html "RabbitMQ Monitoring guide" +.Lk https://www.rabbitmq.com/docs/monitoring "RabbitMQ Monitoring guide" . .\" ------------------------------------------------------------------ .Sh OPTIONS @@ -81,14 +81,14 @@ Default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. 
To learn more, see the -.Lk https://rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------ .Sh COMMANDS @@ -215,7 +215,7 @@ in Lists resource alarms, if any, in the cluster. .Pp See -.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide" +.Lk https://www.rabbitmq.com/docs/alarms "RabbitMQ Resource Alarms guide" to learn more. .Pp Example: @@ -236,7 +236,7 @@ Health check that fails (returns with a non-zero code) if there are alarms in effect on any of the cluster nodes. .Pp See -.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide" +.Lk https://www.rabbitmq.com/docs/alarms "RabbitMQ Resource Alarms guide" to learn more. .Pp Example: @@ -268,7 +268,7 @@ Health check that fails (returns with a non-zero code) if there are alarms in effect on the target node. .Pp See -.Lk https://rabbitmq.com/alarms.html "RabbitMQ Resource Alarms guide" +.Lk https://www.rabbitmq.com/docs/alarms "RabbitMQ Resource Alarms guide" to learn more. .Pp Example: @@ -285,7 +285,7 @@ The check only validates if a new TCP connection is accepted. It does not perform messaging protocol handshake or authenticate. .Pp See -.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" to learn more. .Pp Example: @@ -299,7 +299,7 @@ is not listening on the specified port (there is no listener that uses that port). .Pp See -.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" to learn more. .Pp Example: @@ -312,7 +312,7 @@ Health check that fails (returns with a non-zero code) if the target node does not have a listener for the specified protocol. 
.Pp See -.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" to learn more. .Pp Example: @@ -369,7 +369,7 @@ Example: Runs a peer discovery on the target node and prints the discovered nodes, if any. .Pp See -.Lk https://rabbitmq.com/cluster-formation.html "RabbitMQ Cluster Formation guide" +.Lk https://www.rabbitmq.com/docs/cluster-formation "RabbitMQ Cluster Formation guide" to learn more. .Pp Example: @@ -389,7 +389,7 @@ to authenticate CLI tools and peers. The value can be compared with the hash found in error messages of CLI tools. .Pp See -.Lk https://rabbitmq.com/clustering.html#erlang-cookie "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering#erlang-cookie "RabbitMQ Clustering guide" to learn more. .Pp Example: @@ -492,7 +492,7 @@ what protocols and ports the node is listening on for client, CLI tool and peer connections. .Pp See -.Lk https://rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" to learn more. .Pp Example: @@ -544,7 +544,7 @@ terabytes .El .Pp See -.Lk https://rabbitmq.com/memory-use.html "RabbitMQ Memory Use guide" +.Lk https://www.rabbitmq.com/docs/memory-use "RabbitMQ Memory Use guide" to learn more. .Pp Example: @@ -615,7 +615,7 @@ Note that RabbitMQ can be configured to only accept a subset of those versions, for example, SSLv3 is deactivated by default. .Pp See -.Lk https://rabbitmq.com/ssl.html "RabbitMQ TLS guide" +.Lk https://www.rabbitmq.com/docs/ssl "RabbitMQ TLS guide" to learn more. 
.Pp Example: @@ -697,10 +697,10 @@ See .Cm quorum_status in .Xr rabbitmq-queues 8 -.It Cm check_if_node_is_mirror_sync_critical +.It Cm check_if_cluster_has_classic_queue_mirroring_policy .Pp See -.Cm check_if_node_is_mirror_sync_critical +.Cm check_if_cluster_has_classic_queue_mirroring_policy in .Xr rabbitmq-queues 8 .It Cm check_if_node_is_quorum_critical @@ -723,4 +723,4 @@ in .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-echopid.8 b/deps/rabbit/docs/rabbitmq-echopid.8 index bca16ce67418..4985aee3ca20 100644 --- a/deps/rabbit/docs/rabbitmq-echopid.8 +++ b/deps/rabbit/docs/rabbitmq-echopid.8 @@ -67,4 +67,4 @@ The short-name form of the RabbitMQ node name. .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-env.conf.5 b/deps/rabbit/docs/rabbitmq-env.conf.5 index e11a47fe540c..bc198e697142 100644 --- a/deps/rabbit/docs/rabbitmq-env.conf.5 +++ b/deps/rabbit/docs/rabbitmq-env.conf.5 @@ -84,4 +84,4 @@ file RabbitMQ configuration file location is changed to "/data/services/rabbitmq .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-plugins.8 b/deps/rabbit/docs/rabbitmq-plugins.8 index 5e258ed05ecc..de6d24de2953 100644 --- a/deps/rabbit/docs/rabbitmq-plugins.8 +++ 
b/deps/rabbit/docs/rabbitmq-plugins.8 @@ -28,7 +28,7 @@ .Nm is a command line tool for managing RabbitMQ plugins. See the -.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide" +.Lk https://www.rabbitmq.com/docs/plugins "RabbitMQ Plugins guide" for an overview of RabbitMQ plugins and how they are used. .Nm @@ -65,7 +65,7 @@ can be specified to make resolve and update plugin state directly (without contacting the node). Such changes will only have an effect on next node start. To learn more, see the -.Lk https://www.rabbitmq.com/plugins.html "RabbitMQ Plugins guide" +.Lk https://www.rabbitmq.com/docs/plugins "RabbitMQ Plugins guide" . .\" ------------------------------------------------------------------ .Sh OPTIONS @@ -106,14 +106,14 @@ Default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. 
To learn more, see the -.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------ .Sh COMMANDS @@ -252,4 +252,4 @@ plugin and its dependencies and disables everything else: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-queues.8 b/deps/rabbit/docs/rabbitmq-queues.8 index 54c792cd421b..486b75e82f71 100644 --- a/deps/rabbit/docs/rabbitmq-queues.8 +++ b/deps/rabbit/docs/rabbitmq-queues.8 @@ -29,9 +29,9 @@ is a command line tool that provides commands used to manage queues, for example, grow, shrink or rebalance replicas of replicated queue types. See the -.Lk https://www.rabbitmq.com/quorum-queues.html "RabbitMQ quorum queues guide" +.Lk https://www.rabbitmq.com/docs/quorum-queues "RabbitMQ quorum queues guide" and the general -.Lk https://www.rabbitmq.com/queues.html "RabbitMQ queues guide" +.Lk https://www.rabbitmq.com/docs/queues "RabbitMQ queues guide" to learn more about queue types in RabbitMQ. . .\" ------------------------------------------------------------------ @@ -73,14 +73,14 @@ Default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. 
To learn more, see the -.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------ .Sh COMMANDS @@ -182,13 +182,14 @@ This command is currently only supported by quorum queues. Example: .Sp .Dl rabbitmq-queues peek --vhost Qo a-vhost Qc Qo a-queue Qc Qo 1 Qc -.It Cm check_if_node_is_mirror_sync_critical +.It Cm check_if_cluster_has_classic_queue_mirroring_policy .Pp -Health check that exits with a non-zero code if there are classic mirrored queues without online synchronised mirrors (queues that would potentially lose data if the target node is shut down). +Health check that exits with a non-zero code if there are policies in the cluster that enable classic queue mirroring. +Classic queue mirroring has been deprecated since 2021 and was completely removed in the RabbitMQ 4.0 development cycle. .Pp Example: .Sp -.Dl rabbitmq-queues check_if_node_is_mirror_sync_critical +.Dl rabbitmq-queues check_if_cluster_has_classic_queue_mirroring_policy .It Cm check_if_node_is_quorum_critical .Pp Health check that exits with a non-zero code if there are queues with minimum online quorum (queues that would lose their quorum if the target node is shut down). @@ -210,4 +211,4 @@ Example: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-server.8 b/deps/rabbit/docs/rabbitmq-server.8 index 9b37fb06f739..13a3574dd9ef 100644 --- a/deps/rabbit/docs/rabbitmq-server.8 +++ b/deps/rabbit/docs/rabbitmq-server.8 @@ -36,19 +36,19 @@ Defaults to .Pa /etc/rabbitmq/rabbitmq.conf . Node configuration file path. 
To learn more, see the -.Lk https://www.rabbitmq.com/configure.html "RabbitMQ Configuration guide" +.Lk https://www.rabbitmq.com/docs/configure "RabbitMQ Configuration guide" .It Ev RABBITMQ_MNESIA_BASE Defaults to .Pa /var/lib/rabbitmq/mnesia . Node data directory will be located (or created) in this directory. To learn more, see the -.Lk https://www.rabbitmq.com/relocate.html "RabbitMQ File and Directory guide" +.Lk https://www.rabbitmq.com/docs/relocate "RabbitMQ File and Directory guide" .It Ev RABBITMQ_LOG_BASE Defaults to .Pa /var/log/rabbitmq . Log files generated by the server will be placed in this directory. To learn more, see the -.Lk https://www.rabbitmq.com/logging.html "RabbitMQ Logging guide" +.Lk https://www.rabbitmq.com/docs/logging "RabbitMQ Logging guide" .It Ev RABBITMQ_NODENAME Defaults to .Qq rabbit@ . @@ -57,17 +57,17 @@ Can be used to run multiple nodes on the same host. Every node in a cluster must have a unique .Ev RABBITMQ_NODENAME To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Ev RABBITMQ_NODE_IP_ADDRESS By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available. This variable limits the node to one network interface or address family. To learn more, see the -.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" .It Ev RABBITMQ_NODE_PORT AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672. 
To learn more, see the -.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" .El .\" ------------------------------------------------------------------ .Sh OPTIONS @@ -96,4 +96,4 @@ For example, runs RabbitMQ AMQP server in the background: .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-server.service.example b/deps/rabbit/docs/rabbitmq-server.service.example index 69531b1ff60a..af3d04b41d84 100644 --- a/deps/rabbit/docs/rabbitmq-server.service.example +++ b/deps/rabbit/docs/rabbitmq-server.service.example @@ -27,7 +27,7 @@ TimeoutStartSec=3600 # You *may* wish to add the following to automatically restart RabbitMQ # in the event of a failure. systemd service restarts are not a # replacement for service monitoring. Please see -# https://www.rabbitmq.com/monitoring.html +# https://www.rabbitmq.com/docs/monitoring # # Restart=on-failure # RestartSec=10 diff --git a/deps/rabbit/docs/rabbitmq-service.8 b/deps/rabbit/docs/rabbitmq-service.8 index bf1b9eb3f2c6..73320d32dc91 100644 --- a/deps/rabbit/docs/rabbitmq-service.8 +++ b/deps/rabbit/docs/rabbitmq-service.8 @@ -87,17 +87,17 @@ Can be used to run multiple nodes on the same host. Every node in a cluster must have a unique .Ev RABBITMQ_NODENAME To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Ev RABBITMQ_NODE_IP_ADDRESS By default RabbitMQ will bind to all IPv6 and IPv4 interfaces available. This variable limits the node to one network interface or address family. 
To learn more, see the -.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" .It Ev RABBITMQ_NODE_PORT AMQP 0-9-1 and AMQP 1.0 port. Defaults to 5672. To learn more, see the -.Lk https://www.rabbitmq.com/networking.html "RabbitMQ Networking guide" +.Lk https://www.rabbitmq.com/docs/networking "RabbitMQ Networking guide" .It Ev ERLANG_SERVICE_MANAGER_PATH Defaults to .Pa C:\(rsProgram\ Files\(rserl{version}\(rserts-{version}\(rsbin @@ -150,4 +150,4 @@ is to discard the server output. .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-streams.8 b/deps/rabbit/docs/rabbitmq-streams.8 index 77f23b899966..b3ba4ea9a299 100644 --- a/deps/rabbit/docs/rabbitmq-streams.8 +++ b/deps/rabbit/docs/rabbitmq-streams.8 @@ -29,7 +29,7 @@ is a command line tool that provides commands used to manage streams, for example, add or delete stream replicas. See the -.Lk https://www.rabbitmq.com/streams.html "RabbitMQ streams overview". +.Lk https://www.rabbitmq.com/docs/streams "RabbitMQ streams overview". .\" ------------------------------------------------------------------ .Sh OPTIONS .\" ------------------------------------------------------------------ @@ -69,14 +69,14 @@ Default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. 
Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. To learn more, see the -.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------ .Sh COMMANDS @@ -447,4 +447,4 @@ for each consumer attached to the stream-1 stream and belonging to the stream-1 .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq-upgrade.8 b/deps/rabbit/docs/rabbitmq-upgrade.8 index b8fc573c3087..88f5af765176 100644 --- a/deps/rabbit/docs/rabbitmq-upgrade.8 +++ b/deps/rabbit/docs/rabbitmq-upgrade.8 @@ -28,7 +28,7 @@ .Nm is a command line tool that provides commands used during the upgrade of RabbitMQ nodes. See the -.Lk https://www.rabbitmq.com/upgrade.html "RabbitMQ upgrade guide" +.Lk https://www.rabbitmq.com/docs/upgrade "RabbitMQ upgrade guide" to learn more about RabbitMQ installation upgrades. . .\" ------------------------------------------------------------------ @@ -70,14 +70,14 @@ Default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. 
To learn more, see the -.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------ .Sh COMMANDS @@ -104,7 +104,7 @@ Puts the node in maintenance mode. Such nodes will not serve any client traffic or considered for hosting any queue leader replicas. .Pp To learn more, see the -.Lk https://www.rabbitmq.com/upgrade.html#maintenance-mode "RabbitMQ Upgrade guide" +.Lk https://www.rabbitmq.com/docs/upgrade#maintenance-mode "RabbitMQ Upgrade guide" .\" ------------------------------------ .It Cm revive .Pp @@ -112,7 +112,7 @@ Puts the node out of maintenance and into regular operating mode. Such nodes will again serve client traffic and considered for queue leader replica placement. .Pp To learn more, see the -.Lk https://www.rabbitmq.com/upgrade.html#maintenance-mode "RabbitMQ Upgrade guide" +.Lk https://www.rabbitmq.com/docs/upgrade#maintenance-mode "RabbitMQ Upgrade guide" .\" ------------------------------------------------------------------ .Sh SEE ALSO .\" ------------------------------------------------------------------ @@ -127,4 +127,4 @@ To learn more, see the .\" ------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/docs/rabbitmq.conf.example b/deps/rabbit/docs/rabbitmq.conf.example index 11a0c4e792e1..3cab148eaa8a 100644 --- a/deps/rabbit/docs/rabbitmq.conf.example +++ b/deps/rabbit/docs/rabbitmq.conf.example @@ -117,7 +117,7 @@ # ssl_options.secure_renegotiate = true # ## Limits what TLS versions the server enables for client TLS -## connections. See https://www.rabbitmq.com/ssl.html#tls-versions for details. +## connections. 
See https://www.rabbitmq.com/docs/ssl#tls-versions for details. ## ## Cutting edge TLS version which requires recent client runtime ## versions and has no cipher suite in common with earlier TLS versions. @@ -132,7 +132,7 @@ ## from connecting. ## If TLSv1.3 is enabled and cipher suites are overridden, TLSv1.3-specific ## cipher suites must also be explicitly enabled. -## See https://www.rabbitmq.com/ssl.html#cipher-suites and https://wiki.openssl.org/index.php/TLS1.3#Ciphersuites +## See https://www.rabbitmq.com/docs/ssl#cipher-suites and https://wiki.openssl.org/index.php/TLS1.3#Ciphersuites ## for details. # ## The example below uses TLSv1.3 cipher suites only @@ -232,6 +232,7 @@ ## # auth_mechanisms.1 = PLAIN # auth_mechanisms.2 = AMQPLAIN +# auth_mechanisms.3 = ANONYMOUS ## The rabbitmq-auth-mechanism-ssl plugin makes it possible to ## authenticate a user based on the client's x509 (TLS) certificate. @@ -269,7 +270,7 @@ ## Loading Definitions ## ==================== ## -## Relevant documentation: https://www.rabbitmq.com/definitions.html#import-on-boot +## Relevant documentation: https://www.rabbitmq.com/docs/definitions#import-on-boot ## ## To import definitions from a local file on node boot, set the ## load_definitions config key to a path of a previously exported @@ -381,7 +382,7 @@ ## Memory-based Flow Control threshold. ## -# vm_memory_high_watermark.relative = 0.4 +# vm_memory_high_watermark.relative = 0.6 ## Alternatively, we can set a limit (in bytes) of RAM used by the node. ## @@ -403,33 +404,11 @@ -## Fraction of the high watermark limit at which queues start to -## page message out to disc in order to free up memory. -## For example, when vm_memory_high_watermark is set to 0.4 and this value is set to 0.5, -## paging can begin as early as when 20% of total available RAM is used by the node. -## -## Values greater than 1.0 can be dangerous and should be used carefully. 
-## -## One alternative to this is to use durable queues and publish messages -## as persistent (delivery mode = 2). With this combination queues will -## move messages to disk much more rapidly. -## -## Another alternative is to configure queues to page all messages (both -## persistent and transient) to disk as quickly -## as possible, see https://www.rabbitmq.com/docs/lazy-queues. -## -# vm_memory_high_watermark_paging_ratio = 0.5 - ## Selects Erlang VM memory consumption calculation strategy. Can be `allocated`, `rss` or `legacy` (aliased as `erlang`), ## Introduced in 3.6.11. `rss` is the default as of 3.6.12. ## See https://github.com/rabbitmq/rabbitmq-server/issues/1223 and rabbitmq/rabbitmq-common#224 for background. # vm_memory_calculation_strategy = rss -## Interval (in milliseconds) at which we perform the check of the memory -## levels against the watermarks. -## -# memory_monitor_interval = 2500 - ## The total memory available can be calculated from the OS resources ## - default option - or provided as a configuration parameter. # total_memory_available_override_value = 2GB @@ -501,7 +480,7 @@ ## Make clustering happen *automatically* at startup. Only applied ## to nodes that have just been reset or started for the first time. ## -## Relevant doc guide: https://rabbitmq.com//cluster-formation.html +## Relevant doc guide: https://www.rabbitmq.com/docs/cluster-formation ## # cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config @@ -668,7 +647,7 @@ ## Inter-node communication port range. ## The parameters inet_dist_listen_min and inet_dist_listen_max ## can be configured in the classic config format only. -## Related doc guide: https://www.rabbitmq.com/networking.html#epmd-inet-dist-port-range. +## Related doc guide: https://www.rabbitmq.com/docs/networking#epmd-inet-dist-port-range. 
## ---------------------------------------------------------------------------- @@ -905,14 +884,8 @@ ## # mqtt.proxy_protocol = false -## Set the default user name and password used for anonymous connections (when client -## provides no credentials). Anonymous connections are highly discouraged! -## -# mqtt.default_user = guest -# mqtt.default_pass = guest - ## Enable anonymous connections. If this is set to false, clients MUST provide -## credentials in order to connect. See also the mqtt.default_user/mqtt.default_pass +## credentials in order to connect. See also the anonymous_login_user/anonymous_login_pass ## keys. Anonymous connections are highly discouraged! ## # mqtt.allow_anonymous = true @@ -952,25 +925,6 @@ -## ---------------------------------------------------------------------------- -## RabbitMQ AMQP 1.0 Support -## -## See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md. -## ---------------------------------------------------------------------------- - -# ======================================= -# AMQP 1.0 section -# ======================================= - - -## Connections that are not authenticated with SASL will connect as this -## account. See the README for more information. -## -## Please note that setting this will allow clients to connect without -## authenticating! -## -# amqp1_0.default_user = guest - ## Logging settings. ## ## See https://www.rabbitmq.com/docs/logging for details. @@ -1013,6 +967,35 @@ # log.exchange.level = info +## File size-based log rotation + +## Note that `log.file.rotation.size` cannot be combined with `log.file.rotation.date`, +## the two options are mutually exclusive. 
+ +## rotate when the file reaches 10 MiB +# log.file.rotation.size = 10485760 + +## keep up to 5 archived log files in addition to the current one +# log.file.rotation.count = 5 + +## compress the archived logs +# log.file.rotation.compress = true + + +## Date-based log rotation + +## Note that `log.file.rotation.date` cannot be combined with `log.file.rotation.size`, +## the two options are mutually exclusive. + +## rotate every night at midnight +# log.file.rotation.date = $D0 + +## keep up to 5 archived log files in addition to the current one +# log.file.rotation.count = 5 + +## compress the archived logs +# log.file.rotation.compress = true + ## ---------------------------------------------------------------------------- ## RabbitMQ LDAP Plugin diff --git a/deps/rabbit/docs/rabbitmqctl.8 b/deps/rabbit/docs/rabbitmqctl.8 index ca5c5f03115e..063f92c1690b 100644 --- a/deps/rabbit/docs/rabbitmqctl.8 +++ b/deps/rabbit/docs/rabbitmqctl.8 @@ -46,7 +46,7 @@ could not authenticate to the target node successfully. To learn more, see the -.Lk https://rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .\" ------------------------------------------------------------------------------------------------ .Sh OPTIONS .\" ------------------------------------------------------------------------------------------------ @@ -93,14 +93,14 @@ The default is .It Fl l , Fl -longnames Must be specified when the cluster is configured to use long (FQDN) node names. To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide" +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide" .It Fl -erlang-cookie Ar cookie Shared secret to use to authenticate to the target node. Prefer using a local file or the .Ev RABBITMQ_ERLANG_COOKIE environment variable instead of specifying this option on the command line. 
To learn more, see the -.Lk https://www.rabbitmq.com/cli.html "RabbitMQ CLI Tools guide" +.Lk https://www.rabbitmq.com/docs/cli "RabbitMQ CLI Tools guide" .El .\" ------------------------------------------------------------------------------------------------ .Sh COMMANDS @@ -210,7 +210,7 @@ Stops the Erlang node on which RabbitMQ is running. To restart the node follow the instructions for .Qq Running the Server in the -.Lk https://rabbitmq.com/download.html installation guide . +.Lk https://www.rabbitmq.com/docs/download installation guide . .Pp If a .Ar pid_file @@ -461,7 +461,7 @@ is part of, as a ram node: .Dl rabbitmqctl join_cluster hare@elena --ram .Pp To learn more, see the -.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide". +.Lk https://www.rabbitmq.com/docs/clustering "RabbitMQ Clustering guide". .\" ------------------------------------------------------------------ .\" ## User management .\" ------------------------------------------------------------------ @@ -1285,11 +1285,11 @@ queue, including stack, heap, and internal structures. .It Cm mirror_pids If the queue is mirrored, this lists the IDs of the mirrors (follower replicas). To learn more, see the -.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide" +.Lk https://www.rabbitmq.com/docs/3.13/ha "RabbitMQ Mirroring guide" .It Cm synchronised_mirror_pids If the queue is mirrored, this gives the IDs of the mirrors (follower replicas) which are in sync with the leader replica. To learn more, see the -.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide" +.Lk https://www.rabbitmq.com/docs/3.13/ha "RabbitMQ Mirroring guide" .It Cm state The state of the queue. Normally @@ -2457,4 +2457,4 @@ Reset the stats database for all nodes in the cluster. 
.\" ------------------------------------------------------------------------------------------------ .Sh AUTHOR .\" ------------------------------------------------------------------------------------------------ -.An The RabbitMQ Team Aq Mt rabbitmq-core@groups.vmware.com +.An The RabbitMQ Team Aq Mt contact-tanzu-data.pdl@broadcom.com diff --git a/deps/rabbit/include/khepri.hrl b/deps/rabbit/include/khepri.hrl new file mode 100644 index 000000000000..31c5b03c9d02 --- /dev/null +++ b/deps/rabbit/include/khepri.hrl @@ -0,0 +1,9 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-define(KHEPRI_ROOT_PATH, [rabbitmq]). diff --git a/deps/rabbit/include/rabbit_amqp.hrl b/deps/rabbit/include/rabbit_amqp.hrl index 84e98d5d565d..185e80fe0c64 100644 --- a/deps/rabbit/include/rabbit_amqp.hrl +++ b/deps/rabbit/include/rabbit_amqp.hrl @@ -37,6 +37,7 @@ [pid, frame_max, timeout, + container_id, vhost, user, node diff --git a/deps/rabbit/priv/schema/rabbit.schema b/deps/rabbit/priv/schema/rabbit.schema index 07624a055f85..e930ddbf0fcd 100644 --- a/deps/rabbit/priv/schema/rabbit.schema +++ b/deps/rabbit/priv/schema/rabbit.schema @@ -122,13 +122,13 @@ end}. %% %% Original key for definition loading from a JSON file or directory of files. See -%% https://www.rabbitmq.com/management.html#load-definitions +%% https://www.rabbitmq.com/docs/management#load-definitions {mapping, "load_definitions", "rabbit.load_definitions", [{datatype, string}, {validators, ["file_accessible"]}]}. %% Newer syntax for definition loading from a JSON file or directory of files. 
See -%% https://www.rabbitmq.com/management.html#load-definitions +%% https://www.rabbitmq.com/docs/management#load-definitions {mapping, "definitions.local.path", "rabbit.definitions.local_path", [{datatype, string}, {validators, ["file_accessible"]}]}. @@ -161,7 +161,7 @@ end}. {datatype, {enum, [sha, sha224, sha256, sha384, sha512]}}]}. %% Load definitions from a remote URL over HTTPS. See -%% https://www.rabbitmq.com/management.html#load-definitions +%% https://www.rabbitmq.com/docs/management#load-definitions {mapping, "definitions.https.url", "rabbit.definitions.url", [{datatype, string}]}. @@ -229,7 +229,12 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "definitions.tls.password", "rabbit.definitions.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. + +{translation, "rabbit.definitions.ssl_options.password", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_string("definitions.tls.password", Conf) +end}. {mapping, "definitions.tls.secure_renegotiate", "rabbit.definitions.ssl_options.secure_renegotiate", [{datatype, {enum, [true, false]}}]}. @@ -290,7 +295,7 @@ fun(Conf) -> end}. %% TLS options. -%% See https://www.rabbitmq.com/ssl.html for full documentation. +%% See https://www.rabbitmq.com/docs/ssl for full documentation. %% %% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"}, %% {certfile, "/path/to/server/cert.pem"}, @@ -395,7 +400,12 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "ssl_options.password", "rabbit.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. + +{translation, "rabbit.ssl_options.password", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_binary("ssl_options.password", Conf) +end}. {mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity", [{datatype, string}]}. @@ -434,22 +444,22 @@ end}. 
%% =========================================================================== %% Choose the available SASL mechanism(s) to expose. -%% The two default (built in) mechanisms are 'PLAIN' and -%% 'AMQPLAIN'. Additional mechanisms can be added via -%% plugins. +%% The three default (built in) mechanisms are 'PLAIN', 'AMQPLAIN' and 'ANONYMOUS'. +%% Additional mechanisms can be added via plugins. %% -%% See https://www.rabbitmq.com/authentication.html for more details. +%% See https://www.rabbitmq.com/docs/access-control for more details. %% -%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']}, +%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}, {mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [ {datatype, atom}]}. {translation, "rabbit.auth_mechanisms", -fun(Conf) -> - Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf), - [ V || {_, V} <- Settings ] -end}. + fun(Conf) -> + Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf), + Sorted = lists:keysort(1, Settings), + [V || {_, V} <- Sorted] + end}. %% Select an authentication backend to use. RabbitMQ provides an @@ -629,7 +639,7 @@ end}. %% On first start RabbitMQ will create a vhost and a user. These %% config items control what gets created. See -%% https://www.rabbitmq.com/access-control.html for further +%% https://www.rabbitmq.com/docs/access-control for further %% information about vhosts and access control. %% %% {default_vhost, <<"/">>}, @@ -656,12 +666,12 @@ fun(Conf) -> end}. {mapping, "default_pass", "rabbit.default_pass", [ - {datatype, string} + {datatype, [tagged_binary, binary]} ]}. {translation, "rabbit.default_pass", fun(Conf) -> - list_to_binary(cuttlefish:conf_get("default_pass", Conf)) + rabbit_cuttlefish:optionally_tagged_binary("default_pass", Conf) end}. {mapping, "default_permissions.configure", "rabbit.default_permissions", [ @@ -696,7 +706,7 @@ end}. ]}. 
{mapping, "default_users.$name.password", "rabbit.default_users", [ - {datatype, string} + {datatype, [tagged_binary, binary]} ]}. {mapping, "default_users.$name.configure", "rabbit.default_users", [ @@ -725,6 +735,22 @@ end}. end end}. +%% Connections that skip SASL layer or use SASL mechanism ANONYMOUS will use this identity. +%% Setting this to a username will allow (anonymous) clients to connect and act as this +%% given user. For production environments, set this value to 'none'. +{mapping, "anonymous_login_user", "rabbit.anonymous_login_user", + [{datatype, [{enum, [none]}, binary]}]}. + +{mapping, "anonymous_login_pass", "rabbit.anonymous_login_pass", [ + {datatype, [tagged_binary, binary]} +]}. + +{translation, "rabbit.anonymous_login_pass", +fun(Conf) -> + rabbit_cuttlefish:optionally_tagged_binary("anonymous_login_pass", Conf) +end}. + + %% %% Default Policies %% ==================== @@ -845,7 +871,7 @@ end}. %% Tags for default user %% %% For more details about tags, see the documentation for the -%% Management Plugin at https://www.rabbitmq.com/management.html. +%% Management Plugin at https://www.rabbitmq.com/docs/management. %% %% {default_user_tags, [administrator]}, @@ -922,6 +948,20 @@ end}. end }. +%% Sets the maximum number of AMQP 1.0 sessions that can be simultaneously +%% active on an AMQP 1.0 connection. +%% +%% {session_max_per_connection, 1}, +{mapping, "session_max_per_connection", "rabbit.session_max_per_connection", + [{datatype, integer}, {validators, ["positive_16_bit_unsigned_integer"]}]}. + +%% Sets the maximum number of AMQP 1.0 links that can be simultaneously +%% active on an AMQP 1.0 session. +%% +%% {link_max_per_session, 10}, +{mapping, "link_max_per_session", "rabbit.link_max_per_session", + [{datatype, integer}, {validators, ["positive_32_bit_unsigned_integer"]}]}. + %% Set the max permissible number of client connections per node. %% `infinity` means "no limit". %% @@ -1073,11 +1113,11 @@ end}. 
%% Resource Limits & Flow Control %% ============================== %% -%% See https://www.rabbitmq.com/memory.html for full details. +%% See https://www.rabbitmq.com/docs/memory for full details. %% Memory-based Flow Control threshold. %% -%% {vm_memory_high_watermark, 0.4}, +%% {vm_memory_high_watermark, 0.6}, %% Alternatively, we can set a limit (in bytes) of RAM used by the node. %% @@ -1123,6 +1163,8 @@ fun(Conf) -> end end}. +%% DEPRECATED. Not used since RabbitMQ 4.0 +%% %% Fraction of the high watermark limit at which queues start to %% page message out to disc in order to free up memory. %% @@ -1134,6 +1176,8 @@ end}. "rabbit.vm_memory_high_watermark_paging_ratio", [{datatype, float}, {validators, ["less_than_1"]}]}. +%% DEPRECATED. Not used since RabbitMQ 4.0 +%% %% Interval (in milliseconds) at which we perform the check of the memory %% levels against the watermarks. %% @@ -1203,7 +1247,7 @@ end}. %% %% How to respond to cluster partitions. -%% See https://www.rabbitmq.com/partitions.html for further details. +%% See https://www.rabbitmq.com/docs/partitions for further details. %% %% {cluster_partition_handling, ignore}, @@ -1326,28 +1370,6 @@ fun(Conf) -> end end}. -%% Cluster formation: Randomized startup delay -%% -%% DEPRECATED: This is a no-op. Old configs are still allowed, but a warning will be printed. - -{mapping, "cluster_formation.randomized_startup_delay_range.min", "rabbit.cluster_formation.randomized_startup_delay_range", []}. -{mapping, "cluster_formation.randomized_startup_delay_range.max", "rabbit.cluster_formation.randomized_startup_delay_range", []}. 
- -{translation, "rabbit.cluster_formation.randomized_startup_delay_range", -fun(Conf) -> - Min = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.min", Conf, undefined), - Max = cuttlefish:conf_get("cluster_formation.randomized_startup_delay_range.max", Conf, undefined), - - case {Min, Max} of - {undefined, undefined} -> - ok; - _ -> - cuttlefish:warn("cluster_formation.randomized_startup_delay_range.min and " - "cluster_formation.randomized_startup_delay_range.max are deprecated") - end, - cuttlefish:unset() -end}. - %% Cluster formation: lock acquisition retries as passed to https://erlang.org/doc/man/global.html#set_lock-3 %% %% Currently used in classic, k8s, and aws peer discovery backends. @@ -1398,7 +1420,7 @@ end}. %% %% Make clustering happen *automatically* at startup - only applied %% to nodes that have just been reset or started for the first time. -%% See https://www.rabbitmq.com/clustering.html#auto-config for +%% See https://www.rabbitmq.com/docs/clustering#auto-config for %% further details. %% %% {cluster_nodes, {['rabbit@my.host.com'], disc}}, @@ -1547,7 +1569,7 @@ end}. ]}. %% Size in bytes below which to embed messages in the queue index. See -%% https://www.rabbitmq.com/persistence-conf.html +%% https://www.rabbitmq.com/docs/persistence-conf %% %% {queue_index_embed_msgs_below, 4096} @@ -2421,7 +2443,7 @@ end}. {mapping, "raft.segment_max_entries", "ra.segment_max_entries", [ {datatype, integer}, - {validators, ["non_zero_positive_integer", "non_zero_positive_16_bit_integer"]} + {validators, ["non_zero_positive_integer", "positive_16_bit_unsigned_integer"]} ]}. {translation, "ra.segment_max_entries", @@ -2550,6 +2572,7 @@ end}. %% Backing queue version %% +%% DEPRECATED. Not used since RabbitMQ 4.0 {mapping, "classic_queue.default_version", "rabbit.classic_queue_default_version", [ {datatype, integer}, {validators, ["non_zero_positive_integer"]} @@ -2639,32 +2662,6 @@ end}. end }. 
-% =============================== -% AMQP 1.0 -% =============================== - -%% Connections that skip SASL layer or use SASL mechanism ANONYMOUS will connect as this account. -%% Setting this to a username will allow clients to connect without authenticating. -%% For production environments, set this value to 'none'. -{mapping, "amqp1_0.default_user", "rabbit.amqp1_0_default_user", - [{datatype, [{enum, [none]}, string]}]}. - -{mapping, "amqp1_0.default_vhost", "rabbit.amqp1_0_default_vhost", - [{datatype, string}]}. - -{translation, "rabbit.amqp1_0_default_user", -fun(Conf) -> - case cuttlefish:conf_get("amqp1_0.default_user", Conf) of - none -> none; - User -> list_to_binary(User) - end -end}. - -{translation , "rabbit.amqp1_0_default_vhost", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("amqp1_0.default_vhost", Conf)) -end}. - {mapping, "stream.replication.port_range.min", "osiris.port_range", [ {datatype, [integer]}, {validators, ["non_zero_positive_integer"]} @@ -2753,10 +2750,15 @@ fun(Int) when is_integer(Int) -> Int >= 1 end}. -{validator, "non_zero_positive_16_bit_integer", "number should be between 1 and 65535", -fun(Int) when is_integer(Int) -> - (Int >= 1) and (Int =< 65535) -end}. +{validator, "positive_16_bit_unsigned_integer", "number should be between 1 and 65535", + fun(Int) when is_integer(Int) -> + (Int >= 1) and (Int =< 16#ff_ff) + end}. + +{validator, "positive_32_bit_unsigned_integer", "number should be between 1 and 4294967295", + fun(Int) when is_integer(Int) -> + (Int >= 1) and (Int =< 16#ff_ff_ff_ff) + end}. 
{validator, "valid_regex", "string must be a valid regular expression", fun("") -> false; diff --git a/deps/rabbit/src/mc.erl b/deps/rabbit/src/mc.erl index 74704c25c2b6..465c7054f089 100644 --- a/deps/rabbit/src/mc.erl +++ b/deps/rabbit/src/mc.erl @@ -383,6 +383,7 @@ record_death(Reason, SourceQueue, routing_keys = RKeys, count = 1, anns = DeathAnns}, + ReasonBin = atom_to_binary(Reason), Anns = case Anns0 of #{deaths := Deaths0} -> Deaths = case Deaths0 of @@ -406,7 +407,7 @@ record_death(Reason, SourceQueue, [{Key, NewDeath} | Deaths0] end end, - Anns0#{<<"x-last-death-reason">> := atom_to_binary(Reason), + Anns0#{<<"x-last-death-reason">> := ReasonBin, <<"x-last-death-queue">> := SourceQueue, <<"x-last-death-exchange">> := Exchange, deaths := Deaths}; @@ -419,7 +420,6 @@ record_death(Reason, SourceQueue, _ -> [{Key, NewDeath}] end, - ReasonBin = atom_to_binary(Reason), Anns0#{<<"x-first-death-reason">> => ReasonBin, <<"x-first-death-queue">> => SourceQueue, <<"x-first-death-exchange">> => Exchange, diff --git a/deps/rabbit/src/mc_amqp.erl b/deps/rabbit/src/mc_amqp.erl index 3a90e2879842..be63597c3f96 100644 --- a/deps/rabbit/src/mc_amqp.erl +++ b/deps/rabbit/src/mc_amqp.erl @@ -222,14 +222,7 @@ get_property(priority, Msg) -> -spec protocol_state(state(), mc:annotations()) -> iolist(). 
protocol_state(Msg0 = #msg_body_decoded{header = Header0, message_annotations = MA0}, Anns) -> - FirstAcquirer = first_acquirer(Anns), - Header = case Header0 of - undefined -> - #'v1_0.header'{durable = true, - first_acquirer = FirstAcquirer}; - #'v1_0.header'{} -> - Header0#'v1_0.header'{first_acquirer = FirstAcquirer} - end, + Header = update_header_from_anns(Header0, Anns), MA = protocol_state_message_annotations(MA0, Anns), Msg = Msg0#msg_body_decoded{header = Header, message_annotations = MA}, @@ -238,14 +231,7 @@ protocol_state(Msg0 = #msg_body_decoded{header = Header0, protocol_state(#msg_body_encoded{header = Header0, message_annotations = MA0, bare_and_footer = BareAndFooter}, Anns) -> - FirstAcquirer = first_acquirer(Anns), - Header = case Header0 of - undefined -> - #'v1_0.header'{durable = true, - first_acquirer = FirstAcquirer}; - #'v1_0.header'{} -> - Header0#'v1_0.header'{first_acquirer = FirstAcquirer} - end, + Header = update_header_from_anns(Header0, Anns), MA = protocol_state_message_annotations(MA0, Anns), Sections = to_sections(Header, MA, []), [encode(Sections), BareAndFooter]; @@ -269,10 +255,9 @@ protocol_state(#v1{message_annotations = MA0, _ -> undefined end, - Header = #'v1_0.header'{durable = Durable, - priority = Priority, - ttl = Ttl, - first_acquirer = first_acquirer(Anns)}, + Header = update_header_from_anns(#'v1_0.header'{durable = Durable, + priority = Priority, + ttl = Ttl}, Anns), MA = protocol_state_message_annotations(MA0, Anns), Sections = to_sections(Header, MA, []), [encode(Sections), BareAndFooter]. @@ -573,13 +558,22 @@ msg_body_encoded([{{pos, Pos}, {body, Code}}], BarePos, Msg) binary_part_bare_and_footer(Payload, Start) -> binary_part(Payload, Start, byte_size(Payload) - Start). --spec first_acquirer(mc:annotations()) -> boolean(). 
-first_acquirer(Anns) -> +update_header_from_anns(undefined, Anns) -> + update_header_from_anns(#'v1_0.header'{durable = true}, Anns); +update_header_from_anns(Header, Anns) -> + DeliveryCount = case Anns of + #{delivery_count := C} -> C; + _ -> 0 + end, Redelivered = case Anns of #{redelivered := R} -> R; _ -> false end, - not Redelivered. + FirstAcq = not Redelivered andalso + DeliveryCount =:= 0 andalso + not is_map_key(deaths, Anns), + Header#'v1_0.header'{first_acquirer = FirstAcq, + delivery_count = {uint, DeliveryCount}}. encode_deaths(Deaths) -> lists:map( diff --git a/deps/rabbit/src/mc_amqpl.erl b/deps/rabbit/src/mc_amqpl.erl index f1b023d3fe79..8de27294723a 100644 --- a/deps/rabbit/src/mc_amqpl.erl +++ b/deps/rabbit/src/mc_amqpl.erl @@ -176,7 +176,7 @@ convert_from(mc_amqp, Sections, Env) -> {Headers2, CorrId091} = message_id(CorrId, <<"x-correlation-id">>, Headers1), Headers = case Env of - #{message_containers_store_amqp_v1 := false} -> + #{'rabbitmq_4.0.0' := false} -> Headers3 = case AProp of undefined -> Headers2; diff --git a/deps/rabbit/src/mc_compat.erl b/deps/rabbit/src/mc_compat.erl index 702f8c0f64ca..056905239d96 100644 --- a/deps/rabbit/src/mc_compat.erl +++ b/deps/rabbit/src/mc_compat.erl @@ -54,7 +54,9 @@ get_annotation(?ANN_ROUTING_KEYS, #basic_message{routing_keys = RKeys}) -> get_annotation(?ANN_EXCHANGE, #basic_message{exchange_name = Ex}) -> Ex#resource.name; get_annotation(id, #basic_message{id = Id}) -> - Id. + Id; +get_annotation(_Key, #basic_message{}) -> + undefined. set_annotation(id, Value, #basic_message{} = Msg) -> Msg#basic_message{id = Value}; @@ -92,7 +94,11 @@ set_annotation(?ANN_TIMESTAMP, Millis, #basic_message{content = #content{properties = B} = C0} = Msg) -> C = C0#content{properties = B#'P_basic'{timestamp = Millis div 1000}, properties_bin = none}, - Msg#basic_message{content = C}. 
+ Msg#basic_message{content = C}; +set_annotation(delivery_count, _Value, #basic_message{} = Msg) -> + %% Ignore AMQP 1.0 specific delivery-count. + %% https://github.com/rabbitmq/rabbitmq-server/issues/12398 + Msg. is_persistent(#basic_message{content = Content}) -> get_property(durable, Content). diff --git a/deps/rabbit/src/rabbit.erl b/deps/rabbit/src/rabbit.erl index de3153f42f85..f6f6fa364278 100644 --- a/deps/rabbit/src/rabbit.erl +++ b/deps/rabbit/src/rabbit.erl @@ -26,17 +26,21 @@ -export([product_info/0, product_name/0, product_version/0, + product_license_line/0, base_product_name/0, base_product_version/0, motd_file/0, - motd/0]). + motd/0, + pg_local_scope/1]). %% For CLI, testing and mgmt-agent. -export([set_log_level/1, log_locations/0, config_files/0]). -export([is_booted/1, is_booted/0, is_booting/1, is_booting/0]). %%--------------------------------------------------------------------------- %% Boot steps. --export([maybe_insert_default_data/0, boot_delegate/0, recover/0, pg_local/0]). +-export([maybe_insert_default_data/0, boot_delegate/0, recover/0, + pg_local_amqp_session/0, + pg_local_amqp_connection/0]). %% for tests -export([validate_msg_store_io_batch_size_and_credit_disc_bound/2]). @@ -263,9 +267,15 @@ {mfa, {rabbit_vhosts, boot, []}}, {requires, notify_cluster}]}). --rabbit_boot_step({pg_local, - [{description, "local-only pg scope"}, - {mfa, {rabbit, pg_local, []}}, +-rabbit_boot_step({pg_local_amqp_session, + [{description, "local-only pg scope for AMQP sessions"}, + {mfa, {rabbit, pg_local_amqp_session, []}}, + {requires, kernel_ready}, + {enables, core_initialized}]}). + +-rabbit_boot_step({pg_local_amqp_connection, + [{description, "local-only pg scope for AMQP connections"}, + {mfa, {rabbit, pg_local_amqp_connection, []}}, {requires, kernel_ready}, {enables, core_initialized}]}). 
@@ -911,14 +921,14 @@ start(normal, []) -> [product_name(), product_version(), rabbit_misc:otp_release(), emu_flavor(), BaseName, BaseVersion, - ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE], + ?COPYRIGHT_MESSAGE, product_license_line()], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}); _ -> ?LOG_INFO( "~n Starting ~ts ~ts on Erlang ~ts [~ts]~n ~ts~n ~ts", [product_name(), product_version(), rabbit_misc:otp_release(), emu_flavor(), - ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE], + ?COPYRIGHT_MESSAGE, product_license_line()], #{domain => ?RMQLOG_DOMAIN_PRELAUNCH}) end, log_motd(), @@ -1115,11 +1125,18 @@ boot_delegate() -> -spec recover() -> 'ok'. recover() -> - ok = rabbit_vhost:recover(), - ok. + ok = rabbit_vhost:recover(). -pg_local() -> - rabbit_sup:start_child(pg, [node()]). +pg_local_amqp_session() -> + PgScope = pg_local_scope(amqp_session), + rabbit_sup:start_child(pg_amqp_session, pg, [PgScope]). + +pg_local_amqp_connection() -> + PgScope = pg_local_scope(amqp_connection), + rabbit_sup:start_child(pg_amqp_connection, pg, [PgScope]). + +pg_local_scope(Prefix) -> + list_to_atom(io_lib:format("~s_~s", [Prefix, node()])). -spec maybe_insert_default_data() -> 'ok'. @@ -1322,7 +1339,7 @@ print_banner() -> "~n Logs: ~ts" ++ LogFmt ++ "~n" "~n Config file(s): ~ts" ++ CfgFmt ++ "~n" "~n Starting broker...", - [Product, Version, ?COPYRIGHT_MESSAGE, ?INFORMATION_MESSAGE] ++ + [Product, Version, ?COPYRIGHT_MESSAGE, product_license_line()] ++ [rabbit_misc:otp_release(), emu_flavor(), crypto_version()] ++ MOTDArgs ++ LogLocations ++ @@ -1506,6 +1523,10 @@ product_name() -> #{product_base_name := BaseName} -> BaseName end. +-spec product_license_line() -> string(). +product_license_line() -> + application:get_env(rabbit, license_line, ?INFORMATION_MESSAGE). + -spec product_version() -> string(). 
product_version() -> diff --git a/deps/rabbit/src/rabbit_amqp1_0.erl b/deps/rabbit/src/rabbit_amqp1_0.erl index cba97ec2a58f..c63f471919c7 100644 --- a/deps/rabbit/src/rabbit_amqp1_0.erl +++ b/deps/rabbit/src/rabbit_amqp1_0.erl @@ -6,8 +6,6 @@ %% -module(rabbit_amqp1_0). --define(PROCESS_GROUP_NAME, rabbit_amqp10_connections). - -export([list_local/0, register_connection/1]). @@ -36,8 +34,11 @@ emit_connection_info_local(Items, Ref, AggregatorPid) -> -spec list_local() -> [pid()]. list_local() -> - pg:get_local_members(node(), ?PROCESS_GROUP_NAME). + pg:which_groups(pg_scope()). -spec register_connection(pid()) -> ok. register_connection(Pid) -> - ok = pg:join(node(), ?PROCESS_GROUP_NAME, Pid). + ok = pg:join(pg_scope(), Pid, Pid). + +pg_scope() -> + rabbit:pg_local_scope(amqp_connection). diff --git a/deps/rabbit/src/rabbit_amqp_management.erl b/deps/rabbit/src/rabbit_amqp_management.erl index 7facfe67cf71..e4555e806033 100644 --- a/deps/rabbit/src/rabbit_amqp_management.erl +++ b/deps/rabbit/src/rabbit_amqp_management.erl @@ -433,12 +433,13 @@ encode_queue(Q, NumMsgs, NumConsumers) -> Replicas =:= undefined -> KVList0 end, - KVList = if is_atom(Leader) -> - [{{utf8, <<"leader">>}, - {utf8, atom_to_binary(Leader)} - } | KVList1]; - Leader =:= undefined -> - KVList1 + KVList = case Leader of + undefined -> + KVList1; + _ -> + [{{utf8, <<"leader">>}, + {utf8, atom_to_binary(Leader)} + } | KVList1] end, {map, KVList}. diff --git a/deps/rabbit/src/rabbit_amqp_reader.erl b/deps/rabbit/src/rabbit_amqp_reader.erl index 9b81a1d322da..0ad228a4e653 100644 --- a/deps/rabbit/src/rabbit_amqp_reader.erl +++ b/deps/rabbit/src/rabbit_amqp_reader.erl @@ -8,9 +8,10 @@ -module(rabbit_amqp_reader). -include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("amqp10_common/include/amqp10_types.hrl"). -include("rabbit_amqp.hrl"). --export([init/2, +-export([init/1, info/2, mainloop/2]). 
@@ -34,6 +35,7 @@ -record(v1_connection, {name :: binary(), + container_id :: none | binary(), vhost :: none | rabbit_types:vhost(), %% server host host :: inet:ip_address() | inet:hostname(), @@ -44,12 +46,12 @@ %% client port peer_port :: inet:port_number(), connected_at :: integer(), - user :: rabbit_types:option(rabbit_types:user()), + user :: unauthenticated | rabbit_types:user(), timeout :: non_neg_integer(), incoming_max_frame_size :: pos_integer(), outgoing_max_frame_size :: unlimited | pos_integer(), channel_max :: non_neg_integer(), - auth_mechanism :: none | anonymous | {binary(), module()}, + auth_mechanism :: sasl_init_unprocessed | {binary(), module()}, auth_state :: term(), properties :: undefined | {map, list(tuple())} }). @@ -64,7 +66,9 @@ sock :: rabbit_net:socket(), proxy_socket :: undefined | {rabbit_proxy_socket, any(), any()}, connection :: #v1_connection{}, - connection_state :: pre_init | starting | waiting_amqp0100 | securing | running | closing | closed, + connection_state :: received_amqp3100 | waiting_sasl_init | securing | + waiting_amqp0100 | waiting_open | running | + closing | closed, callback :: handshake | {frame_header, protocol()} | {frame_body, protocol(), DataOffset :: pos_integer(), channel_number()}, @@ -82,37 +86,40 @@ %%-------------------------------------------------------------------------- unpack_from_0_9_1( - {Sock,RecvLen, PendingRecv, SupPid, Buf, BufLen, ProxySocket, + {Sock, PendingRecv, SupPid, Buf, BufLen, ProxySocket, ConnectionName, Host, PeerHost, Port, PeerPort, ConnectedAt}, - Parent, HandshakeTimeout) -> - #v1{parent = Parent, - sock = Sock, - callback = handshake, - recv_len = RecvLen, - pending_recv = PendingRecv, - connection_state = pre_init, - heartbeater = none, - helper_sup = SupPid, - buf = Buf, - buf_len = BufLen, - proxy_socket = ProxySocket, - tracked_channels = maps:new(), - writer = none, + Parent) -> + logger:update_process_metadata(#{connection => ConnectionName}), + #v1{parent = Parent, + 
sock = Sock, + callback = {frame_header, sasl}, + recv_len = 8, + pending_recv = PendingRecv, + heartbeater = none, + helper_sup = SupPid, + buf = Buf, + buf_len = BufLen, + proxy_socket = ProxySocket, + tracked_channels = maps:new(), + writer = none, + connection_state = received_amqp3100, connection = #v1_connection{ name = ConnectionName, + container_id = none, vhost = none, host = Host, peer_host = PeerHost, port = Port, peer_port = PeerPort, connected_at = ConnectedAt, - user = none, - timeout = HandshakeTimeout, + user = unauthenticated, + timeout = ?NORMAL_TIMEOUT, incoming_max_frame_size = ?INITIAL_MAX_FRAME_SIZE, outgoing_max_frame_size = ?INITIAL_MAX_FRAME_SIZE, + %% "Prior to any explicit negotiation, [...] the maximum channel number is 0." [2.4.1] channel_max = 0, - auth_mechanism = none, - auth_state = none}}. + auth_mechanism = sasl_init_unprocessed, + auth_state = unauthenticated}}. -spec system_continue(pid(), [sys:dbg_opt()], state()) -> no_return() | ok. system_continue(Parent, Deb, State) -> @@ -138,7 +145,9 @@ inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F). recvloop(Deb, State = #v1{pending_recv = true}) -> mainloop(Deb, State); -recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen}) +recvloop(Deb, State = #v1{sock = Sock, + recv_len = RecvLen, + buf_len = BufLen}) when BufLen < RecvLen -> case rabbit_net:setopts(Sock, [{active, once}]) of ok -> @@ -146,15 +155,19 @@ recvloop(Deb, State = #v1{sock = Sock, recv_len = RecvLen, buf_len = BufLen}) {error, Reason} -> throw({inet_error, Reason}) end; -recvloop(Deb, State = #v1{recv_len = RecvLen, buf = Buf, buf_len = BufLen}) -> +recvloop(Deb, State0 = #v1{callback = Callback, + recv_len = RecvLen, + buf = Buf, + buf_len = BufLen}) -> Bin = case Buf of [B] -> B; _ -> list_to_binary(lists:reverse(Buf)) end, {Data, Rest} = split_binary(Bin, RecvLen), - recvloop(Deb, handle_input(State#v1.callback, Data, - State#v1{buf = [Rest], - buf_len = BufLen - RecvLen})). 
+ State1 = State0#v1{buf = [Rest], + buf_len = BufLen - RecvLen}, + State = handle_input(Callback, Data, State1), + recvloop(Deb, State). -spec mainloop([sys:dbg_opt()], state()) -> no_return() | ok. @@ -195,10 +208,10 @@ handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) -> exit(Reason); handle_other({{'DOWN', ChannelNum}, _MRef, process, SessionPid, Reason}, State) -> handle_session_exit(ChannelNum, SessionPid, Reason, State); -handle_other(handshake_timeout, State) - when ?IS_RUNNING(State) orelse - State#v1.connection_state =:= closing orelse - State#v1.connection_state =:= closed -> +handle_other(handshake_timeout, State = #v1{connection_state = ConnState}) + when ConnState =:= running orelse + ConnState =:= closing orelse + ConnState =:= closed -> State; handle_other(handshake_timeout, State) -> throw({handshake_timeout, State#v1.callback}); @@ -238,7 +251,8 @@ handle_other(Other, _State) -> exit({unexpected_message, Other}). switch_callback(State, Callback, Length) -> - State#v1{callback = Callback, recv_len = Length}. + State#v1{callback = Callback, + recv_len = Length}. terminate(Reason, State) when ?IS_RUNNING(State) -> @@ -281,7 +295,7 @@ handle_session_exit(ChannelNum, SessionPid, Reason, State0) -> "Session error: ~tp", [Reason]) end, - handle_exception(State, SessionPid, R) + handle_exception(State, ChannelNum, R) end, maybe_close(S). 
@@ -307,19 +321,19 @@ error_frame(Condition, Fmt, Args) -> handle_exception(State = #v1{connection_state = closed}, Channel, #'v1_0.error'{description = {utf8, Desc}}) -> rabbit_log_connection:error( - "Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", + "Error on AMQP 1.0 connection ~tp (~tp), channel number ~b:~n~tp", [self(), closed, Channel, Desc]), State; handle_exception(State = #v1{connection_state = CS}, Channel, Error = #'v1_0.error'{description = {utf8, Desc}}) when ?IS_RUNNING(State) orelse CS =:= closing -> rabbit_log_connection:error( - "Error on AMQP 1.0 connection ~tp (~tp), channel ~tp:~n~tp", + "Error on AMQP 1.0 connection ~tp (~tp), channel number ~b:~n~tp", [self(), CS, Channel, Desc]), close(Error, State); -handle_exception(State, Channel, Error) -> +handle_exception(State, _Channel, Error) -> silent_close_delay(), - throw({handshake_error, State#v1.connection_state, Channel, Error}). + throw({handshake_error, State#v1.connection_state, Error}). is_connection_frame(#'v1_0.open'{}) -> true; is_connection_frame(#'v1_0.close'{}) -> true; @@ -330,21 +344,30 @@ handle_frame(Mode, Channel, Body, State) -> handle_frame0(Mode, Channel, Body, State) catch _:#'v1_0.error'{} = Reason -> - handle_exception(State, 0, Reason); + handle_exception(State, Channel, Reason); _:{error, {not_allowed, Username}} -> %% section 2.8.15 in http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-complete-v1.0-os.pdf - handle_exception(State, 0, error_frame( - ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, - "Access for user '~ts' was refused: insufficient permissions", - [Username])); + handle_exception(State, + Channel, + error_frame( + ?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, + "Access for user '~ts' was refused: insufficient permissions", + [Username])); _:Reason:Trace -> - handle_exception(State, 0, error_frame( - ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Reader error: ~tp~n~tp", - [Reason, Trace])) + handle_exception(State, + Channel, + error_frame( + 
?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Reader error: ~tp~n~tp", + [Reason, Trace])) end. -%% Nothing specifies that connection methods have to be on a particular channel. +handle_frame0(amqp, Channel, _Body, + #v1{connection = #v1_connection{channel_max = ChannelMax}}) + when Channel > ChannelMax -> + protocol_error(?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + "channel number (~b) exceeds maximum channel number (~b)", + [Channel, ChannelMax]); handle_frame0(_Mode, Channel, Body, State = #v1{connection_state = CS}) when CS =:= closing orelse @@ -379,17 +402,21 @@ parse_frame_body(Body, _Channel) -> end. handle_connection_frame( - #'v1_0.open'{max_frame_size = ClientMaxFrame, + #'v1_0.open'{container_id = {utf8, ContainerId}, + max_frame_size = ClientMaxFrame, channel_max = ClientChannelMax, idle_time_out = IdleTimeout, hostname = Hostname, properties = Properties}, - #v1{connection_state = starting, - connection = Connection = #v1_connection{name = ConnectionName, - user = User = #user{username = Username}}, + #v1{connection_state = waiting_open, + connection = Connection = #v1_connection{ + name = ConnectionName, + user = User = #user{username = Username}, + auth_mechanism = {Mechanism, _Mod} + }, helper_sup = HelperSupPid, sock = Sock} = State0) -> - + logger:update_process_metadata(#{amqp_container => ContainerId}), Vhost = vhost(Hostname), ok = check_user_loopback(State0), ok = check_vhost_exists(Vhost, State0), @@ -401,8 +428,9 @@ handle_connection_frame( rabbit_core_metrics:auth_attempt_succeeded(<<>>, Username, amqp10), notify_auth(user_authentication_success, Username, State0), rabbit_log_connection:info( - "AMQP 1.0 connection: user '~ts' authenticated and granted access to vhost '~ts'", - [Username, Vhost]), + "Connection from AMQP 1.0 container '~ts': user '~ts' authenticated " + "using SASL mechanism ~s and granted access to vhost '~ts'", + [ContainerId, Username, Mechanism, Vhost]), OutgoingMaxFrameSize = case ClientMaxFrame of undefined -> @@ -450,20 +478,26 
@@ handle_connection_frame( SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun), {ok, IncomingMaxFrameSize} = application:get_env(rabbit, frame_max), - %% TODO enforce channel_max - ChannelMax = case ClientChannelMax of - undefined -> - %% default as per 2.7.1 - 16#ff_ff; - {ushort, N} -> - N - end, + {ok, SessionMax} = application:get_env(rabbit, session_max_per_connection), + %% "The channel-max value is the highest channel number that can be used on the connection. + %% This value plus one is the maximum number of sessions that can be simultaneously active + %% on the connection." [2.7.1] + ChannelMax = SessionMax - 1, + %% Assert config is valid. + true = ChannelMax >= 0 andalso ChannelMax =< 16#ff_ff, + EffectiveChannelMax = case ClientChannelMax of + undefined -> + ChannelMax; + {ushort, N} -> + min(N, ChannelMax) + end, State1 = State0#v1{connection_state = running, connection = Connection#v1_connection{ + container_id = ContainerId, vhost = Vhost, incoming_max_frame_size = IncomingMaxFrameSize, outgoing_max_frame_size = OutgoingMaxFrameSize, - channel_max = ChannelMax, + channel_max = EffectiveChannelMax, properties = Properties, timeout = ReceiveTimeoutMillis}, heartbeater = Heartbeater}, @@ -488,7 +522,7 @@ handle_connection_frame( %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-anonymous-relay {symbol, <<"ANONYMOUS-RELAY">>}], Open = #'v1_0.open'{ - channel_max = ClientChannelMax, + channel_max = {ushort, EffectiveChannelMax}, max_frame_size = {uint, IncomingMaxFrameSize}, %% "the value in idle-time-out SHOULD be half the peer's actual timeout threshold" [2.4.5] idle_time_out = {uint, ReceiveTimeoutMillis div 2}, @@ -502,10 +536,9 @@ handle_connection_frame(#'v1_0.close'{}, State0) -> close(undefined, State). 
start_writer(#v1{helper_sup = SupPid, - sock = Sock, - connection = #v1_connection{outgoing_max_frame_size = MaxFrame}} = State) -> + sock = Sock} = State) -> ChildSpec = #{id => writer, - start => {rabbit_amqp_writer, start_link, [Sock, MaxFrame, self()]}, + start => {rabbit_amqp_writer, start_link, [Sock, self()]}, restart => transient, significant => true, shutdown => ?WORKER_WAIT, @@ -536,50 +569,53 @@ handle_session_frame(Channel, Body, #v1{tracked_channels = Channels} = State) -> end end. -%% TODO: write a proper ANONYMOUS plugin and unify with STOMP -handle_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, <<"ANONYMOUS">>}, - hostname = _Hostname}, - #v1{connection_state = starting, - connection = Connection, - sock = Sock} = State0) -> - case default_user() of - none -> - silent_close_delay(), - Outcome = #'v1_0.sasl_outcome'{code = ?V_1_0_SASL_CODE_SYS_PERM}, - ok = send_on_channel0(Sock, Outcome, rabbit_amqp_sasl), - throw(banned_unauthenticated_connection); - _ -> - %% We only need to send the frame, again start_connection - %% will set up the default user. 
- Outcome = #'v1_0.sasl_outcome'{code = ?V_1_0_SASL_CODE_OK}, - ok = send_on_channel0(Sock, Outcome, rabbit_amqp_sasl), - State = State0#v1{connection_state = waiting_amqp0100, - connection = Connection#v1_connection{auth_mechanism = anonymous}}, - switch_callback(State, handshake, 8) - end; -handle_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism}, - initial_response = {binary, Response}, - hostname = _Hostname}, - State0 = #v1{connection_state = starting, - connection = Connection, - sock = Sock}) -> +handle_sasl_frame(#'v1_0.sasl_init'{mechanism = {symbol, Mechanism}, + initial_response = Response, + hostname = _}, + State0 = #v1{connection_state = waiting_sasl_init, + connection = Connection, + sock = Sock}) -> + ResponseBin = case Response of + undefined -> <<>>; + {binary, Bin} -> Bin + end, AuthMechanism = auth_mechanism_to_module(Mechanism, Sock), - State = State0#v1{connection = - Connection#v1_connection{ - auth_mechanism = {Mechanism, AuthMechanism}, - auth_state = AuthMechanism:init(Sock)}, - connection_state = securing}, - auth_phase_1_0(Response, State); + AuthState = AuthMechanism:init(Sock), + State = State0#v1{ + connection = Connection#v1_connection{ + auth_mechanism = {Mechanism, AuthMechanism}, + auth_state = AuthState}, + connection_state = securing}, + auth_phase(ResponseBin, State); handle_sasl_frame(#'v1_0.sasl_response'{response = {binary, Response}}, State = #v1{connection_state = securing}) -> - auth_phase_1_0(Response, State); + auth_phase(Response, State); handle_sasl_frame(Performative, State) -> throw({unexpected_1_0_sasl_frame, Performative, State}). 
-handle_input(handshake, <<"AMQP", 0, 1, 0, 0>>, - #v1{connection_state = waiting_amqp0100} = State) -> - start_connection(amqp, State); - +handle_input(handshake, + <<"AMQP",0,1,0,0>>, + #v1{connection_state = waiting_amqp0100, + sock = Sock, + connection = #v1_connection{user = #user{}}, + helper_sup = HelperSup + } = State0) -> + %% At this point, client already got successfully authenticated by SASL. + send_handshake(Sock, <<"AMQP",0,1,0,0>>), + ChildSpec = #{id => session_sup, + start => {rabbit_amqp_session_sup, start_link, [self()]}, + restart => transient, + significant => true, + shutdown => infinity, + type => supervisor}, + {ok, SessionSupPid} = supervisor:start_child(HelperSup, ChildSpec), + State = State0#v1{ + session_sup = SessionSupPid, + %% "After establishing or accepting a TCP connection and sending + %% the protocol header, each peer MUST send an open frame before + %% sending any other frames." [2.4.1] + connection_state = waiting_open}, + switch_callback(State, {frame_header, amqp}, 8); handle_input({frame_header, Mode}, Header = <>, State) when DOff >= 2 -> @@ -604,7 +640,8 @@ handle_input({frame_header, Mode}, handle_input({frame_header, _Mode}, Malformed, _State) -> throw({bad_1_0_header, Malformed}); handle_input({frame_body, Mode, DOff, Channel}, - FrameBin, State) -> + FrameBin, + State) -> %% Figure 2.16 %% DOff = 4-byte words minus 8 bytes we've already read ExtendedHeaderSize = (DOff * 32 - 64), @@ -615,75 +652,24 @@ handle_input({frame_body, Mode, DOff, Channel}, handle_input(Callback, Data, _State) -> throw({bad_input, Callback, Data}). --spec init(protocol(), tuple()) -> no_return(). -init(Mode, PackedState) -> - {ok, HandshakeTimeout} = application:get_env(rabbit, handshake_timeout), +-spec init(tuple()) -> no_return(). 
+init(PackedState) -> {parent, Parent} = erlang:process_info(self(), parent), ok = rabbit_connection_sup:remove_connection_helper_sup(Parent, helper_sup_amqp_091), - State0 = unpack_from_0_9_1(PackedState, Parent, HandshakeTimeout), - State = start_connection(Mode, State0), + State0 = unpack_from_0_9_1(PackedState, Parent), + State = advertise_sasl_mechanism(State0), %% By invoking recvloop here we become 1.0. recvloop(sys:debug_options([]), State). -start_connection(Mode = sasl, State = #v1{sock = Sock}) -> +advertise_sasl_mechanism(State0 = #v1{connection_state = received_amqp3100, + sock = Sock}) -> send_handshake(Sock, <<"AMQP",3,1,0,0>>), - %% "The server mechanisms are ordered in decreasing level of preference." [5.3.3.1] Ms0 = [{symbol, atom_to_binary(M)} || M <- auth_mechanisms(Sock)], - Ms1 = case default_user() of - none -> Ms0; - _ -> Ms0 ++ [{symbol, <<"ANONYMOUS">>}] - end, - Ms2 = {array, symbol, Ms1}, - Ms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms2}, + Ms1 = {array, symbol, Ms0}, + Ms = #'v1_0.sasl_mechanisms'{sasl_server_mechanisms = Ms1}, ok = send_on_channel0(Sock, Ms, rabbit_amqp_sasl), - start_connection0(Mode, State); - -start_connection(Mode = amqp, - State = #v1{sock = Sock, - connection = C = #v1_connection{user = User}}) -> - case User of - none -> - %% Client either skipped SASL layer or used SASL mechansim ANONYMOUS. - case default_user() of - none -> - send_handshake(Sock, <<"AMQP",3,1,0,0>>), - throw(banned_unauthenticated_connection); - NoAuthUsername -> - case rabbit_access_control:check_user_login(NoAuthUsername, []) of - {ok, NoAuthUser} -> - State1 = State#v1{connection = C#v1_connection{user = NoAuthUser}}, - send_handshake(Sock, <<"AMQP",0,1,0,0>>), - start_connection0(Mode, State1); - {refused, _, _, _} -> - send_handshake(Sock, <<"AMQP",3,1,0,0>>), - throw(amqp1_0_default_user_missing) - end - end; - #user{} -> - %% Client already got successfully authenticated by SASL. 
- send_handshake(Sock, <<"AMQP",0,1,0,0>>), - start_connection0(Mode, State) - end. - -start_connection0(Mode, State0 = #v1{connection = Connection, - helper_sup = HelperSup}) -> - SessionSup = case Mode of - sasl -> - undefined; - amqp -> - ChildSpec = #{id => session_sup, - start => {rabbit_amqp_session_sup, start_link, [self()]}, - restart => transient, - significant => true, - shutdown => infinity, - type => supervisor}, - {ok, Pid} = supervisor:start_child(HelperSup, ChildSpec), - Pid - end, - State = State0#v1{session_sup = SessionSup, - connection_state = starting, - connection = Connection#v1_connection{timeout = ?NORMAL_TIMEOUT}}, - switch_callback(State, {frame_header, Mode}, 8). + State = State0#v1{connection_state = waiting_sasl_init}, + switch_callback(State, {frame_header, sasl}, 8). send_handshake(Sock, Handshake) -> ok = inet_op(fun () -> rabbit_net:send(Sock, Handshake) end). @@ -712,18 +698,25 @@ auth_mechanism_to_module(TypeBin, Sock) -> end end. +%% Returns mechanisms ordered in decreasing level of preference (as configured). auth_mechanisms(Sock) -> - {ok, Configured} = application:get_env(rabbit, auth_mechanisms), - [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism), - Module:should_offer(Sock), lists:member(Name, Configured)]. - -%% Begin 1-0 - -auth_phase_1_0(Response, - State = #v1{sock = Sock, - connection = Connection = - #v1_connection{auth_mechanism = {Name, AuthMechanism}, - auth_state = AuthState}}) -> + {ok, ConfiguredMechs} = application:get_env(rabbit, auth_mechanisms), + RegisteredMechs = rabbit_registry:lookup_all(auth_mechanism), + lists:filter( + fun(Mech) -> + case proplists:lookup(Mech, RegisteredMechs) of + {Mech, Mod} -> + Mod:should_offer(Sock); + none -> + false + end + end, ConfiguredMechs). 
+ +auth_phase( + Response, + State = #v1{sock = Sock, + connection = Conn = #v1_connection{auth_mechanism = {Name, AuthMechanism}, + auth_state = AuthState}}) -> case AuthMechanism:handle_response(Response, AuthState) of {refused, Username, Msg, Args} -> %% We don't trust the client at this point - force them to wait @@ -740,18 +733,19 @@ auth_phase_1_0(Response, auth_fail(none, State), protocol_error(?V_1_0_AMQP_ERROR_DECODE_ERROR, Msg, Args); {challenge, Challenge, AuthState1} -> - Secure = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}}, - ok = send_on_channel0(Sock, Secure, rabbit_amqp_sasl), - State#v1{connection = Connection#v1_connection{auth_state = AuthState1}}; + Challenge = #'v1_0.sasl_challenge'{challenge = {binary, Challenge}}, + ok = send_on_channel0(Sock, Challenge, rabbit_amqp_sasl), + State1 = State#v1{connection = Conn#v1_connection{auth_state = AuthState1}}, + switch_callback(State1, {frame_header, sasl}, 8); {ok, User} -> Outcome = #'v1_0.sasl_outcome'{code = ?V_1_0_SASL_CODE_OK}, ok = send_on_channel0(Sock, Outcome, rabbit_amqp_sasl), State1 = State#v1{connection_state = waiting_amqp0100, - connection = Connection#v1_connection{user = User}}, + connection = Conn#v1_connection{user = User, + auth_state = authenticated}}, switch_callback(State1, handshake, 8) end. - auth_fail(Username, State) -> rabbit_core_metrics:auth_attempt_failed(<<>>, Username, amqp10), notify_auth(user_authentication_failure, Username, State). @@ -819,8 +813,7 @@ send_to_new_session( vhost({utf8, <<"vhost:", VHost/binary>>}) -> VHost; vhost(_) -> - application:get_env(rabbit, amqp1_0_default_vhost, - application:get_env(rabbit, default_vhost, <<"/">>)). + application:get_env(rabbit, default_vhost, <<"/">>). 
check_user_loopback(#v1{connection = #v1_connection{user = #user{username = Username}}, sock = Socket} = State) -> @@ -910,19 +903,10 @@ ensure_credential_expiry_timer(User) -> ok; false -> protocol_error(?V_1_0_AMQP_ERROR_UNAUTHORIZED_ACCESS, - "Credential expired ~b ms ago", [Time]) + "Credential expired ~b ms ago", [abs(Time)]) end end. --spec default_user() -> none | rabbit_types:username(). -default_user() -> - case application:get_env(rabbit, amqp1_0_default_user) of - {ok, none} -> - none; - {ok, Username} when is_binary(Username) -> - Username - end. - %% We don't trust the client at this point - force them to wait %% for a bit so they can't DOS us with repeated failed logins etc. silent_close_delay() -> @@ -968,15 +952,18 @@ i(auth_mechanism, #v1{connection = #v1_connection{auth_mechanism = Val}}) -> _ -> Val end; i(frame_max, #v1{connection = #v1_connection{outgoing_max_frame_size = Val}}) -> - Val; + %% Some HTTP API clients expect an integer to be reported. + %% https://github.com/rabbitmq/rabbitmq-server/issues/11838 + if Val =:= unlimited -> ?UINT_MAX; + is_integer(Val) -> Val + end; i(timeout, #v1{connection = #v1_connection{timeout = Millis}}) -> Millis div 1000; -i(user, - #v1{connection = #v1_connection{user = #user{username = Val}}}) -> - Val; -i(user, - #v1{connection = #v1_connection{user = none}}) -> - ''; +i(user, #v1{connection = #v1_connection{user = User}}) -> + case User of + #user{username = Val} -> Val; + unauthenticated -> '' + end; i(state, S) -> i(connection_state, S); i(connection_state, #v1{connection_state = Val}) -> @@ -985,6 +972,8 @@ i(connected_at, #v1{connection = #v1_connection{connected_at = Val}}) -> Val; i(name, #v1{connection = #v1_connection{name = Val}}) -> Val; +i(container_id, #v1{connection = #v1_connection{container_id = Val}}) -> + Val; i(vhost, #v1{connection = #v1_connection{vhost = Val}}) -> Val; i(host, #v1{connection = #v1_connection{host = Val}}) -> diff --git a/deps/rabbit/src/rabbit_amqp_session.erl 
b/deps/rabbit/src/rabbit_amqp_session.erl index 932eb24ca2a2..31d5348b56b5 100644 --- a/deps/rabbit/src/rabbit_amqp_session.erl +++ b/deps/rabbit/src/rabbit_amqp_session.erl @@ -30,47 +30,54 @@ }} }). --define(PROTOCOL, amqp10). --define(HIBERNATE_AFTER, 6_000). --define(CREDIT_REPLY_TIMEOUT, 30_000). --define(UINT_OUTGOING_WINDOW, {uint, ?UINT_MAX}). --define(MAX_INCOMING_WINDOW, 400). -%% "The next-outgoing-id MAY be initialized to an arbitrary value" [2.5.6] --define(INITIAL_OUTGOING_TRANSFER_ID, ?UINT_MAX - 3). -%% "Note that, despite its name, the delivery-count is not a count but a -%% sequence number initialized at an arbitrary point by the sender." [2.6.7] --define(INITIAL_DELIVERY_COUNT, ?UINT_MAX - 4). --define(INITIAL_OUTGOING_DELIVERY_ID, 0). --define(DEFAULT_MAX_HANDLE, ?UINT_MAX). -%% [3.4] --define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED, - ?V_1_0_SYMBOL_REJECTED, - ?V_1_0_SYMBOL_RELEASED, - ?V_1_0_SYMBOL_MODIFIED]). --define(MAX_PERMISSION_CACHE_SIZE, 12). --define(PROCESS_GROUP_NAME, amqp_sessions). --define(UINT(N), {uint, N}). %% This is the link credit that we grant to sending clients. %% We are free to choose whatever we want, sending clients must obey. %% Default soft limits / credits in deps/rabbit/Makefile are: %% 32 for quorum queues %% 256 for streams %% 400 for classic queues +%% Note however that rabbit_channel can easily overshoot quorum queues' soft limit by 300 due to +%% higher credit_flow_default_credit setting. %% If link target is a queue (rather than an exchange), we could use one of these depending %% on target queue type. For the time being just use a static value that's something in between. %% In future, we could dynamically grow (or shrink) the link credit we grant depending on how fast %% target queue(s) actually confirm messages: see paper "Credit-Based Flow Control for ATM Networks" %% from 1995, section 4.2 "Static vs. adaptive credit control" for pros and cons. --define(LINK_CREDIT_RCV, 128). 
--define(MANAGEMENT_LINK_CREDIT_RCV, 8). +%% We choose a default of 170 because 170 x 1.5 = 255 which is still below DEFAULT_MAX_QUEUE_CREDIT of 256. +%% We use "x 1.5" in this calculation because we grant 170 new credit half way through leading to maximum +%% 85 + 170 = 255 unconfirmed in-flight messages to the target queue. +%% By staying below DEFAULT_MAX_QUEUE_CREDIT, we avoid situations where a single client is able to enqueue +%% faster to a quorum queue than to consume from it. (Remember that a quorum queue fsyncs each credit top +%% up and batch of enqueues.) +-define(DEFAULT_MAX_LINK_CREDIT, 170). +%% Initial and maximum link credit that we grant to a sending queue. +%% Only when we sent sufficient messages to the writer proc, we will again grant +%% credits to the sending queue. We have this limit in place to ensure that our +%% session proc won't be flooded with messages by the sending queue, especially +%% if we are throttled sending messages to the client either by the writer proc +%% or by remote-incoming window (i.e. session flow control). +-define(DEFAULT_MAX_QUEUE_CREDIT, 256). +-define(DEFAULT_MAX_INCOMING_WINDOW, 400). +-define(MAX_MANAGEMENT_LINK_CREDIT, 8). -define(MANAGEMENT_NODE_ADDRESS, <<"/management">>). +-define(UINT_OUTGOING_WINDOW, {uint, ?UINT_MAX}). +%% "The next-outgoing-id MAY be initialized to an arbitrary value" [2.5.6] +-define(INITIAL_OUTGOING_TRANSFER_ID, ?UINT_MAX - 3). +%% "Note that, despite its name, the delivery-count is not a count but a +%% sequence number initialized at an arbitrary point by the sender." [2.6.7] +-define(INITIAL_DELIVERY_COUNT, ?UINT_MAX - 4). +-define(INITIAL_OUTGOING_DELIVERY_ID, 0). +-define(UINT(N), {uint, N}). +%% [3.4] +-define(OUTCOMES, [?V_1_0_SYMBOL_ACCEPTED, + ?V_1_0_SYMBOL_REJECTED, + ?V_1_0_SYMBOL_RELEASED, + ?V_1_0_SYMBOL_MODIFIED]). -define(DEFAULT_EXCHANGE_NAME, <<>>). -%% This is the maximum credit we grant to a sending queue. 
-%% Only when we sent sufficient messages to the writer proc, we will again grant credits -%% to the sending queue. We have this limit in place to ensure that our session proc won't be flooded -%% with messages by the sending queue, especially if we are throttled sending messages to the client -%% either by the writer proc or by remote-incoming window (i.e. session flow control). --define(LINK_CREDIT_RCV_FROM_QUEUE_MAX, 256). +-define(PROTOCOL, amqp10). +-define(MAX_PERMISSION_CACHE_SIZE, 12). +-define(HIBERNATE_AFTER, 6_000). +-define(CREDIT_REPLY_TIMEOUT, 30_000). -export([start_link/8, process_frame/2, @@ -133,6 +140,7 @@ }). -record(incoming_link, { + snd_settle_mode :: snd_settle_mode(), %% The exchange is either defined in the ATTACH frame and static for %% the life time of the link or dynamically provided in each message's %% "to" field (address v2). @@ -143,6 +151,7 @@ routing_key :: rabbit_types:routing_key() | to | subject, %% queue_name_bin is only set if the link target address refers to a queue. queue_name_bin :: undefined | rabbit_misc:resource_name(), + max_message_size :: pos_integer(), delivery_count :: sequence_no(), credit :: rabbit_queue_type:credit(), %% TRANSFER delivery IDs published to queues but not yet confirmed by queues @@ -172,10 +181,9 @@ -record(queue_flow_ctl, { delivery_count :: sequence_no(), %% We cap the actual credit we grant to the sending queue. - credit :: 0..?LINK_CREDIT_RCV_FROM_QUEUE_MAX, - %% Credit as desired by the receiving client. If larger than - %% LINK_CREDIT_RCV_FROM_QUEUE_MAX, we will top up in batches to the sending queue. - desired_credit :: rabbit_queue_type:credit(), + %% If client_flow_ctl.credit is larger than max_queue_credit, + %% we will top up in batches to the sending queue. + credit :: rabbit_queue_type:credit(), drain :: boolean() }). 
@@ -187,7 +195,7 @@ send_settled :: boolean(), max_message_size :: unlimited | pos_integer(), - %% When feature flag credit_api_v2 becomes required, + %% When feature flag rabbitmq_4.0.0 becomes required, %% the following 2 fields should be deleted. credit_api_version :: 1 | 2, %% When credit API v1 is used, our session process holds the delivery-count @@ -197,10 +205,18 @@ %% client and for the link to the sending queue. client_flow_ctl :: #client_flow_ctl{} | credit_api_v1, queue_flow_ctl :: #queue_flow_ctl{} | credit_api_v1, - %% True if we sent a credit request to the sending queue - %% but haven't processed the corresponding credit reply yet. - credit_req_in_flight :: boolean() | credit_api_v1, - %% While credit_req_in_flight is true, we stash the + %% 'true' means: + %% * we haven't processed a credit reply yet since we last sent + %% a credit request to the sending queue. + %% * a credit request is certainly in flight + %% * possibly multiple credit requests are in flight (e.g. rabbit_fifo_client + %% will re-send credit requests on our behalf on quorum queue leader changes) + %% 'false' means: + %% * we processed a credit reply since we last sent a credit request to the sending queue + %% * probably no credit request is in flight, but there might be + %% (we aren't sure since we don't use correlations for credit requests) + at_least_one_credit_req_in_flight :: boolean() | credit_api_v1, + %% While at_least_one_credit_req_in_flight is true, we stash the %% latest credit request from the receiving client. stashed_credit_req :: none | #credit_req{} | credit_api_v1 }). @@ -217,7 +233,7 @@ frames :: [transfer_frame_body(), ...], queue_ack_required :: boolean(), %% Queue that sent us this message. - %% When feature flag credit_api_v2 becomes required, this field should be deleted. + %% When feature flag rabbitmq_4.0.0 becomes required, this field should be deleted. 
queue_pid :: pid() | credit_api_v2, delivery_id :: delivery_number(), outgoing_unsettled :: #outgoing_unsettled{} @@ -244,7 +260,11 @@ incoming_window_margin = 0 :: non_neg_integer(), resource_alarms :: sets:set(rabbit_alarm:resource_alarm_source()), trace_state :: rabbit_trace:state(), - conn_name :: binary() + conn_name :: binary(), + max_handle :: link_handle(), + max_incoming_window :: pos_integer(), + max_link_credit :: pos_integer(), + max_queue_credit :: pos_integer() }). -record(state, { @@ -357,32 +377,56 @@ process_frame(Pid, FrameBody) -> gen_server:cast(Pid, {frame_body, FrameBody}). init({ReaderPid, WriterPid, ChannelNum, MaxFrameSize, User, Vhost, ConnName, - #'v1_0.begin'{next_outgoing_id = ?UINT(RemoteNextOutgoingId), - incoming_window = ?UINT(RemoteIncomingWindow), - outgoing_window = ?UINT(RemoteOutgoingWindow), - handle_max = HandleMax0}}) -> + #'v1_0.begin'{ + %% "If a session is locally initiated, the remote-channel MUST NOT be set." [2.7.2] + remote_channel = undefined, + next_outgoing_id = ?UINT(RemoteNextOutgoingId), + incoming_window = ?UINT(RemoteIncomingWindow), + outgoing_window = ?UINT(RemoteOutgoingWindow), + handle_max = ClientHandleMax}}) -> process_flag(trap_exit, true), process_flag(message_queue_data, off_heap), - ok = pg:join(node(), ?PROCESS_GROUP_NAME, self()), + ok = pg:join(pg_scope(), self(), self()), Alarms0 = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), Alarms = sets:from_list(Alarms0, [{version, 2}]), - NextOutgoingId = ?INITIAL_OUTGOING_TRANSFER_ID, + {ok, LinkMax} = application:get_env(rabbit, link_max_per_session), + %% "The handle-max value is the highest handle value that can be used on the session." [2.7.2] + %% The lowest handle is 0. + HandleMax = LinkMax - 1, + %% Assert config is valid. 
+ true = HandleMax >= 0 andalso HandleMax =< ?UINT_MAX, + EffectiveHandleMax = case ClientHandleMax of + undefined -> + HandleMax; + ?UINT(N) -> + min(N, HandleMax) + end, + + MaxLinkCredit = application:get_env( + rabbit, max_link_credit, ?DEFAULT_MAX_LINK_CREDIT), + MaxQueueCredit = application:get_env( + rabbit, max_queue_credit, ?DEFAULT_MAX_QUEUE_CREDIT), + MaxIncomingWindow = application:get_env( + rabbit, max_incoming_window, ?DEFAULT_MAX_INCOMING_WINDOW), + true = is_valid_max(MaxLinkCredit), + true = is_valid_max(MaxQueueCredit), + true = is_valid_max(MaxIncomingWindow), IncomingWindow = case sets:is_empty(Alarms) of - true -> ?MAX_INCOMING_WINDOW; + true -> MaxIncomingWindow; false -> 0 end, + NextOutgoingId = ?INITIAL_OUTGOING_TRANSFER_ID, - HandleMax = case HandleMax0 of - ?UINT(Max) -> Max; - _ -> ?DEFAULT_MAX_HANDLE - end, - Reply = #'v1_0.begin'{remote_channel = {ushort, ChannelNum}, - handle_max = ?UINT(HandleMax), - next_outgoing_id = ?UINT(NextOutgoingId), - incoming_window = ?UINT(IncomingWindow), - outgoing_window = ?UINT_OUTGOING_WINDOW}, + Reply = #'v1_0.begin'{ + %% "When an endpoint responds to a remotely initiated session, the remote-channel + %% MUST be set to the channel on which the remote session sent the begin." [2.7.2] + remote_channel = {ushort, ChannelNum}, + next_outgoing_id = ?UINT(NextOutgoingId), + incoming_window = ?UINT(IncomingWindow), + outgoing_window = ?UINT_OUTGOING_WINDOW, + handle_max = ?UINT(EffectiveHandleMax)}, rabbit_amqp_writer:send_command(WriterPid, ChannelNum, Reply), {ok, #state{next_incoming_id = RemoteNextOutgoingId, @@ -399,7 +443,11 @@ init({ReaderPid, WriterPid, ChannelNum, MaxFrameSize, User, Vhost, ConnName, channel_num = ChannelNum, resource_alarms = Alarms, trace_state = rabbit_trace:init(Vhost), - conn_name = ConnName + conn_name = ConnName, + max_handle = EffectiveHandleMax, + max_incoming_window = MaxIncomingWindow, + max_link_credit = MaxLinkCredit, + max_queue_credit = MaxQueueCredit }}}. 
terminate(_Reason, #state{incoming_links = IncomingLinks, @@ -417,7 +465,7 @@ terminate(_Reason, #state{incoming_links = IncomingLinks, -spec list_local() -> [pid()]. list_local() -> - pg:get_local_members(node(), ?PROCESS_GROUP_NAME). + pg:which_groups(pg_scope()). -spec conserve_resources(pid(), rabbit_alarm:resource_alarm_source(), @@ -452,16 +500,11 @@ handle_info({{'DOWN', QName}, _MRef, process, QPid, Reason}, handle_cast({frame_body, FrameBody}, #state{cfg = #cfg{writer_pid = WriterPid, channel_num = Ch}} = State0) -> - try handle_control(FrameBody, State0) of - {reply, Replies, State} when is_list(Replies) -> - lists:foreach(fun (Reply) -> - rabbit_amqp_writer:send_command(WriterPid, Ch, Reply) - end, Replies), - noreply(State); - {reply, Reply, State} -> - rabbit_amqp_writer:send_command(WriterPid, Ch, Reply), - noreply(State); - {noreply, State} -> + try handle_frame(FrameBody, State0) of + {ok, ReplyFrames, State} -> + lists:foreach(fun(Frame) -> + rabbit_amqp_writer:send_command(WriterPid, Ch, Frame) + end, ReplyFrames), noreply(State); {stop, _, _} = Stop -> Stop @@ -484,7 +527,9 @@ handle_cast({conserve_resources, Alarm, Conserve}, cfg = #cfg{resource_alarms = Alarms0, incoming_window_margin = Margin0, writer_pid = WriterPid, - channel_num = Ch} = Cfg + channel_num = Ch, + max_incoming_window = MaxIncomingWindow + } = Cfg } = State0) -> Alarms = case Conserve of true -> sets:add_element(Alarm, Alarms0); @@ -497,11 +542,11 @@ handle_cast({conserve_resources, Alarm, Conserve}, %% Notify the client to not send us any more TRANSFERs. Since we decrase %% our incoming window dynamically, there might be incoming in-flight %% TRANSFERs. So, let's be lax and allow for some excess TRANSFERs. - {true, 0, ?MAX_INCOMING_WINDOW}; + {true, 0, MaxIncomingWindow}; {false, true} -> %% All alarms cleared. %% Notify the client that it can resume sending us TRANSFERs. 
- {true, ?MAX_INCOMING_WINDOW, 0}; + {true, MaxIncomingWindow, 0}; _ -> {false, IncomingWindow0, Margin0} end, @@ -559,7 +604,8 @@ send_delivery_state_changes(#state{stashed_rejected = [], stashed_eol = []} = State) -> State; send_delivery_state_changes(State0 = #state{cfg = #cfg{writer_pid = Writer, - channel_num = ChannelNum}}) -> + channel_num = ChannelNum, + max_link_credit = MaxLinkCredit}}) -> %% Order is important: %% 1. Process queue rejections. {RejectedIds, GrantCredits0, State1} = handle_stashed_rejected(State0), @@ -580,7 +626,7 @@ send_delivery_state_changes(State0 = #state{cfg = #cfg{writer_pid = Writer, rabbit_amqp_writer:send_command(Writer, ChannelNum, Frame) end, DetachFrames), maps:foreach(fun(HandleInt, DeliveryCount) -> - F0 = flow(?UINT(HandleInt), DeliveryCount), + F0 = flow(?UINT(HandleInt), DeliveryCount, MaxLinkCredit), F = session_flow_fields(F0, State), rabbit_amqp_writer:send_command(Writer, ChannelNum, F) end, GrantCredits), @@ -588,7 +634,8 @@ send_delivery_state_changes(State0 = #state{cfg = #cfg{writer_pid = Writer, handle_stashed_rejected(#state{stashed_rejected = []} = State) -> {[], #{}, State}; -handle_stashed_rejected(#state{stashed_rejected = Actions, +handle_stashed_rejected(#state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_rejected = Actions, incoming_links = Links} = State0) -> {Ids, GrantCredits, Ls} = lists:foldl( @@ -605,7 +652,8 @@ handle_stashed_rejected(#state{stashed_rejected = Actions, end, Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, {Link, GrantCreds} = maybe_grant_link_credit( - HandleInt, Link1, GrantCreds0), + MaxLinkCredit, HandleInt, + Link1, GrantCreds0), {Ids1, GrantCreds, maps:update(HandleInt, Link, Links0)}; error -> Acc @@ -622,7 +670,8 @@ handle_stashed_rejected(#state{stashed_rejected = Actions, handle_stashed_settled(GrantCredits, #state{stashed_settled = []} = State) -> {[], GrantCredits, State}; -handle_stashed_settled(GrantCredits0, #state{stashed_settled = Actions, 
+handle_stashed_settled(GrantCredits0, #state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_settled = Actions, incoming_links = Links} = State0) -> {Ids, GrantCredits, Ls} = lists:foldl( @@ -651,7 +700,8 @@ handle_stashed_settled(GrantCredits0, #state{stashed_settled = Actions, end, Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, {Link, GrantCreds} = maybe_grant_link_credit( - HandleInt, Link1, GrantCreds0), + MaxLinkCredit, HandleInt, + Link1, GrantCreds0), {Ids2, GrantCreds, maps:update(HandleInt, Link, Links0)}; _ -> Acc @@ -691,11 +741,14 @@ handle_stashed_down(#state{stashed_down = QNames, handle_stashed_eol(DetachFrames, GrantCredits, #state{stashed_eol = []} = State) -> {[], [], DetachFrames, GrantCredits, State}; -handle_stashed_eol(DetachFrames0, GrantCredits0, #state{stashed_eol = Eols} = State0) -> +handle_stashed_eol(DetachFrames0, GrantCredits0, #state{cfg = #cfg{max_link_credit = MaxLinkCredit}, + stashed_eol = Eols} = State0) -> {ReleasedIs, AcceptedIds, DetachFrames, GrantCredits, State1} = lists:foldl(fun(QName, {RIds0, AIds0, DetachFrames1, GrantCreds0, S0 = #state{incoming_links = Links0, queue_states = QStates0}}) -> - {RIds, AIds, GrantCreds1, Links} = settle_eol(QName, {RIds0, AIds0, GrantCreds0, Links0}), + {RIds, AIds, GrantCreds1, Links} = settle_eol( + QName, MaxLinkCredit, + {RIds0, AIds0, GrantCreds0, Links0}), QStates = rabbit_queue_type:remove(QName, QStates0), S1 = S0#state{incoming_links = Links, queue_states = QStates}, @@ -706,14 +759,14 @@ handle_stashed_eol(DetachFrames0, GrantCredits0, #state{stashed_eol = Eols} = St State = State1#state{stashed_eol = []}, {ReleasedIs, AcceptedIds, DetachFrames, GrantCredits, State}. 
-settle_eol(QName, {_ReleasedIds, _AcceptedIds, _GrantCredits, Links} = Acc) -> +settle_eol(QName, MaxLinkCredit, {_ReleasedIds, _AcceptedIds, _GrantCredits, Links} = Acc) -> maps:fold(fun(HandleInt, #incoming_link{incoming_unconfirmed_map = U0} = Link0, {RelIds0, AcceptIds0, GrantCreds0, Links0}) -> {RelIds, AcceptIds, U} = settle_eol0(QName, {RelIds0, AcceptIds0, U0}), Link1 = Link0#incoming_link{incoming_unconfirmed_map = U}, {Link, GrantCreds} = maybe_grant_link_credit( - HandleInt, Link1, GrantCreds0), + MaxLinkCredit, HandleInt, Link1, GrantCreds0), Links1 = maps:update(HandleInt, Link, Links0), @@ -837,20 +890,246 @@ disposition(DeliveryState, First, Last) -> first = ?UINT(First), last = Last1}. -handle_control(#'v1_0.attach'{ - role = ?AMQP_ROLE_SENDER, - snd_settle_mode = ?V_1_0_SENDER_SETTLE_MODE_SETTLED, - name = Name = {utf8, LinkName}, - handle = Handle = ?UINT(HandleInt), - source = Source = #'v1_0.source'{address = ClientTerminusAddress}, - target = Target = #'v1_0.target'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, - initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt), - properties = Properties - } = Attach, - #state{management_link_pairs = Pairs0, - incoming_management_links = Links - } = State0) -> +handle_frame({Performative = #'v1_0.transfer'{handle = ?UINT(Handle)}, Paylaod}, + State0 = #state{incoming_links = IncomingLinks}) -> + {Flows, State1} = session_flow_control_received_transfer(State0), + + {Reply, State} = + case IncomingLinks of + #{Handle := Link0} -> + case incoming_link_transfer(Performative, Paylaod, Link0, State1) of + {ok, Reply0, Link, State2} -> + {Reply0, State2#state{incoming_links = IncomingLinks#{Handle := Link}}}; + {error, Reply0} -> + %% "When an error occurs at a link endpoint, the endpoint MUST be detached + %% with appropriate error information supplied in the error field of the + %% detach frame. The link endpoint MUST then be destroyed." 
[2.6.5] + {Reply0, State1#state{incoming_links = maps:remove(Handle, IncomingLinks)}} + end; + _ -> + incoming_mgmt_link_transfer(Performative, Paylaod, State1) + end, + reply_frames(Reply ++ Flows, State); + +%% Although the AMQP message format [3.2] requires a body, it is valid to send a transfer frame without payload. +%% For example, when a large multi transfer message is streamed using the ProtonJ2 client, the client could send +%% a final #'v1_0.transfer'{more=false} frame without a payload. +handle_frame(Performative = #'v1_0.transfer'{}, State) -> + handle_frame({Performative, <<>>}, State); + +%% Flow control. These frames come with two pieces of information: +%% the session window, and optionally, credit for a particular link. +%% We'll deal with each of them separately. +handle_frame(#'v1_0.flow'{handle = Handle} = Flow, + #state{incoming_links = IncomingLinks, + outgoing_links = OutgoingLinks, + incoming_management_links = IncomingMgmtLinks, + outgoing_management_links = OutgoingMgmtLinks + } = State0) -> + State = session_flow_control_received_flow(Flow, State0), + S = case Handle of + undefined -> + %% "If not set, the flow frame is carrying only information + %% pertaining to the session endpoint." [2.7.4] + State; + ?UINT(HandleInt) -> + %% "If set, indicates that the flow frame carries flow state information + %% for the local link endpoint associated with the given handle." [2.7.4] + case OutgoingLinks of + #{HandleInt := OutgoingLink} -> + handle_outgoing_link_flow_control(OutgoingLink, Flow, State); + _ -> + case OutgoingMgmtLinks of + #{HandleInt := OutgoingMgmtLink} -> + handle_outgoing_mgmt_link_flow_control(OutgoingMgmtLink, Flow, State); + _ when is_map_key(HandleInt, IncomingLinks) orelse + is_map_key(HandleInt, IncomingMgmtLinks) -> + %% We're being told about available messages at the sender. 
+ State; + _ -> + %% "If set to a handle that is not currently associated with + %% an attached link, the recipient MUST respond by ending the + %% session with an unattached-handle session error." [2.7.4] + rabbit_log:warning( + "Received Flow frame for unknown link handle: ~tp", [Flow]), + protocol_error( + ?V_1_0_SESSION_ERROR_UNATTACHED_HANDLE, + "Unattached link handle: ~b", [HandleInt]) + end + end + end, + reply_frames([], S); + +handle_frame(#'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER, + first = ?UINT(First), + last = Last0, + state = Outcome, + settled = DispositionSettled} = Disposition, + #state{outgoing_unsettled_map = UnsettledMap0, + queue_states = QStates0} = State0) -> + Last = case Last0 of + ?UINT(L) -> + L; + undefined -> + %% "If not set, this is taken to be the same as first." [2.7.6] + First + end, + UnsettledMapSize = map_size(UnsettledMap0), + case UnsettledMapSize of + 0 -> + reply_frames([], State0); + _ -> + DispositionRangeSize = diff(Last, First) + 1, + {Settled, UnsettledMap} = + case DispositionRangeSize =< UnsettledMapSize of + true -> + %% It is cheaper to iterate over the range of settled delivery IDs. + serial_number:foldl(fun settle_delivery_id/2, + {#{}, UnsettledMap0}, + First, Last); + false -> + %% It is cheaper to iterate over the outgoing unsettled map. 
+ Iter = maps:iterator(UnsettledMap0, + fun(D1, D2) -> compare(D1, D2) =/= greater end), + {Settled0, UnsettledList} = + maps:fold( + fun (DeliveryId, + #outgoing_unsettled{queue_name = QName, + consumer_tag = Ctag, + msg_id = MsgId} = Unsettled, + {SettledAcc, UnsettledAcc}) -> + case serial_number:in_range(DeliveryId, First, Last) of + true -> + SettledAcc1 = maps_update_with( + {QName, Ctag}, + fun(MsgIds) -> [MsgId | MsgIds] end, + [MsgId], + SettledAcc), + {SettledAcc1, UnsettledAcc}; + false -> + {SettledAcc, [{DeliveryId, Unsettled} | UnsettledAcc]} + end + end, + {#{}, []}, Iter), + {Settled0, maps:from_list(UnsettledList)} + end, + + SettleOp = settle_op_from_outcome(Outcome), + {QStates, Actions} = + maps:fold( + fun({QName, Ctag}, MsgIdsRev, {QS0, ActionsAcc}) -> + MsgIds = lists:reverse(MsgIdsRev), + case rabbit_queue_type:settle(QName, SettleOp, Ctag, MsgIds, QS0) of + {ok, QS, Actions0} -> + messages_acknowledged(SettleOp, QName, QS, MsgIds), + {QS, ActionsAcc ++ Actions0}; + {protocol_error, _ErrorType, Reason, ReasonArgs} -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + Reason, ReasonArgs) + end + end, {QStates0, []}, Settled), + + State1 = State0#state{outgoing_unsettled_map = UnsettledMap, + queue_states = QStates}, + Reply = case DispositionSettled of + true -> []; + false -> [Disposition#'v1_0.disposition'{settled = true, + role = ?AMQP_ROLE_SENDER}] + end, + State = handle_queue_actions(Actions, State1), + reply_frames(Reply, State) + end; + +handle_frame(#'v1_0.attach'{handle = ?UINT(Handle)} = Attach, + #state{cfg = #cfg{max_handle = MaxHandle}} = State) -> ok = validate_attach(Attach), + case Handle > MaxHandle of + true -> + protocol_error(?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + "link handle value (~b) exceeds maximum link handle value (~b)", + [Handle, MaxHandle]); + false -> + handle_attach(Attach, State) + end; + +handle_frame(Detach = #'v1_0.detach'{handle = ?UINT(HandleInt)}, + State0 = #state{incoming_links = IncomingLinks, + 
outgoing_links = OutgoingLinks0, + outgoing_unsettled_map = Unsettled0, + outgoing_pending = Pending0, + queue_states = QStates0, + cfg = #cfg{user = #user{username = Username}}}) -> + {OutgoingLinks, Unsettled, Pending, QStates} = + case maps:take(HandleInt, OutgoingLinks0) of + {#outgoing_link{queue_name = QName}, OutgoingLinks1} -> + Ctag = handle_to_ctag(HandleInt), + {Unsettled1, Pending1} = remove_outgoing_link(Ctag, Unsettled0, Pending0), + case rabbit_amqqueue:lookup(QName) of + {ok, Q} -> + Spec = #{consumer_tag => Ctag, + reason => remove, + user => Username}, + case rabbit_queue_type:cancel(Q, Spec, QStates0) of + {ok, QStates1} -> + {OutgoingLinks1, Unsettled1, Pending1, QStates1}; + {error, Reason} -> + protocol_error( + ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Failed to remove consumer from ~s: ~tp", + [rabbit_misc:rs(amqqueue:get_name(Q)), Reason]) + end; + {error, not_found} -> + {OutgoingLinks1, Unsettled1, Pending1, QStates0} + end; + error -> + {OutgoingLinks0, Unsettled0, Pending0, QStates0} + end, + + State1 = State0#state{incoming_links = maps:remove(HandleInt, IncomingLinks), + outgoing_links = OutgoingLinks, + outgoing_unsettled_map = Unsettled, + outgoing_pending = Pending, + queue_states = QStates}, + State = maybe_detach_mgmt_link(HandleInt, State1), + Reply = detach_reply(Detach, State, State0), + publisher_or_consumer_deleted(State, State0), + reply_frames(Reply, State); + +handle_frame(#'v1_0.end'{}, + State0 = #state{cfg = #cfg{writer_pid = WriterPid, + channel_num = Ch}}) -> + State = send_delivery_state_changes(State0), + ok = try rabbit_amqp_writer:send_command_sync(WriterPid, Ch, #'v1_0.end'{}) + catch exit:{Reason, {gen_server, call, _ArgList}} + when Reason =:= shutdown orelse + Reason =:= noproc -> + %% AMQP connection and therefore the writer process got already terminated + %% before we had the chance to synchronously end the session. 
+ ok + end, + {stop, normal, State}; + +handle_frame(Frame, _State) -> + protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, + "Unexpected frame ~tp", + [amqp10_framing:pprint(Frame)]). + +reply_frames(Frames, State) -> + {ok, session_flow_fields(Frames, State), State}. + +handle_attach(#'v1_0.attach'{ + role = ?AMQP_ROLE_SENDER, + snd_settle_mode = ?V_1_0_SENDER_SETTLE_MODE_SETTLED, + name = Name = {utf8, LinkName}, + handle = Handle = ?UINT(HandleInt), + source = Source = #'v1_0.source'{address = ClientTerminusAddress}, + target = Target = #'v1_0.target'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, + initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt), + properties = Properties + } = Attach, + #state{management_link_pairs = Pairs0, + incoming_management_links = Links + } = State0) -> ok = check_paired(Properties), Pairs = case Pairs0 of #{LinkName := #management_link_pair{ @@ -875,7 +1154,7 @@ handle_control(#'v1_0.attach'{ MaxMessageSize = persistent_term:get(max_message_size), Link = #management_link{name = LinkName, delivery_count = DeliveryCountInt, - credit = ?MANAGEMENT_LINK_CREDIT_RCV, + credit = ?MAX_MANAGEMENT_LINK_CREDIT, max_message_size = MaxMessageSize}, State = State0#state{management_link_pairs = Pairs, incoming_management_links = maps:put(HandleInt, Link, Links)}, @@ -892,23 +1171,22 @@ handle_control(#'v1_0.attach'{ properties = Properties}, Flow = #'v1_0.flow'{handle = Handle, delivery_count = DeliveryCount, - link_credit = ?UINT(?MANAGEMENT_LINK_CREDIT_RCV)}, - reply0([Reply, Flow], State); - -handle_control(#'v1_0.attach'{ - role = ?AMQP_ROLE_RECEIVER, - name = Name = {utf8, LinkName}, - handle = Handle = ?UINT(HandleInt), - source = Source = #'v1_0.source'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, - target = Target = #'v1_0.target'{address = ClientTerminusAddress}, - rcv_settle_mode = RcvSettleMode, - max_message_size = MaybeMaxMessageSize, - properties = Properties - } = Attach, - #state{management_link_pairs = Pairs0, - 
outgoing_management_links = Links - } = State0) -> - ok = validate_attach(Attach), + link_credit = ?UINT(?MAX_MANAGEMENT_LINK_CREDIT)}, + reply_frames([Reply, Flow], State); + +handle_attach(#'v1_0.attach'{ + role = ?AMQP_ROLE_RECEIVER, + name = Name = {utf8, LinkName}, + handle = Handle = ?UINT(HandleInt), + source = Source = #'v1_0.source'{address = {utf8, ?MANAGEMENT_NODE_ADDRESS}}, + target = Target = #'v1_0.target'{address = ClientTerminusAddress}, + rcv_settle_mode = RcvSettleMode, + max_message_size = MaybeMaxMessageSize, + properties = Properties + } = Attach, + #state{management_link_pairs = Pairs0, + outgoing_management_links = Links + } = State0) -> ok = check_paired(Properties), Pairs = case Pairs0 of #{LinkName := #management_link_pair{ @@ -949,43 +1227,47 @@ handle_control(#'v1_0.attach'{ %% Echo back that we will respect the client's requested max-message-size. max_message_size = MaybeMaxMessageSize, properties = Properties}, - reply0(Reply, State); - -handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_SENDER, - name = LinkName, - handle = Handle = ?UINT(HandleInt), - source = Source, - snd_settle_mode = SndSettleMode, - target = Target, - initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt) - } = Attach, - State0 = #state{incoming_links = IncomingLinks0, - permission_cache = PermCache0, - cfg = #cfg{vhost = Vhost, - user = User}}) -> - ok = validate_attach(Attach), + reply_frames([Reply], State); + +handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_SENDER, + name = LinkName, + handle = Handle = ?UINT(HandleInt), + source = Source, + snd_settle_mode = MaybeSndSettleMode, + target = Target, + initial_delivery_count = DeliveryCount = ?UINT(DeliveryCountInt) + }, + State0 = #state{incoming_links = IncomingLinks0, + permission_cache = PermCache0, + cfg = #cfg{max_link_credit = MaxLinkCredit, + vhost = Vhost, + user = User}}) -> case ensure_target(Target, Vhost, User, PermCache0) of {ok, Exchange, RoutingKey, QNameBin, PermCache} -> + SndSettleMode 
= snd_settle_mode(MaybeSndSettleMode), + MaxMessageSize = persistent_term:get(max_message_size), IncomingLink = #incoming_link{ + snd_settle_mode = SndSettleMode, exchange = Exchange, routing_key = RoutingKey, queue_name_bin = QNameBin, + max_message_size = MaxMessageSize, delivery_count = DeliveryCountInt, - credit = ?LINK_CREDIT_RCV}, + credit = MaxLinkCredit}, _Outcomes = outcomes(Source), Reply = #'v1_0.attach'{ name = LinkName, handle = Handle, source = Source, - snd_settle_mode = SndSettleMode, + snd_settle_mode = MaybeSndSettleMode, rcv_settle_mode = ?V_1_0_RECEIVER_SETTLE_MODE_FIRST, target = Target, %% We are the receiver. role = ?AMQP_ROLE_RECEIVER, - max_message_size = {ulong, persistent_term:get(max_message_size)}}, + max_message_size = {ulong, MaxMessageSize}}, Flow = #'v1_0.flow'{handle = Handle, delivery_count = DeliveryCount, - link_credit = ?UINT(?LINK_CREDIT_RCV)}, + link_credit = ?UINT(MaxLinkCredit)}, %%TODO check that handle is not in use for any other open links. %%"The handle MUST NOT be used for other open links. 
An attempt to attach %% using a handle which is already associated with a link MUST be responded to @@ -994,28 +1276,27 @@ handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_SENDER, State = State0#state{incoming_links = IncomingLinks, permission_cache = PermCache}, rabbit_global_counters:publisher_created(?PROTOCOL), - reply0([Reply, Flow], State); + reply_frames([Reply, Flow], State); {error, Reason} -> protocol_error(?V_1_0_AMQP_ERROR_INVALID_FIELD, "Attach rejected: ~tp", [Reason]) end; -handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, - name = LinkName, - handle = Handle = ?UINT(HandleInt), - source = Source, - snd_settle_mode = SndSettleMode, - rcv_settle_mode = RcvSettleMode, - max_message_size = MaybeMaxMessageSize} = Attach, - State0 = #state{queue_states = QStates0, - outgoing_links = OutgoingLinks0, - permission_cache = PermCache0, - topic_permission_cache = TopicPermCache0, - cfg = #cfg{vhost = Vhost, - user = User = #user{username = Username}, - reader_pid = ReaderPid}}) -> - ok = validate_attach(Attach), +handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, + name = LinkName, + handle = Handle = ?UINT(HandleInt), + source = Source, + snd_settle_mode = SndSettleMode, + rcv_settle_mode = RcvSettleMode, + max_message_size = MaybeMaxMessageSize} = Attach, + State0 = #state{queue_states = QStates0, + outgoing_links = OutgoingLinks0, + permission_cache = PermCache0, + topic_permission_cache = TopicPermCache0, + cfg = #cfg{vhost = Vhost, + user = User = #user{username = Username}, + reader_pid = ReaderPid}}) -> {SndSettled, EffectiveSndSettleMode} = case SndSettleMode of ?V_1_0_SENDER_SETTLE_MODE_SETTLED -> @@ -1045,17 +1326,17 @@ handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, QType = amqqueue:get_type(Q), %% Whether credit API v1 or v2 is used is decided only here at link attachment time. %% This decision applies to the whole life time of the link. 
- %% This means even when feature flag credit_api_v2 will be enabled later, this consumer will + %% This means even when feature flag rabbitmq_4.0.0 will be enabled later, this consumer will %% continue to use credit API v1. This is the safest and easiest solution avoiding %% transferring link flow control state (the delivery-count) at runtime from this session %% process to the queue process. - %% Eventually, after feature flag credit_api_v2 gets enabled and a subsequent rolling upgrade, + %% Eventually, after feature flag rabbitmq_4.0.0 gets enabled and a subsequent rolling upgrade, %% all consumers will use credit API v2. %% Streams always use credit API v2 since the stream client (rabbit_stream_queue) holds the link %% flow control state. Hence, credit API mixed version isn't an issue for streams. {CreditApiVsn, Mode, DeliveryCount, ClientFlowCtl, QueueFlowCtl, CreditReqInFlight, StashedCreditReq} = - case rabbit_feature_flags:is_enabled(credit_api_v2) orelse + case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') orelse QType =:= rabbit_stream_queue of true -> {2, @@ -1066,7 +1347,6 @@ handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, echo = false}, #queue_flow_ctl{delivery_count = ?INITIAL_DELIVERY_COUNT, credit = 0, - desired_credit = 0, drain = false}, false, none}; @@ -1116,7 +1396,7 @@ handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, delivery_count = DeliveryCount, client_flow_ctl = ClientFlowCtl, queue_flow_ctl = QueueFlowCtl, - credit_req_in_flight = CreditReqInFlight, + at_least_one_credit_req_in_flight = CreditReqInFlight, stashed_credit_req = StashedCreditReq}, OutgoingLinks = OutgoingLinks0#{HandleInt => Link}, State1 = State0#state{queue_states = QStates, @@ -1137,227 +1417,14 @@ handle_control(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, end end) of {ok, Reply, State} -> - reply0(Reply, State); + reply_frames(Reply, State); {error, Reason} -> protocol_error( ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, "Could not operate on ~s: ~tp", 
[rabbit_misc:rs(QName), Reason]) end - end; - -handle_control({Performative = #'v1_0.transfer'{handle = ?UINT(Handle)}, Paylaod}, - State0 = #state{incoming_links = IncomingLinks}) -> - {Flows, State1} = session_flow_control_received_transfer(State0), - - {Reply, State} = - case IncomingLinks of - #{Handle := Link0} -> - case incoming_link_transfer(Performative, Paylaod, Link0, State1) of - {ok, Reply0, Link, State2} -> - {Reply0, State2#state{incoming_links = IncomingLinks#{Handle := Link}}}; - {error, Reply0} -> - %% "When an error occurs at a link endpoint, the endpoint MUST be detached - %% with appropriate error information supplied in the error field of the - %% detach frame. The link endpoint MUST then be destroyed." [2.6.5] - {Reply0, State1#state{incoming_links = maps:remove(Handle, IncomingLinks)}} - end; - _ -> - incoming_mgmt_link_transfer(Performative, Paylaod, State1) - end, - reply0(Reply ++ Flows, State); - - -%% Although the AMQP message format [3.2] requires a body, it is valid to send a transfer frame without payload. -%% For example, when a large multi transfer message is streamed using the ProtonJ2 client, the client could send -%% a final #'v1_0.transfer'{more=false} frame without a payload. -handle_control(Performative = #'v1_0.transfer'{}, State) -> - handle_control({Performative, <<>>}, State); - -%% Flow control. These frames come with two pieces of information: -%% the session window, and optionally, credit for a particular link. -%% We'll deal with each of them separately. -handle_control(#'v1_0.flow'{handle = Handle} = Flow, - #state{incoming_links = IncomingLinks, - outgoing_links = OutgoingLinks, - incoming_management_links = IncomingMgmtLinks, - outgoing_management_links = OutgoingMgmtLinks - } = State0) -> - State = session_flow_control_received_flow(Flow, State0), - S = case Handle of - undefined -> - %% "If not set, the flow frame is carrying only information - %% pertaining to the session endpoint." 
[2.7.4] - State; - ?UINT(HandleInt) -> - %% "If set, indicates that the flow frame carries flow state information - %% for the local link endpoint associated with the given handle." [2.7.4] - case OutgoingLinks of - #{HandleInt := OutgoingLink} -> - handle_outgoing_link_flow_control(OutgoingLink, Flow, State); - _ -> - case OutgoingMgmtLinks of - #{HandleInt := OutgoingMgmtLink} -> - handle_outgoing_mgmt_link_flow_control(OutgoingMgmtLink, Flow, State); - _ when is_map_key(HandleInt, IncomingLinks) orelse - is_map_key(HandleInt, IncomingMgmtLinks) -> - %% We're being told about available messages at the sender. - State; - _ -> - %% "If set to a handle that is not currently associated with - %% an attached link, the recipient MUST respond by ending the - %% session with an unattached-handle session error." [2.7.4] - rabbit_log:warning( - "Received Flow frame for unknown link handle: ~tp", [Flow]), - protocol_error( - ?V_1_0_SESSION_ERROR_UNATTACHED_HANDLE, - "Unattached link handle: ~b", [HandleInt]) - end - end - end, - {noreply, S}; - -handle_control(Detach = #'v1_0.detach'{handle = ?UINT(HandleInt)}, - State0 = #state{incoming_links = IncomingLinks, - outgoing_links = OutgoingLinks0, - outgoing_unsettled_map = Unsettled0, - outgoing_pending = Pending0, - queue_states = QStates0, - cfg = #cfg{user = #user{username = Username}}}) -> - {OutgoingLinks, Unsettled, Pending, QStates} = - case maps:take(HandleInt, OutgoingLinks0) of - {#outgoing_link{queue_name = QName}, OutgoingLinks1} -> - Ctag = handle_to_ctag(HandleInt), - {Unsettled1, Pending1} = remove_outgoing_link(Ctag, Unsettled0, Pending0), - case rabbit_amqqueue:lookup(QName) of - {ok, Q} -> - Spec = #{consumer_tag => Ctag, - reason => remove, - user => Username}, - case rabbit_queue_type:cancel(Q, Spec, QStates0) of - {ok, QStates1} -> - {OutgoingLinks1, Unsettled1, Pending1, QStates1}; - {error, Reason} -> - protocol_error( - ?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Failed to remove consumer from ~s: ~tp", - 
[rabbit_misc:rs(amqqueue:get_name(Q)), Reason]) - end; - {error, not_found} -> - {OutgoingLinks1, Unsettled1, Pending1, QStates0} - end; - error -> - {OutgoingLinks0, Unsettled0, Pending0, QStates0} - end, - - State1 = State0#state{incoming_links = maps:remove(HandleInt, IncomingLinks), - outgoing_links = OutgoingLinks, - outgoing_unsettled_map = Unsettled, - outgoing_pending = Pending, - queue_states = QStates}, - State = maybe_detach_mgmt_link(HandleInt, State1), - maybe_detach_reply(Detach, State, State0), - publisher_or_consumer_deleted(State, State0), - {noreply, State}; - -handle_control(#'v1_0.end'{}, - State0 = #state{cfg = #cfg{writer_pid = WriterPid, - channel_num = Ch}}) -> - State = send_delivery_state_changes(State0), - ok = try rabbit_amqp_writer:send_command_sync(WriterPid, Ch, #'v1_0.end'{}) - catch exit:{Reason, {gen_server, call, _ArgList}} - when Reason =:= shutdown orelse - Reason =:= noproc -> - %% AMQP connection and therefore the writer process got already terminated - %% before we had the chance to synchronously end the session. - ok - end, - {stop, normal, State}; - -handle_control(#'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER, - first = ?UINT(First), - last = Last0, - state = Outcome, - settled = DispositionSettled} = Disposition, - #state{outgoing_unsettled_map = UnsettledMap0, - queue_states = QStates0} = State0) -> - Last = case Last0 of - ?UINT(L) -> - L; - undefined -> - %% "If not set, this is taken to be the same as first." [2.7.6] - First - end, - UnsettledMapSize = map_size(UnsettledMap0), - case UnsettledMapSize of - 0 -> - {noreply, State0}; - _ -> - DispositionRangeSize = diff(Last, First) + 1, - {Settled, UnsettledMap} = - case DispositionRangeSize =< UnsettledMapSize of - true -> - %% It is cheaper to iterate over the range of settled delivery IDs. - serial_number:foldl(fun settle_delivery_id/2, - {#{}, UnsettledMap0}, - First, Last); - false -> - %% It is cheaper to iterate over the outgoing unsettled map. 
- Iter = maps:iterator(UnsettledMap0, - fun(D1, D2) -> compare(D1, D2) =/= greater end), - {Settled0, UnsettledList} = - maps:fold( - fun (DeliveryId, - #outgoing_unsettled{queue_name = QName, - consumer_tag = Ctag, - msg_id = MsgId} = Unsettled, - {SettledAcc, UnsettledAcc}) -> - case serial_number:in_range(DeliveryId, First, Last) of - true -> - SettledAcc1 = maps_update_with( - {QName, Ctag}, - fun(MsgIds) -> [MsgId | MsgIds] end, - [MsgId], - SettledAcc), - {SettledAcc1, UnsettledAcc}; - false -> - {SettledAcc, [{DeliveryId, Unsettled} | UnsettledAcc]} - end - end, - {#{}, []}, Iter), - {Settled0, maps:from_list(UnsettledList)} - end, - - SettleOp = settle_op_from_outcome(Outcome), - {QStates, Actions} = - maps:fold( - fun({QName, Ctag}, MsgIdsRev, {QS0, ActionsAcc}) -> - MsgIds = lists:reverse(MsgIdsRev), - case rabbit_queue_type:settle(QName, SettleOp, Ctag, MsgIds, QS0) of - {ok, QS, Actions0} -> - messages_acknowledged(SettleOp, QName, QS, MsgIds), - {QS, ActionsAcc ++ Actions0}; - {protocol_error, _ErrorType, Reason, ReasonArgs} -> - protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - Reason, ReasonArgs) - end - end, {QStates0, []}, Settled), - - State1 = State0#state{outgoing_unsettled_map = UnsettledMap, - queue_states = QStates}, - Reply = case DispositionSettled of - true -> []; - false -> [Disposition#'v1_0.disposition'{settled = true, - role = ?AMQP_ROLE_SENDER}] - end, - State = handle_queue_actions(Actions, State1), - reply0(Reply, State) - end; - -handle_control(Frame, _State) -> - protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, - "Unexpected frame ~tp", - [amqp10_framing:pprint(Frame)]). + end. send_pending(#state{remote_incoming_window = RemoteIncomingWindow, outgoing_pending = Buf0 @@ -1392,16 +1459,11 @@ send_pending(#state{remote_incoming_window = RemoteIncomingWindow, end end. 
-handle_credit_reply(Action = {credit_reply, Ctag, _DeliveryCount, _Credit, _Available, Drain}, +handle_credit_reply(Action = {credit_reply, Ctag, _DeliveryCount, _Credit, _Available, _Drain}, State = #state{outgoing_links = OutgoingLinks}) -> Handle = ctag_to_handle(Ctag), case OutgoingLinks of - #{Handle := Link = #outgoing_link{queue_flow_ctl = QFC, - credit_req_in_flight = CreditReqInFlight}} -> - %% Assert that we expect a credit reply for this consumer. - true = CreditReqInFlight, - %% Assert that "The sender's value is always the last known value indicated by the receiver." - Drain = QFC#queue_flow_ctl.drain, + #{Handle := Link} -> handle_credit_reply0(Action, Handle, Link, State); _ -> %% Ignore credit reply for a detached link. @@ -1418,18 +1480,16 @@ handle_credit_reply0( echo = CEcho }, queue_flow_ctl = #queue_flow_ctl{ - delivery_count = QDeliveryCount, - credit = QCredit, - desired_credit = DesiredCredit - } = QFC, + delivery_count = QDeliveryCount + } = QFC0, stashed_credit_req = StashedCreditReq } = Link0, #state{outgoing_links = OutgoingLinks, queue_states = QStates0 } = S0) -> - %% Assert that flow control state between us and the queue is in sync. - QCredit = Credit, + %% Assertion: Our (receiver) delivery-count should be always + %% in sync with the delivery-count of the sending queue. QDeliveryCount = DeliveryCount, case StashedCreditReq of @@ -1439,24 +1499,32 @@ handle_credit_reply0( S = pop_credit_req(Handle, Ctag, Link0, S0), echo(CEcho, Handle, CDeliveryCount, CCredit, Available, S), S; - none when QCredit =:= 0 andalso - DesiredCredit > 0 -> + none when Credit =:= 0 andalso + CCredit > 0 -> QName = Link0#outgoing_link.queue_name, %% Provide queue next batch of credits. 
- CappedCredit = cap_credit(DesiredCredit), + CappedCredit = cap_credit(CCredit, S0#state.cfg#cfg.max_queue_credit), {ok, QStates, Actions} = rabbit_queue_type:credit( QName, Ctag, DeliveryCount, CappedCredit, false, QStates0), Link = Link0#outgoing_link{ - queue_flow_ctl = QFC#queue_flow_ctl{credit = CappedCredit} - }, + queue_flow_ctl = QFC0#queue_flow_ctl{credit = CappedCredit}, + at_least_one_credit_req_in_flight = true}, S = S0#state{queue_states = QStates, outgoing_links = OutgoingLinks#{Handle := Link}}, handle_queue_actions(Actions, S); none -> - Link = Link0#outgoing_link{credit_req_in_flight = false}, + %% Although we (the receiver) usually determine link credit, we set here + %% our link credit to what the queue says our link credit is (which is safer + %% in case credit requests got applied out of order in quorum queues). + %% This should be fine given that we asserted earlier that our delivery-count is + %% in sync with the delivery-count of the sending queue. + QFC = QFC0#queue_flow_ctl{credit = Credit}, + Link = Link0#outgoing_link{ + queue_flow_ctl = QFC, + at_least_one_credit_req_in_flight = false}, S = S0#state{outgoing_links = OutgoingLinks#{Handle := Link}}, - echo(CEcho, Handle, CDeliveryCount, DesiredCredit, Available, S), + echo(CEcho, Handle, CDeliveryCount, CCredit, Available, S), S end; handle_credit_reply0( @@ -1465,14 +1533,16 @@ handle_credit_reply0( Link0 = #outgoing_link{ queue_name = QName, client_flow_ctl = #client_flow_ctl{ - delivery_count = CDeliveryCount0 } = CFC, + delivery_count = CDeliveryCount0, + credit = CCredit + } = CFC, queue_flow_ctl = #queue_flow_ctl{ - delivery_count = QDeliveryCount0, - desired_credit = DesiredCredit + delivery_count = QDeliveryCount0 } = QFC, stashed_credit_req = StashedCreditReq}, S0 = #state{cfg = #cfg{writer_pid = Writer, - channel_num = ChanNum}, + channel_num = ChanNum, + max_queue_credit = MaxQueueCredit}, outgoing_links = OutgoingLinks, queue_states = QStates0}) -> %% If the queue sent us a 
drain credit_reply, @@ -1480,31 +1550,38 @@ handle_credit_reply0( 0 = Credit, case DeliveryCount =:= QDeliveryCount0 andalso - DesiredCredit > 0 of + CCredit > 0 of true -> %% We're in drain mode. The queue did not advance its delivery-count which means - %% it might still have messages available for us. We also desire more messages. + %% it might still have messages available for us. The client also desires more messages. %% Therefore, we do the next round of credit top-up. We prioritise finishing %% the current drain credit top-up rounds over a stashed credit request because %% this is easier to reason about and the queue will reply promptly meaning %% the stashed request will be processed soon enough. - CappedCredit = cap_credit(DesiredCredit), - Link = Link0#outgoing_link{queue_flow_ctl = QFC#queue_flow_ctl{credit = CappedCredit}}, - - {ok, QStates, Actions} = - rabbit_queue_type:credit( - QName, Ctag, DeliveryCount, CappedCredit, true, QStates0), + CappedCredit = cap_credit(CCredit, MaxQueueCredit), + {ok, QStates, Actions} = rabbit_queue_type:credit( + QName, Ctag, DeliveryCount, + CappedCredit, true, QStates0), + Link = Link0#outgoing_link{ + queue_flow_ctl = QFC#queue_flow_ctl{credit = CappedCredit}, + at_least_one_credit_req_in_flight = true}, S = S0#state{queue_states = QStates, outgoing_links = OutgoingLinks#{Handle := Link}}, handle_queue_actions(Actions, S); false -> + case compare(DeliveryCount, QDeliveryCount0) of + equal -> ok; + greater -> ok; %% the sending queue advanced its delivery-count + less -> error({unexpected_delivery_count, DeliveryCount, QDeliveryCount0}) + end, + %% We're in drain mode. %% The queue either advanced its delivery-count which means it has - %% no more messages available for us, or we do not desire more messages. + %% no more messages available for us, or the client does not desire more messages. 
%% Therefore, we're done with draining and we "the sender will (after sending %% all available messages) advance the delivery-count as much as possible, %% consuming all link-credit, and send the flow state to the receiver." - CDeliveryCount = add(CDeliveryCount0, DesiredCredit), + CDeliveryCount = add(CDeliveryCount0, CCredit), Flow0 = #'v1_0.flow'{handle = ?UINT(Handle), delivery_count = ?UINT(CDeliveryCount), link_credit = ?UINT(0), @@ -1519,9 +1596,8 @@ handle_credit_reply0( queue_flow_ctl = QFC#queue_flow_ctl{ delivery_count = DeliveryCount, credit = 0, - desired_credit = 0, drain = false}, - credit_req_in_flight = false + at_least_one_credit_req_in_flight = false }, S = S0#state{outgoing_links = OutgoingLinks#{Handle := Link}}, case StashedCreditReq of @@ -1548,24 +1624,23 @@ pop_credit_req( drain = Drain, echo = Echo }}, - S0 = #state{outgoing_links = OutgoingLinks, + S0 = #state{cfg = #cfg{max_queue_credit = MaxQueueCredit}, + outgoing_links = OutgoingLinks, queue_states = QStates0}) -> LinkCreditSnd = amqp10_util:link_credit_snd( DeliveryCountRcv, LinkCreditRcv, CDeliveryCount), - CappedCredit = cap_credit(LinkCreditSnd), - {ok, QStates, Actions} = - rabbit_queue_type:credit( - QName, Ctag, QDeliveryCount, CappedCredit, Drain, QStates0), + CappedCredit = cap_credit(LinkCreditSnd, MaxQueueCredit), + {ok, QStates, Actions} = rabbit_queue_type:credit( + QName, Ctag, QDeliveryCount, + CappedCredit, Drain, QStates0), Link = Link0#outgoing_link{ client_flow_ctl = CFC#client_flow_ctl{ credit = LinkCreditSnd, echo = Echo}, queue_flow_ctl = QFC#queue_flow_ctl{ credit = CappedCredit, - desired_credit = LinkCreditSnd, - drain = Drain - }, - credit_req_in_flight = true, + drain = Drain}, + at_least_one_credit_req_in_flight = true, stashed_credit_req = none }, S = S0#state{queue_states = QStates, @@ -1685,10 +1760,12 @@ sent_pending_delivery( credit_api_version = CreditApiVsn, client_flow_ctl = CFC0, queue_flow_ctl = QFC0, - credit_req_in_flight = CreditReqInFlight0 + 
at_least_one_credit_req_in_flight = CreditReqInFlight0 } = Link0 = maps:get(Handle, OutgoingLinks0), S = case CreditApiVsn of + 1 -> + S0; 2 -> #client_flow_ctl{ delivery_count = CDeliveryCount0, @@ -1696,8 +1773,7 @@ sent_pending_delivery( } = CFC0, #queue_flow_ctl{ delivery_count = QDeliveryCount0, - credit = QCredit0, - desired_credit = DesiredCredit0 + credit = QCredit0 } = QFC0, CDeliveryCount = add(CDeliveryCount0, 1), @@ -1715,17 +1791,17 @@ sent_pending_delivery( QDeliveryCount = add(QDeliveryCount0, 1), QCredit1 = max(0, QCredit0 - 1), - DesiredCredit = max(0, DesiredCredit0 - 1), {QCredit, CreditReqInFlight, QStates, Actions} = case QCredit1 =:= 0 andalso - DesiredCredit > 0 andalso + CCredit > 0 andalso not CreditReqInFlight0 of true -> %% assertion none = Link0#outgoing_link.stashed_credit_req, %% Provide queue next batch of credits. - CappedCredit = cap_credit(DesiredCredit), + CappedCredit = cap_credit(CCredit, + S0#state.cfg#cfg.max_queue_credit), {ok, QStates1, Actions0} = rabbit_queue_type:credit( QName, Ctag, QDeliveryCount, CappedCredit, @@ -1740,17 +1816,15 @@ sent_pending_delivery( credit = CCredit}, QFC = QFC0#queue_flow_ctl{ delivery_count = QDeliveryCount, - credit = QCredit, - desired_credit = DesiredCredit}, - Link = Link0#outgoing_link{client_flow_ctl = CFC, - queue_flow_ctl = QFC, - credit_req_in_flight = CreditReqInFlight}, + credit = QCredit}, + Link = Link0#outgoing_link{ + client_flow_ctl = CFC, + queue_flow_ctl = QFC, + at_least_one_credit_req_in_flight = CreditReqInFlight}, OutgoingLinks = OutgoingLinks0#{Handle := Link}, S1 = S0#state{outgoing_links = OutgoingLinks, queue_states = QStates}, - handle_queue_actions(Actions, S1); - 1 -> - S0 + handle_queue_actions(Actions, S1) end, record_outgoing_unsettled(Pending, S). @@ -1769,18 +1843,14 @@ record_outgoing_unsettled(#pending_delivery{queue_ack_required = false}, State) %% Also, queue client already acked to queue on behalf of us. State. 
-reply0([], State) -> - {noreply, State}; -reply0(Reply, State) -> - {reply, session_flow_fields(Reply, State), State}. - %% Implements section "receiving a transfer" in 2.5.6 session_flow_control_received_transfer( #state{next_incoming_id = NextIncomingId, incoming_window = InWindow0, remote_outgoing_window = RemoteOutgoingWindow, cfg = #cfg{incoming_window_margin = Margin, - resource_alarms = Alarms} + resource_alarms = Alarms, + max_incoming_window = MaxIncomingWindow} } = State) -> InWindow1 = InWindow0 - 1, case InWindow1 < -Margin of @@ -1792,12 +1862,12 @@ session_flow_control_received_transfer( false -> ok end, - {Flows, InWindow} = case InWindow1 =< (?MAX_INCOMING_WINDOW div 2) andalso + {Flows, InWindow} = case InWindow1 =< (MaxIncomingWindow div 2) andalso sets:is_empty(Alarms) of true -> %% We've reached halfway and there are no %% disk or memory alarm, open the window. - {[#'v1_0.flow'{}], ?MAX_INCOMING_WINDOW}; + {[#'v1_0.flow'{}], MaxIncomingWindow}; false -> {[], InWindow1} end, @@ -1834,31 +1904,39 @@ settle_op_from_outcome(#'v1_0.rejected'{}) -> discard; settle_op_from_outcome(#'v1_0.released'{}) -> requeue; -%% Keep the same Modified behaviour as in RabbitMQ 3.x -settle_op_from_outcome(#'v1_0.modified'{delivery_failed = true, - undeliverable_here = UndelHere}) - when UndelHere =/= true -> - requeue; -settle_op_from_outcome(#'v1_0.modified'{}) -> - %% If delivery_failed is not true, we can't increment its delivery_count. - %% So, we will have to reject without requeue. - %% - %% If undeliverable_here is true, this is not quite correct because - %% undeliverable_here refers to the link, and not the message in general. - %% However, we cannot filter messages from being assigned to individual consumers. - %% That's why we will have to reject it without requeue. - discard; + +%% Not all queue types support the modified outcome fields correctly. 
+%% However, we still allow the client to settle with the modified outcome +%% because some client libraries such as Apache QPid make use of it: +%% https://github.com/apache/qpid-jms/blob/90eb60f59cb59b7b9ad8363ee8a843d6903b8e77/qpid-jms-client/src/main/java/org/apache/qpid/jms/JmsMessageConsumer.java#L464 +%% In such cases, it's better when RabbitMQ does not end the session. +%% See https://github.com/rabbitmq/rabbitmq-server/issues/6121 +settle_op_from_outcome(#'v1_0.modified'{delivery_failed = DelFailed, + undeliverable_here = UndelHere, + message_annotations = Anns0}) -> + Anns = case Anns0 of + undefined -> + #{}; + {map, KVList} -> + Anns1 = lists:map( + %% "all symbolic keys except those beginning with "x-" are reserved." [3.2.10] + fun({{symbol, <<"x-", _/binary>> = K}, V}) -> + {K, unwrap(V)} + end, KVList), + maps:from_list(Anns1) + end, + {modify, + default(DelFailed, false), + default(UndelHere, false), + Anns}; settle_op_from_outcome(Outcome) -> protocol_error( ?V_1_0_AMQP_ERROR_INVALID_FIELD, "Unrecognised state: ~tp in DISPOSITION", [Outcome]). --spec flow({uint, link_handle()}, sequence_no()) -> #'v1_0.flow'{}. -flow(Handle, DeliveryCount) -> - flow(Handle, DeliveryCount, ?LINK_CREDIT_RCV). - --spec flow({uint, link_handle()}, sequence_no(), non_neg_integer()) -> #'v1_0.flow'{}. +-spec flow({uint, link_handle()}, sequence_no(), rabbit_queue_type:credit()) -> + #'v1_0.flow'{}. flow(Handle, DeliveryCount, LinkCredit) -> #'v1_0.flow'{handle = Handle, delivery_count = ?UINT(DeliveryCount), @@ -1952,7 +2030,7 @@ handle_queue_actions(Actions, State) -> S0 = #state{outgoing_links = OutgoingLinks0, outgoing_pending = Pending}) -> %% credit API v1 - %% Delete this branch when feature flag credit_api_v2 becomes required. + %% Delete this branch when feature flag rabbitmq_4.0.0 becomes required. 
Handle = ctag_to_handle(Ctag), Link = #outgoing_link{delivery_count = Count0} = maps:get(Handle, OutgoingLinks0), {Count, Credit, S} = case Drain of @@ -2201,6 +2279,7 @@ incoming_link_transfer( settled = Settled}, MsgPart, Link0 = #incoming_link{ + max_message_size = MaxMessageSize, multi_transfer_msg = Multi = #multi_transfer_msg{ payload_fragments_rev = PFR0, delivery_id = FirstDeliveryId, @@ -2210,7 +2289,7 @@ incoming_link_transfer( validate_multi_transfer_delivery_id(DeliveryId, FirstDeliveryId), validate_multi_transfer_settled(Settled, FirstSettled), PFR = [MsgPart | PFR0], - validate_incoming_message_size(PFR), + validate_message_size(PFR, MaxMessageSize), Link = Link0#incoming_link{multi_transfer_msg = Multi#multi_transfer_msg{payload_fragments_rev = PFR}}, {ok, [], Link, State}; incoming_link_transfer( @@ -2228,8 +2307,10 @@ incoming_link_transfer( rcv_settle_mode = RcvSettleMode, handle = Handle = ?UINT(HandleInt)}, MsgPart, - #incoming_link{exchange = LinkExchange, + #incoming_link{snd_settle_mode = SndSettleMode, + exchange = LinkExchange, routing_key = LinkRKey, + max_message_size = MaxMessageSize, delivery_count = DeliveryCount0, incoming_unconfirmed_map = U0, credit = Credit0, @@ -2242,7 +2323,8 @@ incoming_link_transfer( vhost = Vhost, trace_state = Trace, conn_name = ConnName, - channel_num = ChannelNum}}) -> + channel_num = ChannelNum, + max_link_credit = MaxLinkCredit}}) -> {PayloadBin, DeliveryId, Settled} = case MultiTransfer of @@ -2257,8 +2339,12 @@ incoming_link_transfer( ok = validate_multi_transfer_settled(MaybeSettled, FirstSettled), {MsgBin0, FirstDeliveryId, FirstSettled} end, + validate_transfer_snd_settle_mode(SndSettleMode, Settled), validate_transfer_rcv_settle_mode(RcvSettleMode, Settled), - validate_incoming_message_size(PayloadBin), + PayloadSize = iolist_size(PayloadBin), + validate_message_size(PayloadSize, MaxMessageSize), + rabbit_msg_size_metrics:observe(?PROTOCOL, PayloadSize), + messages_received(Settled), Mc0 = 
mc:init(mc_amqp, PayloadBin, #{}), case lookup_target(LinkExchange, LinkRKey, Mc0, Vhost, User, PermCache0) of @@ -2267,7 +2353,6 @@ incoming_link_transfer( check_user_id(Mc2, User), TopicPermCache = check_write_permitted_on_topic( X, User, RoutingKey, TopicPermCache0), - messages_received(Settled), QNames = rabbit_exchange:route(X, Mc2, #{return_binding_keys => true}), rabbit_trace:tap_in(Mc2, QNames, ConnName, ChannelNum, Username, Trace), Opts = #{correlation => {HandleInt, DeliveryId}}, @@ -2287,7 +2372,8 @@ incoming_link_transfer( DeliveryCount = add(DeliveryCount0, 1), Credit1 = Credit0 - 1, {Credit, Reply1} = maybe_grant_link_credit( - Credit1, DeliveryCount, map_size(U), Handle), + Credit1, MaxLinkCredit, + DeliveryCount, map_size(U), Handle), Reply = Reply0 ++ Reply1, Link = Link0#incoming_link{ delivery_count = DeliveryCount, @@ -2302,9 +2388,34 @@ incoming_link_transfer( [DeliveryTag, DeliveryId, Reason]) end; {error, #'v1_0.error'{} = Err} -> - Disposition = released(DeliveryId), - Detach = detach(HandleInt, Link0, Err), - {error, [Disposition, Detach]} + Disposition = case Settled of + true -> []; + false -> [released(DeliveryId)] + end, + Detach = [detach(HandleInt, Link0, Err)], + {error, Disposition ++ Detach}; + {error, anonymous_terminus, #'v1_0.error'{} = Err} -> + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-routingerrors + case Settled of + true -> + Info = {map, [{{symbol, <<"delivery-tag">>}, DeliveryTag}]}, + Err1 = Err#'v1_0.error'{info = Info}, + Detach = detach(HandleInt, Link0, Err1), + {error, [Detach]}; + false -> + Disposition = rejected(DeliveryId, Err), + DeliveryCount = add(DeliveryCount0, 1), + Credit1 = Credit0 - 1, + {Credit, Reply0} = maybe_grant_link_credit( + Credit1, MaxLinkCredit, + DeliveryCount, map_size(U0), Handle), + Reply = [Disposition | Reply0], + Link = Link0#incoming_link{ + delivery_count = DeliveryCount, + credit = Credit, + multi_transfer_msg = undefined}, + {ok, Reply, 
Link, State0} + end end. lookup_target(#exchange{} = X, LinkRKey, Mc, _, _, PermCache) -> @@ -2328,16 +2439,16 @@ lookup_target(to, to, Mc, Vhost, User, PermCache0) -> check_internal_exchange(X), lookup_routing_key(X, RKey, Mc, PermCache); {error, not_found} -> - {error, error_not_found(XName)} + {error, anonymous_terminus, error_not_found(XName)} end; {error, bad_address} -> - {error, + {error, anonymous_terminus, #'v1_0.error'{ condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, description = {utf8, <<"bad 'to' address string: ", String/binary>>}}} end; undefined -> - {error, + {error, anonymous_terminus, #'v1_0.error'{ condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, description = {utf8, <<"anonymous terminus requires 'to' address to be set">>}}} @@ -2381,36 +2492,43 @@ released(DeliveryId) -> settled = true, state = #'v1_0.released'{}}. -maybe_grant_link_credit(Credit, DeliveryCount, NumUnconfirmed, Handle) -> - case grant_link_credit(Credit, NumUnconfirmed) of +rejected(DeliveryId, Error) -> + #'v1_0.disposition'{role = ?AMQP_ROLE_RECEIVER, + first = ?UINT(DeliveryId), + settled = true, + state = #'v1_0.rejected'{error = Error}}. + +maybe_grant_link_credit(Credit, MaxLinkCredit, DeliveryCount, NumUnconfirmed, Handle) -> + case grant_link_credit(Credit, MaxLinkCredit, NumUnconfirmed) of true -> - {?LINK_CREDIT_RCV, [flow(Handle, DeliveryCount)]}; + {MaxLinkCredit, [flow(Handle, DeliveryCount, MaxLinkCredit)]}; false -> {Credit, []} end. maybe_grant_link_credit( + MaxLinkCredit, HandleInt, Link = #incoming_link{credit = Credit, incoming_unconfirmed_map = U, delivery_count = DeliveryCount}, AccMap) -> - case grant_link_credit(Credit, map_size(U)) of + case grant_link_credit(Credit, MaxLinkCredit, map_size(U)) of true -> - {Link#incoming_link{credit = ?LINK_CREDIT_RCV}, + {Link#incoming_link{credit = MaxLinkCredit}, AccMap#{HandleInt => DeliveryCount}}; false -> {Link, AccMap} end. 
-grant_link_credit(Credit, NumUnconfirmed) -> - Credit =< ?LINK_CREDIT_RCV / 2 andalso - NumUnconfirmed < ?LINK_CREDIT_RCV. +grant_link_credit(Credit, MaxLinkCredit, NumUnconfirmed) -> + Credit =< MaxLinkCredit div 2 andalso + NumUnconfirmed < MaxLinkCredit. maybe_grant_mgmt_link_credit(Credit, DeliveryCount, Handle) - when Credit =< ?MANAGEMENT_LINK_CREDIT_RCV / 2 -> - {?MANAGEMENT_LINK_CREDIT_RCV, - [flow(Handle, DeliveryCount, ?MANAGEMENT_LINK_CREDIT_RCV)]}; + when Credit =< ?MAX_MANAGEMENT_LINK_CREDIT div 2 -> + {?MAX_MANAGEMENT_LINK_CREDIT, + [flow(Handle, DeliveryCount, ?MAX_MANAGEMENT_LINK_CREDIT)]}; maybe_grant_mgmt_link_credit(Credit, _, _) -> {Credit, []}. @@ -2677,7 +2795,7 @@ handle_outgoing_link_flow_control( credit_api_version = CreditApiVsn, client_flow_ctl = CFC, queue_flow_ctl = QFC, - credit_req_in_flight = CreditReqInFlight + at_least_one_credit_req_in_flight = CreditReqInFlight } = Link0, #'v1_0.flow'{handle = ?UINT(HandleInt), delivery_count = MaybeDeliveryCountRcv, @@ -2695,26 +2813,27 @@ handle_outgoing_link_flow_control( 2 -> case CreditReqInFlight of false -> - DesiredCredit = amqp10_util:link_credit_snd( + LinkCreditSnd = amqp10_util:link_credit_snd( DeliveryCountRcv, LinkCreditRcv, CFC#client_flow_ctl.delivery_count), - CappedCredit = cap_credit(DesiredCredit), + CappedCredit = cap_credit(LinkCreditSnd, + State0#state.cfg#cfg.max_queue_credit), Link = Link0#outgoing_link{ - credit_req_in_flight = true, client_flow_ctl = CFC#client_flow_ctl{ - credit = DesiredCredit, + credit = LinkCreditSnd, echo = Echo}, queue_flow_ctl = QFC#queue_flow_ctl{ credit = CappedCredit, - desired_credit = DesiredCredit, - drain = Drain}}, + drain = Drain}, + at_least_one_credit_req_in_flight = true}, {ok, QStates, Actions} = rabbit_queue_type:credit( QName, Ctag, QFC#queue_flow_ctl.delivery_count, CappedCredit, Drain, QStates0), - State = State0#state{queue_states = QStates, - outgoing_links = OutgoingLinks#{HandleInt := Link}}, + State = State0#state{ + 
queue_states = QStates, + outgoing_links = OutgoingLinks#{HandleInt := Link}}, handle_queue_actions(Actions, State); true -> %% A credit request is currently in-flight. Let's first process its reply @@ -2758,7 +2877,7 @@ delivery_count_rcv(undefined) -> %% credits to a queue has to synchronously wait for a credit reply from the queue: %% https://github.com/rabbitmq/rabbitmq-server/blob/b9566f4d02f7ceddd2f267a92d46affd30fb16c8/deps/rabbitmq_codegen/credit_extension.json#L43 %% This blocks our entire AMQP 1.0 session process. Since the credit reply from the -%% queue did not contain the consumr tag prior to feature flag credit_api_v2, we +%% queue did not contain the consumr tag prior to feature flag rabbitmq_4.0.0, we %% must behave here the same way as non-native AMQP 1.0: We wait until the queue %% sends us a credit reply sucht that we can correlate that reply with our consumer tag. process_credit_reply_sync( @@ -2823,7 +2942,7 @@ process_credit_reply_sync_quorum_queue(Ctag, QName, Credit, State0) -> no_return(). credit_reply_timeout(QType, QName) -> Fmt = "Timed out waiting for credit reply from ~s ~s. " - "Hint: Enable feature flag credit_api_v2", + "Hint: Enable feature flag rabbitmq_4.0.0", Args = [QType, rabbit_misc:rs(QName)], rabbit_log:error(Fmt, Args), protocol_error(?V_1_0_AMQP_ERROR_INTERNAL_ERROR, Fmt, Args). @@ -2831,6 +2950,15 @@ credit_reply_timeout(QType, QName) -> default(undefined, Default) -> Default; default(Thing, _Default) -> Thing. +snd_settle_mode({ubyte, Val}) -> + case Val of + 0 -> unsettled; + 1 -> settled; + 2 -> mixed + end; +snd_settle_mode(undefined) -> + mixed. + transfer_frames(Transfer, Sections, unlimited) -> [[Transfer, Sections]]; transfer_frames(Transfer, Sections, MaxFrameSize) -> @@ -2976,6 +3104,22 @@ validate_multi_transfer_settled(Other, First) "(interpreted) field 'settled' on first transfer (~p)", [Other, First]). 
+validate_transfer_snd_settle_mode(mixed, _Settled) -> + ok; +validate_transfer_snd_settle_mode(unsettled, false) -> + %% "If the negotiated value for snd-settle-mode at attachment is unsettled, + %% then this field MUST be false (or unset) on every transfer frame for a delivery" [2.7.5] + ok; +validate_transfer_snd_settle_mode(settled, true) -> + %% "If the negotiated value for snd-settle-mode at attachment is settled, + %% then this field MUST be true on at least one transfer frame for a delivery" [2.7.5] + ok; +validate_transfer_snd_settle_mode(SndSettleMode, Settled) -> + protocol_error( + ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + "sender settle mode is '~s' but transfer settled flag is interpreted as being '~s'", + [SndSettleMode, Settled]). + %% "If the message is being sent settled by the sender, %% the value of this field [rcv-settle-mode] is ignored." [2.7.5] validate_transfer_rcv_settle_mode(?V_1_0_RECEIVER_SETTLE_MODE_SECOND, _Settled = false) -> @@ -2983,14 +3127,10 @@ validate_transfer_rcv_settle_mode(?V_1_0_RECEIVER_SETTLE_MODE_SECOND, _Settled = validate_transfer_rcv_settle_mode(_, _) -> ok. -validate_incoming_message_size(Message) -> - validate_message_size(Message, persistent_term:get(max_message_size)). - validate_message_size(_, unlimited) -> ok; -validate_message_size(Message, MaxMsgSize) - when is_integer(MaxMsgSize) -> - MsgSize = iolist_size(Message), +validate_message_size(MsgSize, MaxMsgSize) + when is_integer(MsgSize) -> case MsgSize =< MaxMsgSize of true -> ok; @@ -2999,12 +3139,14 @@ validate_message_size(Message, MaxMsgSize) %% We apply that sentence to both incoming messages that are too large for us and outgoing messages that are %% too large for the client. %% This is an interesting protocol difference to MQTT where we instead discard outgoing messages that are too - %% large to send then behave as if we had completed sending that message [MQTT 5.0, MQTT-3.1.2-25]. 
+ %% large to send and then behave as if we had completed sending that message [MQTT 5.0, MQTT-3.1.2-25]. protocol_error( ?V_1_0_LINK_ERROR_MESSAGE_SIZE_EXCEEDED, "message size (~b bytes) > maximum message size (~b bytes)", [MsgSize, MaxMsgSize]) - end. + end; +validate_message_size(Msg, MaxMsgSize) -> + validate_message_size(iolist_size(Msg), MaxMsgSize). -spec ensure_terminus(source | target, term(), @@ -3195,26 +3337,22 @@ publisher_or_consumer_deleted( %% If we previously already sent a detach with an error condition, and the Detach we %% receive here is therefore the client's reply, do not reply again with a 3rd detach. -maybe_detach_reply( - Detach, - #state{incoming_links = NewIncomingLinks, - outgoing_links = NewOutgoingLinks, - incoming_management_links = NewIncomingMgmtLinks, - outgoing_management_links = NewOutgoingMgmtLinks, - cfg = #cfg{writer_pid = WriterPid, - channel_num = Ch}}, - #state{incoming_links = OldIncomingLinks, - outgoing_links = OldOutgoingLinks, - incoming_management_links = OldIncomingMgmtLinks, - outgoing_management_links = OldOutgoingMgmtLinks}) +detach_reply(Detach, + #state{incoming_links = NewIncomingLinks, + outgoing_links = NewOutgoingLinks, + incoming_management_links = NewIncomingMgmtLinks, + outgoing_management_links = NewOutgoingMgmtLinks}, + #state{incoming_links = OldIncomingLinks, + outgoing_links = OldOutgoingLinks, + incoming_management_links = OldIncomingMgmtLinks, + outgoing_management_links = OldOutgoingMgmtLinks}) when map_size(NewIncomingLinks) < map_size(OldIncomingLinks) orelse map_size(NewOutgoingLinks) < map_size(OldOutgoingLinks) orelse map_size(NewIncomingMgmtLinks) < map_size(OldIncomingMgmtLinks) orelse map_size(NewOutgoingMgmtLinks) < map_size(OldOutgoingMgmtLinks) -> - Reply = Detach#'v1_0.detach'{error = undefined}, - rabbit_amqp_writer:send_command(WriterPid, Ch, Reply); -maybe_detach_reply(_, _, _) -> - ok. + [Detach#'v1_0.detach'{error = undefined}]; +detach_reply(_, _, _) -> + []. 
-spec maybe_detach_mgmt_link(link_handle(), state()) -> state(). maybe_detach_mgmt_link( @@ -3396,18 +3534,27 @@ error_not_found(Resource) -> condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, description = {utf8, Description}}. --spec cap_credit(rabbit_queue_type:credit()) -> - 0..?LINK_CREDIT_RCV_FROM_QUEUE_MAX. -cap_credit(DesiredCredit) -> - min(DesiredCredit, ?LINK_CREDIT_RCV_FROM_QUEUE_MAX). +is_valid_max(Val) -> + is_integer(Val) andalso + Val > 0 andalso + Val =< ?UINT_MAX. + +pg_scope() -> + rabbit:pg_local_scope(amqp_session). + +-spec cap_credit(rabbit_queue_type:credit(), pos_integer()) -> + rabbit_queue_type:credit(). +cap_credit(DesiredCredit, MaxCredit) -> + min(DesiredCredit, MaxCredit). ensure_mc_cluster_compat(Mc) -> - IsEnabled = rabbit_feature_flags:is_enabled(message_containers_store_amqp_v1), + Feature = 'rabbitmq_4.0.0', + IsEnabled = rabbit_feature_flags:is_enabled(Feature), case IsEnabled of true -> Mc; false -> - McEnv = #{message_containers_store_amqp_v1 => IsEnabled}, + McEnv = #{Feature => IsEnabled}, %% other nodes in the cluster may not understand the new internal %% amqp mc format - in this case we convert to AMQP legacy format %% for compatibility @@ -3458,3 +3605,8 @@ format_status( permission_cache => PermissionCache, topic_permission_cache => TopicPermissionCache}, maps:update(state, State, Status). + +unwrap({_Tag, V}) -> + V; +unwrap(V) -> + V. diff --git a/deps/rabbit/src/rabbit_amqp_writer.erl b/deps/rabbit/src/rabbit_amqp_writer.erl index 40f2ba70c5c0..7b239a10a107 100644 --- a/deps/rabbit/src/rabbit_amqp_writer.erl +++ b/deps/rabbit/src/rabbit_amqp_writer.erl @@ -11,7 +11,7 @@ -include("rabbit_amqp.hrl"). 
%% client API --export([start_link/3, +-export([start_link/2, send_command/3, send_command/4, send_command_sync/3, @@ -27,7 +27,6 @@ -record(state, { sock :: rabbit_net:socket(), - max_frame_size :: unlimited | pos_integer(), reader :: rabbit_types:connection(), pending :: iolist(), %% This field is just an optimisation to minimize the cost of erlang:iolist_size/1 @@ -46,10 +45,10 @@ %%% client API %%% %%%%%%%%%%%%%%%%%% --spec start_link (rabbit_net:socket(), non_neg_integer(), pid()) -> +-spec start_link (rabbit_net:socket(), pid()) -> rabbit_types:ok(pid()). -start_link(Sock, MaxFrame, ReaderPid) -> - Args = {Sock, MaxFrame, ReaderPid}, +start_link(Sock, ReaderPid) -> + Args = {Sock, ReaderPid}, Opts = [{hibernate_after, ?HIBERNATE_AFTER}], gen_server:start_link(?MODULE, Args, Opts). @@ -75,7 +74,7 @@ send_command_sync(Writer, ChannelNum, Performative) -> Request = {send_command, ChannelNum, Performative}, gen_server:call(Writer, Request, ?CALL_TIMEOUT). -%% Delete this function when feature flag credit_api_v2 becomes required. +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. -spec send_command_and_notify(pid(), pid(), rabbit_types:channel_number(), @@ -96,9 +95,8 @@ internal_send_command(Sock, Performative, Protocol) -> %%% gen_server callbacks %%% %%%%%%%%%%%%%%%%%%%%%%%%%%%% -init({Sock, MaxFrame, ReaderPid}) -> +init({Sock, ReaderPid}) -> State = #state{sock = Sock, - max_frame_size = MaxFrame, reader = ReaderPid, pending = [], pending_size = 0, @@ -113,7 +111,7 @@ handle_cast({send_command, SessionPid, ChannelNum, Performative, Payload}, State State1 = internal_send_command_async(ChannelNum, Performative, Payload, State0), State = credit_flow_ack(SessionPid, State1), no_reply(State); -%% Delete below function clause when feature flag credit_api_v2 becomes required. +%% Delete below function clause when feature flag rabbitmq_4.0.0 becomes required. 
handle_cast({send_command_and_notify, QueuePid, SessionPid, ChannelNum, Performative, Payload}, State0) -> State1 = internal_send_command_async(ChannelNum, Performative, Payload, State0), State = credit_flow_ack(SessionPid, State1), @@ -133,7 +131,7 @@ handle_info({{'DOWN', session}, _MRef, process, SessionPid, _Reason}, credit_flow:peer_down(SessionPid), State = State0#state{monitored_sessions = maps:remove(SessionPid, Sessions)}, no_reply(State); -%% Delete below function clause when feature flag credit_api_v2 becomes required. +%% Delete below function clause when feature flag rabbitmq_4.0.0 becomes required. handle_info({'DOWN', _MRef, process, QueuePid, _Reason}, State) -> rabbit_amqqueue:notify_sent_queue_down(QueuePid), no_reply(State). @@ -142,12 +140,10 @@ format_status(Status) -> maps:update_with( state, fun(#state{sock = Sock, - max_frame_size = MaxFrame, reader = Reader, pending = Pending, pending_size = PendingSize}) -> #{socket => Sock, - max_frame_size => MaxFrame, reader => Reader, %% Below 2 fields should always have the same value. pending => iolist_size(Pending), @@ -189,12 +185,11 @@ internal_send_command_async(Channel, Performative, pending_size = PendingSize + iolist_size(Frame)}). internal_send_command_async(Channel, Performative, Payload, - State = #state{max_frame_size = MaxFrame, - pending = Pending, + State = #state{pending = Pending, pending_size = PendingSize}) -> - Frames = assemble_frame(Channel, Performative, Payload, MaxFrame), - maybe_flush(State#state{pending = [Frames | Pending], - pending_size = PendingSize + iolist_size(Frames)}). + Frame = assemble_frame_with_payload(Channel, Performative, Payload), + maybe_flush(State#state{pending = [Frame | Pending], + pending_size = PendingSize + iolist_size(Frame)}). assemble_frame(Channel, Performative) -> assemble_frame(Channel, Performative, amqp10_framing). 
@@ -210,8 +205,7 @@ assemble_frame(Channel, Performative, rabbit_amqp_sasl) -> PerfBin = amqp10_framing:encode_bin(Performative), amqp10_binary_generator:build_frame(Channel, ?AMQP_SASL_FRAME_TYPE, PerfBin). -%%TODO respect MaxFrame -assemble_frame(Channel, Performative, Payload, _MaxFrame) -> +assemble_frame_with_payload(Channel, Performative, Payload) -> ?TRACE("channel ~b <-~n ~tp~n followed by ~tb bytes of payload", [Channel, amqp10_framing:pprint(Performative), iolist_size(Payload)]), PerfIoData = amqp10_framing:encode_bin(Performative), diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl index 4deecdd157de..2ef86b0203da 100644 --- a/deps/rabbit/src/rabbit_amqqueue.erl +++ b/deps/rabbit/src/rabbit_amqqueue.erl @@ -70,6 +70,7 @@ -export([queue/1, queue_names/1]). -export([kill_queue/2, kill_queue/3, kill_queue_hard/2, kill_queue_hard/3]). +-export([delete_transient_queues_on_node/1]). %% internal -export([internal_declare/2, internal_delete/2, run_backing_queue/3, @@ -251,22 +252,30 @@ get_queue_type(Args, DefaultQueueType) -> rabbit_queue_type:discover(V) end. --spec internal_declare(amqqueue:amqqueue(), boolean()) -> - {created | existing, amqqueue:amqqueue()} | queue_absent(). +-spec internal_declare(Queue, Recover) -> Ret when + Queue :: amqqueue:amqqueue(), + Recover :: boolean(), + Ret :: {created | existing, amqqueue:amqqueue()} | + queue_absent() | + rabbit_khepri:timeout_error(). internal_declare(Q, Recover) -> do_internal_declare(Q, Recover). do_internal_declare(Q0, true) -> - %% TODO Why do we return the old state instead of the actual one? - %% I'm leaving it like it was before the khepri refactor, because - %% rabbit_amqqueue_process:init_it2 compares the result of this declare to decide - %% if continue or stop. If we return the actual one, it fails and the queue stops - %% silently during init. - %% Maybe we should review this bit of code at some point. 
Q = amqqueue:set_state(Q0, live), - ok = store_queue(Q), - {created, Q0}; + case store_queue(Q) of + ok -> + %% TODO Why do we return the old state instead of the actual one? + %% I'm leaving it like it was before the khepri refactor, because + %% rabbit_amqqueue_process:init_it2 compares the result of this + %% declare to decide if continue or stop. If we return the actual + %% one, it fails and the queue stops silently during init. + %% Maybe we should review this bit of code at some point. + {created, Q0}; + {error, timeout} = Err -> + Err + end; do_internal_declare(Q0, false) -> Q = rabbit_policy:set(amqqueue:set_state(Q0, live)), Queue = rabbit_queue_decorator:set(Q), @@ -279,12 +288,18 @@ do_internal_declare(Q0, false) -> update(Name, Fun) -> rabbit_db_queue:update(Name, Fun). -%% only really used for quorum queues to ensure the rabbit_queue record +-spec ensure_rabbit_queue_record_is_initialized(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | {error, timeout}. + +%% only really used for stream queues to ensure the rabbit_queue record %% is initialised ensure_rabbit_queue_record_is_initialized(Q) -> store_queue(Q). --spec store_queue(amqqueue:amqqueue()) -> 'ok'. +-spec store_queue(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | {error, timeout}. store_queue(Q0) -> Q = rabbit_queue_decorator:set(Q0), @@ -324,12 +339,10 @@ is_server_named_allowed(Args) -> Type = get_queue_type(Args), rabbit_queue_type:is_server_named_allowed(Type). --spec lookup - (name()) -> - rabbit_types:ok(amqqueue:amqqueue()) | - rabbit_types:error('not_found'); - ([name()]) -> - [amqqueue:amqqueue()]. +-spec lookup(QueueName) -> Ret when + QueueName :: name(), + Ret :: rabbit_types:ok(amqqueue:amqqueue()) + | rabbit_types:error('not_found'). lookup(Name) when is_record(Name, resource) -> rabbit_db_queue:get(Name). 
@@ -720,31 +733,42 @@ augment_declare_args(VHost, Durable, Exclusive, AutoDelete, Args0) -> #{default_queue_type := DefaultQueueType} when is_binary(DefaultQueueType) andalso not HasQTypeArg -> - Type = rabbit_queue_type:discover(DefaultQueueType), - IsPermitted = is_queue_args_combination_permitted( - Durable, Exclusive), - IsCompatible = rabbit_queue_type:is_compatible( - Type, Durable, Exclusive, AutoDelete), - case IsPermitted andalso IsCompatible of - true -> - %% patch up declare arguments with x-queue-type if there - %% is a vhost default set the queue is durable and not exclusive - %% and there is no queue type argument - %% present - rabbit_misc:set_table_value(Args0, - <<"x-queue-type">>, - longstr, - DefaultQueueType); - false -> - %% if the properties are incompatible with the declared - %% DQT, use the fall back type - rabbit_misc:set_table_value(Args0, - <<"x-queue-type">>, - longstr, - rabbit_queue_type:short_alias_of(rabbit_queue_type:fallback())) - end; + update_args_table_with_queue_type(DefaultQueueType, Durable, Exclusive, AutoDelete, Args0); _ -> - Args0 + case HasQTypeArg of + true -> Args0; + false -> + update_args_table_with_queue_type(rabbit_queue_type:short_alias_of(rabbit_queue_type:default()), Durable, Exclusive, AutoDelete, Args0) + end + end. + +-spec update_args_table_with_queue_type( + rabbit_queue_type:queue_type() | binary(), + boolean(), boolean(), boolean(), + rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table(). 
+update_args_table_with_queue_type(DefaultQueueType, Durable, Exclusive, AutoDelete, Args) -> + Type = rabbit_queue_type:discover(DefaultQueueType), + IsPermitted = is_queue_args_combination_permitted( + Durable, Exclusive), + IsCompatible = rabbit_queue_type:is_compatible( + Type, Durable, Exclusive, AutoDelete), + case IsPermitted andalso IsCompatible of + true -> + %% patch up declare arguments with x-queue-type if there + %% is a vhost default set the queue is durable and not exclusive + %% and there is no queue type argument + %% present + rabbit_misc:set_table_value(Args, + <<"x-queue-type">>, + longstr, + DefaultQueueType); + false -> + %% if the properties are incompatible with the declared + %% DQT, use the fall back type + rabbit_misc:set_table_value(Args, + <<"x-queue-type">>, + longstr, + rabbit_queue_type:short_alias_of(rabbit_queue_type:fallback())) end. -spec check_exclusive_access(amqqueue:amqqueue(), pid()) -> @@ -1247,8 +1271,8 @@ list_local_followers() -> [Q || Q <- list(), amqqueue:is_quorum(Q), - amqqueue:get_state(Q) =/= crashed, amqqueue:get_leader(Q) =/= node(), + lists:member(node(), get_quorum_nodes(Q)), rabbit_quorum_queue:is_recoverable(Q) ]. @@ -1499,7 +1523,14 @@ notify_policy_changed(Q) when ?is_amqqueue(Q) -> consumers(Q) when ?amqqueue_is_classic(Q) -> QPid = amqqueue:get_pid(Q), - delegate:invoke(QPid, {gen_server2, call, [consumers, infinity]}); + try + delegate:invoke(QPid, {gen_server2, call, [consumers, infinity]}) + catch + exit:_ -> + %% The queue process exited during the call. + %% Note that `delegate:invoke/2' catches errors but not exits. 
+ [] + end; consumers(Q) when ?amqqueue_is_quorum(Q) -> QPid = amqqueue:get_pid(Q), case ra:local_query(QPid, fun rabbit_fifo:query_consumers/1) of @@ -1595,17 +1626,23 @@ delete_immediately_by_resource(Resources) -> -spec delete (amqqueue:amqqueue(), 'false', 'false', rabbit_types:username()) -> qlen() | + rabbit_types:error(timeout) | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}; (amqqueue:amqqueue(), 'true' , 'false', rabbit_types:username()) -> - qlen() | rabbit_types:error('in_use') | + qlen() | + rabbit_types:error('in_use') | + rabbit_types:error(timeout) | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}; (amqqueue:amqqueue(), 'false', 'true', rabbit_types:username()) -> - qlen() | rabbit_types:error('not_empty') | + qlen() | + rabbit_types:error('not_empty') | + rabbit_types:error(timeout) | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}; (amqqueue:amqqueue(), 'true' , 'true', rabbit_types:username()) -> qlen() | rabbit_types:error('in_use') | rabbit_types:error('not_empty') | + rabbit_types:error(timeout) | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. delete(Q, IfUnused, IfEmpty, ActingUser) -> rabbit_queue_type:delete(Q, IfUnused, IfEmpty, ActingUser). @@ -1649,6 +1686,11 @@ delete_with(QueueName, ConnPid, IfUnused, IfEmpty, Username, CheckExclusive) whe {error, {exit, _, _}} -> %% delete()/delegate:invoke might return {error, {exit, _, _}} {ok, 0}; + {error, timeout} -> + rabbit_misc:protocol_error( + internal_error, + "The operation to delete the queue from the metadata store " + "timed out", []); {ok, Count} -> {ok, Count}; {protocol_error, Type, Reason, ReasonArgs} -> @@ -1662,7 +1704,10 @@ delete_crashed(Q) when ?amqqueue_is_classic(Q) -> delete_crashed(Q, ActingUser) when ?amqqueue_is_classic(Q) -> rabbit_classic_queue:delete_crashed(Q, ActingUser). --spec delete_crashed_internal(amqqueue:amqqueue(), rabbit_types:username()) -> 'ok'. 
+-spec delete_crashed_internal(Q, ActingUser) -> Ret when + Q :: amqqueue:amqqueue(), + ActingUser :: rabbit_types:username(), + Ret :: ok | {error, timeout}. delete_crashed_internal(Q, ActingUser) when ?amqqueue_is_classic(Q) -> rabbit_classic_queue:delete_crashed_internal(Q, ActingUser). @@ -1757,7 +1802,10 @@ notify_sent_queue_down(QPid) -> resume(QPid, ChPid) -> delegate:invoke_no_result(QPid, {gen_server2, cast, [{resume, ChPid}]}). --spec internal_delete(amqqueue:amqqueue(), rabbit_types:username()) -> 'ok'. +-spec internal_delete(Queue, ActingUser) -> Ret when + Queue :: amqqueue:amqqueue(), + ActingUser :: rabbit_types:username(), + Ret :: ok | {error, timeout}. internal_delete(Queue, ActingUser) -> internal_delete(Queue, ActingUser, normal). @@ -1767,9 +1815,11 @@ internal_delete(Queue, ActingUser, Reason) -> case rabbit_db_queue:delete(QueueName, Reason) of ok -> ok; + {error, timeout} = Err -> + Err; Deletions -> - _ = rabbit_binding:process_deletions(Deletions), - rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER), + ok = rabbit_binding:process_deletions(Deletions), + ok = rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER), rabbit_core_metrics:queue_deleted(QueueName), ok = rabbit_event:notify(queue_deleted, [{name, QueueName}, @@ -1782,6 +1832,7 @@ internal_delete(Queue, ActingUser, Reason) -> %% TODO this is used by `rabbit_mnesia:remove_node_if_mnesia_running` %% Does it make any sense once mnesia is not used/removed? forget_all_durable(Node) -> + rabbit_log:info("Will remove all classic queues from node ~ts. The node is likely being removed from the cluster.", [Node]), UpdateFun = fun(Q) -> forget_node_for_queue(Q) end, @@ -1839,13 +1890,39 @@ on_node_up(_Node) -> -spec on_node_down(node()) -> 'ok'. on_node_down(Node) -> + case delete_transient_queues_on_node(Node) of + ok -> + ok; + {error, timeout} -> + %% This case is possible when running Khepri. 
The node going down + %% could leave the cluster in a minority so the command to delete + %% the transient queue records would fail. Also see + %% `rabbit_khepri:init/0': we also try this deletion when the node + %% restarts - a time that the cluster is very likely to have a + %% majority - to ensure these records are deleted. + rabbit_log:warning("transient queues for node '~ts' could not be " + "deleted because of a timeout. These queues " + "will be removed when node '~ts' restarts or " + "is removed from the cluster.", [Node, Node]), + ok + end. + +-spec delete_transient_queues_on_node(Node) -> Ret when + Node :: node(), + Ret :: ok | rabbit_khepri:timeout_error(). + +delete_transient_queues_on_node(Node) -> {Time, Ret} = timer:tc(fun() -> rabbit_db_queue:delete_transient(filter_transient_queues_to_delete(Node)) end), case Ret of - ok -> ok; - {QueueNames, Deletions} -> + ok -> + ok; + {error, timeout} = Err -> + Err; + {QueueNames, Deletions} when is_list(QueueNames) -> case length(QueueNames) of 0 -> ok; - N -> rabbit_log:info("~b transient queues from an old incarnation of node ~tp deleted in ~fs", + N -> rabbit_log:info("~b transient queues from node '~ts' " + "deleted in ~fs", [N, Node, Time / 1_000_000]) end, notify_queue_binding_deletions(Deletions), @@ -1865,14 +1942,14 @@ filter_transient_queues_to_delete(Node) -> end. notify_queue_binding_deletions(QueueDeletions) when is_list(QueueDeletions) -> - Deletions = rabbit_binding:process_deletions( - lists:foldl(fun rabbit_binding:combine_deletions/2, - rabbit_binding:new_deletions(), - QueueDeletions)), + Deletions = lists:foldl( + fun rabbit_binding:combine_deletions/2, + rabbit_binding:new_deletions(), QueueDeletions), + ok = rabbit_binding:process_deletions(Deletions), rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER); notify_queue_binding_deletions(QueueDeletions) -> - Deletions = rabbit_binding:process_deletions(QueueDeletions), - rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER). 
+ ok = rabbit_binding:process_deletions(QueueDeletions), + rabbit_binding:notify_deletions(QueueDeletions, ?INTERNAL_USER). notify_transient_queues_deleted(QueueDeletions) -> lists:foreach( diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl index da9c1751f8b0..63f886bd3763 100644 --- a/deps/rabbit/src/rabbit_amqqueue_process.erl +++ b/deps/rabbit/src/rabbit_amqqueue_process.erl @@ -119,7 +119,9 @@ arguments, owner_pid, exclusive, - user_who_performed_action + user_who_performed_action, + leader, + members ]). -define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name, type]]). @@ -226,6 +228,12 @@ init_it2(Recover, From, State = #q{q = Q, false -> {stop, normal, {existing, Q1}, State} end; + {error, timeout} -> + Reason = {protocol_error, internal_error, + "Could not declare ~ts on node '~ts' because the " + "metadata store operation timed out", + [rabbit_misc:rs(amqqueue:get_name(Q)), node()]}, + {stop, normal, Reason, State}; Err -> {stop, normal, Err, State} end. @@ -291,7 +299,7 @@ terminate(shutdown = R, State = #q{backing_queue = BQ, q = Q0}) -> end, State); terminate({shutdown, missing_owner = Reason}, {{reply_to, From}, #q{q = Q} = State}) -> %% if the owner was missing then there will be no queue, so don't emit stats - State1 = terminate_shutdown(terminate_delete(false, Reason, State), State), + State1 = terminate_shutdown(terminate_delete(false, Reason, none, State), State), send_reply(From, {owner_died, Q}), State1; terminate({shutdown, _} = R, State = #q{backing_queue = BQ}) -> @@ -304,18 +312,22 @@ terminate(normal, State = #q{status = {terminated_by, auto_delete}}) -> %% thousands of queues. 
A optimisation introduced by server#1513 %% needs to be reverted by this case, avoiding to guard the delete %% operation on `rabbit_durable_queue` - terminate_shutdown(terminate_delete(true, auto_delete, State), State); -terminate(normal, State) -> %% delete case - terminate_shutdown(terminate_delete(true, normal, State), State); + terminate_shutdown(terminate_delete(true, auto_delete, none, State), State); +terminate(normal, {{reply_to, ReplyTo}, State}) -> %% delete case + terminate_shutdown(terminate_delete(true, normal, ReplyTo, State), State); +terminate(normal, State) -> + terminate_shutdown(terminate_delete(true, normal, none, State), State); %% If we crashed don't try to clean up the BQS, probably best to leave it. terminate(_Reason, State = #q{q = Q}) -> terminate_shutdown(fun (BQS) -> Q2 = amqqueue:set_state(Q, crashed), - rabbit_amqqueue:store_queue(Q2), + %% When mnesia is removed this update can become + %% an async Khepri command. + _ = rabbit_amqqueue:store_queue(Q2), BQS end, State). -terminate_delete(EmitStats, Reason0, +terminate_delete(EmitStats, Reason0, ReplyTo, State = #q{q = Q, backing_queue = BQ, status = Status}) -> @@ -326,19 +338,24 @@ terminate_delete(EmitStats, Reason0, missing_owner -> normal; Any -> Any end, + Len = BQ:len(BQS), BQS1 = BQ:delete_and_terminate(Reason, BQS), if EmitStats -> rabbit_event:if_enabled(State, #q.stats_timer, fun() -> emit_stats(State) end); true -> ok end, %% This try-catch block transforms throws to errors since throws are not - %% logged. - try - %% don't care if the internal delete doesn't return 'ok'. - rabbit_amqqueue:internal_delete(Q, ActingUser, Reason0) - catch - {error, ReasonE} -> error(ReasonE) - end, + %% logged. When mnesia is removed this `try` can be removed: Khepri + %% returns errors as error tuples instead. 
+ Reply = try rabbit_amqqueue:internal_delete(Q, ActingUser, Reason0) of + ok -> + {ok, Len}; + {error, _} = Err -> + Err + catch + {error, ReasonE} -> error(ReasonE) + end, + send_reply(ReplyTo, Reply), BQS1 end. @@ -1068,6 +1085,8 @@ i(auto_delete, #q{q = Q}) -> amqqueue:is_auto_delete(Q); i(arguments, #q{q = Q}) -> amqqueue:get_arguments(Q); i(pid, _) -> self(); +i(leader, State) -> node(i(pid, State)); +i(members, State) -> [i(leader, State)]; i(owner_pid, #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, none) -> ''; i(owner_pid, #q{q = Q}) -> @@ -1390,15 +1409,16 @@ handle_call(stat, _From, State) -> ensure_expiry_timer(State), reply({ok, BQ:len(BQS), rabbit_queue_consumers:count()}, State1); -handle_call({delete, IfUnused, IfEmpty, ActingUser}, _From, +handle_call({delete, IfUnused, IfEmpty, ActingUser}, From, State = #q{backing_queue_state = BQS, backing_queue = BQ}) -> IsEmpty = BQ:is_empty(BQS), IsUnused = is_unused(State), if IfEmpty and not(IsEmpty) -> reply({error, not_empty}, State); IfUnused and not(IsUnused) -> reply({error, in_use}, State); - true -> stop({ok, BQ:len(BQS)}, - State#q{status = {terminated_by, ActingUser}}) + true -> + State1 = State#q{status = {terminated_by, ActingUser}}, + stop({{reply_to, From}, State1}) end; handle_call(purge, _From, State = #q{backing_queue = BQ, @@ -1516,7 +1536,7 @@ handle_cast({credit, SessionPid, CTag, Credit, Drain}, backing_queue = BQ, backing_queue_state = BQS0} = State) -> %% Credit API v1. - %% Delete this function clause when feature flag credit_api_v2 becomes required. + %% Delete this function clause when feature flag rabbitmq_4.0.0 becomes required. %% Behave like non-native AMQP 1.0: Send send_credit_reply before deliveries. 
 rabbit_classic_queue:send_credit_reply_credit_api_v1( SessionPid, amqqueue:get_name(Q), BQ:len(BQS0)), diff --git a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl index a801d16e8dbc..a17202b5b1b7 100644 --- a/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl +++ b/deps/rabbit/src/rabbit_auth_mechanism_amqplain.erl @@ -30,14 +30,17 @@ should_offer(_Sock) -> init(_Sock) -> []. --define(IS_STRING_TYPE(Type), Type =:= longstr orelse Type =:= shortstr). +%% Parenthesized so the macro composes safely with `andalso' at the use +%% site: `andalso' binds tighter than `orelse', so without the parens +%% `?IS_STRING_TYPE(A) andalso ?IS_STRING_TYPE(B)' would skip the second +%% check whenever A is longstr or shortstr. +-define(IS_STRING_TYPE(Type), + (Type =:= longstr orelse + Type =:= shortstr orelse + Type =:= binary)). handle_response(Response, _State) -> LoginTable = rabbit_binary_parser:parse_table(Response), case {lists:keysearch(<<"LOGIN">>, 1, LoginTable), lists:keysearch(<<"PASSWORD">>, 1, LoginTable)} of {{value, {_, UserType, User}}, - {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType); + {value, {_, PassType, Pass}}} when ?IS_STRING_TYPE(UserType) andalso ?IS_STRING_TYPE(PassType) -> rabbit_access_control:check_user_pass_login(User, Pass); {{value, {_, _UserType, _User}}, diff --git a/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl b/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl new file mode 100644 index 000000000000..a5183156d45c --- /dev/null +++ b/deps/rabbit/src/rabbit_auth_mechanism_anonymous.erl @@ -0,0 +1,54 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_auth_mechanism_anonymous). +-behaviour(rabbit_auth_mechanism). + +-export([description/0, should_offer/1, init/1, handle_response/2]). +-export([credentials/0]). + +-define(STATE, []). 
+ +-rabbit_boot_step( + {?MODULE, + [{description, "auth mechanism anonymous"}, + {mfa, {rabbit_registry, register, [auth_mechanism, <<"ANONYMOUS">>, ?MODULE]}}, + {requires, rabbit_registry}, + {enables, kernel_ready}]}). + +description() -> + [{description, <<"SASL ANONYMOUS authentication mechanism">>}]. + +should_offer(_Sock) -> + case credentials() of + {ok, _, _} -> + true; + error -> + false + end. + +init(_Sock) -> + ?STATE. + +handle_response(_TraceInfo, ?STATE) -> + {ok, User, Pass} = credentials(), + rabbit_access_control:check_user_pass_login(User, Pass). + +-spec credentials() -> + {ok, rabbit_types:username(), rabbit_types:password()} | error. +credentials() -> + case application:get_env(rabbit, anonymous_login_user) of + {ok, User} when is_binary(User) -> + case application:get_env(rabbit, anonymous_login_pass) of + {ok, Pass} when is_binary(Pass) -> + {ok, User, Pass}; + _ -> + error + end; + _ -> + error + end. diff --git a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl index 31e235227500..d0881b4acc84 100644 --- a/deps/rabbit/src/rabbit_auth_mechanism_plain.erl +++ b/deps/rabbit/src/rabbit_auth_mechanism_plain.erl @@ -39,11 +39,15 @@ handle_response(Response, _State) -> extract_user_pass(Response) -> case extract_elem(Response) of - {ok, User, Response1} -> case extract_elem(Response1) of - {ok, Pass, <<>>} -> {ok, User, Pass}; - _ -> error - end; - error -> error + {ok, User, Response1} -> + case extract_elem(Response1) of + {ok, Pass, <<>>} -> + {ok, User, Pass}; + _ -> + error + end; + error -> + error end. 
extract_elem(<<0:8, Rest/binary>>) -> diff --git a/deps/rabbit/src/rabbit_binding.erl b/deps/rabbit/src/rabbit_binding.erl index cf7f79b51e6a..bde550e2d0a6 100644 --- a/deps/rabbit/src/rabbit_binding.erl +++ b/deps/rabbit/src/rabbit_binding.erl @@ -13,7 +13,7 @@ -export([list/1, list_for_source/1, list_for_destination/1, list_for_source_and_destination/2, list_for_source_and_destination/3, list_explicit/0]). --export([new_deletions/0, combine_deletions/2, add_deletion/3, +-export([new_deletions/0, combine_deletions/2, add_deletion/5, process_deletions/1, notify_deletions/2, group_bindings_fold/3]). -export([info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4]). @@ -22,6 +22,9 @@ -export([reverse_route/1, index_route/1]). -export([binding_type/2]). +%% For testing only +-export([fetch_deletion/2]). + -define(DEFAULT_EXCHANGE(VHostPath), #resource{virtual_host = VHostPath, kind = exchange, name = <<>>}). @@ -50,9 +53,12 @@ rabbit_types:ok_or_error(rabbit_types:amqp_error())). -type bindings() :: [rabbit_types:binding()]. -%% TODO this should really be opaque but that seems to confuse 17.1's -%% dialyzer into objecting to everything that uses it. --type deletions() :: dict:dict(). +-record(deletion, {exchange :: rabbit_types:exchange(), + %% Whether the exchange was deleted. + deleted :: boolean(), + bindings :: sets:set(rabbit_types:binding())}). + +-opaque deletions() :: #{XName :: rabbit_exchange:name() => #deletion{}}. %%---------------------------------------------------------------------------- @@ -159,6 +165,19 @@ binding_type0(false, true) -> binding_type0(_, _) -> transient. +binding_checks(Binding, InnerFun) -> + fun(Src, Dst) -> + case rabbit_exchange:validate_binding(Src, Binding) of + ok -> + %% this argument is used to check queue exclusivity; + %% in general, we want to fail on that in preference to + %% anything else + InnerFun(Src, Dst); + Err -> + Err + end + end. 
+ -spec remove(rabbit_types:binding(), rabbit_types:username()) -> bind_res(). remove(Binding, ActingUser) -> remove(Binding, fun (_Src, _Dst) -> ok end, ActingUser). @@ -360,57 +379,96 @@ index_route(#route{binding = #binding{source = Source, %% ---------------------------------------------------------------------------- %% Binding / exchange deletion abstraction API %% ---------------------------------------------------------------------------- - -anything_but( NotThis, NotThis, NotThis) -> NotThis; -anything_but( NotThis, NotThis, This) -> This; -anything_but( NotThis, This, NotThis) -> This; -anything_but(_NotThis, This, This) -> This. +%% +%% `deletions()' describe a set of removals of bindings and/or exchanges from +%% the metadata store. +%% +%% This deletion collection is used for two purposes: +%%
+%% <ul>
+%% <li>"Processing" of deletions. Processing here means that the
+%% exchanges and bindings are passed into the {@link rabbit_exchange}
+%% callbacks. When an exchange is deleted the `rabbit_exchange:delete/1'
+%% callback is invoked and when the exchange is not deleted but some bindings
+%% are deleted the `rabbit_exchange:remove_bindings/2' is invoked.</li>
+%% <li>Notification of metadata deletion. Like other internal
+%% notifications, {@link rabbit_binding:notify_deletions()} uses {@link
+%% rabbit_event} to notify any interested consumers of a resource deletion.
+%% An example consumer of {@link rabbit_event} is the `rabbitmq_event_exchange'
+%% plugin which publishes these notifications as messages.</li>
+%% </ul>
    +%% +%% The point of collecting deletions into this opaque type is to be able to +%% collect all bindings deleted for a given exchange into a list. This allows +%% us to invoke the `rabbit_exchange:remove_bindings/2' callback with all +%% deleted bindings at once rather than passing each deleted binding +%% individually. -spec new_deletions() -> deletions(). -new_deletions() -> dict:new(). - --spec add_deletion - (rabbit_exchange:name(), - {'undefined' | rabbit_types:exchange(), - 'deleted' | 'not_deleted', - bindings()}, - deletions()) -> - deletions(). - -add_deletion(XName, Entry, Deletions) -> - dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end, - Entry, Deletions). +new_deletions() -> #{}. + +-spec add_deletion(XName, X, XDeleted, Bindings, Deletions) -> Deletions1 + when + XName :: rabbit_exchange:name(), + X :: rabbit_types:exchange(), + XDeleted :: deleted | not_deleted, + Bindings :: bindings(), + Deletions :: deletions(), + Deletions1 :: deletions(). + +add_deletion(XName, X, WasDeleted, Bindings, Deletions) + when (WasDeleted =:= deleted orelse WasDeleted =:= not_deleted) andalso + is_list(Bindings) andalso is_map(Deletions) -> + WasDeleted1 = case WasDeleted of + deleted -> true; + not_deleted -> false + end, + Bindings1 = sets:from_list(Bindings, [{version, 2}]), + Deletion = #deletion{exchange = X, + deleted = WasDeleted1, + bindings = Bindings1}, + maps:update_with( + XName, + fun(Deletion1) -> + merge_deletion(Deletion1, Deletion) + end, Deletion, Deletions). -spec combine_deletions(deletions(), deletions()) -> deletions(). -combine_deletions(Deletions1, Deletions2) -> - dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end, - Deletions1, Deletions2). 
- -merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> - {anything_but(undefined, X1, X2), - anything_but(not_deleted, Deleted1, Deleted2), - Bindings1 ++ Bindings2}; -merge_entry({X1, Deleted1, Bindings1, none}, {X2, Deleted2, Bindings2, none}) -> - {anything_but(undefined, X1, X2), - anything_but(not_deleted, Deleted1, Deleted2), - Bindings1 ++ Bindings2, none}. - -notify_deletions({error, not_found}, _) -> - ok; -notify_deletions(Deletions, ActingUser) -> - dict:fold(fun (XName, {_X, deleted, Bs, _}, ok) -> - notify_exchange_deletion(XName, ActingUser), - notify_bindings_deletion(Bs, ActingUser); - (_XName, {_X, not_deleted, Bs, _}, ok) -> - notify_bindings_deletion(Bs, ActingUser); - (XName, {_X, deleted, Bs}, ok) -> +combine_deletions(Deletions1, Deletions2) + when is_map(Deletions1) andalso is_map(Deletions2) -> + maps:merge_with( + fun (_XName, Deletion1, Deletion2) -> + merge_deletion(Deletion1, Deletion2) + end, Deletions1, Deletions2). + +merge_deletion( + #deletion{deleted = Deleted1, bindings = Bindings1}, + #deletion{exchange = X2, deleted = Deleted2, bindings = Bindings2}) -> + %% Assume that X2 is more up to date than X1. + X = X2, + Deleted = Deleted1 orelse Deleted2, + Bindings = sets:union(Bindings1, Bindings2), + #deletion{exchange = X, + deleted = Deleted, + bindings = Bindings}. + +-spec notify_deletions(Deletions, ActingUser) -> ok when + Deletions :: rabbit_binding:deletions(), + ActingUser :: rabbit_types:username(). + +notify_deletions(Deletions, ActingUser) when is_map(Deletions) -> + maps:foreach( + fun (XName, #deletion{deleted = XDeleted, bindings = Bindings}) -> + case XDeleted of + true -> notify_exchange_deletion(XName, ActingUser), - notify_bindings_deletion(Bs, ActingUser); - (_XName, {_X, not_deleted, Bs}, ok) -> - notify_bindings_deletion(Bs, ActingUser) - end, ok, Deletions). + notify_bindings_deletion(Bindings, ActingUser); + false -> + notify_bindings_deletion(Bindings, ActingUser) + end + end, Deletions). 
notify_exchange_deletion(XName, ActingUser) -> ok = rabbit_event:notify( @@ -418,35 +476,58 @@ notify_exchange_deletion(XName, ActingUser) -> [{name, XName}, {user_who_performed_action, ActingUser}]). -notify_bindings_deletion(Bs, ActingUser) -> - [rabbit_event:notify(binding_deleted, - info(B) ++ [{user_who_performed_action, ActingUser}]) - || B <- Bs], - ok. +notify_bindings_deletion(Bindings, ActingUser) -> + sets:fold( + fun(Binding, ok) -> + rabbit_event:notify( + binding_deleted, + info(Binding) ++ [{user_who_performed_action, ActingUser}]), + ok + end, ok, Bindings). --spec process_deletions(deletions()) -> deletions(). +-spec process_deletions(deletions()) -> ok. process_deletions(Deletions) -> - dict:map(fun (_XName, {X, deleted, Bindings}) -> - Bs = lists:flatten(Bindings), - Serial = rabbit_exchange:serial(X), - rabbit_exchange:callback(X, delete, Serial, [X]), - {X, deleted, Bs, none}; - (_XName, {X, not_deleted, Bindings}) -> - Bs = lists:flatten(Bindings), - Serial = rabbit_exchange:serial(X), - rabbit_exchange:callback(X, remove_bindings, Serial, [X, Bs]), - {X, not_deleted, Bs, none} - end, Deletions). - -binding_checks(Binding, InnerFun) -> - fun(Src, Dst) -> - case rabbit_exchange:validate_binding(Src, Binding) of - ok -> - %% this argument is used to check queue exclusivity; - %% in general, we want to fail on that in preference to - %% anything else - InnerFun(Src, Dst); - Err -> - Err - end + maps:foreach( + fun (_XName, #deletion{exchange = X, + deleted = XDeleted, + bindings = Bindings}) -> + Serial = rabbit_exchange:serial(X), + case XDeleted of + true -> + rabbit_exchange:callback(X, delete, Serial, [X]); + false -> + Bindings1 = sets:to_list(Bindings), + rabbit_exchange:callback( + X, remove_bindings, Serial, [X, Bindings1]) + end + end, Deletions). 
+ +-spec fetch_deletion(XName, Deletions) -> Ret when + XName :: rabbit_exchange:name(), + Deletions :: deletions(), + Ret :: {X, WasDeleted, Bindings}, + X :: rabbit_types:exchange(), + WasDeleted :: deleted | not_deleted, + Bindings :: bindings(). +%% @doc Fetches the deletions for the given exchange name. +%% +%% This function is only intended for use in tests. +%% +%% @private + +fetch_deletion(XName, Deletions) -> + case maps:find(XName, Deletions) of + {ok, #deletion{exchange = X, + deleted = Deleted, + bindings = Bindings}} -> + WasDeleted = case Deleted of + true -> + deleted; + false -> + not_deleted + end, + Bindings1 = sets:to_list(Bindings), + {X, WasDeleted, Bindings1}; + error -> + error end. diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl index 908892781574..4be86370c390 100644 --- a/deps/rabbit/src/rabbit_channel.erl +++ b/deps/rabbit/src/rabbit_channel.erl @@ -985,7 +985,7 @@ check_msg_size(Content, GCThreshold) -> Size = rabbit_basic:maybe_gc_large_msg(Content, GCThreshold), case Size =< MaxMessageSize of true -> - ok; + rabbit_msg_size_metrics:observe(amqp091, Size); false -> Fmt = case MaxMessageSize of ?MAX_MSG_SIZE -> diff --git a/deps/rabbit/src/rabbit_channel_tracking.erl b/deps/rabbit/src/rabbit_channel_tracking.erl index 0931352416df..bd37d59bf9e8 100644 --- a/deps/rabbit/src/rabbit_channel_tracking.erl +++ b/deps/rabbit/src/rabbit_channel_tracking.erl @@ -126,7 +126,7 @@ unregister_tracked_by_pid(ChPid) when node(ChPid) == node() -> case ets:lookup(?TRACKED_CHANNEL_TABLE, ChPid) of [] -> ok; [#tracked_channel{username = Username}] -> - ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), ets:delete(?TRACKED_CHANNEL_TABLE, ChPid) end. 
@@ -139,7 +139,7 @@ unregister_tracked(ChId = {Node, _Name}) when Node == node() -> case get_tracked_channel_by_id(ChId) of [] -> ok; [#tracked_channel{pid = ChPid, username = Username}] -> - ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), ets:delete(?TRACKED_CHANNEL_TABLE, ChPid) end. diff --git a/deps/rabbit/src/rabbit_classic_queue.erl b/deps/rabbit/src/rabbit_classic_queue.erl index 5878347349d2..b7ed084ac0a3 100644 --- a/deps/rabbit/src/rabbit_classic_queue.erl +++ b/deps/rabbit/src/rabbit_classic_queue.erl @@ -168,8 +168,12 @@ delete(Q0, IfUnused, IfEmpty, ActingUser) when ?amqqueue_is_classic(Q0) -> rabbit_log:warning("Queue ~ts in vhost ~ts is down. " "Forcing queue deletion.", [Name, Vhost]), - delete_crashed_internal(Q, ActingUser), - {ok, 0} + case delete_crashed_internal(Q, ActingUser) of + ok -> + {ok, 0}; + {error, timeout} = Err -> + Err + end end end; {error, not_found} -> @@ -297,9 +301,9 @@ consume(Q, Spec, State0) when ?amqqueue_is_classic(Q) -> Err end. -%% Delete this function when feature flag credit_api_v2 becomes required. +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. consume_backwards_compat({simple_prefetch, PrefetchCount} = Mode, Args) -> - case rabbit_feature_flags:is_enabled(credit_api_v2) of + case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of true -> {Mode, Args}; false -> {PrefetchCount, Args} end; @@ -314,8 +318,8 @@ consume_backwards_compat({credited, credit_api_v1}, Args) -> {<<"drain">>, bool, false}]} | Args]}. cancel(Q, Spec, State) -> - %% Cancel API v2 reuses feature flag credit_api_v2. - Request = case rabbit_feature_flags:is_enabled(credit_api_v2) of + %% Cancel API v2 reuses feature flag rabbitmq_4.0.0. 
+ Request = case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of true -> {stop_consumer, Spec#{pid => self()}}; false -> @@ -333,6 +337,15 @@ cancel(Q, Spec, State) -> -spec settle(rabbit_amqqueue:name(), rabbit_queue_type:settle_op(), rabbit_types:ctag(), [non_neg_integer()], state()) -> {state(), rabbit_queue_type:actions()}. +settle(QName, {modify, _DelFailed, Undel, _Anns}, CTag, MsgIds, State) -> + %% translate modify into other op + Op = case Undel of + true -> + discard; + false -> + requeue + end, + settle(QName, Op, CTag, MsgIds, State); settle(_QName, Op, _CTag, MsgIds, State = #?STATE{pid = Pid}) -> Arg = case Op of complete -> @@ -413,7 +426,7 @@ handle_event(_QName, Action, State) {ok, State, [Action]}; handle_event(_QName, {send_drained, {Ctag, Credit}}, State) -> %% This function clause should be deleted when feature flag - %% credit_api_v2 becomes required. + %% rabbitmq_4.0.0 becomes required. Action = {credit_reply_v1, Ctag, Credit, _Available = 0, _Drain = true}, {ok, State, [Action]}. @@ -542,7 +555,7 @@ delete_crashed(Q, ActingUser) -> delete_crashed_internal(Q, ActingUser) -> delete_crashed_in_backing_queue(Q), - ok = rabbit_amqqueue:internal_delete(Q, ActingUser). + rabbit_amqqueue:internal_delete(Q, ActingUser). delete_crashed_in_backing_queue(Q) -> {ok, BQ} = application:get_env(rabbit, backing_queue_module), @@ -568,7 +581,7 @@ capabilities() -> <<"x-max-length-bytes">>, <<"x-max-priority">>, <<"x-overflow">>, <<"x-queue-mode">>, <<"x-queue-version">>, <<"x-single-active-consumer">>, <<"x-queue-type">>, <<"x-queue-master-locator">>] - ++ case rabbit_feature_flags:is_enabled(classic_queue_leader_locator) of + ++ case rabbit_feature_flags:is_enabled('rabbitmq_4.0.0') of true -> [<<"x-queue-leader-locator">>]; false -> [] end, @@ -645,12 +658,12 @@ deliver_to_consumer(Pid, QName, CTag, AckRequired, Message) -> Evt = {deliver, CTag, AckRequired, [Message]}, send_queue_event(Pid, QName, Evt). 
-%% Delete this function when feature flag credit_api_v2 becomes required. +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. send_credit_reply_credit_api_v1(Pid, QName, Available) -> Evt = {send_credit_reply, Available}, send_queue_event(Pid, QName, Evt). -%% Delete this function when feature flag credit_api_v2 becomes required. +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. send_drained_credit_api_v1(Pid, QName, Ctag, Credit) -> Evt = {send_drained, {Ctag, Credit}}, send_queue_event(Pid, QName, Evt). diff --git a/deps/rabbit/src/rabbit_connection_tracking.erl b/deps/rabbit/src/rabbit_connection_tracking.erl index da906fa41144..4ac1b8065324 100644 --- a/deps/rabbit/src/rabbit_connection_tracking.erl +++ b/deps/rabbit/src/rabbit_connection_tracking.erl @@ -151,8 +151,8 @@ unregister_tracked(ConnId = {Node, _Name}) when Node =:= node() -> case ets:lookup(?TRACKED_CONNECTION_TABLE, ConnId) of [] -> ok; [#tracked_connection{vhost = VHost, username = Username}] -> - ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_USER, Username, -1), - ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_VHOST, VHost, -1), + _ = ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_VHOST, VHost, -1), + ets:delete(?TRACKED_CONNECTION_TABLE, ConnId) end. @@ -428,6 +428,6 @@ close_connection(#tracked_connection{pid = Pid, type = direct}, Message) -> Node = node(Pid), rpc:call(Node, amqp_direct_connection, server_close, [Pid, 320, Message]); close_connection(#tracked_connection{pid = Pid}, Message) -> - % best effort, this will work for connections to the stream plugin - Node = node(Pid), - rpc:call(Node, gen_server, call, [Pid, {shutdown, Message}, infinity]). + %% Best effort; this will work for the following plugins: + %% rabbitmq_stream, rabbitmq_mqtt, rabbitmq_web_mqtt + Pid ! {shutdown, Message}. 
diff --git a/deps/rabbit/src/rabbit_core_ff.erl b/deps/rabbit/src/rabbit_core_ff.erl index 67270f4c1c30..5475909eec54 100644 --- a/deps/rabbit/src/rabbit_core_ff.erl +++ b/deps/rabbit/src/rabbit_core_ff.erl @@ -16,14 +16,14 @@ -rabbit_feature_flag( {quorum_queue, #{desc => "Support queues of type `quorum`", - doc_url => "https://www.rabbitmq.com/quorum-queues.html", + doc_url => "https://www.rabbitmq.com/docs/quorum-queues", stability => required }}). -rabbit_feature_flag( {stream_queue, #{desc => "Support queues of type `stream`", - doc_url => "https://www.rabbitmq.com/stream.html", + doc_url => "https://www.rabbitmq.com/docs/stream", stability => required, depends_on => [quorum_queue] }}). @@ -56,7 +56,7 @@ -rabbit_feature_flag( {stream_single_active_consumer, #{desc => "Single active consumer for streams", - doc_url => "https://www.rabbitmq.com/stream.html", + doc_url => "https://www.rabbitmq.com/docs/stream", stability => required, depends_on => [stream_queue] }}). @@ -129,8 +129,8 @@ -rabbit_feature_flag( {khepri_db, - #{desc => "Use the new Khepri Raft-based metadata store", - doc_url => "", %% TODO + #{desc => "New Raft-based metadata store. Fully supported as of RabbitMQ 4.0", + doc_url => "https://www.rabbitmq.com/docs/next/metadata-store", stability => experimental, depends_on => [feature_flags_v2, direct_exchange_routing_v2, @@ -165,19 +165,6 @@ depends_on => [quorum_queue] }}). --rabbit_feature_flag( - {credit_api_v2, - #{desc => "Credit and cancel API v2 between queue clients and queue processes", - stability => stable - }}). - --rabbit_feature_flag( - {message_containers_store_amqp_v1, - #{desc => "Support storing messages in message containers AMQP 1.0 disk format v1", - stability => stable, - depends_on => [message_containers] - }}). - -rabbit_feature_flag( {message_containers_deaths_v2, #{desc => "Bug fix for dead letter cycle detection", @@ -186,9 +173,16 @@ depends_on => [message_containers] }}). 
+%% We bundle the following separate concerns (which could have been separate feature flags) +%% into a single feature flag for better user experience: +%% 1. credit API v2 between classic / quorum queue client and classic / quorum queue server +%% 2. cancel API v2 between classic queue client and classic queue server +%% 3. more compact quorum queue commands in quorum queue v4 +%% 4. store messages in message containers AMQP 1.0 disk format v1 +%% 5. support queue leader locator in classic queues -rabbit_feature_flag( - {classic_queue_leader_locator, - #{desc => "queue-leader-locator support in classic queues", - doc_url => "https://www.rabbitmq.com/docs/clustering#replica-placement", - stability => stable + {'rabbitmq_4.0.0', + #{desc => "Allows rolling upgrades from 3.13.x to 4.0.x", + stability => stable, + depends_on => [message_containers] }}). diff --git a/deps/rabbit/src/rabbit_core_metrics_gc.erl b/deps/rabbit/src/rabbit_core_metrics_gc.erl index 792dcb790ab2..ea4f222cef90 100644 --- a/deps/rabbit/src/rabbit_core_metrics_gc.erl +++ b/deps/rabbit/src/rabbit_core_metrics_gc.erl @@ -6,6 +6,8 @@ %% -module(rabbit_core_metrics_gc). +-behaviour(gen_server). + -record(state, {timer, interval }). @@ -17,7 +19,7 @@ -spec start_link() -> rabbit_types:ok_pid_or_error(). start_link() -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + gen_server:start_link({local, ?MODULE}, ?MODULE, [], [{hibernate_after, 0}]). init(_) -> Interval = rabbit_misc:get_env(rabbit, core_metrics_gc_interval, 120000), diff --git a/deps/rabbit/src/rabbit_cuttlefish.erl b/deps/rabbit/src/rabbit_cuttlefish.erl index 18dbc282d46f..f43b4a1f4745 100644 --- a/deps/rabbit/src/rabbit_cuttlefish.erl +++ b/deps/rabbit/src/rabbit_cuttlefish.erl @@ -9,7 +9,10 @@ -export([ aggregate_props/2, - aggregate_props/3 + aggregate_props/3, + + optionally_tagged_binary/2, + optionally_tagged_string/2 ]). -type keyed_props() :: [{binary(), [{binary(), any()}]}]. 
@@ -41,3 +44,25 @@ aggregate_props(Conf, Prefix, KeyFun) -> FlatList ) ). + +optionally_tagged_binary(Key, Conf) -> + case cuttlefish:conf_get(Key, Conf) of + undefined -> cuttlefish:unset(); + {encrypted, Bin} when is_binary(Bin) -> {encrypted, Bin}; + {_, Bin} when is_binary(Bin) -> {encrypted, Bin}; + {encrypted, Str} when is_list(Str) -> {encrypted, list_to_binary(Str)}; + {_, Str} when is_list(Str) -> {encrypted, list_to_binary(Str)}; + Bin when is_binary(Bin) -> Bin; + Str when is_list(Str) -> list_to_binary(Str) + end. + +optionally_tagged_string(Key, Conf) -> + case cuttlefish:conf_get(Key, Conf) of + undefined -> cuttlefish:unset(); + {encrypted, Str} when is_list(Str) -> {encrypted, Str}; + {_, Str} when is_list(Str) -> {encrypted, Str}; + {encrypted, Bin} when is_binary(Bin) -> {encrypted, binary_to_list(Bin)}; + {_, Bin} when is_binary(Bin) -> {encrypted, binary_to_list(Bin)}; + Str when is_list(Str) -> Str; + Bin when is_binary(Bin) -> binary_to_list(Bin) + end. \ No newline at end of file diff --git a/deps/rabbit/src/rabbit_db.erl b/deps/rabbit/src/rabbit_db.erl index 7dcae084876b..6dd2ae7d01cf 100644 --- a/deps/rabbit/src/rabbit_db.erl +++ b/deps/rabbit/src/rabbit_db.erl @@ -67,8 +67,8 @@ init() -> end, Ret = case rabbit_khepri:is_enabled() of - true -> init_using_khepri(); - false -> init_using_mnesia() + true -> init_using_khepri(IsVirgin); + false -> init_using_mnesia(IsVirgin) end, case Ret of ok -> @@ -91,7 +91,7 @@ pre_init(IsVirgin) -> OtherMembers = rabbit_nodes:nodes_excl_me(Members), rabbit_db_cluster:ensure_feature_flags_are_in_sync(OtherMembers, IsVirgin). -init_using_mnesia() -> +init_using_mnesia(_IsVirgin) -> ?LOG_DEBUG( "DB: initialize Mnesia", #{domain => ?RMQLOG_DOMAIN_DB}), @@ -99,16 +99,11 @@ init_using_mnesia() -> ?assertEqual(rabbit:data_dir(), mnesia_dir()), rabbit_sup:start_child(mnesia_sync). 
-init_using_khepri() -> - case rabbit_khepri:members() of - [] -> - timer:sleep(1000), - init_using_khepri(); - Members -> - ?LOG_WARNING( - "Found the following metadata store members: ~p", [Members], - #{domain => ?RMQLOG_DOMAIN_DB}) - end. +init_using_khepri(IsVirgin) -> + ?LOG_DEBUG( + "DB: initialize Khepri", + #{domain => ?RMQLOG_DOMAIN_DB}), + rabbit_khepri:init(IsVirgin). init_finished() -> %% Used during initialisation by rabbit_logger_exchange_h.erl diff --git a/deps/rabbit/src/rabbit_db_binding.erl b/deps/rabbit/src/rabbit_db_binding.erl index cc03de705412..9bb02277ca52 100644 --- a/deps/rabbit/src/rabbit_db_binding.erl +++ b/deps/rabbit/src/rabbit_db_binding.erl @@ -32,13 +32,13 @@ delete_transient_for_destination_in_mnesia/1, has_for_source_in_mnesia/1, has_for_source_in_khepri/1, - match_source_and_destination_in_khepri_tx/2 + match_source_and_destination_in_khepri_tx/2, + clear_in_khepri/0 ]). -export([ - khepri_route_path/1, - khepri_routes_path/0, - khepri_route_exchange_path/1 + khepri_route_path/1, khepri_route_path/5, + khepri_route_path_to_args/1 ]). %% Recovery is only needed for transient entities. Once mnesia is removed, these @@ -53,7 +53,7 @@ -define(MNESIA_SEMI_DURABLE_TABLE, rabbit_semi_durable_route). -define(MNESIA_REVERSE_TABLE, rabbit_reverse_route). -define(MNESIA_INDEX_TABLE, rabbit_index_route). --define(KHEPRI_BINDINGS_PROJECTION, rabbit_khepri_bindings). +-define(KHEPRI_BINDINGS_PROJECTION, rabbit_khepri_binding). -define(KHEPRI_INDEX_ROUTE_PROJECTION, rabbit_khepri_index_route). 
%% ------------------------------------------------------------------- @@ -201,8 +201,6 @@ create_in_khepri(#binding{source = SrcName, MaybeSerial = rabbit_exchange:serialise_events(Src), Serial = rabbit_khepri:transaction( fun() -> - ExchangePath = khepri_route_exchange_path(SrcName), - ok = khepri_tx:put(ExchangePath, #{type => Src#exchange.type}), case khepri_tx:get(RoutePath) of {ok, Set} -> case sets:is_element(Binding, Set) of @@ -304,7 +302,10 @@ delete_in_mnesia(Src, Dst, B) -> should_index_table(Src), fun delete/3), Deletions0 = maybe_auto_delete_exchange_in_mnesia( B#binding.source, [B], rabbit_binding:new_deletions(), false), - fun() -> {ok, rabbit_binding:process_deletions(Deletions0)} end. + fun() -> + ok = rabbit_binding:process_deletions(Deletions0), + {ok, Deletions0} + end. absent_errs_only_in_mnesia(Names) -> Errs = [E || Name <- Names, @@ -354,7 +355,8 @@ delete_in_khepri(#binding{source = SrcName, {error, _} = Err -> Err; Deletions -> - {ok, rabbit_binding:process_deletions(Deletions)} + ok = rabbit_binding:process_deletions(Deletions), + {ok, Deletions} end. exists_in_khepri(Path, Binding) -> @@ -381,15 +383,18 @@ delete_in_khepri(Binding) -> end. maybe_auto_delete_exchange_in_khepri(XName, Bindings, Deletions, OnlyDurable) -> - {Entry, Deletions1} = - case rabbit_db_exchange:maybe_auto_delete_in_khepri(XName, OnlyDurable) of - {not_deleted, X} -> - {{X, not_deleted, Bindings}, Deletions}; - {deleted, X, Deletions2} -> - {{X, deleted, Bindings}, - rabbit_binding:combine_deletions(Deletions, Deletions2)} - end, - rabbit_binding:add_deletion(XName, Entry, Deletions1). 
+ case rabbit_db_exchange:maybe_auto_delete_in_khepri(XName, OnlyDurable) of + {not_deleted, undefined} -> + Deletions; + {not_deleted, X} -> + rabbit_binding:add_deletion( + XName, X, not_deleted, Bindings, Deletions); + {deleted, X, Deletions1} -> + Deletions2 = rabbit_binding:combine_deletions( + Deletions, Deletions1), + rabbit_binding:add_deletion( + XName, X, deleted, Bindings, Deletions2) + end. %% ------------------------------------------------------------------- %% get_all(). @@ -610,9 +615,12 @@ fold_in_mnesia(Fun, Acc) -> end, Acc, ?MNESIA_TABLE). fold_in_khepri(Fun, Acc) -> - Path = khepri_routes_path() ++ [_VHost = ?KHEPRI_WILDCARD_STAR, - _SrcName = ?KHEPRI_WILDCARD_STAR, - rabbit_khepri:if_has_data_wildcard()], + Path = khepri_route_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _SrcName = ?KHEPRI_WILDCARD_STAR, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = #if_has_data{}), {ok, Res} = rabbit_khepri:fold( Path, fun(_, #{data := SetOfBindings}, Acc0) -> @@ -828,10 +836,14 @@ delete_all_for_exchange_in_khepri(X = #exchange{name = XName}, OnlyDurable, Remo {deleted, X, Bindings, delete_for_destination_in_khepri(XName, OnlyDurable)}. delete_for_source_in_khepri(#resource{virtual_host = VHost, name = Name}) -> - Path = khepri_routes_path() ++ [VHost, Name], - {ok, Bindings} = khepri_tx:get_many(Path ++ [rabbit_khepri:if_has_data_wildcard()]), - ok = khepri_tx:delete(Path), - maps:fold(fun(_P, Set, Acc) -> + Path = khepri_route_path( + VHost, + Name, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = #if_has_data{}), + {ok, Bindings} = khepri_tx_adv:delete_many(Path), + maps:fold(fun(_P, #{data := Set}, Acc) -> sets:to_list(Set) ++ Acc end, [], Bindings). @@ -875,20 +887,20 @@ delete_for_destination_in_mnesia(DstName, OnlyDurable, Fun) -> OnlyDurable :: boolean(), Deletions :: rabbit_binding:deletions(). 
-delete_for_destination_in_khepri(DstName, OnlyDurable) -> - BindingsMap = match_destination_in_khepri(DstName), - maps:foreach(fun(K, _V) -> khepri_tx:delete(K) end, BindingsMap), - Bindings = maps:fold(fun(_, Set, Acc) -> +delete_for_destination_in_khepri(#resource{virtual_host = VHost, kind = Kind, name = Name}, OnlyDurable) -> + Pattern = khepri_route_path( + VHost, + _SrcName = ?KHEPRI_WILDCARD_STAR, + Kind, + Name, + _RoutingKey = ?KHEPRI_WILDCARD_STAR), + {ok, BindingsMap} = khepri_tx_adv:delete_many(Pattern), + Bindings = maps:fold(fun(_, #{data := Set}, Acc) -> sets:to_list(Set) ++ Acc end, [], BindingsMap), rabbit_binding:group_bindings_fold(fun maybe_auto_delete_exchange_in_khepri/4, lists:keysort(#binding.source, Bindings), OnlyDurable). -match_destination_in_khepri(#resource{virtual_host = VHost, kind = Kind, name = Name}) -> - Path = khepri_routes_path() ++ [VHost, ?KHEPRI_WILDCARD_STAR, Kind, Name, ?KHEPRI_WILDCARD_STAR_STAR], - {ok, Map} = khepri_tx:get_many(Path), - Map. - %% ------------------------------------------------------------------- %% delete_transient_for_destination_in_mnesia(). %% ------------------------------------------------------------------- @@ -926,7 +938,12 @@ has_for_source_in_mnesia(SrcName) -> -spec has_for_source_in_khepri(rabbit_types:binding_source()) -> boolean(). 
has_for_source_in_khepri(#resource{virtual_host = VHost, name = Name}) -> - Path = khepri_routes_path() ++ [VHost, Name, rabbit_khepri:if_has_data_wildcard()], + Path = khepri_route_path( + VHost, + Name, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = #if_has_data{}), case khepri_tx:get_many(Path) of {ok, Map} -> maps:size(Map) > 0; @@ -945,7 +962,8 @@ has_for_source_in_khepri(#resource{virtual_host = VHost, name = Name}) -> match_source_and_destination_in_khepri_tx(#resource{virtual_host = VHost, name = Name}, #resource{kind = Kind, name = DstName}) -> - Path = khepri_routes_path() ++ [VHost, Name, Kind, DstName, rabbit_khepri:if_has_data_wildcard()], + Path = khepri_route_path( + VHost, Name, Kind, DstName, _RoutingKey = #if_has_data{}), case khepri_tx:get_many(Path) of {ok, Map} -> maps:values(Map); _ -> [] @@ -974,7 +992,12 @@ clear_in_mnesia() -> ok. clear_in_khepri() -> - Path = khepri_routes_path(), + Path = khepri_route_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _SrcName = ?KHEPRI_WILDCARD_STAR, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) @@ -983,16 +1006,47 @@ clear_in_khepri() -> %% -------------------------------------------------------------- %% Paths %% -------------------------------------------------------------- -khepri_route_path(#binding{source = #resource{virtual_host = VHost, name = SrcName}, - destination = #resource{kind = Kind, name = DstName}, - key = RoutingKey}) -> - [?MODULE, routes, VHost, SrcName, Kind, DstName, RoutingKey]. - -khepri_routes_path() -> - [?MODULE, routes]. -khepri_route_exchange_path(#resource{virtual_host = VHost, name = SrcName}) -> - [?MODULE, routes, VHost, SrcName]. 
+khepri_route_path( + #binding{source = #resource{virtual_host = VHost, + kind = exchange, + name = SrcName}, + destination = #resource{virtual_host = VHost, + kind = Kind, + name = DstName}, + key = RoutingKey}) -> + khepri_route_path(VHost, SrcName, Kind, DstName, RoutingKey). + +khepri_route_path(VHost, SrcName, Kind, DstName, RoutingKey) + when ?IS_KHEPRI_PATH_CONDITION(Kind) andalso + ?IS_KHEPRI_PATH_CONDITION(DstName) andalso + ?IS_KHEPRI_PATH_CONDITION(RoutingKey) -> + ExchangePath = rabbit_db_exchange:khepri_exchange_path(VHost, SrcName), + ExchangePath ++ [bindings, Kind, DstName, RoutingKey]. + +khepri_route_path_to_args(Path) -> + Pattern = khepri_route_path( + '$VHost', '$SrcName', '$Kind', '$DstName', '$RoutingKey'), + khepri_route_path_to_args(Pattern, Path, #{}). + +khepri_route_path_to_args([Var | Pattern], [Value | Path], Result) + when Var =:= '$VHost' orelse + Var =:= '$SrcName' orelse + Var =:= '$Kind' orelse + Var =:= '$DstName' orelse + Var =:= '$RoutingKey' -> + Result1 = Result#{Var => Value}, + khepri_route_path_to_args(Pattern, Path, Result1); +khepri_route_path_to_args([Comp | Pattern], [Comp | Path], Result) -> + khepri_route_path_to_args(Pattern, Path, Result); +khepri_route_path_to_args( + [], _, + #{'$VHost' := VHost, + '$SrcName' := SrcName, + '$Kind' := Kind, + '$DstName' := DstName, + '$RoutingKey' := RoutingKey}) -> + {VHost, SrcName, Kind, DstName, RoutingKey}. %% -------------------------------------------------------------- %% Internal @@ -1105,15 +1159,18 @@ sync_index_route(_, _, _) -> OnlyDurable :: boolean(), Ret :: rabbit_binding:deletions(). 
maybe_auto_delete_exchange_in_mnesia(XName, Bindings, Deletions, OnlyDurable) -> - {Entry, Deletions1} = - case rabbit_db_exchange:maybe_auto_delete_in_mnesia(XName, OnlyDurable) of - {not_deleted, X} -> - {{X, not_deleted, Bindings}, Deletions}; - {deleted, X, Deletions2} -> - {{X, deleted, Bindings}, - rabbit_binding:combine_deletions(Deletions, Deletions2)} - end, - rabbit_binding:add_deletion(XName, Entry, Deletions1). + case rabbit_db_exchange:maybe_auto_delete_in_mnesia(XName, OnlyDurable) of + {not_deleted, undefined} -> + Deletions; + {not_deleted, X} -> + rabbit_binding:add_deletion( + XName, X, not_deleted, Bindings, Deletions); + {deleted, X, Deletions1} -> + Deletions2 = rabbit_binding:combine_deletions( + Deletions, Deletions1), + rabbit_binding:add_deletion( + XName, X, deleted, Bindings, Deletions2) + end. %% Instead of locking entire table on remove operations we can lock the %% affected resource only. diff --git a/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl b/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl index 0bef352db141..6f568105be18 100644 --- a/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_binding_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). 
@@ -44,7 +45,7 @@ init_copy_to_khepri(_StoreId, _MigrationId, Tables) -> %% @private copy_to_khepri(rabbit_route = Table, - #route{binding = #binding{source = XName} = Binding}, + #route{binding = #binding{} = Binding}, State) -> ?LOG_DEBUG( "Mnesia->Khepri data copy: [~0p] key: ~0p", @@ -54,18 +55,12 @@ copy_to_khepri(rabbit_route = Table, rabbit_db_m2k_converter:with_correlation_id( fun(CorrId) -> Extra = #{async => CorrId}, - XPath = rabbit_db_binding:khepri_route_exchange_path(XName), ?LOG_DEBUG( "Mnesia->Khepri data copy: [~0p] path: ~0p corr: ~0p", [Table, Path, CorrId], #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), rabbit_khepri:transaction( fun() -> - %% Store the exchange's type in the exchange name - %% branch of the tree. - [#exchange{type = XType}] = - rabbit_db_exchange:get_in_khepri_tx(XName), - ok = khepri_tx:put(XPath, #{type => XType}), %% Add the binding to the set at the binding's %% path. Set = case khepri_tx:get(Path) of @@ -111,8 +106,4 @@ delete_from_khepri(rabbit_route = Table, Key, State) -> Table :: atom(). clear_data_in_khepri(rabbit_route) -> - Path = rabbit_db_binding:khepri_routes_path(), - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end. + rabbit_db_binding:clear_in_khepri(). 
diff --git a/deps/rabbit/src/rabbit_db_cluster.erl b/deps/rabbit/src/rabbit_db_cluster.erl index b1f8cb5348ef..b7fc1d5b9dce 100644 --- a/deps/rabbit/src/rabbit_db_cluster.erl +++ b/deps/rabbit/src/rabbit_db_cluster.erl @@ -57,7 +57,7 @@ can_join(RemoteNode) -> "DB: checking if `~ts` can join cluster using remote node `~ts`", [node(), RemoteNode], #{domain => ?RMQLOG_DOMAIN_DB}), - case rabbit_feature_flags:check_node_compatibility(RemoteNode) of + case rabbit_feature_flags:check_node_compatibility(RemoteNode, true) of ok -> case rabbit_khepri:is_enabled(RemoteNode) of true -> can_join_using_khepri(RemoteNode); @@ -176,6 +176,15 @@ join(RemoteNode, NodeType) false -> join_using_mnesia(ClusterNodes, NodeType) end, + case Ret of + ok -> + ok; + {error, _} -> + %% We reset feature flags states again and make sure the + %% recorded states on disk are deleted. + rabbit_feature_flags:reset() + end, + %% Restart RabbitMQ afterwards, if it was running before the join. %% Likewise for the Feature flags controller and Mnesia (if we %% still need it). @@ -201,10 +210,6 @@ join(RemoteNode, NodeType) rabbit_node_monitor:notify_joined_cluster(), ok; {error, _} = Error -> - %% We reset feature flags states again and make sure the - %% recorded states on disk are deleted. - rabbit_feature_flags:reset(), - Error end; {ok, already_member} -> diff --git a/deps/rabbit/src/rabbit_db_exchange.erl b/deps/rabbit/src/rabbit_db_exchange.erl index e45edd6dda66..ef6b9f3c61aa 100644 --- a/deps/rabbit/src/rabbit_db_exchange.erl +++ b/deps/rabbit/src/rabbit_db_exchange.erl @@ -10,6 +10,8 @@ -include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include("include/khepri.hrl"). 
+ -export([ get_all/0, get_all/1, @@ -24,6 +26,7 @@ peek_serial/1, next_serial/1, delete/2, + delete_all/1, delete_serial/1, recover/1, match/1, @@ -41,17 +44,16 @@ get_in_khepri_tx/1, update_in_mnesia_tx/2, update_in_khepri_tx/2, - path/1 + clear_exchanges_in_khepri/0, + clear_exchange_serials_in_khepri/0 ]). %% For testing -export([clear/0]). -export([ - khepri_exchange_path/1, - khepri_exchange_serial_path/1, - khepri_exchanges_path/0, - khepri_exchange_serials_path/0 + khepri_exchange_path/1, khepri_exchange_path/2, + khepri_exchange_serial_path/1, khepri_exchange_serial_path/2 ]). -define(MNESIA_TABLE, rabbit_exchange). @@ -81,7 +83,8 @@ get_all_in_mnesia() -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, #exchange{_ = '_'}). get_all_in_khepri() -> - rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [rabbit_khepri:if_has_data_wildcard()]). + Path = khepri_exchange_path(?KHEPRI_WILDCARD_STAR, #if_has_data{}), + rabbit_db:list_in_khepri(Path). -spec get_all(VHostName) -> [Exchange] when VHostName :: vhost:name(), @@ -103,7 +106,8 @@ get_all_in_mnesia(VHost) -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, Match). get_all_in_khepri(VHost) -> - rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [VHost, rabbit_khepri:if_has_data_wildcard()]). + Path = khepri_exchange_path(VHost, #if_has_data{}), + rabbit_db:list_in_khepri(Path). %% ------------------------------------------------------------------- %% get_all_durable(). @@ -127,7 +131,7 @@ get_all_durable_in_mnesia() -> rabbit_db:list_in_mnesia(rabbit_durable_exchange, #exchange{_ = '_'}). get_all_durable_in_khepri() -> - rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [rabbit_khepri:if_has_data_wildcard()]). + get_all_in_khepri(). %% ------------------------------------------------------------------- %% list(). @@ -151,11 +155,13 @@ list_in_mnesia() -> mnesia:dirty_all_keys(?MNESIA_TABLE). 
list_in_khepri() -> - case rabbit_khepri:match(khepri_exchanges_path() ++ - [rabbit_khepri:if_has_data_wildcard()]) of - {ok, Map} -> - maps:fold(fun(_K, X, Acc) -> [X#exchange.name | Acc] end, [], Map); - _ -> + try + ets:foldr( + fun(#exchange{name = Name}, Acc) -> + [Name | Acc] + end, [], ?KHEPRI_PROJECTION) + catch + error:badarg -> [] end. @@ -200,7 +206,8 @@ get_in_khepri(Name) -> Ret :: [Exchange :: rabbit_types:exchange()]. get_in_khepri_tx(Name) -> - case khepri_tx:get(khepri_exchange_path(Name)) of + Path = khepri_exchange_path(Name), + case khepri_tx:get(Path) of {ok, X} -> [X]; _ -> [] end. @@ -259,7 +266,11 @@ count_in_mnesia() -> mnesia:table_info(?MNESIA_TABLE, size). count_in_khepri() -> - rabbit_khepri:count_children(khepri_exchanges_path() ++ [?KHEPRI_WILDCARD_STAR]). + Path = khepri_exchange_path(?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:count(Path) of + {ok, Count} -> Count; + _ -> 0 + end. %% ------------------------------------------------------------------- %% update(). @@ -562,7 +573,7 @@ next_serial_in_khepri_tx(#exchange{name = XName}) -> IfUnused :: boolean(), Exchange :: rabbit_types:exchange(), Binding :: rabbit_types:binding(), - Deletions :: dict:dict(), + Deletions :: rabbit_binding:deletions(), Ret :: {deleted, Exchange, [Binding], Deletions} | {error, not_found} | {error, in_use} | @@ -613,7 +624,7 @@ unconditional_delete_in_mnesia(X, OnlyDurable) -> RemoveBindingsForSource :: boolean(), Exchange :: rabbit_types:exchange(), Binding :: rabbit_types:binding(), - Deletions :: dict:dict(), + Deletions :: rabbit_binding:deletions(), Ret :: {error, not_found} | {error, in_use} | {deleted, Exchange, [Binding], Deletions}. 
delete_in_mnesia(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) -> ok = mnesia:delete({?MNESIA_TABLE, XName}), @@ -647,6 +658,69 @@ delete_in_khepri(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSour ok = khepri_tx:delete(khepri_exchange_path(XName)), rabbit_db_binding:delete_all_for_exchange_in_khepri(X, OnlyDurable, RemoveBindingsForSource). +%% ------------------------------------------------------------------- +%% delete_all(). +%% ------------------------------------------------------------------- + +-spec delete_all(VHostName) -> Ret when + VHostName :: vhost:name(), + Deletions :: rabbit_binding:deletions(), + Ret :: {ok, Deletions}. +%% @doc Deletes all exchanges for a given vhost. +%% +%% @returns an `{ok, Deletions}' tuple containing the {@link +%% rabbit_binding:deletions()} caused by deleting the exchanges under the given +%% vhost. +%% +%% @private + +delete_all(VHostName) -> + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> delete_all_in_mnesia(VHostName) end, + khepri => fun() -> delete_all_in_khepri(VHostName) end + }). + +delete_all_in_mnesia(VHostName) -> + rabbit_mnesia:execute_mnesia_transaction( + fun() -> + delete_all_in_mnesia_tx(VHostName) + end). + +delete_all_in_mnesia_tx(VHostName) -> + Match = #exchange{name = rabbit_misc:r(VHostName, exchange), _ = '_'}, + Xs = mnesia:match_object(?MNESIA_TABLE, Match, write), + Deletions = + lists:foldl( + fun(X, Acc) -> + {deleted, #exchange{name = XName}, Bindings, XDeletions} = + unconditional_delete_in_mnesia( X, false), + XDeletions1 = rabbit_binding:add_deletion( + XName, X, deleted, Bindings, XDeletions), + rabbit_binding:combine_deletions(Acc, XDeletions1) + end, rabbit_binding:new_deletions(), Xs), + {ok, Deletions}. + +delete_all_in_khepri(VHostName) -> + rabbit_khepri:transaction( + fun() -> + delete_all_in_khepri_tx(VHostName) + end, rw, #{timeout => infinity}). 
+ +delete_all_in_khepri_tx(VHostName) -> + Pattern = khepri_exchange_path(VHostName, ?KHEPRI_WILDCARD_STAR), + {ok, NodeProps} = khepri_tx_adv:delete_many(Pattern), + Deletions = + maps:fold( + fun(_Path, #{data := X}, Deletions) -> + {deleted, #exchange{name = XName}, Bindings, XDeletions} = + rabbit_db_binding:delete_all_for_exchange_in_khepri( + X, false, true), + Deletions1 = rabbit_binding:add_deletion( + XName, X, deleted, Bindings, XDeletions), + rabbit_binding:combine_deletions(Deletions, Deletions1) + end, rabbit_binding:new_deletions(), NodeProps), + {ok, Deletions}. + %% ------------------------------------------------------------------- %% delete_serial(). %% ------------------------------------------------------------------- @@ -717,8 +791,8 @@ recover_in_khepri(VHost) -> %% cannot be skipped and stopping the node is not an option - %% the next boot most likely would behave the same way. %% Any other request stays with the default timeout, currently 30s. - Exchanges0 = rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [VHost, rabbit_khepri:if_has_data_wildcard()], - #{timeout => infinity}), + Path = khepri_exchange_path(VHost, #if_has_data{}), + Exchanges0 = rabbit_db:list_in_khepri(Path, #{timeout => infinity}), Exchanges = [rabbit_exchange_decorator:set(X) || X <- Exchanges0], rabbit_khepri:transaction( @@ -763,7 +837,8 @@ match_in_mnesia(Pattern) -> match_in_khepri(Pattern0) -> Pattern = #if_data_matches{pattern = Pattern0}, - rabbit_db:list_in_khepri(khepri_exchanges_path() ++ [?KHEPRI_WILDCARD_STAR, Pattern]). + Path = khepri_exchange_path(?KHEPRI_WILDCARD_STAR, Pattern), + rabbit_db:list_in_khepri(Path). %% ------------------------------------------------------------------- %% exists(). @@ -812,8 +887,17 @@ clear_in_mnesia() -> ok. clear_in_khepri() -> - khepri_delete(khepri_exchanges_path()), - khepri_delete(khepri_exchange_serials_path()). + clear_exchanges_in_khepri(), + clear_exchange_serials_in_khepri(). 
+ +clear_exchanges_in_khepri() -> + Path = khepri_exchange_path(?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + khepri_delete(Path). + +clear_exchange_serials_in_khepri() -> + Path = khepri_exchange_serial_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + khepri_delete(Path). khepri_delete(Path) -> case rabbit_khepri:delete(Path) of @@ -873,25 +957,14 @@ maybe_auto_delete_in_khepri(XName, OnlyDurable) -> %% Khepri paths %% ------------------------------------------------------------------- -khepri_exchanges_path() -> - [?MODULE, exchanges]. - khepri_exchange_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, exchanges, VHost, Name]. - -khepri_exchange_serials_path() -> - [?MODULE, exchange_serials]. + khepri_exchange_path(VHost, Name). -khepri_exchange_serial_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, exchange_serials, VHost, Name]. +khepri_exchange_path(VHost, Name) when ?IS_KHEPRI_PATH_CONDITION(Name) -> + rabbit_db_vhost:khepri_vhost_path(VHost) ++ [exchanges, Name]. -%% ------------------------------------------------------------------- -%% path(). -%% ------------------------------------------------------------------- - --spec path(ExchangeName) -> Path when - ExchangeName :: rabbit_exchange:name(), - Path :: khepri_path:path(). +khepri_exchange_serial_path(#resource{} = Resource) -> + khepri_exchange_path(Resource) ++ [serial]. -path(Name) -> - khepri_exchange_path(Name). +khepri_exchange_serial_path(VHost, Name) -> + khepri_exchange_path(VHost, Name) ++ [serial]. diff --git a/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl b/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl index 320d6fc7a034..426b71c3037e 100644 --- a/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_exchange_m2k_converter.erl @@ -129,12 +129,6 @@ delete_from_khepri(rabbit_exchange_serial = Table, Key, State) -> Table :: atom(). 
clear_data_in_khepri(rabbit_exchange) -> - khepri_delete(rabbit_db_exchange:khepri_exchanges_path()); + rabbit_db_exchange:clear_exchanges_in_khepri(); clear_data_in_khepri(rabbit_exchange_serial) -> - khepri_delete(rabbit_db_exchange:khepri_exchange_serials_path()). - -khepri_delete(Path) -> - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end. + rabbit_db_exchange:clear_exchange_serials_in_khepri(). diff --git a/deps/rabbit/src/rabbit_db_maintenance.erl b/deps/rabbit/src/rabbit_db_maintenance.erl index 0a39e8db4506..de7162ee70ae 100644 --- a/deps/rabbit/src/rabbit_db_maintenance.erl +++ b/deps/rabbit/src/rabbit_db_maintenance.erl @@ -7,8 +7,11 @@ -module(rabbit_db_maintenance). +-include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include("include/khepri.hrl"). + -export([ table_definitions/0, set/1, @@ -17,8 +20,7 @@ ]). -export([ - khepri_maintenance_path/1, - khepri_maintenance_path/0 + khepri_maintenance_path/1 ]). -define(TABLE, rabbit_node_maintenance_states). @@ -155,11 +157,7 @@ get_consistent_in_mnesia(Node) -> get_consistent_in_khepri(Node) -> Path = khepri_maintenance_path(Node), - %% FIXME: Ra consistent queries are fragile in the sense that the query - %% function may run on a remote node and the function reference or MFA may - %% not be valid on that node. That's why we force a local query for now. - %Options = #{favor => consistent}, - Options = #{favor => local}, + Options = #{favor => consistency}, case rabbit_khepri:get(Path, Options) of {ok, #node_maintenance_state{status = Status}} -> Status; @@ -171,8 +169,5 @@ get_consistent_in_khepri(Node) -> %% Khepri paths %% ------------------------------------------------------------------- -khepri_maintenance_path() -> - [?MODULE, maintenance]. - -khepri_maintenance_path(Node) -> - [?MODULE, maintenance, Node]. +khepri_maintenance_path(Node) when ?IS_KHEPRI_PATH_CONDITION(Node) -> + ?KHEPRI_ROOT_PATH ++ [node_maintenance, Node]. 
diff --git a/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl b/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl index 815b8a41e543..62122ac631ed 100644 --- a/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_maintenance_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -95,7 +96,7 @@ delete_from_khepri(rabbit_node_maintenance_states = Table, Key, State) -> Table :: atom(). clear_data_in_khepri(rabbit_node_maintenance_states) -> - Path = rabbit_db_maintenance:khepri_maintenance_path(), + Path = rabbit_db_maintenance:khepri_maintenance_path(?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) diff --git a/deps/rabbit/src/rabbit_db_msup.erl b/deps/rabbit/src/rabbit_db_msup.erl index 3939efa6ae60..152cb71f9acb 100644 --- a/deps/rabbit/src/rabbit_db_msup.erl +++ b/deps/rabbit/src/rabbit_db_msup.erl @@ -10,6 +10,8 @@ -include_lib("khepri/include/khepri.hrl"). -include("mirrored_supervisor.hrl"). +-include("include/khepri.hrl"). + -export([ create_tables/0, table_definitions/0, @@ -17,15 +19,13 @@ find_mirror/2, update_all/2, delete/2, - delete_all/1 + delete_all/1, + clear_in_khepri/0 ]). -export([clear/0]). --export([ - khepri_mirrored_supervisor_path/2, - khepri_mirrored_supervisor_path/0 - ]). +-export([khepri_mirrored_supervisor_path/2]). -define(TABLE, mirrored_sup_childspec). 
-define(TABLE_DEF, @@ -251,7 +251,9 @@ update_all_in_khepri(Overall, OldOverall) -> Pattern = #mirrored_sup_childspec{mirroring_pid = OldOverall, _ = '_'}, Conditions = [?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}], - PathPattern = khepri_mirrored_supervisor_path() ++ [#if_all{conditions = Conditions}], + PathPattern = khepri_mirrored_supervisor_path( + ?KHEPRI_WILDCARD_STAR, + #if_all{conditions = Conditions}), rabbit_khepri:transaction( fun() -> case khepri_tx:get_many(PathPattern) of @@ -291,8 +293,9 @@ delete_all_in_khepri(Group) -> Pattern = #mirrored_sup_childspec{key = {Group, '_'}, _ = '_'}, Conditions = [?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}], - rabbit_khepri:delete(khepri_mirrored_supervisor_path() ++ - [#if_all{conditions = Conditions}]). + rabbit_khepri:delete(khepri_mirrored_supervisor_path( + ?KHEPRI_WILDCARD_STAR, + #if_all{conditions = Conditions})). %% ------------------------------------------------------------------- %% clear(). @@ -311,7 +314,8 @@ clear_in_mnesia() -> ok. clear_in_khepri() -> - Path = khepri_mirrored_supervisor_path(), + Path = khepri_mirrored_supervisor_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) @@ -321,12 +325,11 @@ clear_in_khepri() -> %% Khepri paths %% ------------------------------------------------------------------- -khepri_mirrored_supervisor_path() -> - [?MODULE, mirrored_supervisor_childspec]. - khepri_mirrored_supervisor_path(Group, Id) - when is_atom(Id) orelse is_binary(Id) -> - [?MODULE, mirrored_supervisor_childspec, Group, Id]; -khepri_mirrored_supervisor_path(Group, Id) -> + when ?IS_KHEPRI_PATH_CONDITION(Group) andalso + ?IS_KHEPRI_PATH_CONDITION(Id) -> + ?KHEPRI_ROOT_PATH ++ [mirrored_supervisors, Group, Id]; +khepri_mirrored_supervisor_path(Group, Id) + when is_atom(Group) -> IdPath = Group:id_to_khepri_path(Id), - [?MODULE, mirrored_supervisor_childspec, Group] ++ IdPath. 
+ ?KHEPRI_ROOT_PATH ++ [mirrored_supervisors, Group] ++ IdPath. diff --git a/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl b/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl index a1610716835c..5e78603f4392 100644 --- a/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_msup_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include("mirrored_supervisor.hrl"). @@ -96,8 +97,4 @@ delete_from_khepri( Table :: atom(). clear_data_in_khepri(mirrored_sup_childspec) -> - Path = rabbit_db_msup:khepri_mirrored_supervisor_path(), - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end. + rabbit_db_msup:clear_in_khepri(). diff --git a/deps/rabbit/src/rabbit_db_queue.erl b/deps/rabbit/src/rabbit_db_queue.erl index 3ffa50594df1..30251f4d5598 100644 --- a/deps/rabbit/src/rabbit_db_queue.erl +++ b/deps/rabbit/src/rabbit_db_queue.erl @@ -25,7 +25,6 @@ count/1, create_or_get/1, set/1, - set_many/1, delete/2, update/2, update_decorators/2, @@ -41,7 +40,8 @@ update_durable/2, get_durable/1, get_many_durable/1, - consistent_exists/1 + consistent_exists/1, + clear_in_khepri/0 ]). %% Used by on_node_up and on_node_down. @@ -71,10 +71,7 @@ %% For testing -export([clear/0]). --export([ - khepri_queue_path/1, - khepri_queues_path/0 - ]). +-export([khepri_queue_path/1, khepri_queue_path/2]). -dialyzer({nowarn_function, [foreach_transient/1, foreach_transient_in_khepri/1]}). @@ -377,7 +374,9 @@ list_for_count_in_khepri(VHostName) -> -spec delete(QName, Reason) -> Ret when QName :: rabbit_amqqueue:name(), Reason :: atom(), - Ret :: ok | Deletions :: rabbit_binding:deletions(). + Ret :: ok | + Deletions :: rabbit_binding:deletions() | + rabbit_khepri:timeout_error(). 
delete(QueueName, Reason) -> rabbit_khepri:handle_fallback( @@ -703,10 +702,10 @@ update_durable_in_mnesia(UpdateFun, FilterFun) -> ok. update_durable_in_khepri(UpdateFun, FilterFun) -> - PathPattern = khepri_queues_path() ++ - [?KHEPRI_WILDCARD_STAR, - #if_data_matches{ - pattern = amqqueue:pattern_match_on_durable(true)}], + PathPattern = khepri_queue_path( + ?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(true)}), %% The `FilterFun' or `UpdateFun' might attempt to do something %% incompatible with Khepri transactions (such as dynamic apply, sending %% a message, etc.), so this function cannot be written as a regular @@ -831,7 +830,10 @@ get_all_by_pattern_in_mnesia(Pattern) -> rabbit_db:list_in_mnesia(?MNESIA_TABLE, Pattern). get_all_by_pattern_in_khepri(Pattern) -> - rabbit_db:list_in_khepri(khepri_queues_path() ++ [rabbit_khepri:if_has_data([?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}])]). + Path = khepri_queue_path( + ?KHEPRI_WILDCARD_STAR, + #if_data_matches{pattern = Pattern}), + rabbit_db:list_in_khepri(Path). %% ------------------------------------------------------------------- %% get_all_by_type_and_node(). @@ -866,7 +868,8 @@ get_all_by_type_and_node_in_mnesia(VHostName, Type, Node) -> get_all_by_type_and_node_in_khepri(VHostName, Type, Node) -> Pattern = amqqueue:pattern_match_on_type(Type), - Qs = rabbit_db:list_in_khepri(khepri_queues_path() ++ [VHostName, rabbit_khepri:if_has_data([?KHEPRI_WILDCARD_STAR_STAR, #if_data_matches{pattern = Pattern}])]), + Path = khepri_queue_path(VHostName, #if_data_matches{pattern = Pattern}), + Qs = rabbit_db:list_in_khepri(Path), [Q || Q <- Qs, amqqueue:qnode(Q) == Node]. 
%% ------------------------------------------------------------------- @@ -875,7 +878,10 @@ get_all_by_type_and_node_in_khepri(VHostName, Type, Node) -> -spec create_or_get(Queue) -> Ret when Queue :: amqqueue:amqqueue(), - Ret :: {created, Queue} | {existing, Queue} | {absent, Queue, nodedown}. + Ret :: {created, Queue} | + {existing, Queue} | + {absent, Queue, nodedown} | + rabbit_khepri:timeout_error(). %% @doc Writes a queue record if it doesn't exist already or returns the existing one %% %% @returns the existing record if there is one in the database already, or the newly @@ -924,8 +930,9 @@ create_or_get_in_khepri(Q) -> %% set(). %% ------------------------------------------------------------------- --spec set(Queue) -> ok when - Queue :: amqqueue:amqqueue(). +-spec set(Queue) -> Ret when + Queue :: amqqueue:amqqueue(), + Ret :: ok | rabbit_khepri:timeout_error(). %% @doc Writes a queue record. If the queue is durable, it writes both instances: %% durable and transient. For the durable one, it resets decorators. %% The transient one is left as it is. @@ -958,52 +965,6 @@ set_in_khepri(Q) -> Path = khepri_queue_path(amqqueue:get_name(Q)), rabbit_khepri:put(Path, Q). -%% ------------------------------------------------------------------- -%% set_many(). -%% ------------------------------------------------------------------- - --spec set_many([Queue]) -> ok when - Queue :: amqqueue:amqqueue(). -%% @doc Writes a list of durable queue records. -%% -%% It is responsibility of the calling function to ensure all records are -%% durable. -%% -%% @private - -set_many(Qs) -> - rabbit_khepri:handle_fallback( - #{mnesia => fun() -> set_many_in_mnesia(Qs) end, - khepri => fun() -> set_many_in_khepri(Qs) end - }). - -set_many_in_mnesia(Qs) -> - {atomic, ok} = - %% Just to be nested in forget_node_for_queue - mnesia:transaction( - fun() -> - [begin - true = amqqueue:is_durable(Q), - ok = mnesia:write(?MNESIA_DURABLE_TABLE, Q, write) - end || Q <- Qs], - ok - end), - ok. 
- -set_many_in_khepri(Qs) -> - rabbit_khepri:transaction( - fun() -> - [begin - true = amqqueue:is_durable(Q), - Path = khepri_queue_path(amqqueue:get_name(Q)), - case khepri_tx:put(Path, Q) of - ok -> ok; - Error -> khepri_tx:abort(Error) - end - end || Q <- Qs] - end), - ok. - %% ------------------------------------------------------------------- %% delete_transient(). %% ------------------------------------------------------------------- @@ -1012,7 +973,8 @@ set_many_in_khepri(Qs) -> Queue :: amqqueue:amqqueue(), FilterFun :: fun((Queue) -> boolean()), QName :: rabbit_amqqueue:name(), - Ret :: {[QName], [Deletions :: rabbit_binding:deletions()]}. + Ret :: {[QName], [Deletions :: rabbit_binding:deletions()]} + | rabbit_khepri:timeout_error(). %% @doc Deletes all transient queues that match `FilterFun'. %% %% @private @@ -1063,36 +1025,69 @@ partition_queues(T) -> [T]. delete_transient_in_khepri(FilterFun) -> - PathPattern = khepri_queues_path() ++ - [?KHEPRI_WILDCARD_STAR, - #if_data_matches{ - pattern = amqqueue:pattern_match_on_durable(false)}], + PathPattern = khepri_queue_path( + ?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(false)}), %% The `FilterFun' might try to determine if the queue's process is alive. %% This can cause a `calling_self' exception if we use the `FilterFun' %% within the function passed to `khepri:fold/5' since the Khepri server %% process might call itself. Instead we can fetch all of the transient %% queues with `get_many' and then filter and fold the results outside of %% Khepri's Ra server process. 
- case rabbit_khepri:get_many(PathPattern) of - {ok, Qs} -> - Items = maps:fold( - fun(Path, Queue, Acc) when ?is_amqqueue(Queue) -> - case FilterFun(Queue) of - true -> - QueueName = khepri_queue_path_to_name( - Path), - case delete_in_khepri(QueueName, false) of - ok -> - Acc; - Deletions -> - [{QueueName, Deletions} | Acc] - end; - false -> - Acc - end - end, [], Qs), - {QueueNames, Deletions} = lists:unzip(Items), - {QueueNames, lists:flatten(Deletions)}; + case rabbit_khepri:adv_get_many(PathPattern) of + {ok, Props} -> + Qs = maps:fold( + fun(Path0, #{data := Q, payload_version := Vsn}, Acc) + when ?is_amqqueue(Q) -> + case FilterFun(Q) of + true -> + Path = khepri_path:combine_with_conditions( + Path0, + [#if_payload_version{version = Vsn}]), + QName = amqqueue:get_name(Q), + [{Path, QName} | Acc]; + false -> + Acc + end + end, [], Props), + do_delete_transient_queues_in_khepri(Qs, FilterFun); + {error, _} = Error -> + Error + end. + +do_delete_transient_queues_in_khepri([], _FilterFun) -> + %% If there are no changes to make, avoid performing a transaction. When + %% Khepri is in a minority this avoids a long timeout waiting for the + %% transaction command to be processed. Otherwise it avoids appending a + %% somewhat large transaction command to Khepri's log. + {[], []}; +do_delete_transient_queues_in_khepri(Qs, FilterFun) -> + Res = rabbit_khepri:transaction( + fun() -> + rabbit_misc:fold_while_ok( + fun({Path, QName}, Acc) -> + %% Also see `delete_in_khepri/2'. + case khepri_tx_adv:delete(Path) of + {ok, #{data := _}} -> + Deletions = rabbit_db_binding:delete_for_destination_in_khepri( + QName, false), + {ok, [{QName, Deletions} | Acc]}; + {ok, _} -> + {ok, Acc}; + {error, _} = Error -> + Error + end + end, [], Qs) + end), + case Res of + {ok, Items} -> + {QNames, Deletions} = lists:unzip(Items), + {QNames, lists:flatten(Deletions)}; + {error, {khepri, mismatching_node, _}} -> + %% One of the queues changed while attempting to update all + %% queues. 
Retry the operation. + delete_transient_in_khepri(FilterFun); {error, _} = Error -> Error end. @@ -1124,10 +1119,10 @@ foreach_transient_in_mnesia(UpdateFun) -> end). foreach_transient_in_khepri(UpdateFun) -> - PathPattern = khepri_queues_path() ++ - [?KHEPRI_WILDCARD_STAR, - #if_data_matches{ - pattern = amqqueue:pattern_match_on_durable(false)}], + PathPattern = khepri_queue_path( + ?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(false)}), %% The `UpdateFun' might try to determine if the queue's process is alive. %% This can cause a `calling_self' exception if we use the `UpdateFun' %% within the function passed to `khepri:fold/5' since the Khepri server @@ -1175,10 +1170,10 @@ foreach_durable_in_mnesia(UpdateFun, FilterFun) -> ok. foreach_durable_in_khepri(UpdateFun, FilterFun) -> - Path = khepri_queues_path() ++ - [?KHEPRI_WILDCARD_STAR, - #if_data_matches{ - pattern = amqqueue:pattern_match_on_durable(true)}], + Path = khepri_queue_path( + ?KHEPRI_WILDCARD_STAR, + #if_data_matches{ + pattern = amqqueue:pattern_match_on_durable(true)}), case rabbit_khepri:filter(Path, fun(_, #{data := Q}) -> FilterFun(Q) end) of @@ -1294,7 +1289,7 @@ clear_in_mnesia() -> ok. clear_in_khepri() -> - Path = khepri_queues_path(), + Path = khepri_queue_path(?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) @@ -1361,11 +1356,8 @@ list_with_possible_retry_in_khepri(Fun) -> %% Khepri paths %% -------------------------------------------------------------- -khepri_queues_path() -> - [?MODULE, queues]. - khepri_queue_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, queues, VHost, Name]. + khepri_queue_path(VHost, Name). -khepri_queue_path_to_name([?MODULE, queues, VHost, Name]) -> - rabbit_misc:r(VHost, queue, Name). +khepri_queue_path(VHost, Name) when ?IS_KHEPRI_PATH_CONDITION(Name) -> + rabbit_db_vhost:khepri_vhost_path(VHost) ++ [queues, Name]. 
diff --git a/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl b/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl index fd9f88b0ee8f..5e75f773ffb8 100644 --- a/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_queue_m2k_converter.erl @@ -95,12 +95,6 @@ delete_from_khepri(rabbit_queue = Table, Key, State) -> Table :: atom(). clear_data_in_khepri(rabbit_queue) -> - khepri_delete(rabbit_db_queue:khepri_queues_path()); + rabbit_db_queue:clear_in_khepri(); clear_data_in_khepri(rabbit_durable_queue) -> - khepri_delete(rabbit_db_queue:khepri_queues_path()). - -khepri_delete(Path) -> - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end. + rabbit_db_queue:clear_in_khepri(). diff --git a/deps/rabbit/src/rabbit_db_rtparams.erl b/deps/rabbit/src/rabbit_db_rtparams.erl index 0f07bf82b483..f57642ee953b 100644 --- a/deps/rabbit/src/rabbit_db_rtparams.erl +++ b/deps/rabbit/src/rabbit_db_rtparams.erl @@ -10,6 +10,8 @@ -include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include("include/khepri.hrl"). + -export([set/2, set/4, get/1, get_all/0, get_all/2, @@ -17,12 +19,12 @@ delete_vhost/1]). -export([khepri_vhost_rp_path/3, - khepri_global_rp_path/1, - khepri_rp_path/0 + khepri_global_rp_path/1 ]). -define(MNESIA_TABLE, rabbit_runtime_parameters). --define(KHEPRI_PROJECTION, rabbit_khepri_runtime_parameters). +-define(KHEPRI_GLOBAL_PROJECTION, rabbit_khepri_global_rtparam). +-define(KHEPRI_VHOST_PROJECTION, rabbit_khepri_per_vhost_rtparam). -define(any(Value), case Value of '_' -> ?KHEPRI_WILDCARD_STAR; _ -> Value @@ -150,8 +152,16 @@ get_in_mnesia(Key) -> [Record] -> Record end. 
-get_in_khepri(Key) -> - try ets:lookup(?KHEPRI_PROJECTION, Key) of +get_in_khepri(Key) when is_atom(Key) -> + try ets:lookup(?KHEPRI_GLOBAL_PROJECTION, Key) of + [] -> undefined; + [Record] -> Record + catch + error:badarg -> + undefined + end; +get_in_khepri(Key) when is_tuple(Key) -> + try ets:lookup(?KHEPRI_VHOST_PROJECTION, Key) of [] -> undefined; [Record] -> Record catch @@ -181,7 +191,8 @@ get_all_in_mnesia() -> get_all_in_khepri() -> try - ets:tab2list(?KHEPRI_PROJECTION) + ets:tab2list(?KHEPRI_GLOBAL_PROJECTION) ++ + ets:tab2list(?KHEPRI_VHOST_PROJECTION) catch error:badarg -> [] @@ -225,7 +236,7 @@ get_all_in_khepri(VHostName, Comp) -> try Match = #runtime_parameters{key = {VHostName, Comp, '_'}, _ = '_'}, - ets:match_object(?KHEPRI_PROJECTION, Match) + ets:match_object(?KHEPRI_VHOST_PROJECTION, Match) catch error:badarg -> [] @@ -347,17 +358,16 @@ delete_vhost_in_khepri(VHostName) -> %% ------------------------------------------------------------------- -khepri_rp_path() -> - [?MODULE]. - khepri_rp_path({VHost, Component, Name}) -> khepri_vhost_rp_path(VHost, Component, Name); khepri_rp_path(Key) -> khepri_global_rp_path(Key). -khepri_global_rp_path(Key) -> - [?MODULE, global, Key]. - -khepri_vhost_rp_path(VHost, Component, Name) -> - [?MODULE, per_vhost, VHost, Component, Name]. +khepri_global_rp_path(Key) when ?IS_KHEPRI_PATH_CONDITION(Key) -> + ?KHEPRI_ROOT_PATH ++ [runtime_params, Key]. +khepri_vhost_rp_path(VHost, Component, Name) + when ?IS_KHEPRI_PATH_CONDITION(Component) andalso + ?IS_KHEPRI_PATH_CONDITION(Name) -> + VHostPath = rabbit_db_vhost:khepri_vhost_path(VHost), + VHostPath ++ [runtime_params, Component, Name]. 
diff --git a/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl b/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl index fdc8fd9a20b9..9756640fbce9 100644 --- a/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_rtparams_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -99,8 +100,16 @@ rtparams_path(Key) -> Table :: atom(). clear_data_in_khepri(rabbit_runtime_parameters) -> - Path = rabbit_db_rtparams:khepri_rp_path(), - case rabbit_khepri:delete(Path) of + Path1 = rabbit_db_rtparams:khepri_global_rp_path(?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:delete(Path1) of ok -> ok; - Error -> throw(Error) + Error1 -> throw(Error1) + end, + Path2 = rabbit_db_rtparams:khepri_vhost_rp_path( + ?KHEPRI_WILDCARD_STAR, + ?KHEPRI_WILDCARD_STAR, + ?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:delete(Path2) of + ok -> ok; + Error2 -> throw(Error2) end. diff --git a/deps/rabbit/src/rabbit_db_user.erl b/deps/rabbit/src/rabbit_db_user.erl index fb00b01a5daa..e1589db3d082 100644 --- a/deps/rabbit/src/rabbit_db_user.erl +++ b/deps/rabbit/src/rabbit_db_user.erl @@ -12,10 +12,13 @@ -include_lib("khepri/include/khepri.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include("include/khepri.hrl"). + -export([create/1, update/2, get/1, get_all/0, + count_all/0, with_fun_in_mnesia_tx/2, with_fun_in_khepri_tx/2, get_user_permissions/2, @@ -28,11 +31,11 @@ set_topic_permissions/1, clear_topic_permissions/3, clear_matching_topic_permissions/3, + clear_in_khepri/0, delete/1, clear_all_permissions_for_vhost/1]). --export([khepri_users_path/0, - khepri_user_path/1, +-export([khepri_user_path/1, khepri_user_permission_path/2, khepri_topic_permission_path/3]). @@ -72,8 +75,8 @@ -define(MNESIA_TABLE, rabbit_user). 
-define(PERM_MNESIA_TABLE, rabbit_user_permission). -define(TOPIC_PERM_MNESIA_TABLE, rabbit_topic_permission). --define(KHEPRI_USERS_PROJECTION, rabbit_khepri_users). --define(KHEPRI_PERMISSIONS_PROJECTION, rabbit_khepri_user_permissions). +-define(KHEPRI_USERS_PROJECTION, rabbit_khepri_user). +-define(KHEPRI_PERMISSIONS_PROJECTION, rabbit_khepri_user_permission). %% ------------------------------------------------------------------- %% create(). @@ -218,12 +221,39 @@ get_all_in_mnesia() -> internal_user:pattern_match_all()). get_all_in_khepri() -> - Path = khepri_users_path(), - case rabbit_khepri:list(Path) of + Path = khepri_user_path(?KHEPRI_WILDCARD_STAR), + case rabbit_khepri:get_many(Path) of {ok, Users} -> maps:values(Users); _ -> [] end. +%% ------------------------------------------------------------------- +%% count_all(). +%% ------------------------------------------------------------------- + +-spec count_all() -> {ok, Count} | {error, any()} when + Count :: non_neg_integer(). +%% @doc Returns all user records. +%% +%% @returns the count of internal user records. +%% +%% @private + +count_all() -> + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> count_all_in_mnesia() end, + khepri => fun() -> count_all_in_khepri() end}). + +count_all_in_mnesia() -> + List = mnesia:dirty_match_object( + ?MNESIA_TABLE, + internal_user:pattern_match_all()), + {ok, length(List)}. + +count_all_in_khepri() -> + Path = khepri_user_path(?KHEPRI_WILDCARD_STAR), + rabbit_khepri:count(Path). + %% ------------------------------------------------------------------- %% with_fun_in_*(). %% ------------------------------------------------------------------- @@ -461,13 +491,12 @@ set_user_permissions_in_khepri(Username, VHostName, UserPermission) -> end)), rw). set_user_permissions_in_khepri_tx(Username, VHostName, UserPermission) -> + %% TODO: Check user presence in a transaction. 
Path = khepri_user_permission_path( - #if_all{conditions = - [Username, - #if_node_exists{exists = true}]}, + Username, VHostName), Extra = #{keep_while => - #{rabbit_db_vhost:khepri_vhost_path(VHostName) => + #{rabbit_db_user:khepri_user_path(Username) => #if_node_exists{exists = true}}}, Ret = khepri_tx:put( Path, UserPermission, Extra), @@ -611,7 +640,7 @@ clear_all_permissions_for_vhost_in_khepri(VHostName) -> TopicProps, rabbit_khepri:collect_payloads(UserProps)), {ok, Deletions} - end, rw). + end, rw, #{timeout => infinity}). %% ------------------------------------------------------------------- %% get_topic_permissions(). @@ -849,14 +878,13 @@ set_topic_permissions_in_khepri(Username, VHostName, TopicPermission) -> set_topic_permissions_in_khepri_tx(Username, VHostName, TopicPermission) -> #topic_permission{topic_permission_key = #topic_permission_key{exchange = ExchangeName}} = TopicPermission, + %% TODO: Check user presence in a transaction. Path = khepri_topic_permission_path( - #if_all{conditions = - [Username, - #if_node_exists{exists = true}]}, + Username, VHostName, ExchangeName), Extra = #{keep_while => - #{rabbit_db_vhost:khepri_vhost_path(VHostName) => + #{rabbit_db_user:khepri_user_path(Username) => #if_node_exists{exists = true}}}, Ret = khepri_tx:put(Path, TopicPermission, Extra), case Ret of @@ -1054,7 +1082,7 @@ clear_in_mnesia() -> ok. clear_in_khepri() -> - Path = khepri_users_path(), + Path = khepri_user_path(?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) @@ -1064,11 +1092,16 @@ clear_in_khepri() -> %% Paths %% -------------------------------------------------------------- -khepri_users_path() -> [?MODULE, users]. -khepri_user_path(Username) -> [?MODULE, users, Username]. +khepri_user_path(Username) + when ?IS_KHEPRI_PATH_CONDITION(Username) -> + ?KHEPRI_ROOT_PATH ++ [users, Username]. -khepri_user_permission_path(Username, VHostName) -> - [?MODULE, users, Username, user_permissions, VHostName]. 
+khepri_user_permission_path(Username, VHostName) + when ?IS_KHEPRI_PATH_CONDITION(Username) -> + (rabbit_db_vhost:khepri_vhost_path(VHostName) ++ + [user_permissions, Username]). -khepri_topic_permission_path(Username, VHostName, Exchange) -> - [?MODULE, users, Username, topic_permissions, VHostName, Exchange]. +khepri_topic_permission_path(Username, VHostName, Exchange) + when ?IS_KHEPRI_PATH_CONDITION(Username) -> + (rabbit_db_exchange:khepri_exchange_path(VHostName, Exchange) ++ + [user_permissions, Username]). diff --git a/deps/rabbit/src/rabbit_db_user_m2k_converter.erl b/deps/rabbit/src/rabbit_db_user_m2k_converter.erl index 194514e2afc9..fb2969e77ee7 100644 --- a/deps/rabbit/src/rabbit_db_user_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_user_m2k_converter.erl @@ -73,14 +73,12 @@ copy_to_khepri( [Table, Username, VHost], #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), Path = rabbit_db_user:khepri_user_permission_path( - #if_all{conditions = - [Username, - #if_node_exists{exists = true}]}, + Username, VHost), rabbit_db_m2k_converter:with_correlation_id( fun(CorrId) -> Extra = #{keep_while => - #{rabbit_db_vhost:khepri_vhost_path(VHost) => + #{rabbit_db_user:khepri_user_path(Username) => #if_node_exists{exists = true}}, async => CorrId}, ?LOG_DEBUG( @@ -103,15 +101,13 @@ copy_to_khepri( [Table, Username, VHost], #{domain => ?KMM_M2K_TABLE_COPY_LOG_DOMAIN}), Path = rabbit_db_user:khepri_topic_permission_path( - #if_all{conditions = - [Username, - #if_node_exists{exists = true}]}, + Username, VHost, Exchange), rabbit_db_m2k_converter:with_correlation_id( fun(CorrId) -> Extra = #{keep_while => - #{rabbit_db_vhost:khepri_vhost_path(VHost) => + #{rabbit_db_user:khepri_user_path(Username) => #if_node_exists{exists = true}}, async => CorrId}, ?LOG_DEBUG( @@ -192,10 +188,6 @@ delete_from_khepri(rabbit_topic_permission = Table, Key, State) -> Table :: atom(). 
clear_data_in_khepri(rabbit_user) -> - Path = rabbit_db_user:khepri_users_path(), - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end; + rabbit_db_user:clear_in_khepri(); clear_data_in_khepri(_) -> ok. diff --git a/deps/rabbit/src/rabbit_db_vhost.erl b/deps/rabbit/src/rabbit_db_vhost.erl index 247acb4632af..42453faea251 100644 --- a/deps/rabbit/src/rabbit_db_vhost.erl +++ b/deps/rabbit/src/rabbit_db_vhost.erl @@ -11,6 +11,7 @@ -include_lib("rabbit_common/include/logging.hrl"). -include_lib("khepri/include/khepri.hrl"). +-include("include/khepri.hrl"). -include("vhost.hrl"). -export([create_or_get/3, @@ -19,14 +20,15 @@ exists/1, get/1, get_all/0, + count_all/0, list/0, update/2, with_fun_in_mnesia_tx/2, with_fun_in_khepri_tx/2, - delete/1]). + delete/1, + clear_in_khepri/0]). --export([khepri_vhost_path/1, - khepri_vhosts_path/0]). +-export([khepri_vhost_path/1]). %% For testing -export([clear/0]). @@ -313,6 +315,33 @@ get_all_in_khepri() -> [] end. +%% ------------------------------------------------------------------- +%% count_all(). +%% ------------------------------------------------------------------- + +-spec count_all() -> {ok, Count} | {error, any()} when + Count :: non_neg_integer(). +%% @doc Returns all virtual host records. +%% +%% @returns the count of virtual host records. +%% +%% @private + +count_all() -> + rabbit_khepri:handle_fallback( + #{mnesia => fun() -> count_all_in_mnesia() end, + khepri => fun() -> count_all_in_khepri() end}). + +count_all_in_mnesia() -> + List = mnesia:dirty_match_object( + ?MNESIA_TABLE, + vhost:pattern_match_all()), + {ok, length(List)}. + +count_all_in_khepri() -> + Path = khepri_vhost_path(?KHEPRI_WILDCARD_STAR), + rabbit_khepri:count(Path). + %% ------------------------------------------------------------------- %% list(). %% ------------------------------------------------------------------- @@ -493,7 +522,7 @@ clear_in_mnesia() -> ok. 
clear_in_khepri() -> - Path = khepri_vhosts_path(), + Path = khepri_vhost_path(?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; Error -> throw(Error) @@ -503,5 +532,5 @@ clear_in_khepri() -> %% Paths %% -------------------------------------------------------------- -khepri_vhosts_path() -> [?MODULE]. -khepri_vhost_path(VHost) -> [?MODULE, VHost]. +khepri_vhost_path(VHost) when ?IS_KHEPRI_PATH_CONDITION(VHost) -> + ?KHEPRI_ROOT_PATH ++ [vhosts, VHost]. diff --git a/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl b/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl index 4e4e14cf5457..1ce4a82efd0c 100644 --- a/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl +++ b/deps/rabbit/src/rabbit_db_vhost_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). -include("vhost.hrl"). @@ -95,8 +96,4 @@ delete_from_khepri(rabbit_vhost = Table, Key, State) -> Table :: atom(). clear_data_in_khepri(rabbit_vhost) -> - Path = rabbit_db_vhost:khepri_vhosts_path(), - case rabbit_khepri:delete(Path) of - ok -> ok; - Error -> throw(Error) - end. + rabbit_db_vhost:clear_in_khepri(). diff --git a/deps/rabbit/src/rabbit_depr_ff_extra.erl b/deps/rabbit/src/rabbit_depr_ff_extra.erl index 5267c3efbfb6..2b4998433167 100644 --- a/deps/rabbit/src/rabbit_depr_ff_extra.erl +++ b/deps/rabbit/src/rabbit_depr_ff_extra.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 Broadcom. All Rights Reserved. The term “Broadcom” +%% Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term “Broadcom” %% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% @doc diff --git a/deps/rabbit/src/rabbit_deprecated_features.erl b/deps/rabbit/src/rabbit_deprecated_features.erl index 93289be033eb..ffafec5757b9 100644 --- a/deps/rabbit/src/rabbit_deprecated_features.erl +++ b/deps/rabbit/src/rabbit_deprecated_features.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2023-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides an API to manage deprecated features in RabbitMQ. It diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl index 5a00d4de80da..391b6b8934e0 100644 --- a/deps/rabbit/src/rabbit_exchange.erl +++ b/deps/rabbit/src/rabbit_exchange.erl @@ -14,7 +14,7 @@ update_scratch/3, update_decorators/2, immutable/1, info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4, route/2, route/3, delete/3, validate_binding/2, count/0, - ensure_deleted/3]). + ensure_deleted/3, delete_all/2]). -export([list_names/0]). -export([serialise_events/1]). -export([serial/1, peek_serial/1]). 
@@ -470,13 +470,15 @@ delete(XName, IfUnused, Username) -> _ = rabbit_runtime_parameters:set(XName#resource.virtual_host, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, XName#resource.name, true, Username), - Deletions = process_deletions(rabbit_db_exchange:delete(XName, IfUnused)), - case Deletions of - {error, _} -> - Deletions; - _ -> - rabbit_binding:notify_deletions(Deletions, Username), - ok + case rabbit_db_exchange:delete(XName, IfUnused) of + {deleted, #exchange{name = XName} = X, Bs, Deletions} -> + Deletions1 = rabbit_binding:add_deletion( + XName, X, deleted, Bs, Deletions), + ok = rabbit_binding:process_deletions(Deletions1), + ok = rabbit_binding:notify_deletions(Deletions1, Username), + ok; + {error, _} = Err -> + Err end after rabbit_runtime_parameters:clear(XName#resource.virtual_host, @@ -484,12 +486,16 @@ delete(XName, IfUnused, Username) -> XName#resource.name, Username) end. -process_deletions({error, _} = E) -> - E; -process_deletions({deleted, #exchange{name = XName} = X, Bs, Deletions}) -> - rabbit_binding:process_deletions( - rabbit_binding:add_deletion( - XName, {X, deleted, Bs}, Deletions)). +-spec delete_all(VHostName, ActingUser) -> Ret when + VHostName :: vhost:name(), + ActingUser :: rabbit_types:username(), + Ret :: ok. + +delete_all(VHostName, ActingUser) -> + {ok, Deletions} = rabbit_db_exchange:delete_all(VHostName), + ok = rabbit_binding:process_deletions(Deletions), + ok = rabbit_binding:notify_deletions(Deletions, ActingUser), + ok. -spec ensure_deleted(ExchangeName, IfUnused, Username) -> Ret when ExchangeName :: name(), diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl index f635e50d2b5f..3d2b19f8c7c6 100644 --- a/deps/rabbit/src/rabbit_feature_flags.erl +++ b/deps/rabbit/src/rabbit_feature_flags.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module offers a framework to declare capabilities a RabbitMQ node @@ -103,7 +105,7 @@ init/0, get_state/1, get_stability/1, - check_node_compatibility/1, + check_node_compatibility/1, check_node_compatibility/2, sync_feature_flags_with_cluster/2, refresh_feature_flags_after_app_load/0, enabled_feature_flags_list_file/0 @@ -1302,7 +1304,9 @@ does_node_support(Node, FeatureNames, Timeout) -> false end. --spec check_node_compatibility(node()) -> ok | {error, any()}. +-spec check_node_compatibility(RemoteNode) -> Ret when + RemoteNode :: node(), + Ret :: ok | {error, any()}. %% @doc %% Checks if a node is compatible with the local node. %% @@ -1314,11 +1318,40 @@ does_node_support(Node, FeatureNames, Timeout) -> %% local node %% %% -%% @param Node the name of the remote node to test. +%% @param RemoteNode the name of the remote node to test. +%% @returns `ok' if they are compatible, `{error, Reason}' if they are not. + +check_node_compatibility(RemoteNode) -> + check_node_compatibility(RemoteNode, false). + +-spec check_node_compatibility(RemoteNode, LocalNodeAsVirgin) -> Ret when + RemoteNode :: node(), + LocalNodeAsVirgin :: boolean(), + Ret :: ok | {error, any()}. +%% @doc +%% Checks if a node is compatible with the local node. +%% +%% To be compatible, the following two conditions must be met: +%%
      +%%
+%% <ol>
+%% <li>feature flags enabled on the local node must be supported by the
+%% remote node</li>
+%% <li>feature flags enabled on the remote node must be supported by the
+%% local node</li>
+%% </ol>
    +%% +%% Unlike {@link check_node_compatibility/1}, the local node's feature flags +%% inventory is evaluated as if the node was virgin if `LocalNodeAsVirgin' is +%% true. This is useful if the local node will be reset as part of joining a +%% remote cluster for instance. +%% +%% @param RemoteNode the name of the remote node to test. +%% @param LocalNodeAsVirgin flag to indicate if the local node should be +%% evaluated as if it was virgin. %% @returns `ok' if they are compatible, `{error, Reason}' if they are not. -check_node_compatibility(Node) -> - rabbit_ff_controller:check_node_compatibility(Node). +check_node_compatibility(RemoteNode, LocalNodeAsVirgin) -> + rabbit_ff_controller:check_node_compatibility( + RemoteNode, LocalNodeAsVirgin). run_feature_flags_mod_on_remote_node(Node, Function, Args, Timeout) -> rabbit_ff_controller:rpc_call(Node, ?MODULE, Function, Args, Timeout). diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index f82ed6000e16..c522e1cd6c16 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. 
%% %% @doc %% The feature flag controller is responsible for synchronization and managing @@ -36,7 +38,7 @@ -export([is_supported/1, is_supported/2, enable/1, enable_default/0, - check_node_compatibility/1, + check_node_compatibility/2, sync_cluster/1, refresh_after_app_load/0, get_forced_feature_flag_names/0]). @@ -134,12 +136,22 @@ enable_default() -> Ret end. -check_node_compatibility(RemoteNode) -> +check_node_compatibility(RemoteNode, LocalNodeAsVirgin) -> ThisNode = node(), - ?LOG_DEBUG( - "Feature flags: CHECKING COMPATIBILITY between nodes `~ts` and `~ts`", - [ThisNode, RemoteNode], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + case LocalNodeAsVirgin of + true -> + ?LOG_DEBUG( + "Feature flags: CHECKING COMPATIBILITY between nodes `~ts` " + "and `~ts`; consider node `~ts` as virgin", + [ThisNode, RemoteNode, ThisNode], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}); + false -> + ?LOG_DEBUG( + "Feature flags: CHECKING COMPATIBILITY between nodes `~ts` " + "and `~ts`", + [ThisNode, RemoteNode], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}) + end, %% We don't go through the controller process to check nodes compatibility %% because this function is used while `rabbit' is stopped usually. %% @@ -147,7 +159,7 @@ check_node_compatibility(RemoteNode) -> %% because it would not guaranty that the compatibility remains true after %% this function finishes and before the node starts and synchronizes %% feature flags. - check_node_compatibility_task(ThisNode, RemoteNode). + check_node_compatibility_task(ThisNode, RemoteNode, LocalNodeAsVirgin). sync_cluster(Nodes) -> ?LOG_DEBUG( @@ -382,12 +394,14 @@ notify_waiting_controller({ControlerPid, _} = From) -> %% Code to check compatibility between nodes. 
%% -------------------------------------------------------------------- --spec check_node_compatibility_task(Node, Node) -> Ret when - Node :: node(), +-spec check_node_compatibility_task(NodeA, NodeB, NodeAAsVirigin) -> Ret when + NodeA :: node(), + NodeB :: node(), + NodeAAsVirigin :: boolean(), Ret :: ok | {error, Reason}, Reason :: incompatible_feature_flags. -check_node_compatibility_task(NodeA, NodeB) -> +check_node_compatibility_task(NodeA, NodeB, NodeAAsVirigin) -> ?LOG_NOTICE( "Feature flags: checking nodes `~ts` and `~ts` compatibility...", [NodeA, NodeB], @@ -400,7 +414,8 @@ check_node_compatibility_task(NodeA, NodeB) -> _ when is_list(NodesB) -> check_node_compatibility_task1( NodeA, NodesA, - NodeB, NodesB); + NodeB, NodesB, + NodeAAsVirigin); Error -> ?LOG_WARNING( "Feature flags: " @@ -419,10 +434,12 @@ check_node_compatibility_task(NodeA, NodeB) -> {error, {aborted_feature_flags_compat_check, Error}} end. -check_node_compatibility_task1(NodeA, NodesA, NodeB, NodesB) +check_node_compatibility_task1(NodeA, NodesA, NodeB, NodesB, NodeAAsVirigin) when is_list(NodesA) andalso is_list(NodesB) -> case collect_inventory_on_nodes(NodesA) of - {ok, InventoryA} -> + {ok, InventoryA0} -> + InventoryA = virtually_reset_inventory( + InventoryA0, NodeAAsVirigin), ?LOG_DEBUG( "Feature flags: inventory of node `~ts`:~n~tp", [NodeA, InventoryA], @@ -488,6 +505,42 @@ list_nodes_clustered_with(Node) -> ListOrError -> ListOrError end. 
+virtually_reset_inventory( + #{feature_flags := FeatureFlags, + states_per_node := StatesPerNode} = Inventory, + true = _NodeAsVirgin) -> + [Node | _] = maps:keys(StatesPerNode), + FeatureStates0 = maps:get(Node, StatesPerNode), + FeatureStates1 = maps:map( + fun(FeatureName, _FeatureState) -> + FeatureProps = maps:get( + FeatureName, FeatureFlags), + state_after_virtual_state( + FeatureName, FeatureProps) + end, FeatureStates0), + StatesPerNode1 = maps:map( + fun(_Node, _FeatureStates) -> + FeatureStates1 + end, StatesPerNode), + Inventory1 = Inventory#{states_per_node => StatesPerNode1}, + Inventory1; +virtually_reset_inventory( + Inventory, + false = _NodeAsVirgin) -> + Inventory. + +state_after_virtual_state(_FeatureName, FeatureProps) + when ?IS_FEATURE_FLAG(FeatureProps) -> + Stability = rabbit_feature_flags:get_stability(FeatureProps), + case Stability of + required -> true; + _ -> false + end; +state_after_virtual_state(FeatureName, FeatureProps) + when ?IS_DEPRECATION(FeatureProps) -> + not rabbit_deprecated_features:should_be_permitted( + FeatureName, FeatureProps). + -spec are_compatible(Inventory, Inventory) -> AreCompatible when Inventory :: rabbit_feature_flags:cluster_inventory(), AreCompatible :: boolean(). @@ -772,12 +825,29 @@ refresh_after_app_load_task() -> Ret :: ok | {error, Reason}, Reason :: term(). -enable_many(#{states_per_node := _} = Inventory, [FeatureName | Rest]) -> +enable_many(#{states_per_node := _} = Inventory, FeatureNames) -> + %% We acquire a lock before making any change to the registry. This is not + %% used by the controller (because it is already using a globally + %% registered name to prevent concurrent runs). But this is used in + %% `rabbit_feature_flags:is_enabled()' to block while the state is + %% `state_changing'. + rabbit_ff_registry_factory:acquire_state_change_lock(), + Ret = enable_many_locked(Inventory, FeatureNames), + rabbit_ff_registry_factory:release_state_change_lock(), + Ret. 
+ +-spec enable_many_locked(Inventory, FeatureNames) -> Ret when + Inventory :: rabbit_feature_flags:cluster_inventory(), + FeatureNames :: [rabbit_feature_flags:feature_name()], + Ret :: ok | {error, Reason}, + Reason :: term(). + +enable_many_locked(#{states_per_node := _} = Inventory, [FeatureName | Rest]) -> case enable_if_supported(Inventory, FeatureName) of - {ok, Inventory1} -> enable_many(Inventory1, Rest); + {ok, Inventory1} -> enable_many_locked(Inventory1, Rest); Error -> Error end; -enable_many(_Inventory, []) -> +enable_many_locked(_Inventory, []) -> ok. -spec enable_if_supported(Inventory, FeatureName) -> Ret when @@ -794,7 +864,7 @@ enable_if_supported(#{states_per_node := _} = Inventory, FeatureName) -> "Feature flags: `~ts`: supported; continuing", [FeatureName], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - lock_registry_and_enable(Inventory, FeatureName); + enable_with_registry_locked(Inventory, FeatureName); false -> ?LOG_DEBUG( "Feature flags: `~ts`: unsupported; aborting", @@ -803,23 +873,6 @@ enable_if_supported(#{states_per_node := _} = Inventory, FeatureName) -> {error, unsupported} end. --spec lock_registry_and_enable(Inventory, FeatureName) -> Ret when - Inventory :: rabbit_feature_flags:cluster_inventory(), - FeatureName :: rabbit_feature_flags:feature_name(), - Ret :: {ok, Inventory} | {error, Reason}, - Reason :: term(). - -lock_registry_and_enable(#{states_per_node := _} = Inventory, FeatureName) -> - %% We acquire a lock before making any change to the registry. This is not - %% used by the controller (because it is already using a globally - %% registered name to prevent concurrent runs). But this is used in - %% `rabbit_feature_flags:is_enabled()' to block while the state is - %% `state_changing'. - rabbit_ff_registry_factory:acquire_state_change_lock(), - Ret = enable_with_registry_locked(Inventory, FeatureName), - rabbit_ff_registry_factory:release_state_change_lock(), - Ret. 
- -spec enable_with_registry_locked(Inventory, FeatureName) -> Ret when Inventory :: rabbit_feature_flags:cluster_inventory(), FeatureName :: rabbit_feature_flags:feature_name(), diff --git a/deps/rabbit/src/rabbit_ff_extra.erl b/deps/rabbit/src/rabbit_ff_extra.erl index 9eba72185936..0171c4200856 100644 --- a/deps/rabbit/src/rabbit_ff_extra.erl +++ b/deps/rabbit/src/rabbit_ff_extra.erl @@ -2,7 +2,8 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides extra functions unused by the feature flags diff --git a/deps/rabbit/src/rabbit_ff_registry.erl b/deps/rabbit/src/rabbit_ff_registry.erl index 864ff564dc64..eca99ebd9ec0 100644 --- a/deps/rabbit/src/rabbit_ff_registry.erl +++ b/deps/rabbit/src/rabbit_ff_registry.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. 
%% %% @doc %% This module exposes the API of the {@link rabbit_feature_flags} diff --git a/deps/rabbit/src/rabbit_ff_registry_factory.erl b/deps/rabbit/src/rabbit_ff_registry_factory.erl index 0d91a7b64955..68d81be6cf46 100644 --- a/deps/rabbit/src/rabbit_ff_registry_factory.erl +++ b/deps/rabbit/src/rabbit_ff_registry_factory.erl @@ -2,7 +2,8 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ff_registry_factory). diff --git a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl index beef32f657cf..a5f63eb64de4 100644 --- a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl +++ b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module sits in front of {@link rabbit_ff_registry}. 
diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index bc1a85af08d8..1960eaf03a65 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -14,7 +14,28 @@ -dialyzer(no_improper_lists). -include("rabbit_fifo.hrl"). --include_lib("rabbit_common/include/rabbit.hrl"). + +-define(STATE, ?MODULE). + +-define(CONSUMER_PID(Pid), #consumer{cfg = #consumer_cfg{pid = Pid}}). +-define(CONSUMER_PRIORITY(P), #consumer{cfg = #consumer_cfg{priority = P}}). +-define(CONSUMER_TAG_PID(Tag, Pid), + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid}}). + +-ifdef(TEST). +-define(SIZE(Msg), + case mc:is(Msg) of + true -> + mc:size(Msg); + false when is_binary(Msg) -> + {0, byte_size(Msg)}; + false -> + {0, erts_debug:size(Msg)} + end). +-else. +-define(SIZE(Msg), mc:size(Msg)). +-endif. -export([ %% ra_machine callbacks @@ -30,7 +51,7 @@ which_module/1, %% aux init_aux/1, - handle_aux/6, + handle_aux/5, % queries query_messages_ready/1, query_messages_checked_out/1, @@ -47,12 +68,12 @@ query_peek/2, query_notify_decorators_info/1, usage/1, + is_v4/0, %% misc - dehydrate_state/1, - normalize/1, get_msg_header/1, get_header/2, + annotate_msg/2, get_msg/1, %% protocol helpers @@ -61,8 +82,10 @@ make_checkout/3, make_settle/2, make_return/2, + is_return/1, make_discard/2, make_credit/4, + make_modify/5, make_purge/0, make_purge_nodes/1, make_update_config/1, @@ -71,16 +94,23 @@ -ifdef(TEST). -export([update_header/4, - chunk_disk_msgs/3]). + chunk_disk_msgs/3, + smallest_raft_index/1, + make_requeue/4]). -endif. -import(serial_number, [add/2, diff/2]). +-define(ENQ_V2, e). %% command records representing all the protocol actions that are supported -record(enqueue, {pid :: option(pid()), seq :: option(msg_seqno()), msg :: raw_msg()}). --record(requeue, {consumer_id :: consumer_id(), +-record(?ENQ_V2, {seq :: option(msg_seqno()), + msg :: raw_msg(), + size :: {MetadataSize :: non_neg_integer(), + PayloadSize :: non_neg_integer()}}). 
+-record(requeue, {consumer_key :: consumer_key(), msg_id :: msg_id(), index :: ra:index(), header :: msg_header(), @@ -89,23 +119,30 @@ -record(checkout, {consumer_id :: consumer_id(), spec :: checkout_spec(), meta :: consumer_meta()}). --record(settle, {consumer_id :: consumer_id(), +-record(settle, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(return, {consumer_id :: consumer_id(), +-record(return, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(discard, {consumer_id :: consumer_id(), +-record(discard, {consumer_key :: consumer_key(), msg_ids :: [msg_id()]}). --record(credit, {consumer_id :: consumer_id(), +-record(credit, {consumer_key :: consumer_key(), credit :: non_neg_integer(), delivery_count :: rabbit_queue_type:delivery_count(), drain :: boolean()}). +-record(modify, {consumer_key :: consumer_key(), + msg_ids :: [msg_id()], + delivery_failed :: boolean(), + undeliverable_here :: boolean(), + annotations :: mc:annotations()}). -record(purge, {}). -record(purge_nodes, {nodes :: [node()]}). -record(update_config, {config :: config()}). -record(garbage_collection, {}). +% -record(eval_consumer_timeouts, {consumer_keys :: [consumer_key()]}). -opaque protocol() :: #enqueue{} | + #?ENQ_V2{} | #requeue{} | #register_enqueuer{} | #checkout{} | @@ -113,6 +150,7 @@ #return{} | #discard{} | #credit{} | + #modify{} | #purge{} | #purge_nodes{} | #update_config{} | @@ -126,7 +164,7 @@ -type client_msg() :: delivery(). %% the messages `rabbit_fifo' can send to consumers. --opaque state() :: #?MODULE{}. +-opaque state() :: #?STATE{}. -export_type([protocol/0, delivery/0, @@ -134,6 +172,7 @@ credit_mode/0, consumer_meta/0, consumer_id/0, + consumer_key/0, client_msg/0, msg/0, msg_id/0, @@ -147,17 +186,23 @@ -spec init(config()) -> state(). init(#{name := Name, queue_resource := Resource} = Conf) -> - update_config(Conf, #?MODULE{cfg = #cfg{name = Name, - resource = Resource}}). 
+ update_config(Conf, #?STATE{cfg = #cfg{name = Name, + resource = Resource}}). update_config(Conf, State) -> DLH = maps:get(dead_letter_handler, Conf, undefined), BLH = maps:get(become_leader_handler, Conf, undefined), - RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY), Overflow = maps:get(overflow_strategy, Conf, drop_head), MaxLength = maps:get(max_length, Conf, undefined), MaxBytes = maps:get(max_bytes, Conf, undefined), - DeliveryLimit = maps:get(delivery_limit, Conf, undefined), + DeliveryLimit = case maps:get(delivery_limit, Conf, undefined) of + DL when is_number(DL) andalso + DL < 0 -> + undefined; + DL -> + DL + end, + Expires = maps:get(expires, Conf, undefined), MsgTTL = maps:get(msg_ttl, Conf, undefined), ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of @@ -166,21 +211,19 @@ update_config(Conf, State) -> false -> competing end, - Cfg = State#?MODULE.cfg, - RCISpec = {RCI, RCI}, + Cfg = State#?STATE.cfg, LastActive = maps:get(created, Conf, undefined), - State#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, - dead_letter_handler = DLH, - become_leader_handler = BLH, - overflow_strategy = Overflow, - max_length = MaxLength, - max_bytes = MaxBytes, - consumer_strategy = ConsumerStrategy, - delivery_limit = DeliveryLimit, - expires = Expires, - msg_ttl = MsgTTL}, - last_active = LastActive}. + State#?STATE{cfg = Cfg#cfg{dead_letter_handler = DLH, + become_leader_handler = BLH, + overflow_strategy = Overflow, + max_length = MaxLength, + max_bytes = MaxBytes, + consumer_strategy = ConsumerStrategy, + delivery_limit = DeliveryLimit, + expires = Expires, + msg_ttl = MsgTTL}, + last_active = LastActive}. % msg_ids are scoped per consumer % ra_indexes holds all raft indexes for enqueues currently on queue @@ -189,16 +232,19 @@ update_config(Conf, State) -> {state(), ra_machine:reply()}. 
apply(Meta, #enqueue{pid = From, seq = Seq, msg = RawMsg}, State00) -> - apply_enqueue(Meta, From, Seq, RawMsg, State00); + apply_enqueue(Meta, From, Seq, RawMsg, message_size(RawMsg), State00); +apply(#{reply_mode := {notify, _Corr, EnqPid}} = Meta, + #?ENQ_V2{seq = Seq, msg = RawMsg, size = Size}, State00) -> + apply_enqueue(Meta, EnqPid, Seq, RawMsg, Size, State00); apply(_Meta, #register_enqueuer{pid = Pid}, - #?MODULE{enqueuers = Enqueuers0, - cfg = #cfg{overflow_strategy = Overflow}} = State0) -> + #?STATE{enqueuers = Enqueuers0, + cfg = #cfg{overflow_strategy = Overflow}} = State0) -> State = case maps:is_key(Pid, Enqueuers0) of true -> %% if the enqueuer exits just echo the overflow state State0; false -> - State0#?MODULE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + State0#?STATE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} end, Res = case is_over_limit(State) of true when Overflow == reject_publish -> @@ -207,234 +253,198 @@ apply(_Meta, #register_enqueuer{pid = Pid}, ok end, {State, Res, [{monitor, process, Pid}]}; -apply(Meta, - #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> - case Cons0 of - #{ConsumerId := Con0} -> - complete_and_checkout(Meta, MsgIds, ConsumerId, +apply(Meta, #settle{msg_ids = MsgIds, + consumer_key = Key}, + #?STATE{consumers = Consumers} = State) -> + case find_consumer(Key, Consumers) of + {ConsumerKey, Con0} -> + %% find_consumer/2 returns the actual consumer key even if + %% if id was passed instead for example + complete_and_checkout(Meta, MsgIds, ConsumerKey, Con0, [], State); _ -> {State, ok} end; -apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons, - dlx = DlxState0, - cfg = #cfg{dead_letter_handler = DLH}} = State0) -> - case Cons of - #{ConsumerId := #consumer{checked_out = Checked} = Con} -> - % Publishing to dead-letter exchange must maintain same order as messages got rejected. 
- DiscardMsgs = lists:filtermap(fun(Id) -> - case maps:get(Id, Checked, undefined) of - undefined -> - false; - Msg -> - {true, Msg} - end - end, MsgIds), - {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), - State = State0#?MODULE{dlx = DlxState}, - complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State); +apply(Meta, #discard{consumer_key = ConsumerKey, + msg_ids = MsgIds}, + #?STATE{consumers = Consumers } = State0) -> + case find_consumer(ConsumerKey, Consumers) of + {ConsumerKey, #consumer{} = Con} -> + discard(Meta, MsgIds, ConsumerKey, Con, true, #{}, State0); _ -> {State0, ok} end; -apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, - #?MODULE{consumers = Cons0} = State) -> - case Cons0 of - #{ConsumerId := #consumer{checked_out = Checked0}} -> - Returned = maps:with(MsgIds, Checked0), - return(Meta, ConsumerId, Returned, [], State); +apply(Meta, #return{consumer_key = ConsumerKey, + msg_ids = MsgIds}, + #?STATE{consumers = Cons} = State) -> + case find_consumer(ConsumerKey, Cons) of + {ActualConsumerKey, #consumer{checked_out = Checked}} -> + return(Meta, ActualConsumerKey, MsgIds, false, + #{}, Checked, [], State); + _ -> + {State, ok} + end; +apply(Meta, #modify{consumer_key = ConsumerKey, + delivery_failed = DelFailed, + undeliverable_here = Undel, + annotations = Anns, + msg_ids = MsgIds}, + #?STATE{consumers = Cons} = State) -> + case find_consumer(ConsumerKey, Cons) of + {ConsumerKey, #consumer{checked_out = Checked}} + when Undel == false -> + return(Meta, ConsumerKey, MsgIds, DelFailed, + Anns, Checked, [], State); + {ConsumerKey, #consumer{} = Con} + when Undel == true -> + discard(Meta, MsgIds, ConsumerKey, Con, DelFailed, Anns, State); _ -> {State, ok} end; apply(#{index := Idx} = Meta, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, msg_id = MsgId, index = OldIdx, - header = Header0, - msg = _Msg}, - #?MODULE{consumers = Cons0, - messages = Messages, - 
ra_indexes = Indexes0, - enqueue_count = EnqCount} = State00) -> - case Cons0 of - #{ConsumerId := #consumer{checked_out = Checked0} = Con0} + header = Header0}, + #?STATE{consumers = Cons, + messages = Messages, + ra_indexes = Indexes0, + enqueue_count = EnqCount} = State00) -> + %% the actual consumer key was looked up in the aux handler so we + %% dont need to use find_consumer/2 here + case Cons of + #{ConsumerKey := #consumer{checked_out = Checked0} = Con0} when is_map_key(MsgId, Checked0) -> %% construct a message with the current raft index - %% and update delivery count before adding it to the message queue - Header = update_header(delivery_count, fun incr/1, 1, Header0), + %% and update acquired count before adding it to the message queue + Header = update_header(acquired_count, fun incr/1, 1, Header0), State0 = add_bytes_return(Header, State00), Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), - credit = increase_credit(Meta, Con0, 1)}, - State1 = State0#?MODULE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), - messages = lqueue:in(?MSG(Idx, Header), Messages), - enqueue_count = EnqCount + 1}, - State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), - {State, Ret, Effs} = checkout(Meta, State0, State2, []), - update_smallest_raft_index(Idx, Ret, - maybe_store_release_cursor(Idx, State), - Effs); + credit = increase_credit(Con0, 1)}, + State1 = State0#?STATE{ra_indexes = rabbit_fifo_index:delete(OldIdx, + Indexes0), + messages = rabbit_fifo_q:in(no, + ?MSG(Idx, Header), + Messages), + enqueue_count = EnqCount + 1}, + State2 = update_or_remove_con(Meta, ConsumerKey, Con, State1), + checkout(Meta, State0, State2, []); _ -> {State00, ok, []} end; -apply(Meta, #credit{credit = LinkCreditRcv, delivery_count = DeliveryCountRcv, - drain = Drain, consumer_id = ConsumerId = {CTag, CPid}}, - #?MODULE{consumers = Cons0, - service_queue = ServiceQueue0, - waiting_consumers = Waiting0} = State0) -> - case Cons0 of - #{ConsumerId := 
#consumer{delivery_count = DeliveryCountSnd, - cfg = Cfg} = Con0} -> - LinkCreditSnd = link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd, Cfg), - %% grant the credit - Con1 = Con0#consumer{credit = LinkCreditSnd}, - ServiceQueue = maybe_queue_consumer(ConsumerId, Con1, ServiceQueue0), - State1 = State0#?MODULE{service_queue = ServiceQueue, - consumers = maps:update(ConsumerId, Con1, Cons0)}, - {State2, ok, Effects} = checkout(Meta, State0, State1, []), - - #?MODULE{consumers = Cons1 = #{ConsumerId := Con2}} = State2, - #consumer{credit = PostCred, - delivery_count = PostDeliveryCount} = Con2, - Available = messages_ready(State2), - case credit_api_v2(Cfg) of - true -> - {Credit, DeliveryCount, State} = - case Drain andalso PostCred > 0 of - true -> - AdvancedDeliveryCount = add(PostDeliveryCount, PostCred), - ZeroCredit = 0, - Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, - credit = ZeroCredit}, - Cons = maps:update(ConsumerId, Con, Cons1), - State3 = State2#?MODULE{consumers = Cons}, - {ZeroCredit, AdvancedDeliveryCount, State3}; - false -> - {PostCred, PostDeliveryCount, State2} - end, - %% We must send to queue client delivery effects before credit_reply such - %% that session process can send to AMQP 1.0 client TRANSFERs before FLOW. - {State, ok, Effects ++ [{send_msg, CPid, - {credit_reply, CTag, DeliveryCount, Credit, Available, Drain}, - ?DELIVERY_SEND_MSG_OPTS}]}; - false -> - %% We must always send a send_credit_reply because basic.credit is synchronous. - %% Additionally, we keep the bug of credit API v1 that we send to queue client the - %% send_drained reply before the delivery effects (resulting in the wrong behaviour - %% that the session process sends to AMQP 1.0 client the FLOW before the TRANSFERs). - %% We have to keep this bug because old rabbit_fifo_client implementations expect - %% a send_drained Ra reply (they can't handle such a Ra effect). 
- CreditReply = {send_credit_reply, Available}, - case Drain of - true -> - AdvancedDeliveryCount = PostDeliveryCount + PostCred, - Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, - credit = 0}, - Cons = maps:update(ConsumerId, Con, Cons1), - State = State2#?MODULE{consumers = Cons}, - Reply = {multi, [CreditReply, {send_drained, {CTag, PostCred}}]}, - {State, Reply, Effects}; - false -> - {State2, CreditReply, Effects} - end - end; - _ when Waiting0 /= [] -> - %%TODO next time when we bump the machine version: - %% 1. Do not put consumer at head of waiting_consumers if NewCredit == 0 - %% to reduce likelihood of activating a 0 credit consumer. - %% 2. Support Drain == true, i.e. advance delivery-count, consuming all link-credit since there - %% are no messages available for an inactive consumer and send credit_reply with Drain=true. - case lists:keytake(ConsumerId, 1, Waiting0) of - {value, {_, Con0 = #consumer{delivery_count = DeliveryCountSnd, - cfg = Cfg}}, Waiting} -> - LinkCreditSnd = link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd, Cfg), - %% grant the credit - Con = Con0#consumer{credit = LinkCreditSnd}, - State = State0#?MODULE{waiting_consumers = - [{ConsumerId, Con} | Waiting]}, - %% No messages are available for inactive consumers. 
- Available = 0, - case credit_api_v2(Cfg) of - true -> - {State, ok, - {send_msg, CPid, - {credit_reply, CTag, DeliveryCountSnd, LinkCreditSnd, Available, false}, - ?DELIVERY_SEND_MSG_OPTS}}; - false -> - {State, {send_credit_reply, Available}} - end; - false -> - {State0, ok} - end; +apply(Meta, #credit{consumer_key = ConsumerKey} = Credit, + #?STATE{consumers = Cons} = State) -> + case Cons of + #{ConsumerKey := Con} -> + credit_active_consumer(Credit, Con, Meta, State); _ -> - %% credit for unknown consumer - just ignore - {State0, ok} + case lists:keytake(ConsumerKey, 1, State#?STATE.waiting_consumers) of + {value, {_, Con}, Waiting} -> + credit_inactive_consumer(Credit, Con, Waiting, State); + false -> + %% credit for unknown consumer - just ignore + {State, ok} + end end; apply(_, #checkout{spec = {dequeue, _}}, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> {State0, {error, {unsupported, single_active_consumer}}}; apply(#{index := Index, system_time := Ts, from := From} = Meta, #checkout{spec = {dequeue, Settlement}, meta = ConsumerMeta, consumer_id = ConsumerId}, - #?MODULE{consumers = Consumers} = State00) -> + #?STATE{consumers = Consumers} = State00) -> %% dequeue always updates last_active - State0 = State00#?MODULE{last_active = Ts}, + State0 = State00#?STATE{last_active = Ts}, %% all dequeue operations result in keeping the queue from expiring - Exists = maps:is_key(ConsumerId, Consumers), + Exists = find_consumer(ConsumerId, Consumers) /= undefined, case messages_ready(State0) of 0 -> - update_smallest_raft_index(Index, {dequeue, empty}, State0, []); + {State0, {dequeue, empty}, []}; _ when Exists -> %% a dequeue using the same consumer_id isn't possible at this point {State0, {dequeue, empty}}; _ -> - {_, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, - {once, 1, simple_prefetch}, 0, + {_, State1} = update_consumer(Meta, ConsumerId, ConsumerId, 
ConsumerMeta, + {once, {simple_prefetch, 1}}, 0, State0), case checkout_one(Meta, false, State1, []) of - {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} -> - {State4, Effects1} = case Settlement of - unsettled -> - {_, Pid} = ConsumerId, - {State2, [{monitor, process, Pid} | Effects0]}; - settled -> - %% immediately settle the checkout - {State3, _, SettleEffects} = - apply(Meta, make_settle(ConsumerId, [MsgId]), - State2), - {State3, SettleEffects ++ Effects0} - end, - Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, messages_ready(State4), From) | Effects1], - {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, - Effects2), - Reply = '$ra_no_reply', - case {DroppedMsg, ExpiredMsg} of - {false, false} -> - {State, Reply, Effects}; - _ -> - update_smallest_raft_index(Index, Reply, State, Effects) - end; + {success, _, MsgId, + ?MSG(RaftIdx, Header), _ExpiredMsg, State2, Effects0} -> + {State4, Effects1} = + case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid} | Effects0]}; + settled -> + %% immediately settle the checkout + {State3, _, SettleEffects} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, SettleEffects ++ Effects0} + end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, + messages_ready(State4), From) + | Effects1], + {State, _DroppedMsg, Effects} = + evaluate_limit(Index, false, State0, State4, Effects2), + {State, '$ra_no_reply', Effects}; {nochange, _ExpiredMsg = true, State2, Effects0} -> %% All ready messages expired. 
- State3 = State2#?MODULE{consumers = maps:remove(ConsumerId, State2#?MODULE.consumers)}, - {State, _, Effects} = evaluate_limit(Index, false, State0, State3, Effects0), - update_smallest_raft_index(Index, {dequeue, empty}, State, Effects) + State3 = State2#?STATE{consumers = + maps:remove(ConsumerId, + State2#?STATE.consumers)}, + {State, _, Effects} = evaluate_limit(Index, false, State0, + State3, Effects0), + {State, {dequeue, empty}, Effects} end end; +apply(#{index := _Idx} = Meta, + #checkout{spec = Spec, + consumer_id = ConsumerId}, State0) + when Spec == cancel orelse + Spec == remove -> + case consumer_key_from_id(ConsumerId, State0) of + {ok, ConsumerKey} -> + {State1, Effects1} = activate_next_consumer( + cancel_consumer(Meta, ConsumerKey, State0, [], + Spec)), + Reply = {ok, consumer_cancel_info(ConsumerKey, State1)}, + {State, _, Effects} = checkout(Meta, State0, State1, Effects1), + {State, Reply, Effects}; + error -> + {State0, {error, consumer_not_found}, []} + end; apply(#{index := Idx} = Meta, - #checkout{spec = cancel, - consumer_id = ConsumerId}, State0) -> - {State1, Effects1} = cancel_consumer(Meta, ConsumerId, State0, [], - consumer_cancel), - {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, - consumer_id = {_, Pid} = ConsumerId}, State0) -> - Priority = get_priority_from_args(ConsumerMeta), - {Consumer, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, - Spec, Priority, State0), + #checkout{spec = Spec0, + meta = ConsumerMeta, + consumer_id = {_, Pid} = ConsumerId}, State0) -> + %% might be better to check machine_version + IsV4 = tuple_size(Spec0) == 2, + %% normalise spec format + Spec = case Spec0 of + {_, _} -> + Spec0; + {Life, Prefetch, simple_prefetch} -> + {Life, {simple_prefetch, Prefetch}}; + {Life, _Credit, credited} -> + {Life, credited} + end, + Priority = get_priority(ConsumerMeta), + 
ConsumerKey = case consumer_key_from_id(ConsumerId, State0) of + {ok, K} -> + K; + error when IsV4 -> + %% if the consumer does not already exist use the + %% raft index as it's unique identifier in future + %% settle, credit, return and discard operations + Idx; + error -> + ConsumerId + end, + {Consumer, State1} = update_consumer(Meta, ConsumerKey, ConsumerId, + ConsumerMeta, Spec, Priority, State0), {State2, Effs} = activate_next_consumer(State1, []), #consumer{checked_out = Checked, credit = Credit, @@ -444,90 +454,88 @@ apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, %% reply with a consumer summary Reply = {ok, #{next_msg_id => NextMsgId, credit => Credit, + key => ConsumerKey, delivery_count => DeliveryCount, + is_active => is_active(ConsumerKey, State2), num_checked_out => map_size(Checked)}}, checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs], Reply); apply(#{index := Index}, #purge{}, - #?MODULE{messages_total = Total, - returns = Returns, - ra_indexes = Indexes0 - } = State0) -> + #?STATE{messages_total = Total, + returns = Returns, + ra_indexes = Indexes0 + } = State0) -> NumReady = messages_ready(State0), Indexes = case Total of NumReady -> - %% All messages are either in 'messages' queue or 'returns' queue. + %% All messages are either in 'messages' queue or + %% 'returns' queue. %% No message is awaiting acknowledgement. %% Optimization: empty all 'ra_indexes'. rabbit_fifo_index:empty(); _ -> - %% Some messages are checked out to consumers awaiting acknowledgement. + %% Some messages are checked out to consumers + %% awaiting acknowledgement. %% Therefore we cannot empty all 'ra_indexes'. - %% We only need to delete the indexes from the 'returns' queue because - %% messages of the 'messages' queue are not part of the 'ra_indexes'. + %% We only need to delete the indexes from the 'returns' + %% queue because messages of the 'messages' queue are + %% not part of the 'ra_indexes'. 
lqueue:fold(fun(?MSG(I, _), Acc) -> rabbit_fifo_index:delete(I, Acc) end, Indexes0, Returns) end, - State1 = State0#?MODULE{ra_indexes = Indexes, - messages = lqueue:new(), - messages_total = Total - NumReady, - returns = lqueue:new(), - msg_bytes_enqueue = 0 - }, - Effects0 = [garbage_collection], + State1 = State0#?STATE{ra_indexes = Indexes, + messages = rabbit_fifo_q:new(), + messages_total = Total - NumReady, + returns = lqueue:new(), + msg_bytes_enqueue = 0 + }, + Effects0 = [{aux, force_checkpoint}, garbage_collection], Reply = {purge, NumReady}, {State, _, Effects} = evaluate_limit(Index, false, State0, State1, Effects0), - update_smallest_raft_index(Index, Reply, State, Effects); -apply(#{index := Idx}, #garbage_collection{}, State) -> - update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); + {State, Reply, Effects}; +apply(#{index := _Idx}, #garbage_collection{}, State) -> + {State, ok, [{aux, garbage_collection}]}; apply(Meta, {timeout, expire_msgs}, State) -> checkout(Meta, State, State, []); -apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, - cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = Waiting0, - enqueuers = Enqs0} = State0) -> + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0, + enqueuers = Enqs0} = State0) -> Node = node(Pid), %% if the pid refers to an active or cancelled consumer, %% mark it as suspected and return it to the waiting queue {State1, Effects0} = - maps:fold(fun({_, P} = Cid, C0, {S0, E0}) - when node(P) =:= Node -> - %% the consumer should be returned to waiting - %% and checked out messages should be returned - Effs = consumer_update_active_effects( - S0, Cid, C0, false, suspected_down, E0), - C1 = case MachineVersion of - V when V >= 3 -> - C0; - 2 -> - Checked = C0#consumer.checked_out, - Credit = 
increase_credit(Meta, C0, maps:size(Checked)), - C0#consumer{credit = Credit} - end, - {St, Effs1} = return_all(Meta, S0, Effs, Cid, C1), - %% if the consumer was cancelled there is a chance it got - %% removed when returning hence we need to be defensive here - Waiting = case St#?MODULE.consumers of - #{Cid := C} -> - Waiting0 ++ [{Cid, C}]; - _ -> - Waiting0 - end, - {St#?MODULE{consumers = maps:remove(Cid, St#?MODULE.consumers), - waiting_consumers = Waiting, - last_active = Ts}, - Effs1}; - (_, _, S) -> - S - end, {State0, []}, Cons0), + maps:fold( + fun(CKey, ?CONSUMER_PID(P) = C0, {S0, E0}) + when node(P) =:= Node -> + %% the consumer should be returned to waiting + %% and checked out messages should be returned + Effs = consumer_update_active_effects( + S0, C0, false, suspected_down, E0), + {St, Effs1} = return_all(Meta, S0, Effs, CKey, C0, true), + %% if the consumer was cancelled there is a chance it got + %% removed when returning hence we need to be defensive here + Waiting = case St#?STATE.consumers of + #{CKey := C} -> + Waiting0 ++ [{CKey, C}]; + _ -> + Waiting0 + end, + {St#?STATE{consumers = maps:remove(CKey, St#?STATE.consumers), + waiting_consumers = Waiting, + last_active = Ts}, + Effs1}; + (_, _, S) -> + S + end, {State0, []}, Cons0), WaitingConsumers = update_waiting_consumer_status(Node, State1, suspected_down), %% select a new consumer from the waiting queue and run a checkout - State2 = State1#?MODULE{waiting_consumers = WaitingConsumers}, + State2 = State1#?STATE{waiting_consumers = WaitingConsumers}, {State, Effects1} = activate_next_consumer(State2, Effects0), %% mark any enquers as suspected @@ -536,10 +544,10 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, (_, E) -> E end, Enqs0), Effects = [{monitor, node, Node} | Effects1], - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs}, Effects); -apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, + checkout(Meta, State0, State#?STATE{enqueuers = 
Enqs}, Effects); +apply(#{system_time := Ts} = Meta, {down, Pid, noconnection}, - #?MODULE{consumers = Cons0, + #?STATE{consumers = Cons0, enqueuers = Enqs0} = State0) -> %% A node has been disconnected. This doesn't necessarily mean that %% any processes on this node are down, they _may_ come back so here @@ -553,19 +561,12 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, {State, Effects1} = maps:fold( - fun({_, P} = Cid, #consumer{checked_out = Checked0, - status = up} = C0, + fun(CKey, #consumer{cfg = #consumer_cfg{pid = P}, + status = up} = C0, {St0, Eff}) when node(P) =:= Node -> - C = case MachineVersion of - V when V >= 3 -> - C0#consumer{status = suspected_down}; - 2 -> - Credit = increase_credit(Meta, C0, map_size(Checked0)), - C0#consumer{status = suspected_down, - credit = Credit} - end, - {St, Eff0} = return_all(Meta, St0, Eff, Cid, C), - Eff1 = consumer_update_active_effects(St, Cid, C, false, + C = C0#consumer{status = suspected_down}, + {St, Eff0} = return_all(Meta, St0, Eff, CKey, C, true), + Eff1 = consumer_update_active_effects(St, C, false, suspected_down, Eff0), {St, Eff1}; (_, _, {St, Eff}) -> @@ -581,15 +582,14 @@ apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, % these processes Effects = [{monitor, node, Node} | Effects1], - checkout(Meta, State0, State#?MODULE{enqueuers = Enqs, - last_active = Ts}, Effects); -apply(#{index := Idx} = Meta, {down, Pid, _Info}, State0) -> - {State1, Effects1} = handle_down(Meta, Pid, State0), - {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - service_queue = _SQ0} = State0) -> + checkout(Meta, State0, State#?STATE{enqueuers = Enqs, + last_active = Ts}, Effects); +apply(Meta, {down, Pid, _Info}, State0) -> + {State1, Effects1} = activate_next_consumer(handle_down(Meta, Pid, State0)), + checkout(Meta, State0, 
State1, Effects1); +apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + service_queue = _SQ0} = State0) -> %% A node we are monitoring has come back. %% If we have suspected any processes of being %% down we should now re-issue the monitors for them to detect if they're @@ -604,293 +604,189 @@ apply(Meta, {nodeup, Node}, #?MODULE{consumers = Cons0, ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0), %% mark all consumers as up {State1, Effects1} = - maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc}) + maps:fold(fun(ConsumerKey, ?CONSUMER_PID(P) = C, {SAcc, EAcc}) when (node(P) =:= Node) and (C#consumer.status =/= cancelled) -> - EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId, + EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerKey, C, true, up, EAcc), - {update_or_remove_sub(Meta, ConsumerId, + {update_or_remove_con(Meta, ConsumerKey, C#consumer{status = up}, SAcc), EAcc1}; (_, _, Acc) -> Acc end, {State0, Monitors}, Cons0), Waiting = update_waiting_consumer_status(Node, State1, up), - State2 = State1#?MODULE{enqueuers = Enqs1, - waiting_consumers = Waiting}, + State2 = State1#?STATE{enqueuers = Enqs1, + waiting_consumers = Waiting}, {State, Effects} = activate_next_consumer(State2, Effects1), checkout(Meta, State0, State, Effects); apply(_, {nodedown, _Node}, State) -> {State, ok}; -apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> +apply(Meta, #purge_nodes{nodes = Nodes}, State0) -> {State, Effects} = lists:foldl(fun(Node, {S, E}) -> purge_node(Meta, Node, S, E) end, {State0, []}, Nodes), - update_smallest_raft_index(Idx, ok, State, Effects); -apply(#{index := Idx} = Meta, - #update_config{config = #{dead_letter_handler := NewDLH} = Conf}, - #?MODULE{cfg = #cfg{dead_letter_handler = OldDLH, - resource = QRes}, - dlx = DlxState0} = State0) -> - {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, DlxState0), - State1 = update_config(Conf, State0#?MODULE{dlx = DlxState}), - 
{State, Reply, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(Idx, Reply, State, Effects); -apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> - State = convert(FromVersion, ToVersion, V0State), + {State, ok, Effects}; +apply(Meta, + #update_config{config = #{} = Conf}, + #?STATE{cfg = #cfg{dead_letter_handler = OldDLH, + resource = QRes}, + dlx = DlxState0} = State0) -> + NewDLH = maps:get(dead_letter_handler, Conf, OldDLH), + {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, + DlxState0), + State1 = update_config(Conf, State0#?STATE{dlx = DlxState}), + checkout(Meta, State0, State1, Effects0); +apply(Meta, {machine_version, FromVersion, ToVersion}, V0State) -> + State = convert(Meta, FromVersion, ToVersion, V0State), {State, ok, [{aux, {dlx, setup}}]}; -apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, +apply(Meta, {dlx, _} = Cmd, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, dlx = DlxState0} = State0) -> {DlxState, Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), - State1 = State0#?MODULE{dlx = DlxState}, - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(IncomingRaftIdx, State, Effects); + State1 = State0#?STATE{dlx = DlxState}, + checkout(Meta, State0, State1, Effects0); apply(_Meta, Cmd, State) -> %% handle unhandled commands gracefully rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), {State, ok, []}. 
-convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> - ?MSG(RaftIdx, Header); -convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> - ?MSG(RaftIdx, Header); -convert_msg({'$empty_msg', Header}) -> - %% dummy index - ?MSG(undefined, Header); -convert_msg({'$prefix_msg', Header}) -> - %% dummy index - ?MSG(undefined, Header); -convert_msg({Header, empty}) -> - convert_msg(Header); -convert_msg(Header) when ?IS_HEADER(Header) -> - ?MSG(undefined, Header). - -convert_consumer_v1_to_v2({ConsumerTag, Pid}, CV1) -> - Meta = element(2, CV1), - CheckedOut = element(3, CV1), - NextMsgId = element(4, CV1), - Credit = element(5, CV1), - DeliveryCount = element(6, CV1), - CreditMode = element(7, CV1), - LifeTime = element(8, CV1), - Status = element(9, CV1), - Priority = element(10, CV1), - #consumer{cfg = #consumer_cfg{tag = ConsumerTag, - pid = Pid, - meta = Meta, - credit_mode = CreditMode, - lifetime = LifeTime, - priority = Priority}, - credit = Credit, - status = Status, - delivery_count = DeliveryCount, - next_msg_id = NextMsgId, - checked_out = maps:map( - fun (_, {Tag, _} = Msg) when is_atom(Tag) -> - convert_msg(Msg); - (_, {_Seq, Msg}) -> - convert_msg(Msg) - end, CheckedOut) - }. 
- -convert_v1_to_v2(V1State0) -> - V1State = rabbit_fifo_v1:enqueue_all_pending(V1State0), - IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), - ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), - MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), - ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), - WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), - %% remove all raft idx in messages from index - {_, PrefReturns, _, PrefMsgs} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), - V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> - lqueue:in(convert_msg(Hdr), Acc) - end, lqueue:new(), PrefMsgs), - V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> - lqueue:in(convert_msg(Hdr), Acc) - end, lqueue:new(), PrefReturns), - MessagesV2 = lqueue:fold(fun ({_, Msg}, Acc) -> - lqueue:in(convert_msg(Msg), Acc) - end, V2PrefMsgs, MessagesV1), - ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> - lqueue:in(convert_msg(Msg), Acc) - end, V2PrefReturns, ReturnsV1), - ConsumersV2 = maps:map( - fun (ConsumerId, CV1) -> - convert_consumer_v1_to_v2(ConsumerId, CV1) - end, ConsumersV1), - WaitingConsumersV2 = lists:map( - fun ({ConsumerId, CV1}) -> - {ConsumerId, convert_consumer_v1_to_v2(ConsumerId, CV1)} - end, WaitingConsumersV1), - EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), - EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> - Enq#enqueuer{unused = undefined} - end, EnqueuersV1), - - %% do after state conversion - %% The (old) format of dead_letter_handler in RMQ < v3.10 is: - %% {Module, Function, Args} - %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: - %% undefined | {at_most_once, {Module, Function, Args}} | at_least_once - %% - %% Note that the conversion must convert both from old format to new format - %% as well as from new format to new format. 
The latter is because quorum queues - %% created in RMQ >= v3.10 are still initialised with rabbit_fifo_v0 as described in - %% https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 - DLH = case rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State) of - {_M, _F, _A = [_DLX = undefined|_]} -> - %% queue was declared in RMQ < v3.10 and no DLX configured - undefined; - {_M, _F, _A} = MFA -> - %% queue was declared in RMQ < v3.10 and DLX configured - {at_most_once, MFA}; - Other -> - Other - end, - - Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), - resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), - release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), - dead_letter_handler = DLH, - become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), - %% TODO: what if policy enabling reject_publish was applied before conversion? - overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), - max_length = rabbit_fifo_v1:get_cfg_field(max_length, V1State), - max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), - consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), - delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), - expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) - }, - - MessagesConsumersV2 = maps:fold(fun(_ConsumerId, #consumer{checked_out = Checked}, Acc) -> - Acc + maps:size(Checked) - end, 0, ConsumersV2), - MessagesWaitingConsumersV2 = lists:foldl(fun({_ConsumerId, #consumer{checked_out = Checked}}, Acc) -> - Acc + maps:size(Checked) - end, 0, WaitingConsumersV2), - MessagesTotal = lqueue:len(MessagesV2) + - lqueue:len(ReturnsV2) + - MessagesConsumersV2 + - MessagesWaitingConsumersV2, - - #?MODULE{cfg = Cfg, - messages = MessagesV2, - messages_total = MessagesTotal, - returns = ReturnsV2, - enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, 
V1State), - enqueuers = EnqueuersV2, - ra_indexes = IndexesV1, - release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), - consumers = ConsumersV2, - service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), - msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), - msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), - waiting_consumers = WaitingConsumersV2, - last_active = rabbit_fifo_v1:get_field(last_active, V1State) - }. - -convert_v2_to_v3(#rabbit_fifo{consumers = ConsumersV2} = StateV2) -> - ConsumersV3 = maps:map(fun(_, C) -> - convert_consumer_v2_to_v3(C) - end, ConsumersV2), - StateV2#rabbit_fifo{consumers = ConsumersV3}. - -convert_consumer_v2_to_v3(C = #consumer{cfg = Cfg = #consumer_cfg{credit_mode = simple_prefetch, - meta = #{prefetch := Prefetch}}}) -> - C#consumer{cfg = Cfg#consumer_cfg{credit_mode = {simple_prefetch, Prefetch}}}; -convert_consumer_v2_to_v3(C) -> - C. +convert_v3_to_v4(#{} = _Meta, StateV3) -> + %% TODO: consider emitting release cursors as checkpoints + Messages0 = rabbit_fifo_v3:get_field(messages, StateV3), + Returns0 = lqueue:to_list(rabbit_fifo_v3:get_field(returns, StateV3)), + Consumers0 = rabbit_fifo_v3:get_field(consumers, StateV3), + Consumers = maps:map( + fun (_, #consumer{checked_out = Ch0} = C) -> + Ch = maps:map( + fun (_, ?MSG(I, #{delivery_count := DC} = H)) -> + ?MSG(I, H#{acquired_count => DC}); + (_, Msg) -> + Msg + end, Ch0), + C#consumer{checked_out = Ch} + end, Consumers0), + Returns = lqueue:from_list( + lists:map(fun (?MSG(I, #{delivery_count := DC} = H)) -> + ?MSG(I, H#{acquired_count => DC}); + (Msg) -> + Msg + end, Returns0)), + + Messages = rabbit_fifo_q:from_lqueue(Messages0), + Cfg = rabbit_fifo_v3:get_field(cfg, StateV3), + #?STATE{cfg = Cfg#cfg{unused_1 = ?NIL}, + messages = Messages, + messages_total = rabbit_fifo_v3:get_field(messages_total, StateV3), + returns = Returns, + enqueue_count = rabbit_fifo_v3:get_field(enqueue_count, 
StateV3), + enqueuers = rabbit_fifo_v3:get_field(enqueuers, StateV3), + ra_indexes = rabbit_fifo_v3:get_field(ra_indexes, StateV3), + consumers = Consumers, + service_queue = rabbit_fifo_v3:get_field(service_queue, StateV3), + dlx = rabbit_fifo_v3:get_field(dlx, StateV3), + msg_bytes_enqueue = rabbit_fifo_v3:get_field(msg_bytes_enqueue, StateV3), + msg_bytes_checkout = rabbit_fifo_v3:get_field(msg_bytes_checkout, StateV3), + waiting_consumers = rabbit_fifo_v3:get_field(waiting_consumers, StateV3), + last_active = rabbit_fifo_v3:get_field(last_active, StateV3), + msg_cache = rabbit_fifo_v3:get_field(msg_cache, StateV3), + unused_1 = []}. purge_node(Meta, Node, State, Effects) -> lists:foldl(fun(Pid, {S0, E0}) -> {S, E} = handle_down(Meta, Pid, S0), {S, E0 ++ E} - end, {State, Effects}, all_pids_for(Node, State)). + end, {State, Effects}, + all_pids_for(Node, State)). %% any downs that are not noconnection -handle_down(Meta, Pid, #?MODULE{consumers = Cons0, - enqueuers = Enqs0} = State0) -> +handle_down(Meta, Pid, #?STATE{consumers = Cons0, + enqueuers = Enqs0} = State0) -> % Remove any enqueuer for the down pid - State1 = State0#?MODULE{enqueuers = maps:remove(Pid, Enqs0)}, + State1 = State0#?STATE{enqueuers = maps:remove(Pid, Enqs0)}, {Effects1, State2} = handle_waiting_consumer_down(Pid, State1), % return checked out messages to main queue % Find the consumers for the down pid - DownConsumers = maps:keys( - maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)), - lists:foldl(fun(ConsumerId, {S, E}) -> - cancel_consumer(Meta, ConsumerId, S, E, down) + DownConsumers = maps:keys(maps:filter(fun(_CKey, ?CONSUMER_PID(P)) -> + P =:= Pid + end, Cons0)), + lists:foldl(fun(ConsumerKey, {S, E}) -> + cancel_consumer(Meta, ConsumerKey, S, E, down) end, {State2, Effects1}, DownConsumers). 
consumer_active_flag_update_function( - #?MODULE{cfg = #cfg{consumer_strategy = competing}}) -> - fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) -> - consumer_update_active_effects(State, ConsumerId, Consumer, Active, + #?STATE{cfg = #cfg{consumer_strategy = competing}}) -> + fun(State, _ConsumerKey, Consumer, Active, ActivityStatus, Effects) -> + consumer_update_active_effects(State, Consumer, Active, ActivityStatus, Effects) end; consumer_active_flag_update_function( - #?MODULE{cfg = #cfg{consumer_strategy = single_active}}) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}}) -> fun(_, _, _, _, _, Effects) -> Effects end. handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State) -> + #?STATE{cfg = #cfg{consumer_strategy = competing}} + = State) -> {[], State}; handle_waiting_consumer_down(_Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = []} = State) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State) -> {[], State}; handle_waiting_consumer_down(Pid, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = WaitingConsumers0} = State0) -> + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = WaitingConsumers0} + = State0) -> % get cancel effects for down waiting consumers - Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, + Down = lists:filter(fun({_, ?CONSUMER_PID(P)}) -> P =:= Pid end, WaitingConsumers0), - Effects = lists:foldl(fun ({ConsumerId, _}, Effects) -> + Effects = lists:foldl(fun ({_ConsumerKey, Consumer}, Effects) -> + ConsumerId = consumer_id(Consumer), cancel_consumer_effects(ConsumerId, State0, Effects) end, [], Down), % update state to have only up waiting consumers - StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, + StillUp = lists:filter(fun({_CKey, ?CONSUMER_PID(P)}) -> + P =/= Pid + end, WaitingConsumers0), - State = 
State0#?MODULE{waiting_consumers = StillUp}, + State = State0#?STATE{waiting_consumers = StillUp}, {Effects, State}. update_waiting_consumer_status(Node, - #?MODULE{waiting_consumers = WaitingConsumers}, + #?STATE{waiting_consumers = WaitingConsumers}, Status) -> - [begin - case node(Pid) of - Node -> - {ConsumerId, Consumer#consumer{status = Status}}; - _ -> - {ConsumerId, Consumer} - end - end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, - Consumer#consumer.status =/= cancelled]. + sort_waiting( + [case node(Pid) of + Node -> + {ConsumerKey, Consumer#consumer{status = Status}}; + _ -> + {ConsumerKey, Consumer} + end || {ConsumerKey, ?CONSUMER_PID(Pid) = Consumer} + <- WaitingConsumers, Consumer#consumer.status =/= cancelled]). -spec state_enter(ra_server:ra_state() | eol, state()) -> ra_machine:effects(). -state_enter(RaState, #?MODULE{cfg = #cfg{dead_letter_handler = DLH, - resource = QRes}, - dlx = DlxState} = State) -> +state_enter(RaState, #?STATE{cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}, + dlx = DlxState} = State) -> Effects = rabbit_fifo_dlx:state_enter(RaState, QRes, DLH, DlxState), state_enter0(RaState, State, Effects). 
-state_enter0(leader, #?MODULE{consumers = Cons, - enqueuers = Enqs, - waiting_consumers = WaitingConsumers, - cfg = #cfg{name = Name, - resource = Resource, - become_leader_handler = BLH} - } = State, +state_enter0(leader, #?STATE{consumers = Cons, + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH} + } = State, Effects0) -> TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), % return effects to monitor all current consumers and enqueuers Pids = lists:usort(maps:keys(Enqs) - ++ [P || {_, P} <- maps:keys(Cons)] - ++ [P || {{_, P}, _} <- WaitingConsumers]), + ++ [P || ?CONSUMER_PID(P) <- maps:values(Cons)] + ++ [P || {_, ?CONSUMER_PID(P)} <- WaitingConsumers]), Mons = [{monitor, process, P} || P <- Pids], Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), @@ -902,24 +798,27 @@ state_enter0(leader, #?MODULE{consumers = Cons, {Mod, Fun, Args} -> [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] end; -state_enter0(eol, #?MODULE{enqueuers = Enqs, - consumers = Custs0, - waiting_consumers = WaitingConsumers0}, +state_enter0(eol, #?STATE{enqueuers = Enqs, + consumers = Cons0, + waiting_consumers = WaitingConsumers0}, Effects) -> - Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), - WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, - #{}, WaitingConsumers0), + Custs = maps:fold(fun(_K, ?CONSUMER_PID(P) = V, S) -> + S#{P => V} + end, #{}, Cons0), + WaitingConsumers1 = lists:foldl(fun({_, ?CONSUMER_PID(P) = V}, Acc) -> + Acc#{P => V} + end, #{}, WaitingConsumers0), AllConsumers = maps:merge(Custs, WaitingConsumers1), [{send_msg, P, eol, ra_event} || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ - [{aux, eol} | Effects]; + [{aux, eol} + | Effects]; state_enter0(_, _, Effects) -> %% catch all as not handling all states Effects. 
-spec tick(non_neg_integer(), state()) -> ra_machine:effects(). -tick(Ts, #?MODULE{cfg = #cfg{name = _Name, - resource = QName}} = State) -> +tick(Ts, #?STATE{cfg = #cfg{resource = QName}} = State) -> case is_expired(Ts, State) of true -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; @@ -928,18 +827,18 @@ tick(Ts, #?MODULE{cfg = #cfg{name = _Name, end. -spec overview(state()) -> map(). -overview(#?MODULE{consumers = Cons, - enqueuers = Enqs, - release_cursors = Cursors, - enqueue_count = EnqCount, - msg_bytes_enqueue = EnqueueBytes, - msg_bytes_checkout = CheckoutBytes, - cfg = Cfg, - dlx = DlxState, - waiting_consumers = WaitingConsumers} = State) -> +overview(#?STATE{consumers = Cons, + enqueuers = Enqs, + enqueue_count = EnqCount, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes, + cfg = Cfg, + dlx = DlxState, + messages = Messages, + returns = Returns, + waiting_consumers = WaitingConsumers} = State) -> Conf = #{name => Cfg#cfg.name, resource => Cfg#cfg.resource, - release_cursor_interval => Cfg#cfg.release_cursor_interval, dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler, max_length => Cfg#cfg.max_length, max_bytes => Cfg#cfg.max_bytes, @@ -949,38 +848,47 @@ overview(#?MODULE{consumers = Cons, delivery_limit => Cfg#cfg.delivery_limit }, SacOverview = case active_consumer(Cons) of - {SacConsumerId, _} -> + {SacConsumerKey, SacCon} -> + SacConsumerId = consumer_id(SacCon), NumWaiting = length(WaitingConsumers), #{single_active_consumer_id => SacConsumerId, + single_active_consumer_key => SacConsumerKey, single_active_num_waiting_consumers => NumWaiting}; _ -> #{} end, - Overview = #{type => ?MODULE, + MsgsRet = lqueue:len(Returns), + #{num_hi := MsgsHi, + num_no := MsgsNo} = rabbit_fifo_q:overview(Messages), + + Overview = #{type => ?STATE, config => Conf, num_consumers => map_size(Cons), num_active_consumers => query_consumer_count(State), num_checked_out => num_checked_out(State), num_enqueuers => 
maps:size(Enqs), num_ready_messages => messages_ready(State), - num_in_memory_ready_messages => 0, %% backwards compat + num_ready_messages_high => MsgsHi, + num_ready_messages_normal => MsgsNo, + num_ready_messages_return => MsgsRet, num_messages => messages_total(State), - num_release_cursors => lqueue:len(Cursors), - release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], - release_cursor_enqueue_counter => EnqCount, + num_release_cursors => 0, %% backwards compat enqueue_message_bytes => EnqueueBytes, checkout_message_bytes => CheckoutBytes, + release_cursors => [], %% backwards compat in_memory_message_bytes => 0, %% backwards compat + num_in_memory_ready_messages => 0, %% backwards compat + release_cursor_enqueue_counter => EnqCount, smallest_raft_index => smallest_raft_index(State) }, DlxOverview = rabbit_fifo_dlx:overview(DlxState), maps:merge(maps:merge(Overview, DlxOverview), SacOverview). --spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> +-spec get_checked_out(consumer_key(), msg_id(), msg_id(), state()) -> [delivery_msg()]. -get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> - case Consumers of - #{Cid := #consumer{checked_out = Checked}} -> +get_checked_out(CKey, From, To, #?STATE{consumers = Consumers}) -> + case find_consumer(CKey, Consumers) of + {_CKey, #consumer{checked_out = Checked}} -> [begin ?MSG(I, H) = maps:get(K, Checked), {K, {I, H}} @@ -990,15 +898,22 @@ get_checked_out(Cid, From, To, #?MODULE{consumers = Consumers}) -> end. -spec version() -> pos_integer(). -version() -> 3. +version() -> 4. which_module(0) -> rabbit_fifo_v0; which_module(1) -> rabbit_fifo_v1; -which_module(2) -> ?MODULE; -which_module(3) -> ?MODULE. - --define(AUX, aux_v2). - +which_module(2) -> rabbit_fifo_v3; +which_module(3) -> rabbit_fifo_v3; +which_module(4) -> ?MODULE. + +-define(AUX, aux_v3). 
+ +-record(checkpoint, {index :: ra:index(), + timestamp :: milliseconds(), + smallest_index :: undefined | ra:index(), + messages_total :: non_neg_integer(), + indexes = ?CHECK_MIN_INDEXES :: non_neg_integer(), + unused_1 = ?NIL}). -record(aux_gc, {last_raft_idx = 0 :: ra:index()}). -record(aux, {name :: atom(), capacity :: term(), @@ -1007,58 +922,100 @@ which_module(3) -> ?MODULE. last_decorators_state :: term(), capacity :: term(), gc = #aux_gc{} :: #aux_gc{}, - tick_pid, - cache = #{} :: map()}). + tick_pid :: undefined | pid(), + cache = #{} :: map(), + last_checkpoint :: #checkpoint{}}). init_aux(Name) when is_atom(Name) -> %% TODO: catch specific exception throw if table already exists ok = ra_machine_ets:create_table(rabbit_fifo_usage, [named_table, set, public, {write_concurrency, true}]), - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), #?AUX{name = Name, - capacity = {inactive, Now, 1, 1.0}}. + capacity = {inactive, Now, 1, 1.0}, + last_checkpoint = #checkpoint{index = 0, + timestamp = erlang:system_time(millisecond), + messages_total = 0, + unused_1 = ?NIL}}. 
handle_aux(RaftState, Tag, Cmd, #aux{name = Name, capacity = Cap, - gc = Gc}, Log, MacState) -> + gc = Gc}, RaAux) -> %% convert aux state to new version - Aux = #?AUX{name = Name, - capacity = Cap, - gc = Gc}, - handle_aux(RaftState, Tag, Cmd, Aux, Log, MacState); -handle_aux(leader, _, garbage_collection, Aux, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, Aux), Log}; -handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> - {no_reply, force_eval_gc(Log, MacState, Aux), Log}; -handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, - consumer_id = ConsumerId}, Corr, Pid}, - Aux0, Log0, #?MODULE{cfg = #cfg{delivery_limit = undefined}, - consumers = Consumers}) -> - case Consumers of - #{ConsumerId := #consumer{checked_out = Checked}} -> - {Log, ToReturn} = - maps:fold( - fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> - %% it is possible this is not found if the consumer - %% crashed and the message got removed - case ra_log:fetch(Idx, L0) of - {{_, _, {_, _, Cmd, _}}, L} -> - Msg = get_msg(Cmd), - {L, [{MsgId, Idx, Header, Msg} | Acc]}; - {undefined, L} -> - {L, Acc} - end - end, {Log0, []}, maps:with(MsgIds, Checked)), + AuxV2 = init_aux(Name), + Aux = AuxV2#?AUX{capacity = Cap, + gc = Gc}, + handle_aux(RaftState, Tag, Cmd, Aux, RaAux); +handle_aux(RaftState, Tag, Cmd, AuxV2, RaAux) + when element(1, AuxV2) == aux_v2 -> + Name = element(2, AuxV2), + AuxV3 = init_aux(Name), + handle_aux(RaftState, Tag, Cmd, AuxV3, RaAux); +handle_aux(leader, cast, eval, + #?AUX{last_decorators_state = LastDec, + last_checkpoint = Check0} = Aux0, + RaAux) -> + #?STATE{cfg = #cfg{resource = QName}} = MacState = + ra_aux:machine_state(RaAux), - Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, - lists:sort(ToReturn), []), - {no_reply, Aux0, Log, Appends}; + Ts = erlang:system_time(millisecond), + {Check, Effects0} = do_checkpoints(Ts, Check0, RaAux, false), + + %% this is called after each batch of commands have been applied + %% set timer for message 
expire + %% should really be the last applied index ts but this will have to do + Effects1 = timer_effect(Ts, MacState, Effects0), + case query_notify_decorators_info(MacState) of + LastDec -> + {no_reply, Aux0#?AUX{last_checkpoint = Check}, RaAux, Effects1}; + {MaxActivePriority, IsEmpty} = NewLast -> + Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) + | Effects1], + {no_reply, Aux0#?AUX{last_checkpoint = Check, + last_decorators_state = NewLast}, RaAux, Effects} + end; +handle_aux(_RaftState, cast, eval, + #?AUX{last_checkpoint = Check0} = Aux0, + RaAux) -> + Ts = erlang:system_time(millisecond), + {Check, Effects} = do_checkpoints(Ts, Check0, RaAux, false), + {no_reply, Aux0#?AUX{last_checkpoint = Check}, RaAux, Effects}; +handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, + consumer_key = Key} = Ret, Corr, Pid}, + Aux0, RaAux0) -> + case ra_aux:machine_state(RaAux0) of + #?STATE{cfg = #cfg{delivery_limit = undefined}, + consumers = Consumers} -> + case find_consumer(Key, Consumers) of + {ConsumerKey, #consumer{checked_out = Checked}} -> + {RaAux, ToReturn} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {RA0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + case ra_aux:log_fetch(Idx, RA0) of + {{_Term, _Meta, Cmd}, RA} -> + Msg = get_msg(Cmd), + {RA, [{MsgId, Idx, Header, Msg} | Acc]}; + {undefined, RA} -> + {RA, Acc} + end + end, {RaAux0, []}, maps:with(MsgIds, Checked)), + + Appends = make_requeue(ConsumerKey, {notify, Corr, Pid}, + lists:sort(ToReturn), []), + {no_reply, Aux0, RaAux, Appends}; + _ -> + {no_reply, Aux0, RaAux0} + end; _ -> - {no_reply, Aux0, Log0} + %% for returns with a delivery limit set we can just return as before + {no_reply, Aux0, RaAux0, [{append, Ret, {notify, Corr, Pid}}]} end; -handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, - #?AUX{tick_pid = Pid} = Aux, Log, _) -> +handle_aux(leader, _, {handle_tick, [QName, Overview0, Nodes]}, + 
#?AUX{tick_pid = Pid} = Aux, RaAux) -> + Overview = Overview0#{members_info => ra_aux:members_info(RaAux)}, NewPid = case process_is_alive(Pid) of false -> @@ -1069,110 +1026,105 @@ handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, %% Active TICK pid, do nothing Pid end, - {no_reply, Aux#?AUX{tick_pid = NewPid}, Log}; -handle_aux(_, _, {get_checked_out, ConsumerId, MsgIds}, - Aux0, Log0, #?MODULE{cfg = #cfg{}, - consumers = Consumers}) -> + + %% TODO: check consumer timeouts + {no_reply, Aux#?AUX{tick_pid = NewPid}, RaAux, []}; +handle_aux(_, _, {get_checked_out, ConsumerKey, MsgIds}, Aux0, RaAux0) -> + #?STATE{cfg = #cfg{}, + consumers = Consumers} = ra_aux:machine_state(RaAux0), case Consumers of - #{ConsumerId := #consumer{checked_out = Checked}} -> - {Log, IdMsgs} = + #{ConsumerKey := #consumer{checked_out = Checked}} -> + {RaState, IdMsgs} = maps:fold( - fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + fun (MsgId, ?MSG(Idx, Header), {S0, Acc}) -> %% it is possible this is not found if the consumer %% crashed and the message got removed - case ra_log:fetch(Idx, L0) of - {{_, _, {_, _, Cmd, _}}, L} -> + case ra_aux:log_fetch(Idx, S0) of + {{_Term, _Meta, Cmd}, S} -> Msg = get_msg(Cmd), - {L, [{MsgId, {Header, Msg}} | Acc]}; - {undefined, L} -> - {L, Acc} + {S, [{MsgId, {Header, Msg}} | Acc]}; + {undefined, S} -> + {S, Acc} end - end, {Log0, []}, maps:with(MsgIds, Checked)), - {reply, {ok, IdMsgs}, Aux0, Log}; + end, {RaAux0, []}, maps:with(MsgIds, Checked)), + {reply, {ok, IdMsgs}, Aux0, RaState}; _ -> - {reply, {error, consumer_not_found}, Aux0, Log0} - end; -handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, - Aux0, Log, #?MODULE{}) -> - %% for returns with a delivery limit set we can just return as before - {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; -handle_aux(leader, cast, eval, #?AUX{last_decorators_state = LastDec} = Aux0, - Log, #?MODULE{cfg = #cfg{resource = QName}} = MacState) -> - %% this is called after each batch of 
commands have been applied - %% set timer for message expire - %% should really be the last applied index ts but this will have to do - Ts = erlang:system_time(millisecond), - Effects0 = timer_effect(Ts, MacState, []), - case query_notify_decorators_info(MacState) of - LastDec -> - {no_reply, Aux0, Log, Effects0}; - {MaxActivePriority, IsEmpty} = NewLast -> - Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) - | Effects0], - {no_reply, Aux0#?AUX{last_decorators_state = NewLast}, Log, Effects} + {reply, {error, consumer_not_found}, Aux0, RaAux0} end; -handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> - {no_reply, Aux0, Log}; -handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, - Log, _MacState) +handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, RaAux) when Cmd == active orelse Cmd == inactive -> - {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, Log}; + {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, RaAux}; handle_aux(_RaState, cast, tick, #?AUX{name = Name, capacity = Use0} = State0, - Log, MacState) -> + RaAux) -> true = ets:insert(rabbit_fifo_usage, {Name, capacity(Use0)}), - Aux = eval_gc(Log, MacState, State0), - {no_reply, Aux, Log}; -handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, Log, _) -> + Aux = eval_gc(RaAux, ra_aux:machine_state(RaAux), State0), + Effs = case smallest_raft_index(ra_aux:machine_state(RaAux)) of + undefined -> + [{release_cursor, ra_aux:last_applied(RaAux)}]; + Smallest -> + [{release_cursor, Smallest - 1}] + end, + {no_reply, Aux, RaAux, Effs}; +handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, RaAux) -> ets:delete(rabbit_fifo_usage, Name), - {no_reply, Aux, Log}; + {no_reply, Aux, RaAux}; handle_aux(_RaState, {call, _From}, oldest_entry_timestamp, - #?AUX{cache = Cache} = Aux0, - Log0, #?MODULE{} = State) -> - {CachedIdx, CachedTs} = maps:get(oldest_entry, Cache, {undefined, undefined}), - case smallest_raft_index(State) of + #?AUX{cache = Cache} = 
Aux0, RaAux0) -> + {CachedIdx, CachedTs} = maps:get(oldest_entry, Cache, + {undefined, undefined}), + case smallest_raft_index(ra_aux:machine_state(RaAux0)) of %% if there are no entries, we return current timestamp %% so that any previously obtained entries are considered %% older than this undefined -> Aux1 = Aux0#?AUX{cache = maps:remove(oldest_entry, Cache)}, - {reply, {ok, erlang:system_time(millisecond)}, Aux1, Log0}; + {reply, {ok, erlang:system_time(millisecond)}, Aux1, RaAux0}; CachedIdx -> %% cache hit - {reply, {ok, CachedTs}, Aux0, Log0}; + {reply, {ok, CachedTs}, Aux0, RaAux0}; Idx when is_integer(Idx) -> - case ra_log:fetch(Idx, Log0) of - {{_, _, {_, #{ts := Timestamp}, _, _}}, Log1} -> + case ra_aux:log_fetch(Idx, RaAux0) of + {{_Term, #{ts := Timestamp}, _Cmd}, RaAux} -> Aux1 = Aux0#?AUX{cache = Cache#{oldest_entry => {Idx, Timestamp}}}, - {reply, {ok, Timestamp}, Aux1, Log1}; - {undefined, Log1} -> + {reply, {ok, Timestamp}, Aux1, RaAux}; + {undefined, RaAux} -> %% fetch failed - {reply, {error, failed_to_get_timestamp}, Aux0, Log1} + {reply, {error, failed_to_get_timestamp}, Aux0, RaAux} end end; handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, - Log0, MacState) -> + RaAux0) -> + MacState = ra_aux:machine_state(RaAux0), case query_peek(Pos, MacState) of {ok, ?MSG(Idx, Header)} -> %% need to re-hydrate from the log - {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + {{_, _, Cmd}, RaAux} = ra_aux:log_fetch(Idx, RaAux0), Msg = get_msg(Cmd), - {reply, {ok, {Header, Msg}}, Aux0, Log}; + {reply, {ok, {Header, Msg}}, Aux0, RaAux}; Err -> - {reply, Err, Aux0, Log0} + {reply, Err, Aux0, RaAux0} end; -handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, Log, - #?MODULE{dlx = DlxState, - cfg = #cfg{dead_letter_handler = DLH, - resource = QRes}}) -> +handle_aux(_, _, garbage_collection, Aux, RaAux) -> + {no_reply, force_eval_gc(RaAux, Aux), RaAux}; +handle_aux(_RaState, _, force_checkpoint, + #?AUX{last_checkpoint = Check0} = Aux, RaAux) -> + Ts = 
erlang:system_time(millisecond), + {Check, Effects} = do_checkpoints(Ts, Check0, RaAux, true), + {no_reply, Aux#?AUX{last_checkpoint= Check}, RaAux, Effects}; +handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, RaAux) -> + #?STATE{dlx = DlxState, + cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}} = ra_aux:machine_state(RaAux), Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, QRes, DLH, DlxState), - {no_reply, Aux, Log}. + {no_reply, Aux, RaAux}. -eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, +eval_gc(RaAux, MacState, #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> - {Idx, _} = ra_log:last_index_term(Log), + {Idx, _} = ra_aux:log_last_index_term(RaAux), + #?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux), {memory, Mem} = erlang:process_info(self(), memory), case messages_total(MacState) of 0 when Idx > LastGcIdx andalso @@ -1187,9 +1139,10 @@ eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}} = MacState, AuxState end. -force_eval_gc(Log, #?MODULE{cfg = #cfg{resource = QR}}, +force_eval_gc(RaAux, #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> - {Idx, _} = ra_log:last_index_term(Log), + {Idx, _} = ra_aux:log_last_index_term(RaAux), + #?STATE{cfg = #cfg{resource = QR}} = ra_aux:machine_state(RaAux), {memory, Mem} = erlang:process_info(self(), memory), case Idx > LastGcIdx of true -> @@ -1212,7 +1165,7 @@ process_is_alive(_) -> query_messages_ready(State) -> messages_ready(State). -query_messages_checked_out(#?MODULE{consumers = Consumers}) -> +query_messages_checked_out(#?STATE{consumers = Consumers}) -> maps:fold(fun (_, #consumer{checked_out = C}, S) -> maps:size(C) + S end, 0, Consumers). @@ -1220,32 +1173,34 @@ query_messages_checked_out(#?MODULE{consumers = Consumers}) -> query_messages_total(State) -> messages_total(State). 
-query_processes(#?MODULE{enqueuers = Enqs, consumers = Cons0}) -> - Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), +query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) -> + Cons = maps:fold(fun(_, ?CONSUMER_PID(P) = V, S) -> + S#{P => V} + end, #{}, Cons0), maps:keys(maps:merge(Enqs, Cons)). -query_ra_indexes(#?MODULE{ra_indexes = RaIndexes}) -> +query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) -> RaIndexes. -query_waiting_consumers(#?MODULE{waiting_consumers = WaitingConsumers}) -> +query_waiting_consumers(#?STATE{waiting_consumers = WaitingConsumers}) -> WaitingConsumers. -query_consumer_count(#?MODULE{consumers = Consumers, - waiting_consumers = WaitingConsumers}) -> - Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> +query_consumer_count(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers}) -> + Up = maps:filter(fun(_ConsumerKey, #consumer{status = Status}) -> Status =/= suspected_down end, Consumers), maps:size(Up) + length(WaitingConsumers). 
-query_consumers(#?MODULE{consumers = Consumers, - waiting_consumers = WaitingConsumers, - cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> +query_consumers(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers, + cfg = #cfg{consumer_strategy = ConsumerStrategy}} + = State) -> ActiveActivityStatusFun = - case ConsumerStrategy of + case ConsumerStrategy of competing -> - fun(_ConsumerId, - #consumer{status = Status}) -> + fun(_ConsumerKey, #consumer{status = Status}) -> case Status of suspected_down -> {false, Status}; @@ -1255,7 +1210,7 @@ query_consumers(#?MODULE{consumers = Consumers, end; single_active -> SingleActiveConsumer = query_single_active_consumer(State), - fun({Tag, Pid} = _Consumer, _) -> + fun(_, ?CONSUMER_TAG_PID(Tag, Pid)) -> case SingleActiveConsumer of {value, {Tag, Pid}} -> {true, single_active}; @@ -1267,11 +1222,13 @@ query_consumers(#?MODULE{consumers = Consumers, FromConsumers = maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> Acc; - (Key = {Tag, Pid}, - #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer, + (Key, + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + meta = Meta}} = Consumer, Acc) -> {Active, ActivityStatus} = - ActiveActivityStatusFun(Key, Consumer), + ActiveActivityStatusFun(Key, Consumer), maps:put(Key, {Pid, Tag, maps:get(ack, Meta, undefined), @@ -1282,46 +1239,49 @@ query_consumers(#?MODULE{consumers = Consumers, maps:get(username, Meta, undefined)}, Acc) end, #{}, Consumers), - FromWaitingConsumers = - lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> - Acc; - (Key = {{Tag, Pid}, - #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer}, - Acc) -> - {Active, ActivityStatus} = - ActiveActivityStatusFun(Key, Consumer), - maps:put(Key, - {Pid, Tag, - maps:get(ack, Meta, undefined), - maps:get(prefetch, Meta, undefined), - Active, - ActivityStatus, - maps:get(args, Meta, []), - maps:get(username, Meta, undefined)}, - Acc) - end, #{}, WaitingConsumers), - 
maps:merge(FromConsumers, FromWaitingConsumers). - - -query_single_active_consumer( - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - consumers = Consumers}) -> + FromWaitingConsumers = + lists:foldl( + fun ({_, #consumer{status = cancelled}}, + Acc) -> + Acc; + ({Key, + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + meta = Meta}} = Consumer}, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun(Key, Consumer), + maps:put(Key, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, WaitingConsumers), + maps:merge(FromConsumers, FromWaitingConsumers). + + +query_single_active_consumer(#?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> case active_consumer(Consumers) of undefined -> {error, no_value}; - {ActiveCid, _} -> - {value, ActiveCid} + {_CKey, ?CONSUMER_TAG_PID(Tag, Pid)} -> + {value, {Tag, Pid}} end; query_single_active_consumer(_) -> disabled. -query_stat(#?MODULE{consumers = Consumers} = State) -> +query_stat(#?STATE{consumers = Consumers} = State) -> {messages_ready(State), maps:size(Consumers)}. -query_in_memory_usage(#?MODULE{ }) -> +query_in_memory_usage(#?STATE{ }) -> {0, 0}. -query_stat_dlx(#?MODULE{dlx = DlxState}) -> +query_stat_dlx(#?STATE{dlx = DlxState}) -> rabbit_fifo_dlx:stat(DlxState). query_peek(Pos, State0) when Pos > 0 -> @@ -1335,7 +1295,7 @@ query_peek(Pos, State0) when Pos > 0 -> query_peek(Pos-1, State) end. -query_notify_decorators_info(#?MODULE{consumers = Consumers} = State) -> +query_notify_decorators_info(#?STATE{consumers = Consumers} = State) -> MaxActivePriority = maps:fold( fun(_, #consumer{credit = C, status = up, @@ -1359,14 +1319,19 @@ usage(Name) when is_atom(Name) -> [{_, Use}] -> Use end. +-spec is_v4() -> boolean(). 
+is_v4() -> + %% Quorum queue v4 is introduced in RabbitMQ 4.0.0 + rabbit_feature_flags:is_enabled('rabbitmq_4.0.0'). + %%% Internal -messages_ready(#?MODULE{messages = M, - returns = R}) -> - lqueue:len(M) + lqueue:len(R). +messages_ready(#?STATE{messages = M, + returns = R}) -> + rabbit_fifo_q:len(M) + lqueue:len(R). -messages_total(#?MODULE{messages_total = Total, - dlx = DlxState}) -> +messages_total(#?STATE{messages_total = Total, + dlx = DlxState}) -> {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), Total + DlxTotal. @@ -1375,18 +1340,18 @@ update_use({inactive, _, _, _} = CUInfo, inactive) -> update_use({active, _, _} = CUInfo, active) -> CUInfo; update_use({active, Since, Avg}, inactive) -> - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), {inactive, Now, Now - Since, Avg}; update_use({inactive, Since, Active, Avg}, active) -> - Now = erlang:monotonic_time(micro_seconds), + Now = erlang:monotonic_time(microsecond), {active, Now, use_avg(Active, Now - Since, Avg)}. capacity({active, Since, Avg}) -> - use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); + use_avg(erlang:monotonic_time(microsecond) - Since, 0, Avg); capacity({inactive, _, 1, 1.0}) -> 1.0; capacity({inactive, Since, Active, Avg}) -> - use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). + use_avg(Active, erlang:monotonic_time(microsecond) - Since, Avg). use_avg(0, 0, Avg) -> Avg; @@ -1400,119 +1365,161 @@ moving_average(Time, HalfLife, Next, Current) -> Weight = math:exp(Time * math:log(0.5) / HalfLife), Next * (1 - Weight) + Current * Weight. -num_checked_out(#?MODULE{consumers = Cons}) -> +num_checked_out(#?STATE{consumers = Cons}) -> maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> maps:size(C) + Acc end, 0, Cons). 
-cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = competing}} = State, +cancel_consumer(Meta, ConsumerKey, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State, Effects, Reason) -> - cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); -cancel_consumer(Meta, ConsumerId, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = []} = State, + cancel_consumer0(Meta, ConsumerKey, State, Effects, Reason); +cancel_consumer(Meta, ConsumerKey, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State, Effects, Reason) -> %% single active consumer on, no consumers are waiting - cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); -cancel_consumer(Meta, ConsumerId, - #?MODULE{consumers = Cons0, - cfg = #cfg{consumer_strategy = single_active}, - waiting_consumers = Waiting0} = State0, + cancel_consumer0(Meta, ConsumerKey, State, Effects, Reason); +cancel_consumer(Meta, ConsumerKey, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0} = State0, Effects0, Reason) -> %% single active consumer on, consumers are waiting case Cons0 of - #{ConsumerId := #consumer{status = _}} -> + #{ConsumerKey := #consumer{status = _}} -> % The active consumer is to be removed - {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, - Effects0, Reason), - activate_next_consumer(State1, Effects1); + cancel_consumer0(Meta, ConsumerKey, State0, + Effects0, Reason); _ -> % The cancelled consumer is not active or cancelled % Just remove it from idle_consumers - Waiting = lists:keydelete(ConsumerId, 1, Waiting0), - Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), - % A waiting consumer isn't supposed to have any checked out messages, - % so nothing special to do here - {State0#?MODULE{waiting_consumers = Waiting}, Effects} + case lists:keyfind(ConsumerKey, 1, Waiting0) of + {_, ?CONSUMER_TAG_PID(T, P)} -> + Waiting = 
lists:keydelete(ConsumerKey, 1, Waiting0), + Effects = cancel_consumer_effects({T, P}, State0, Effects0), + % A waiting consumer isn't supposed to have any checked out messages, + % so nothing special to do here + {State0#?STATE{waiting_consumers = Waiting}, Effects}; + _ -> + {State0, Effects0} + end end. -consumer_update_active_effects(#?MODULE{cfg = #cfg{resource = QName}}, - ConsumerId, - #consumer{cfg = #consumer_cfg{meta = Meta}}, +consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}}, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag, + meta = Meta}}, Active, ActivityStatus, Effects) -> Ack = maps:get(ack, Meta, undefined), Prefetch = maps:get(prefetch, Meta, undefined), Args = maps:get(args, Meta, []), [{mod_call, rabbit_quorum_queue, update_consumer_handler, - [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]} + [QName, {CTag, CPid}, false, Ack, Prefetch, Active, ActivityStatus, Args]} | Effects]. -cancel_consumer0(Meta, ConsumerId, - #?MODULE{consumers = C0} = S0, Effects0, Reason) -> +cancel_consumer0(Meta, ConsumerKey, + #?STATE{consumers = C0} = S0, Effects0, Reason) -> case C0 of - #{ConsumerId := Consumer} -> - {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, + #{ConsumerKey := Consumer} -> + {S, Effects2} = maybe_return_all(Meta, ConsumerKey, Consumer, S0, Effects0, Reason), %% The effects are emitted before the consumer is actually removed %% if the consumer has unacked messages. This is a bit weird but %% in line with what classic queues do (from an external point of %% view) - Effects = cancel_consumer_effects(ConsumerId, S, Effects2), + Effects = cancel_consumer_effects(consumer_id(Consumer), S, Effects2), {S, Effects}; _ -> %% already removed: do nothing {S0, Effects0} end. 
-activate_next_consumer(#?MODULE{cfg = #cfg{consumer_strategy = competing}} = State0, - Effects0) -> - {State0, Effects0}; -activate_next_consumer(#?MODULE{consumers = Cons, - waiting_consumers = Waiting0} = State0, +activate_next_consumer({State, Effects}) -> + activate_next_consumer(State, Effects). + +activate_next_consumer(#?STATE{cfg = #cfg{consumer_strategy = competing}} = State, + Effects) -> + {State, Effects}; +activate_next_consumer(#?STATE{consumers = Cons0, + waiting_consumers = Waiting0} = State0, Effects0) -> - case has_active_consumer(Cons) of - false -> - case lists:filter(fun ({_, #consumer{status = Status}}) -> - Status == up - end, Waiting0) of - [{NextConsumerId, #consumer{cfg = NextCCfg} = NextConsumer} | _] -> - Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), - Consumer = case maps:get(NextConsumerId, Cons, undefined) of - undefined -> - NextConsumer; - Existing -> - %% there was an exisiting non-active consumer - %% just update the existing cancelled consumer - %% with the new config - Existing#consumer{cfg = NextCCfg} - end, - #?MODULE{service_queue = ServiceQueue} = State0, - ServiceQueue1 = maybe_queue_consumer(NextConsumerId, - Consumer, - ServiceQueue), - State = State0#?MODULE{consumers = Cons#{NextConsumerId => Consumer}, - service_queue = ServiceQueue1, - waiting_consumers = Remaining}, - Effects = consumer_update_active_effects(State, NextConsumerId, - Consumer, true, - single_active, Effects0), - {State, Effects}; - [] -> - {State0, Effects0} - end; - true -> + %% invariant, the waiting list always need to be sorted by consumers that are + %% up - then by priority + NextConsumer = + case Waiting0 of + [{_, #consumer{status = up}} = Next | _] -> + Next; + _ -> + undefined + end, + + case {active_consumer(Cons0), NextConsumer} of + {undefined, {NextCKey, #consumer{cfg = NextCCfg} = NextC}} -> + Remaining = tl(Waiting0), + %% TODO: can this happen? 
+ Consumer = case maps:get(NextCKey, Cons0, undefined) of + undefined -> + NextC; + Existing -> + %% there was an exisiting non-active consumer + %% just update the existing cancelled consumer + %% with the new config + Existing#consumer{cfg = NextCCfg} + end, + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextCKey, + Consumer, + ServiceQueue), + State = State0#?STATE{consumers = Cons0#{NextCKey => Consumer}, + service_queue = ServiceQueue1, + waiting_consumers = Remaining}, + Effects = consumer_update_active_effects(State, Consumer, + true, single_active, + Effects0), + {State, Effects}; + {{ActiveCKey, ?CONSUMER_PRIORITY(ActivePriority) = + #consumer{checked_out = ActiveChecked} = Active}, + {NextCKey, ?CONSUMER_PRIORITY(WaitingPriority) = Consumer}} + when WaitingPriority > ActivePriority andalso + map_size(ActiveChecked) == 0 -> + Remaining = tl(Waiting0), + %% the next consumer is a higher priority and should take over + %% and this consumer does not have any pending messages + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextCKey, + Consumer, + ServiceQueue), + Cons1 = Cons0#{NextCKey => Consumer}, + Cons = maps:remove(ActiveCKey, Cons1), + Waiting = add_waiting({ActiveCKey, Active}, Remaining), + State = State0#?STATE{consumers = Cons, + service_queue = ServiceQueue1, + waiting_consumers = Waiting}, + Effects = consumer_update_active_effects(State, Consumer, + true, single_active, + Effects0), + {State, Effects}; + {{ActiveCKey, ?CONSUMER_PRIORITY(ActivePriority) = Active}, + {_NextCKey, ?CONSUMER_PRIORITY(WaitingPriority)}} + when WaitingPriority > ActivePriority -> + %% A higher priority consumer has attached but the current one has + %% pending messages + Cons = maps:update(ActiveCKey, + Active#consumer{status = quiescing}, + Cons0), + {State0#?STATE{consumers = Cons}, Effects0}; + _ -> + %% no activation {State0, Effects0} end. 
-has_active_consumer(Consumers) -> - active_consumer(Consumers) /= undefined. - -active_consumer({Cid, #consumer{status = up} = Consumer, _I}) -> - {Cid, Consumer}; -active_consumer({_Cid, #consumer{status = _}, I}) -> +active_consumer({CKey, #consumer{status = Status} = Consumer, _I}) + when Status == up orelse Status == quiescing -> + {CKey, Consumer}; +active_consumer({_CKey, #consumer{status = _}, I}) -> active_consumer(maps:next(I)); active_consumer(none) -> undefined; @@ -1520,68 +1527,63 @@ active_consumer(M) when is_map(M) -> I = maps:iterator(M), active_consumer(maps:next(I)). -maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, +is_active(_ConsumerKey, #?STATE{cfg = #cfg{consumer_strategy = competing}}) -> + %% all competing consumers are potentially active + true; +is_active(ConsumerKey, #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + ConsumerKey == active_consumer(Consumers). + +maybe_return_all(#{system_time := Ts} = Meta, ConsumerKey, #consumer{cfg = CCfg} = Consumer, S0, Effects0, Reason) -> case Reason of - consumer_cancel -> - {update_or_remove_sub( - Meta, ConsumerId, + cancel -> + {update_or_remove_con( + Meta, ConsumerKey, Consumer#consumer{cfg = CCfg#consumer_cfg{lifetime = once}, credit = 0, status = cancelled}, S0), Effects0}; - down -> - {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), - {S1#?MODULE{consumers = maps:remove(ConsumerId, S1#?MODULE.consumers), - last_active = Ts}, - Effects1} + _ -> + {S1, Effects} = return_all(Meta, S0, Effects0, ConsumerKey, + Consumer, Reason == down), + {S1#?STATE{consumers = maps:remove(ConsumerKey, S1#?STATE.consumers), + last_active = Ts}, + Effects} end. 
apply_enqueue(#{index := RaftIdx, - system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> - case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of + system_time := Ts} = Meta, From, + Seq, RawMsg, Size, State0) -> + case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, Size, [], State0) of {ok, State1, Effects1} -> - {State, ok, Effects} = checkout(Meta, State0, State1, Effects1), - {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; + checkout(Meta, State0, State1, Effects1); {out_of_sequence, State, Effects} -> {State, not_enqueued, Effects}; {duplicate, State, Effects} -> {State, ok, Effects} end. -decr_total(#?MODULE{messages_total = Tot} = State) -> - State#?MODULE{messages_total = Tot - 1}. +decr_total(#?STATE{messages_total = Tot} = State) -> + State#?STATE{messages_total = Tot - 1}. -drop_head(#?MODULE{ra_indexes = Indexes0} = State0, Effects) -> +drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects) -> case take_next_msg(State0) of {?MSG(Idx, Header) = Msg, State1} -> Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State2 = State1#?MODULE{ra_indexes = Indexes}, + State2 = State1#?STATE{ra_indexes = Indexes}, State3 = decr_total(add_bytes_drop(Header, State2)), - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState} = State = State3, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState} = State = State3, {_, DlxEffects} = rabbit_fifo_dlx:discard([Msg], maxlen, DLH, DlxState), {State, DlxEffects ++ Effects}; empty -> {State0, Effects} end. -maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, - RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> - update_expiry_header(RaCmdTs, PerQueueMsgTTL, Header); -maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, - RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> - %% rabbit_quorum_queue will leave the properties decoded if and only if - %% per message message TTL is set. 
- %% We already check in the channel that expiration must be valid. - {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), - TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), - update_expiry_header(RaCmdTs, TTL, Header); maybe_set_msg_ttl(Msg, RaCmdTs, Header, - #?MODULE{cfg = #cfg{msg_ttl = MsgTTL}}) -> + #?STATE{cfg = #cfg{msg_ttl = MsgTTL}}) -> case mc:is(Msg) of true -> TTL = min(MsgTTL, mc:ttl(Msg)), @@ -1590,6 +1592,20 @@ maybe_set_msg_ttl(Msg, RaCmdTs, Header, Header end. +maybe_set_msg_delivery_count(Msg, Header) -> + case mc:is(Msg) of + true -> + case mc:get_annotation(delivery_count, Msg) of + undefined -> + Header; + DelCnt -> + update_header(delivery_count, fun (_) -> DelCnt end, + DelCnt, Header) + end; + false -> + Header + end. + update_expiry_header(_, undefined, Header) -> Header; update_expiry_header(RaCmdTs, 0, Header) -> @@ -1605,64 +1621,43 @@ update_expiry_header(RaCmdTs, TTL, Header) -> update_expiry_header(ExpiryTs, Header) -> update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). -maybe_store_release_cursor(RaftIdx, - #?MODULE{cfg = #cfg{release_cursor_interval = {Base, C}} = Cfg, - enqueue_count = EC, - release_cursors = Cursors0} = State0) - when EC >= C -> - case messages_total(State0) of - 0 -> - %% message must have been immediately dropped - State0#?MODULE{enqueue_count = 0}; - Total -> - Interval = case Base of - 0 -> 0; - _ -> - min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) - end, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = - {Base, Interval}}}, - Dehydrated = dehydrate_state(State), - Cursor = {release_cursor, RaftIdx, Dehydrated}, - Cursors = lqueue:in(Cursor, Cursors0), - State#?MODULE{enqueue_count = 0, - release_cursors = Cursors} - end; -maybe_store_release_cursor(_RaftIdx, State) -> - State. 
- -maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, - #?MODULE{msg_bytes_enqueue = Enqueue, - enqueue_count = EnqCount, - messages = Messages, - messages_total = Total} = State0) -> +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, + {_MetaSize, BodySize}, + Effects, #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + messages = Messages, + messages_total = Total} = State0) -> % direct enqueue without tracking - Size = message_size(RawMsg), - Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Size = BodySize, + Header0 = maybe_set_msg_ttl(RawMsg, Ts, BodySize, State0), + Header = maybe_set_msg_delivery_count(RawMsg, Header0), Msg = ?MSG(RaftIdx, Header), - State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, - enqueue_count = EnqCount + 1, - messages_total = Total + 1, - messages = lqueue:in(Msg, Messages) - }, + PTag = priority_tag(RawMsg), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = rabbit_fifo_q:in(PTag, Msg, Messages) + }, {ok, State, Effects}; -maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, - #?MODULE{msg_bytes_enqueue = Enqueue, - enqueue_count = EnqCount, - enqueuers = Enqueuers0, - messages = Messages, - messages_total = Total} = State0) -> +maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, + {_MetaSize, BodySize} = Size, + Effects0, #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + enqueuers = Enqueuers0, + messages = Messages, + messages_total = Total} = State0) -> case maps:get(From, Enqueuers0, undefined) of undefined -> - State1 = State0#?MODULE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, - RawMsg, Effects0, State1), + RawMsg, Size, Effects0, + State1), {Res, State, [{monitor, process, From} | Effects]}; #enqueuer{next_seqno = MsgSeqNo} = 
Enq0 -> % it is the next expected seqno - Size = message_size(RawMsg), - Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Header0 = maybe_set_msg_ttl(RawMsg, Ts, BodySize, State0), + Header = maybe_set_msg_delivery_count(RawMsg, Header0), Msg = ?MSG(RaftIdx, Header), Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, MsgCache = case can_immediately_deliver(State0) of @@ -1671,13 +1666,14 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, false -> undefined end, - State = State0#?MODULE{msg_bytes_enqueue = Enqueue + Size, - enqueue_count = EnqCount + 1, - messages_total = Total + 1, - messages = lqueue:in(Msg, Messages), - enqueuers = Enqueuers0#{From => Enq}, - msg_cache = MsgCache - }, + PTag = priority_tag(RawMsg), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + BodySize, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = rabbit_fifo_q:in(PTag, Msg, Messages), + enqueuers = Enqueuers0#{From => Enq}, + msg_cache = MsgCache + }, {ok, State, Effects0}; #enqueuer{next_seqno = Next} when MsgSeqNo > Next -> @@ -1688,52 +1684,53 @@ maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, {duplicate, State0, Effects0} end. -return(#{index := IncomingRaftIdx, machine_version := MachineVersion} = Meta, - ConsumerId, Returned, Effects0, State0) -> - {State1, Effects1} = maps:fold( - fun(MsgId, Msg, {S0, E0}) -> - return_one(Meta, MsgId, Msg, S0, E0, ConsumerId) - end, {State0, Effects0}, Returned), - State2 = - case State1#?MODULE.consumers of - #{ConsumerId := Con} - when MachineVersion >= 3 -> - update_or_remove_sub(Meta, ConsumerId, Con, State1); - #{ConsumerId := Con0} - when MachineVersion =:= 2 -> - Credit = increase_credit(Meta, Con0, map_size(Returned)), - Con = Con0#consumer{credit = Credit}, - update_or_remove_sub(Meta, ConsumerId, Con, State1); - _ -> - State1 - end, - {State, ok, Effects} = checkout(Meta, State0, State2, Effects1), - update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
+return(#{} = Meta, ConsumerKey, MsgIds, IncrDelCount, Anns, + Checked, Effects0, State0) + when is_map(Anns) -> + %% We requeue in the same order as messages got returned by the client. + {State1, Effects1} = + lists:foldl( + fun(MsgId, Acc = {S0, E0}) -> + case Checked of + #{MsgId := Msg} -> + return_one(Meta, MsgId, Msg, IncrDelCount, Anns, + S0, E0, ConsumerKey); + #{} -> + Acc + end + end, {State0, Effects0}, MsgIds), + State2 = case State1#?STATE.consumers of + #{ConsumerKey := Con} -> + update_or_remove_con(Meta, ConsumerKey, Con, State1); + _ -> + State1 + end, + checkout(Meta, State0, State2, Effects1). % used to process messages that are finished -complete(Meta, ConsumerId, [DiscardedMsgId], +complete(Meta, ConsumerKey, [MsgId], #consumer{checked_out = Checked0} = Con0, - #?MODULE{ra_indexes = Indexes0, - msg_bytes_checkout = BytesCheckout, - messages_total = Tot} = State0) -> - case maps:take(DiscardedMsgId, Checked0) of + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + case maps:take(MsgId, Checked0) of {?MSG(Idx, Hdr), Checked} -> SettledSize = get_header(size, Hdr), Indexes = rabbit_fifo_index:delete(Idx, Indexes0), Con = Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, 1)}, - State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State1#?MODULE{ra_indexes = Indexes, - msg_bytes_checkout = BytesCheckout - SettledSize, - messages_total = Tot - 1}; + credit = increase_credit(Con0, 1)}, + State1 = update_or_remove_con(Meta, ConsumerKey, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - 1}; error -> State0 end; -complete(Meta, ConsumerId, DiscardedMsgIds, +complete(Meta, ConsumerKey, MsgIds, #consumer{checked_out = Checked0} = Con0, - #?MODULE{ra_indexes = Indexes0, - msg_bytes_checkout = BytesCheckout, - messages_total = Tot} = State0) -> + #?STATE{ra_indexes = Indexes0, + 
msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> {SettledSize, Checked, Indexes} = lists:foldl( fun (MsgId, {S0, Ch0, Idxs}) -> @@ -1744,94 +1741,51 @@ complete(Meta, ConsumerId, DiscardedMsgIds, error -> {S0, Ch0, Idxs} end - end, {0, Checked0, Indexes0}, DiscardedMsgIds), + end, {0, Checked0, Indexes0}, MsgIds), Len = map_size(Checked0) - map_size(Checked), Con = Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, Len)}, - State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), - State1#?MODULE{ra_indexes = Indexes, - msg_bytes_checkout = BytesCheckout - SettledSize, - messages_total = Tot - Len}. - -increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = once}, - credit = Credit}, _) -> + credit = increase_credit(Con0, Len)}, + State1 = update_or_remove_con(Meta, ConsumerKey, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - Len}. + +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = once}, + credit = Credit}, _) -> %% once consumers cannot increment credit Credit; -increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = auto, - credit_mode = credited}, - credit = Credit}, _) -> +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = credited}, + credit = Credit}, _) -> %% credit_mode: `credited' also doesn't automatically increment credit Credit; -increase_credit(#{machine_version := MachineVersion}, - #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, +increase_credit(#consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = {credited, _}}, + credit = Credit}, _) -> + %% credit_mode: `credited' also doesn't automatically increment credit + Credit; +increase_credit(#consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredit}}, credit = Current}, Credit) - when MachineVersion >= 3 andalso MaxCredit > 0 -> + when MaxCredit > 0 -> min(MaxCredit, 
Current + Credit); -increase_credit(_Meta, #consumer{credit = Current}, Credit) -> +increase_credit(#consumer{credit = Current}, Credit) -> Current + Credit. -complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, +complete_and_checkout(#{} = Meta, MsgIds, ConsumerKey, #consumer{} = Con0, Effects0, State0) -> - State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), - {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), - update_smallest_raft_index(IncomingRaftIdx, State, Effects). + State1 = complete(Meta, ConsumerKey, MsgIds, Con0, State0), + %% a completion could have removed the active/quiescing consumer + {State2, Effects1} = activate_next_consumer(State1, Effects0), + checkout(Meta, State0, State2, Effects1). cancel_consumer_effects(ConsumerId, - #?MODULE{cfg = #cfg{resource = QName}} = _State, - Effects) -> + #?STATE{cfg = #cfg{resource = QName}}, + Effects) when is_tuple(ConsumerId) -> [{mod_call, rabbit_quorum_queue, cancel_consumer_handler, [QName, ConsumerId]} | Effects]. -update_smallest_raft_index(Idx, State, Effects) -> - update_smallest_raft_index(Idx, ok, State, Effects). 
- -update_smallest_raft_index(IncomingRaftIdx, Reply, - #?MODULE{cfg = Cfg, - release_cursors = Cursors0} = State0, - Effects) -> - Total = messages_total(State0), - %% TODO: optimise - case smallest_raft_index(State0) of - undefined when Total == 0 -> - % there are no messages on queue anymore and no pending enqueues - % we can forward release_cursor all the way until - % the last received command, hooray - %% reset the release cursor interval - #cfg{release_cursor_interval = {Base, _}} = Cfg, - RCI = {Base, Base}, - State = State0#?MODULE{cfg = Cfg#cfg{release_cursor_interval = RCI}, - release_cursors = lqueue:new(), - enqueue_count = 0}, - {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; - undefined -> - {State0, Reply, Effects}; - Smallest when is_integer(Smallest) -> - case find_next_cursor(Smallest, Cursors0) of - empty -> - {State0, Reply, Effects}; - {Cursor, Cursors} -> - %% we can emit a release cursor when we've passed the smallest - %% release cursor available. - {State0#?MODULE{release_cursors = Cursors}, Reply, - Effects ++ [Cursor]} - end - end. - -find_next_cursor(Idx, Cursors) -> - find_next_cursor(Idx, Cursors, empty). - -find_next_cursor(Smallest, Cursors0, Potential) -> - case lqueue:out(Cursors0) of - {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> - %% we found one but it may not be the largest one - find_next_cursor(Smallest, Cursors, Cursor); - _ when Potential == empty -> - empty; - _ -> - {Potential, Cursors0} - end. - update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> ?MSG(Idx, update_header(Key, Fun, Def, Header)). 
@@ -1842,11 +1796,12 @@ update_header(Key, UpdateFun, Default, Size) when is_integer(Size) -> update_header(Key, UpdateFun, Default, #{size => Size}); update_header(Key, UpdateFun, Default, ?TUPLE(Size, Expiry)) - when is_integer(Size), is_integer(Expiry) -> + when is_integer(Size) andalso + is_integer(Expiry) -> update_header(Key, UpdateFun, Default, #{size => Size, expiry => Expiry}); update_header(Key, UpdateFun, Default, Header) - when is_map(Header), is_map_key(size, Header) -> + when is_map_key(size, Header) -> maps:update_with(Key, UpdateFun, Default, Header). get_msg_header(?MSG(_Idx, Header)) -> @@ -1871,90 +1826,111 @@ get_header(Key, Header) when is_map(Header) andalso is_map_key(size, Header) -> maps:get(Key, Header, undefined). -return_one(#{machine_version := MachineVersion} = Meta, - MsgId, Msg0, - #?MODULE{returns = Returns, - consumers = Consumers, - dlx = DlxState0, - cfg = #cfg{delivery_limit = DeliveryLimit, - dead_letter_handler = DLH}} = State0, - Effects0, ConsumerId) -> - #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerId, Consumers), - Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), +annotate_msg(Header, Msg0) -> + case mc:is(Msg0) of + true when is_map(Header) -> + Msg = maps:fold(fun (K, V, Acc) -> + mc:set_annotation(K, V, Acc) + end, Msg0, maps:get(anns, Header, #{})), + case Header of + #{delivery_count := DelCount} -> + mc:set_annotation(delivery_count, DelCount, Msg); + _ -> + Msg + end; + _ -> + Msg0 + end. 
+ +return_one(Meta, MsgId, ?MSG(_, _) = Msg0, DelivFailed, Anns, + #?STATE{returns = Returns, + consumers = Consumers, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, + Effects0, ConsumerKey) -> + #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerKey, Consumers), + Msg = incr_msg(Msg0, DelivFailed, Anns), Header = get_msg_header(Msg), - case get_header(delivery_count, Header) of - DeliveryCount when DeliveryCount > DeliveryLimit -> - {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), - State1 = State0#?MODULE{dlx = DlxState}, - State = complete(Meta, ConsumerId, [MsgId], Con0, State1), + case get_header(acquired_count, Header) of + AcquiredCount when AcquiredCount > DeliveryLimit -> + {DlxState, DlxEffects} = + rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + State = complete(Meta, ConsumerKey, [MsgId], Con0, State1), {State, DlxEffects ++ Effects0}; _ -> Checked = maps:remove(MsgId, Checked0), - Con = case MachineVersion of - V when V >= 3 -> - Con0#consumer{checked_out = Checked, - credit = increase_credit(Meta, Con0, 1)}; - 2 -> - Con0#consumer{checked_out = Checked} - end, + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Con0, 1)}, {add_bytes_return( Header, - State0#?MODULE{consumers = Consumers#{ConsumerId => Con}, - returns = lqueue:in(Msg, Returns)}), + State0#?STATE{consumers = Consumers#{ConsumerKey => Con}, + returns = lqueue:in(Msg, Returns)}), Effects0} end. 
-return_all(Meta, #?MODULE{consumers = Cons} = State0, Effects0, ConsumerId, - #consumer{checked_out = Checked} = Con) -> - State = State0#?MODULE{consumers = Cons#{ConsumerId => Con}}, +return_all(Meta, #?STATE{consumers = Cons} = State0, Effects0, ConsumerKey, + #consumer{checked_out = Checked} = Con, DelivFailed) -> + State = State0#?STATE{consumers = Cons#{ConsumerKey => Con}}, lists:foldl(fun ({MsgId, Msg}, {S, E}) -> - return_one(Meta, MsgId, Msg, S, E, ConsumerId) + return_one(Meta, MsgId, Msg, DelivFailed, #{}, + S, E, ConsumerKey) end, {State, Effects0}, lists:sort(maps:to_list(Checked))). checkout(Meta, OldState, State0, Effects0) -> checkout(Meta, OldState, State0, Effects0, ok). checkout(#{index := Index} = Meta, - #?MODULE{cfg = #cfg{resource = _QName}} = OldState, + #?STATE{} = OldState, State0, Effects0, Reply) -> - {#?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0} = State1, ExpiredMsg, Effects1} = + {#?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State1, _ExpiredMsg, Effects1} = checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), %% TODO: only update dlx state if it has changed? - State2 = State1#?MODULE{msg_cache = undefined, %% by this time the cache should be used - dlx = DlxState}, + %% by this time the cache should be used + State2 = State1#?STATE{msg_cache = undefined, + dlx = DlxState}, Effects2 = DlxDeliveryEffects ++ Effects1, case evaluate_limit(Index, false, OldState, State2, Effects2) of - {State, false, Effects} when ExpiredMsg == false -> - {State, Reply, Effects}; {State, _, Effects} -> - update_smallest_raft_index(Index, Reply, State, Effects) + {State, Reply, Effects} end. 
-checkout0(Meta, {success, ConsumerId, MsgId, - ?MSG(_RaftIdx, _Header) = Msg, ExpiredMsg, State, Effects}, +checkout0(Meta, {success, ConsumerKey, MsgId, + ?MSG(_, _) = Msg, ExpiredMsg, State, Effects}, SendAcc0) -> DelMsg = {MsgId, Msg}, - SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of + SendAcc = case maps:get(ConsumerKey, SendAcc0, undefined) of undefined -> - SendAcc0#{ConsumerId => [DelMsg]}; + SendAcc0#{ConsumerKey => [DelMsg]}; LogMsgs -> - SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} + SendAcc0#{ConsumerKey => [DelMsg | LogMsgs]} end, checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> Effects = add_delivery_effects(Effects0, SendAcc, State0), {State0, ExpiredMsg, lists:reverse(Effects)}. -evaluate_limit(_Index, Result, _BeforeState, - #?MODULE{cfg = #cfg{max_length = undefined, - max_bytes = undefined}} = State, +evaluate_limit(_Index, Result, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}} = State, Effects) -> {State, Result, Effects}; +evaluate_limit(_Index, Result, _BeforeState, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}, + enqueuers = Enqs0} = State0, + Effects0) -> + %% max_length and/or max_bytes policies have just been deleted + {Enqs, Effects} = unblock_enqueuers(Enqs0, Effects0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; evaluate_limit(Index, Result, BeforeState, - #?MODULE{cfg = #cfg{overflow_strategy = Strategy}, - enqueuers = Enqs0} = State0, + #?STATE{cfg = #cfg{overflow_strategy = Strategy}, + enqueuers = Enqs0} = State0, Effects0) -> case is_over_limit(State0) of true when Strategy == drop_head -> @@ -1965,7 +1941,7 @@ evaluate_limit(Index, Result, BeforeState, %% they need to block {Enqs, Effects} = maps:fold( - fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) -> + fun (P, #enqueuer{blocked = 
undefined} = E0, {Enqs, Acc}) -> E = E0#enqueuer{blocked = Index}, {Enqs#{P => E}, [{send_msg, P, {queue_status, reject_publish}, @@ -1973,7 +1949,7 @@ evaluate_limit(Index, Result, BeforeState, (_P, _E, Acc) -> Acc end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; false when Strategy == reject_publish -> %% TODO: optimise as this case gets called for every command %% pretty much @@ -1981,17 +1957,8 @@ evaluate_limit(Index, Result, BeforeState, case {Before, is_below_soft_limit(State0)} of {false, true} -> %% we have moved below the lower limit - {Enqs, Effects} = - maps:fold( - fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> - E = E0#enqueuer{blocked = undefined}, - {Enqs#{P => E}, - [{send_msg, P, {queue_status, go}, [ra_event]} - | Acc]}; - (_P, _E, Acc) -> - Acc - end, {Enqs0, Effects0}, Enqs0), - {State0#?MODULE{enqueuers = Enqs}, Result, Effects}; + {Enqs, Effects} = unblock_enqueuers(Enqs0, Effects0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; _ -> {State0, Result, Effects0} end; @@ -1999,6 +1966,16 @@ evaluate_limit(Index, Result, BeforeState, {State0, Result, Effects0} end. +unblock_enqueuers(Enqs0, Effects0) -> + maps:fold( + fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = undefined}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, go}, [ra_event]} + | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0). %% [6,5,4,3,2,1] -> [[1,2],[3,4],[5,6]] chunk_disk_msgs([], _Bytes, [[] | Chunks]) -> @@ -2028,39 +2005,41 @@ add_delivery_effects(Effects0, AccMap, State) -> end, Efs, chunk_disk_msgs(DiskMsgs, 0, [[]])) end, Effects0, AccMap). 
-take_next_msg(#?MODULE{returns = Returns0, - messages = Messages0, - ra_indexes = Indexes0 - } = State) -> +take_next_msg(#?STATE{returns = Returns0, + messages = Messages0, + ra_indexes = Indexes0 + } = State) -> case lqueue:out(Returns0) of {{value, NextMsg}, Returns} -> - {NextMsg, State#?MODULE{returns = Returns}}; + {NextMsg, State#?STATE{returns = Returns}}; {empty, _} -> - case lqueue:out(Messages0) of - {empty, _} -> + case rabbit_fifo_q:out(Messages0) of + empty -> empty; - {{value, ?MSG(RaftIdx, _) = Msg}, Messages} -> + {?MSG(RaftIdx, _) = Msg, Messages} -> %% add index here Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), - {Msg, State#?MODULE{messages = Messages, - ra_indexes = Indexes}} + {Msg, State#?STATE{messages = Messages, + ra_indexes = Indexes}} end end. -get_next_msg(#?MODULE{returns = Returns0, - messages = Messages0}) -> +get_next_msg(#?STATE{returns = Returns0, + messages = Messages0}) -> case lqueue:get(Returns0, empty) of empty -> - lqueue:get(Messages0, empty); + rabbit_fifo_q:get(Messages0); Msg -> Msg end. -delivery_effect({CTag, CPid}, [{MsgId, ?MSG(Idx, Header)}], - #?MODULE{msg_cache = {Idx, RawMsg}}) -> +delivery_effect(ConsumerKey, [{MsgId, ?MSG(Idx, Header)}], + #?STATE{msg_cache = {Idx, RawMsg}} = State) -> + {CTag, CPid} = consumer_id(ConsumerKey, State), {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, ?DELIVERY_SEND_MSG_OPTS}; -delivery_effect({CTag, CPid}, Msgs, _State) -> +delivery_effect(ConsumerKey, Msgs, State) -> + {CTag, CPid} = consumer_id(ConsumerKey, State), RaftIdxs = lists:foldr(fun ({_, ?MSG(I, _)}, Acc) -> [I | Acc] end, [], Msgs), @@ -2070,7 +2049,8 @@ delivery_effect({CTag, CPid}, Msgs, _State) -> fun (Cmd, {MsgId, ?MSG(_Idx, Header)}) -> {MsgId, {Header, get_msg(Cmd)}} end, Log, Msgs), - [{send_msg, CPid, {delivery, CTag, DelMsgs}, ?DELIVERY_SEND_MSG_OPTS}] + [{send_msg, CPid, {delivery, CTag, DelMsgs}, + ?DELIVERY_SEND_MSG_OPTS}] end, {local, node(CPid)}}. 
@@ -2084,38 +2064,39 @@ reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> %% Before checking out any messsage to any consumer, %% first remove all expired messages from the head of the queue. - {ExpiredMsg, #?MODULE{service_queue = SQ0, - messages = Messages0, - msg_bytes_checkout = BytesCheckout, - msg_bytes_enqueue = BytesEnqueue, - consumers = Cons0} = InitState, Effects1} = + {ExpiredMsg, #?STATE{service_queue = SQ0, + messages = Messages0, + msg_bytes_checkout = BytesCheckout, + msg_bytes_enqueue = BytesEnqueue, + consumers = Cons0} = InitState, Effects1} = expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), case priority_queue:out(SQ0) of - {{value, ConsumerId}, SQ1} - when is_map_key(ConsumerId, Cons0) -> + {{value, ConsumerKey}, SQ1} + when is_map_key(ConsumerKey, Cons0) -> case take_next_msg(InitState) of - {ConsumerMsg, State0} -> + {Msg, State0} -> %% there are consumers waiting to be serviced %% process consumer checkout - case maps:get(ConsumerId, Cons0) of - #consumer{credit = 0} -> - %% no credit but was still on queue - %% can happen when draining - %% recurse without consumer on queue + case maps:get(ConsumerKey, Cons0) of + #consumer{credit = Credit, + status = Status} + when Credit =:= 0 orelse + Status =/= up -> + %% not an active consumer but still in the consumers + %% map - this can happen when draining + %% or when higher priority single active consumers + %% take over, recurse without consumer in service + %% queue checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); - #consumer{status = S} - when S =:= cancelled orelse - S =:= suspected_down -> - checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); + InitState#?STATE{service_queue = SQ1}, + Effects1); #consumer{checked_out = Checked0, next_msg_id = Next, credit = Credit, delivery_count = DelCnt0, cfg = Cfg} = Con0 -> - Checked = maps:put(Next, 
ConsumerMsg, Checked0), + Checked = maps:put(Next, Msg, Checked0), DelCnt = case credit_api_v2(Cfg) of true -> add(DelCnt0, 1); false -> DelCnt0 + 1 @@ -2124,24 +2105,25 @@ checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> next_msg_id = Next + 1, credit = Credit - 1, delivery_count = DelCnt}, - Size = get_header(size, get_msg_header(ConsumerMsg)), - State = update_or_remove_sub( - Meta, ConsumerId, Con, - State0#?MODULE{service_queue = SQ1, - msg_bytes_checkout = BytesCheckout + Size, - msg_bytes_enqueue = BytesEnqueue - Size}), - {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, + Size = get_header(size, get_msg_header(Msg)), + State1 = + State0#?STATE{service_queue = SQ1, + msg_bytes_checkout = BytesCheckout + Size, + msg_bytes_enqueue = BytesEnqueue - Size}, + State = update_or_remove_con( + Meta, ConsumerKey, Con, State1), + {success, ConsumerKey, Next, Msg, ExpiredMsg, State, Effects1} end; empty -> {nochange, ExpiredMsg, InitState, Effects1} end; {{value, _ConsumerId}, SQ1} -> - %% consumer did not exist but was queued, recurse + %% consumer was not active but was queued, recurse checkout_one(Meta, ExpiredMsg, - InitState#?MODULE{service_queue = SQ1}, Effects1); + InitState#?STATE{service_queue = SQ1}, Effects1); {empty, _} -> - case lqueue:len(Messages0) of + case rabbit_fifo_q:len(Messages0) of 0 -> {nochange, ExpiredMsg, InitState, Effects1}; _ -> @@ -2167,25 +2149,30 @@ expire_msgs(RaCmdTs, Result, State, Effects) -> expire(RaCmdTs, State0, Effects) -> {?MSG(Idx, Header) = Msg, - #?MODULE{cfg = #cfg{dead_letter_handler = DLH}, - dlx = DlxState0, - ra_indexes = Indexes0, - messages_total = Tot, - msg_bytes_enqueue = MsgBytesEnqueue} = State1} = take_next_msg(State0), - {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0, + messages_total = Tot, + msg_bytes_enqueue = MsgBytesEnqueue} = State1} = + 
take_next_msg(State0), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, + DLH, DlxState0), Indexes = rabbit_fifo_index:delete(Idx, Indexes0), - State = State1#?MODULE{dlx = DlxState, - ra_indexes = Indexes, - messages_total = Tot - 1, - msg_bytes_enqueue = MsgBytesEnqueue - get_header(size, Header)}, + State = State1#?STATE{dlx = DlxState, + ra_indexes = Indexes, + messages_total = Tot - 1, + msg_bytes_enqueue = + MsgBytesEnqueue - get_header(size, Header)}, expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). timer_effect(RaCmdTs, State, Effects) -> T = case get_next_msg(State) of ?MSG(_, ?TUPLE(Size, Expiry)) - when is_integer(Size), is_integer(Expiry) -> + when is_integer(Size) andalso + is_integer(Expiry) -> %% Next message contains 'expiry' header. - %% (Re)set timer so that mesage will be dropped or dead-lettered on time. + %% (Re)set timer so that message will be dropped or + %% dead-lettered on time. max(0, Expiry - RaCmdTs); ?MSG(_, #{expiry := Expiry}) when is_integer(Expiry) -> @@ -2197,31 +2184,42 @@ timer_effect(RaCmdTs, State, Effects) -> end, [{timer, expire_msgs, T} | Effects]. 
-update_or_remove_sub(Meta, ConsumerId, +update_or_remove_con(Meta, ConsumerKey, #consumer{cfg = #consumer_cfg{lifetime = once}, checked_out = Checked, credit = 0} = Con, - #?MODULE{consumers = Cons} = State) -> + #?STATE{consumers = Cons} = State) -> case map_size(Checked) of 0 -> #{system_time := Ts} = Meta, % we're done with this consumer - State#?MODULE{consumers = maps:remove(ConsumerId, Cons), - last_active = Ts}; + State#?STATE{consumers = maps:remove(ConsumerKey, Cons), + last_active = Ts}; _ -> % there are unsettled items so need to keep around - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons)} + State#?STATE{consumers = maps:put(ConsumerKey, Con, Cons)} end; -update_or_remove_sub(_Meta, ConsumerId, - #consumer{cfg = #consumer_cfg{}} = Con, - #?MODULE{consumers = Cons, - service_queue = ServiceQueue} = State) -> - State#?MODULE{consumers = maps:put(ConsumerId, Con, Cons), - service_queue = maybe_queue_consumer(ConsumerId, Con, ServiceQueue)}. +update_or_remove_con(_Meta, ConsumerKey, + #consumer{status = quiescing, + checked_out = Checked} = Con0, + #?STATE{consumers = Cons, + waiting_consumers = Waiting} = State) + when map_size(Checked) == 0 -> + Con = Con0#consumer{status = up}, + State#?STATE{consumers = maps:remove(ConsumerKey, Cons), + waiting_consumers = add_waiting({ConsumerKey, Con}, Waiting)}; +update_or_remove_con(_Meta, ConsumerKey, + #consumer{} = Con, + #?STATE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?STATE{consumers = maps:put(ConsumerKey, Con, Cons), + service_queue = maybe_queue_consumer(ConsumerKey, Con, + ServiceQueue)}. 
maybe_queue_consumer(Key, #consumer{credit = Credit, status = up, - cfg = #consumer_cfg{priority = P}}, ServiceQueue) + cfg = #consumer_cfg{priority = P}}, + ServiceQueue) when Credit > 0 -> % TODO: queue:member could surely be quite expensive, however the practical % number of unique consumers may not be large enough for it to matter @@ -2234,15 +2232,17 @@ maybe_queue_consumer(Key, #consumer{credit = Credit, maybe_queue_consumer(_Key, _Consumer, ServiceQueue) -> ServiceQueue. -update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, - {Life, Credit, Mode0} = Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = competing}, - consumers = Cons0} = State0) -> +update_consumer(Meta, ConsumerKey, {Tag, Pid}, ConsumerMeta, + {Life, Mode} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = competing}, + consumers = Cons0} = State0) -> Consumer = case Cons0 of - #{ConsumerId := #consumer{} = Consumer0} -> - merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority); + #{ConsumerKey := #consumer{} = Consumer0} -> + merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority); _ -> - Mode = credit_mode(Meta, Credit, Mode0), + Credit = included_credit(Mode), + DeliveryCount = initial_delivery_count(Mode), #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, lifetime = Life, @@ -2250,34 +2250,35 @@ update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, priority = Priority, credit_mode = Mode}, credit = Credit, - delivery_count = initial_delivery_count(ConsumerMeta)} + delivery_count = DeliveryCount} end, - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; -update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, - {Life, Credit, Mode0} = Spec, Priority, - #?MODULE{cfg = #cfg{consumer_strategy = single_active}, - consumers = Cons0, - waiting_consumers = Waiting, - service_queue = _ServiceQueue0} = State0) -> + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State0)}; +update_consumer(Meta, ConsumerKey, {Tag, Pid}, 
ConsumerMeta, + {Life, Mode} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Cons0, + waiting_consumers = Waiting0, + service_queue = _ServiceQueue0} = State) -> %% if it is the current active consumer, just update %% if it is a cancelled active consumer, add to waiting unless it is the only %% one, then merge case active_consumer(Cons0) of - {ConsumerId, #consumer{status = up} = Consumer0} -> + {ConsumerKey, #consumer{status = up} = Consumer0} -> Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority), - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; - undefined when is_map_key(ConsumerId, Cons0) -> + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State)}; + undefined when is_map_key(ConsumerKey, Cons0) -> %% there is no active consumer and the current consumer is in the %% consumers map and thus must be cancelled, in this case we can just %% merge and effectively make this the current active one - Consumer0 = maps:get(ConsumerId, Cons0), + Consumer0 = maps:get(ConsumerKey, Cons0), Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority), - {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + {Consumer, update_or_remove_con(Meta, ConsumerKey, Consumer, State)}; _ -> %% add as a new waiting consumer - Mode = credit_mode(Meta, Credit, Mode0), + Credit = included_credit(Mode), + DeliveryCount = initial_delivery_count(Mode), Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, pid = Pid, lifetime = Life, @@ -2285,17 +2286,43 @@ update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, priority = Priority, credit_mode = Mode}, credit = Credit, - delivery_count = initial_delivery_count(ConsumerMeta)}, - {Consumer, - State0#?MODULE{waiting_consumers = - Waiting ++ [{ConsumerId, Consumer}]}} + delivery_count = DeliveryCount}, + Waiting = add_waiting({ConsumerKey, Consumer}, Waiting0), + {Consumer, State#?STATE{waiting_consumers = Waiting}} 
end. -merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, - ConsumerMeta, {Life, Credit, Mode0}, Priority) -> +add_waiting({Key, _} = New, Waiting) -> + sort_waiting(lists:keystore(Key, 1, Waiting, New)). + +sort_waiting(Waiting) -> + lists:sort(fun + ({_, ?CONSUMER_PRIORITY(P1) = #consumer{status = up}}, + {_, ?CONSUMER_PRIORITY(P2) = #consumer{status = up}}) + when P1 =/= P2 -> + P2 =< P1; + ({C1, #consumer{status = up, + credit = Cr1}}, + {C2, #consumer{status = up, + credit = Cr2}}) -> + %% both are up, priority the same + if Cr1 == Cr2 -> + %% same credit + %% sort by key, first attached priority + C1 =< C2; + true -> + %% else sort by credit + Cr2 =< Cr1 + end; + (_, {_, #consumer{status = Status}}) -> + %% not up + Status /= up + end, Waiting). + +merge_consumer(_Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, + ConsumerMeta, {Life, Mode}, Priority) -> + Credit = included_credit(Mode), NumChecked = map_size(Checked), NewCredit = max(0, Credit - NumChecked), - Mode = credit_mode(Meta, Credit, Mode0), Consumer#consumer{cfg = CCfg#consumer_cfg{priority = Priority, meta = ConsumerMeta, credit_mode = Mode, @@ -2303,39 +2330,138 @@ merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, status = up, credit = NewCredit}. -credit_mode(#{machine_version := Vsn}, Credit, simple_prefetch) - when Vsn >= 3 -> - {simple_prefetch, Credit}; -credit_mode(_, _, Mode) -> - Mode. - -%% creates a dehydrated version of the current state to be cached and -%% potentially used to for a snaphot at a later point -dehydrate_state(#?MODULE{cfg = #cfg{}, - dlx = DlxState} = State) -> - % no messages are kept in memory, no need to - % overly mutate the current state apart from removing indexes and cursors - State#?MODULE{ra_indexes = rabbit_fifo_index:empty(), - release_cursors = lqueue:new(), - enqueue_count = 0, - msg_cache = undefined, - dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
- -%% make the state suitable for equality comparison -normalize(#?MODULE{ra_indexes = _Indexes, - returns = Returns, - messages = Messages, - release_cursors = Cursors, - dlx = DlxState} = State) -> - State#?MODULE{returns = lqueue:from_list(lqueue:to_list(Returns)), - messages = lqueue:from_list(lqueue:to_list(Messages)), - release_cursors = lqueue:from_list(lqueue:to_list(Cursors)), - dlx = rabbit_fifo_dlx:normalize(DlxState)}. - -is_over_limit(#?MODULE{cfg = #cfg{max_length = undefined, +included_credit({simple_prefetch, Credit}) -> + Credit; +included_credit({credited, _}) -> + 0; +included_credit(credited) -> + 0. + +credit_active_consumer( + #credit{credit = LinkCreditRcv, + delivery_count = DeliveryCountRcv, + drain = Drain, + consumer_key = ConsumerKey}, + #consumer{delivery_count = DeliveryCountSnd, + cfg = Cfg} = Con0, + Meta, + #?STATE{consumers = Cons0, + service_queue = ServiceQueue0} = State0) -> + LinkCreditSnd = link_credit_snd(DeliveryCountRcv, LinkCreditRcv, + DeliveryCountSnd, Cfg), + %% grant the credit + Con1 = Con0#consumer{credit = LinkCreditSnd}, + ServiceQueue = maybe_queue_consumer(ConsumerKey, Con1, ServiceQueue0), + State1 = State0#?STATE{service_queue = ServiceQueue, + consumers = maps:update(ConsumerKey, Con1, Cons0)}, + {State2, ok, Effects} = checkout(Meta, State0, State1, []), + + #?STATE{consumers = Cons1 = #{ConsumerKey := Con2}} = State2, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag}, + credit = PostCred, + delivery_count = PostDeliveryCount} = Con2, + Available = messages_ready(State2), + case credit_api_v2(Cfg) of + true -> + {Credit, DeliveryCount, State} = + case Drain andalso PostCred > 0 of + true -> + AdvancedDeliveryCount = add(PostDeliveryCount, PostCred), + ZeroCredit = 0, + Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, + credit = ZeroCredit}, + Cons = maps:update(ConsumerKey, Con, Cons1), + State3 = State2#?STATE{consumers = Cons}, + {ZeroCredit, AdvancedDeliveryCount, State3}; + false -> + 
{PostCred, PostDeliveryCount, State2} + end, + %% We must send the delivery effects to the queue client + %% before credit_reply such that session process can send to + %% AMQP 1.0 client TRANSFERs before FLOW. + {State, ok, Effects ++ [{send_msg, CPid, + {credit_reply, CTag, DeliveryCount, + Credit, Available, Drain}, + ?DELIVERY_SEND_MSG_OPTS}]}; + false -> + %% We must always send a send_credit_reply because basic.credit + %% is synchronous. + %% Additionally, we keep the bug of credit API v1 that we + %% send to queue client the + %% send_drained reply before the delivery effects (resulting + %% in the wrong behaviour that the session process sends to + %% AMQP 1.0 client the FLOW before the TRANSFERs). + %% We have to keep this bug because old rabbit_fifo_client + %% implementations expect a send_drained Ra reply + %% (they can't handle such a Ra effect). + CreditReply = {send_credit_reply, Available}, + case Drain of + true -> + AdvancedDeliveryCount = PostDeliveryCount + PostCred, + Con = Con2#consumer{delivery_count = AdvancedDeliveryCount, + credit = 0}, + Cons = maps:update(ConsumerKey, Con, Cons1), + State = State2#?STATE{consumers = Cons}, + Reply = {multi, [CreditReply, + {send_drained, {CTag, PostCred}}]}, + {State, Reply, Effects}; + false -> + {State2, CreditReply, Effects} + end + end. + +credit_inactive_consumer( + #credit{credit = LinkCreditRcv, + delivery_count = DeliveryCountRcv, + drain = Drain, + consumer_key = ConsumerKey}, + #consumer{cfg = #consumer_cfg{pid = CPid, + tag = CTag} = Cfg, + delivery_count = DeliveryCountSnd} = Con0, + Waiting0, State0) -> + %% No messages are available for inactive consumers. + Available = 0, + LinkCreditSnd = link_credit_snd(DeliveryCountRcv, + LinkCreditRcv, + DeliveryCountSnd, + Cfg), + case credit_api_v2(Cfg) of + true -> + {Credit, DeliveryCount} = + case Drain of + true -> + %% By issuing drain=true, the client says "either send a transfer or a flow frame". 
+ %% Since there are no messages to send to an inactive consumer, we advance the + %% delivery-count consuming all link-credit and send a credit_reply with drain=true + %% to the session which causes the session to send a flow frame to the client. + AdvancedDeliveryCount = add(DeliveryCountSnd, LinkCreditSnd), + {0, AdvancedDeliveryCount}; + false -> + {LinkCreditSnd, DeliveryCountSnd} + end, + %% Grant the credit. + Con = Con0#consumer{credit = Credit, + delivery_count = DeliveryCount}, + Waiting = add_waiting({ConsumerKey, Con}, Waiting0), + State = State0#?STATE{waiting_consumers = Waiting}, + {State, ok, + {send_msg, CPid, + {credit_reply, CTag, DeliveryCount, Credit, Available, Drain}, + ?DELIVERY_SEND_MSG_OPTS}}; + false -> + %% Credit API v1 doesn't support draining an inactive consumer. + %% Grant the credit. + Con = Con0#consumer{credit = LinkCreditSnd}, + Waiting = add_waiting({ConsumerKey, Con}, Waiting0), + State = State0#?STATE{waiting_consumers = Waiting}, + {State, {send_credit_reply, Available}} + end. + +is_over_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq, dlx = DlxState} = State) -> @@ -2343,10 +2469,10 @@ is_over_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, (messages_ready(State) + NumDlx > MaxLength) orelse (BytesEnq + BytesDlx > MaxBytes). 
-is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = undefined, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = undefined, max_bytes = undefined}}) -> false; -is_below_soft_limit(#?MODULE{cfg = #cfg{max_length = MaxLength, +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = MaxLength, max_bytes = MaxBytes}, msg_bytes_enqueue = BytesEnq, dlx = DlxState} = State) -> @@ -2359,40 +2485,82 @@ is_below(undefined, _Num) -> is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) -> Num =< trunc(Val * ?LOW_LIMIT). --spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol(). +-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> + protocol(). make_enqueue(Pid, Seq, Msg) -> - #enqueue{pid = Pid, seq = Seq, msg = Msg}. + case is_v4() of + true when is_pid(Pid) andalso + is_integer(Seq) -> + %% more compact format + #?ENQ_V2{seq = Seq, + msg = Msg, + size = ?SIZE(Msg)}; + _ -> + #enqueue{pid = Pid, seq = Seq, msg = Msg} + end. -spec make_register_enqueuer(pid()) -> protocol(). make_register_enqueuer(Pid) -> #register_enqueuer{pid = Pid}. --spec make_checkout(consumer_id(), - checkout_spec(), consumer_meta()) -> protocol(). -make_checkout({_, _} = ConsumerId, Spec, Meta) -> +-spec make_checkout(consumer_id(), checkout_spec(), consumer_meta()) -> + protocol(). +make_checkout({_, _} = ConsumerId, Spec0, Meta) -> + Spec = case is_v4() of + false when Spec0 == remove -> + %% if v4 is not active, fall back to cancel spec + cancel; + _ -> + Spec0 + end, #checkout{consumer_id = ConsumerId, spec = Spec, meta = Meta}. --spec make_settle(consumer_id(), [msg_id()]) -> protocol(). -make_settle(ConsumerId, MsgIds) when is_list(MsgIds) -> - #settle{consumer_id = ConsumerId, msg_ids = MsgIds}. +-spec make_settle(consumer_key(), [msg_id()]) -> protocol(). +make_settle(ConsumerKey, MsgIds) when is_list(MsgIds) -> + #settle{consumer_key = ConsumerKey, msg_ids = MsgIds}. -spec make_return(consumer_id(), [msg_id()]) -> protocol(). 
-make_return(ConsumerId, MsgIds) -> - #return{consumer_id = ConsumerId, msg_ids = MsgIds}. +make_return(ConsumerKey, MsgIds) -> + #return{consumer_key = ConsumerKey, msg_ids = MsgIds}. + +-spec is_return(protocol()) -> boolean(). +is_return(Command) -> + is_record(Command, return). -spec make_discard(consumer_id(), [msg_id()]) -> protocol(). -make_discard(ConsumerId, MsgIds) -> - #discard{consumer_id = ConsumerId, msg_ids = MsgIds}. +make_discard(ConsumerKey, MsgIds) -> + #discard{consumer_key = ConsumerKey, msg_ids = MsgIds}. --spec make_credit(consumer_id(), rabbit_queue_type:credit(), +-spec make_credit(consumer_key(), rabbit_queue_type:credit(), non_neg_integer(), boolean()) -> protocol(). -make_credit(ConsumerId, Credit, DeliveryCount, Drain) -> - #credit{consumer_id = ConsumerId, +make_credit(Key, Credit, DeliveryCount, Drain) -> + #credit{consumer_key = Key, credit = Credit, delivery_count = DeliveryCount, drain = Drain}. +-spec make_modify(consumer_key(), [msg_id()], + boolean(), boolean(), mc:annotations()) -> protocol(). +make_modify(ConsumerKey, MsgIds, DeliveryFailed, UndeliverableHere, Anns) + when is_list(MsgIds) andalso + is_boolean(DeliveryFailed) andalso + is_boolean(UndeliverableHere) andalso + is_map(Anns) -> + case is_v4() of + true -> + #modify{consumer_key = ConsumerKey, + msg_ids = MsgIds, + delivery_failed = DeliveryFailed, + undeliverable_here = UndeliverableHere, + annotations = Anns}; + false when UndeliverableHere -> + make_discard(ConsumerKey, MsgIds); + false -> + make_return(ConsumerKey, MsgIds) + end. + + -spec make_purge() -> protocol(). make_purge() -> #purge{}. @@ -2408,52 +2576,47 @@ make_update_config(Config) -> #update_config{config = Config}. add_bytes_drop(Header, - #?MODULE{msg_bytes_enqueue = Enqueue} = State) -> + #?STATE{msg_bytes_enqueue = Enqueue} = State) -> Size = get_header(size, Header), - State#?MODULE{msg_bytes_enqueue = Enqueue - Size}. + State#?STATE{msg_bytes_enqueue = Enqueue - Size}. 
add_bytes_return(Header, - #?MODULE{msg_bytes_checkout = Checkout, + #?STATE{msg_bytes_checkout = Checkout, msg_bytes_enqueue = Enqueue} = State) -> Size = get_header(size, Header), - State#?MODULE{msg_bytes_checkout = Checkout - Size, - msg_bytes_enqueue = Enqueue + Size}. + State#?STATE{msg_bytes_checkout = Checkout - Size, + msg_bytes_enqueue = Enqueue + Size}. -message_size(#basic_message{content = Content}) -> - #content{payload_fragments_rev = PFR} = Content, - iolist_size(PFR); message_size(B) when is_binary(B) -> byte_size(B); message_size(Msg) -> case mc:is(Msg) of true -> - {_, PayloadSize} = mc:size(Msg), - PayloadSize; + mc:size(Msg); false -> %% probably only hit this for testing so ok to use erts_debug - erts_debug:size(Msg) + {0, erts_debug:size(Msg)} end. - -all_nodes(#?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Nodes0 = maps:fold(fun({_, P}, _, Acc) -> +all_nodes(#?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Nodes0 = maps:fold(fun(_, ?CONSUMER_PID(P), Acc) -> Acc#{node(P) => ok} end, #{}, Cons0), Nodes1 = maps:fold(fun(P, _, Acc) -> Acc#{node(P) => ok} end, Nodes0, Enqs0), maps:keys( - lists:foldl(fun({{_, P}, _}, Acc) -> + lists:foldl(fun({_, ?CONSUMER_PID(P)}, Acc) -> Acc#{node(P) => ok} end, Nodes1, WaitingConsumers0)). 
-all_pids_for(Node, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Cons = maps:fold(fun({_, P}, _, Acc) +all_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun(_, ?CONSUMER_PID(P), Acc) when node(P) =:= Node -> [P | Acc]; (_, _, Acc) -> Acc @@ -2463,17 +2626,18 @@ all_pids_for(Node, #?MODULE{consumers = Cons0, [P | Acc]; (_, _, Acc) -> Acc end, Cons, Enqs0), - lists:foldl(fun({{_, P}, _}, Acc) + lists:foldl(fun({_, ?CONSUMER_PID(P)}, Acc) when node(P) =:= Node -> [P | Acc]; (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -suspected_pids_for(Node, #?MODULE{consumers = Cons0, - enqueuers = Enqs0, - waiting_consumers = WaitingConsumers0}) -> - Cons = maps:fold(fun({_, P}, - #consumer{status = suspected_down}, +suspected_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun(_Key, + #consumer{cfg = #consumer_cfg{pid = P}, + status = suspected_down}, Acc) when node(P) =:= Node -> [P | Acc]; @@ -2484,16 +2648,17 @@ suspected_pids_for(Node, #?MODULE{consumers = Cons0, [P | Acc]; (_, _, Acc) -> Acc end, Cons, Enqs0), - lists:foldl(fun({{_, P}, - #consumer{status = suspected_down}}, Acc) + lists:foldl(fun({_Key, + #consumer{cfg = #consumer_cfg{pid = P}, + status = suspected_down}}, Acc) when node(P) =:= Node -> [P | Acc]; (_, Acc) -> Acc end, Enqs, WaitingConsumers0). -is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, - last_active = LastActive, - consumers = Consumers}) +is_expired(Ts, #?STATE{cfg = #cfg{expires = Expires}, + last_active = LastActive, + consumers = Consumers}) when is_number(LastActive) andalso is_number(Expires) -> %% TODO: should it be active consumers? Active = maps:filter(fun (_, #consumer{status = suspected_down}) -> @@ -2506,13 +2671,17 @@ is_expired(Ts, #?MODULE{cfg = #cfg{expires = Expires}, is_expired(_Ts, _State) -> false. 
-get_priority_from_args(#{args := Args}) -> +get_priority(#{priority := Priority}) -> + Priority; +get_priority(#{args := Args}) -> + %% fallback, v3 option case rabbit_misc:table_lookup(Args, <<"x-priority">>) of - {_Key, Value} -> + {_Type, Value} -> Value; - _ -> 0 + _ -> + 0 end; -get_priority_from_args(_) -> +get_priority(_) -> 0. notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> @@ -2523,41 +2692,38 @@ notify_decorators_startup(QName) -> {mod_call, rabbit_quorum_queue, spawn_notify_decorators, [QName, startup, []]}. -convert(To, To, State) -> +convert(_Meta, To, To, State) -> State; -convert(0, To, State) -> - convert(1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); -convert(1, To, State) -> - convert(2, To, convert_v1_to_v2(State)); -convert(2, To, State) -> - convert(3, To, convert_v2_to_v3(State)). - -smallest_raft_index(#?MODULE{messages = Messages, - ra_indexes = Indexes, - dlx = DlxState}) -> +convert(Meta, 0, To, State) -> + convert(Meta, 1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); +convert(Meta, 1, To, State) -> + convert(Meta, 2, To, rabbit_fifo_v3:convert_v1_to_v2(State)); +convert(Meta, 2, To, State) -> + convert(Meta, 3, To, rabbit_fifo_v3:convert_v2_to_v3(State)); +convert(Meta, 3, To, State) -> + convert(Meta, 4, To, convert_v3_to_v4(Meta, State)). + +smallest_raft_index(#?STATE{messages = Messages, + ra_indexes = Indexes, + dlx = DlxState}) -> SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), - SmallestMsgsRaIdx = case lqueue:get(Messages, undefined) of - ?MSG(I, _) when is_integer(I) -> - I; - _ -> - undefined - end, + SmallestMsgsRaIdx = rabbit_fifo_q:get_lowest_index(Messages), SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). 
-make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> +make_requeue(ConsumerKey, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> lists:reverse([{append, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, index = Idx, header = Header, msg_id = MsgId, msg = Msg}, Notify} | Acc]); -make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> - make_requeue(ConsumerId, Notify, Rem, +make_requeue(ConsumerKey, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> + make_requeue(ConsumerKey, Notify, Rem, [{append, - #requeue{consumer_id = ConsumerId, + #requeue{consumer_key = ConsumerKey, index = Idx, header = Header, msg_id = MsgId, @@ -2567,8 +2733,8 @@ make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> make_requeue(_ConsumerId, _Notify, [], []) -> []. -can_immediately_deliver(#?MODULE{service_queue = SQ, - consumers = Consumers} = State) -> +can_immediately_deliver(#?STATE{service_queue = SQ, + consumers = Consumers} = State) -> case messages_ready(State) of 0 when map_size(Consumers) > 0 -> %% TODO: is is probably good enough but to be 100% we'd need to @@ -2581,24 +2747,24 @@ can_immediately_deliver(#?MODULE{service_queue = SQ, incr(I) -> I + 1. +get_msg(#?ENQ_V2{msg = M}) -> + M; get_msg(#enqueue{msg = M}) -> M; get_msg(#requeue{msg = M}) -> M. --spec initial_delivery_count(consumer_meta()) -> - rabbit_queue_type:delivery_count(). -initial_delivery_count(#{initial_delivery_count := Count}) -> +initial_delivery_count({credited, Count}) -> %% credit API v2 Count; initial_delivery_count(_) -> %% credit API v1 0. --spec credit_api_v2(#consumer_cfg{}) -> - boolean(). -credit_api_v2(#consumer_cfg{meta = ConsumerMeta}) -> - maps:is_key(initial_delivery_count, ConsumerMeta). +credit_api_v2(#consumer_cfg{credit_mode = {credited, _}}) -> + true; +credit_api_v2(_) -> + false. 
link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd, ConsumerCfg) -> case credit_api_v2(ConsumerCfg) of @@ -2609,3 +2775,188 @@ link_credit_snd(DeliveryCountRcv, LinkCreditRcv, DeliveryCountSnd, ConsumerCfg) %% C can be negative when receiver decreases credits while messages are in flight. max(0, C) end. + +consumer_id(#consumer{cfg = Cfg}) -> + {Cfg#consumer_cfg.tag, Cfg#consumer_cfg.pid}. + +consumer_id(Key, #?STATE{consumers = Consumers}) + when is_integer(Key) -> + consumer_id(maps:get(Key, Consumers)); +consumer_id({_, _} = ConsumerId, _State) -> + ConsumerId. + + +consumer_key_from_id(ConsumerId, #?STATE{consumers = Consumers}) + when is_map_key(ConsumerId, Consumers) -> + {ok, ConsumerId}; +consumer_key_from_id(ConsumerId, #?STATE{consumers = Consumers, + waiting_consumers = Waiting}) -> + case consumer_key_from_id(ConsumerId, maps:next(maps:iterator(Consumers))) of + {ok, _} = Res -> + Res; + error -> + %% scan the waiting consumers + case lists:search(fun ({_K, ?CONSUMER_TAG_PID(T, P)}) -> + {T, P} == ConsumerId + end, Waiting) of + {value, {K, _}} -> + {ok, K}; + false -> + error + end + end; +consumer_key_from_id({CTag, CPid}, {Key, ?CONSUMER_TAG_PID(T, P), _I}) + when T == CTag andalso P == CPid -> + {ok, Key}; +consumer_key_from_id(ConsumerId, {_, _, I}) -> + consumer_key_from_id(ConsumerId, maps:next(I)); +consumer_key_from_id(_ConsumerId, none) -> + error. + +consumer_cancel_info(ConsumerKey, #?STATE{consumers = Consumers}) -> + case Consumers of + #{ConsumerKey := #consumer{checked_out = Checked}} -> + #{key => ConsumerKey, + num_checked_out => map_size(Checked)}; + _ -> + #{} + end. 
+ +find_consumer(Key, Consumers) -> + case Consumers of + #{Key := Con} -> + {Key, Con}; + _ when is_tuple(Key) -> + %% sometimes rabbit_fifo_client may send a settle, return etc + %% by it's ConsumerId even if it was created with an integer key + %% as it may have lost it's state after a consumer cancel + maps_search(fun (_K, ?CONSUMER_TAG_PID(Tag, Pid)) -> + Key == {Tag, Pid} + end, Consumers); + _ -> + undefined + end. + +maps_search(_Pred, none) -> + undefined; +maps_search(Pred, {K, V, I}) -> + case Pred(K, V) of + true -> + {K, V}; + false -> + maps_search(Pred, maps:next(I)) + end; +maps_search(Pred, Map) when is_map(Map) -> + maps_search(Pred, maps:next(maps:iterator(Map))). + +priority_tag(Msg) -> + case mc:is(Msg) of + true -> + case mc:priority(Msg) of + P when is_integer(P) andalso + P > 4 -> + hi; + _ -> + no + end; + false -> + no + end. + + +do_checkpoints(Ts, + #checkpoint{index = ChIdx, + timestamp = ChTime, + smallest_index = LastSmallest, + indexes = MinIndexes} = Check0, RaAux, Force) -> + LastAppliedIdx = ra_aux:last_applied(RaAux), + IndexesSince = LastAppliedIdx - ChIdx, + #?STATE{} = MacState = ra_aux:machine_state(RaAux), + TimeSince = Ts - ChTime, + NewSmallest = case smallest_raft_index(MacState) of + undefined -> + LastAppliedIdx; + Smallest -> + Smallest + end, + MsgsTot = messages_total(MacState), + {CheckMinInterval, CheckMinIndexes, CheckMaxIndexes} = + persistent_term:get(quorum_queue_checkpoint_config, + {?CHECK_MIN_INTERVAL_MS, ?CHECK_MIN_INDEXES, + ?CHECK_MAX_INDEXES}), + EnoughTimeHasPassed = TimeSince > CheckMinInterval, + + %% enough time has passed and enough indexes have been committed + case (IndexesSince > MinIndexes andalso + EnoughTimeHasPassed) orelse + %% the queue is empty and some commands have been + %% applied since the last checkpoint + (MsgsTot == 0 andalso + IndexesSince > CheckMinIndexes andalso + EnoughTimeHasPassed) orelse + Force of + true -> + %% take fewer checkpoints the more messages there are on queue + 
NextIndexes = min(max(MsgsTot, CheckMinIndexes), CheckMaxIndexes), + %% take a checkpoint; + {#checkpoint{index = LastAppliedIdx, + timestamp = Ts, + smallest_index = NewSmallest, + messages_total = MsgsTot, + indexes = NextIndexes}, + [{checkpoint, LastAppliedIdx, MacState} | + release_cursor(LastSmallest, NewSmallest)]}; + false -> + {Check0#checkpoint{smallest_index = NewSmallest}, + release_cursor(LastSmallest, NewSmallest)} + end. + +release_cursor(LastSmallest, Smallest) + when is_integer(LastSmallest) andalso + is_integer(Smallest) andalso + Smallest > LastSmallest -> + [{release_cursor, Smallest - 1}]; +release_cursor(undefined, Smallest) + when is_integer(Smallest) -> + [{release_cursor, Smallest - 1}]; +release_cursor(_, _) -> + []. + +discard(Meta, MsgIds, ConsumerKey, + #consumer{checked_out = Checked} = Con, + DelFailed, Anns, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State0) -> + %% We publish to dead-letter exchange in the same order + %% as messages got rejected by the client. + DiscardMsgs = lists:filtermap( + fun(Id) -> + case maps:get(Id, Checked, undefined) of + undefined -> + false; + Msg0 -> + {true, incr_msg(Msg0, DelFailed, Anns)} + end + end, MsgIds), + {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, + DLH, DlxState0), + State = State0#?STATE{dlx = DlxState}, + complete_and_checkout(Meta, MsgIds, ConsumerKey, Con, Effects, State). + +incr_msg(Msg0, DelFailed, Anns) -> + Msg1 = update_msg_header(acquired_count, fun incr/1, 1, Msg0), + Msg2 = case map_size(Anns) > 0 of + true -> + update_msg_header(anns, fun(A) -> + maps:merge(A, Anns) + end, Anns, + Msg1); + false -> + Msg1 + end, + case DelFailed of + true -> + update_msg_header(delivery_count, fun incr/1, 1, Msg2); + false -> + Msg2 + end. 
diff --git a/deps/rabbit/src/rabbit_fifo.hrl b/deps/rabbit/src/rabbit_fifo.hrl index 92e15ef91268..7828759de748 100644 --- a/deps/rabbit/src/rabbit_fifo.hrl +++ b/deps/rabbit/src/rabbit_fifo.hrl @@ -12,6 +12,8 @@ %% Raw message data is always stored on disk. -define(MSG(Index, Header), ?TUPLE(Index, Header)). +-define(NIL, []). + -define(IS_HEADER(H), (is_integer(H) andalso H >= 0) orelse is_list(H) orelse @@ -39,12 +41,14 @@ -type msg_header() :: msg_size() | optimised_tuple(msg_size(), Expiry :: milliseconds()) | #{size := msg_size(), + acquired_count => non_neg_integer(), delivery_count => non_neg_integer(), expiry => milliseconds()}. %% The message header: %% size: The size of the message payload in bytes. -%% delivery_count: the number of unsuccessful delivery attempts. +%% delivery_count: The number of unsuccessful delivery attempts. %% A non-zero value indicates a previous attempt. +%% return_count: The number of explicit returns. %% expiry: Epoch time in ms when a message expires. Set during enqueue. %% Value is determined by per-queue or per-message message TTL. %% If it contains only the size it can be condensed to an integer. @@ -53,7 +57,7 @@ -type msg_size() :: non_neg_integer(). %% the size in bytes of the msg payload --type msg() :: optimised_tuple(option(ra:index()), msg_header()). +-type msg() :: optimised_tuple(ra:index(), msg_header()). -type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. %% A tuple consisting of the message id, and the headered message. @@ -64,32 +68,41 @@ -type consumer_id() :: {rabbit_types:ctag(), pid()}. %% The entity that receives messages. Uniquely identifies a consumer. --type credit_mode() :: credited | - %% machine_version 2 - simple_prefetch | - %% machine_version 3 - {simple_prefetch, MaxCredit :: non_neg_integer()}. +-type consumer_idx() :: ra:index(). +%% v4 can reference consumers by the raft index they were added at. +%% The entity that receives messages. Uniquely identifies a consumer. 
+-type consumer_key() :: consumer_id() | consumer_idx(). + +-type credit_mode() :: + {credited, InitialDeliveryCount :: rabbit_queue_type:delivery_count()} | + %% machine_version 2 + {simple_prefetch, MaxCredit :: non_neg_integer()}. %% determines how credit is replenished --type checkout_spec() :: {once | auto, Num :: non_neg_integer(), - credit_mode()} | +-type checkout_spec() :: {once | auto, + Num :: non_neg_integer(), + credited | simple_prefetch} | + {dequeue, settled | unsettled} | - cancel. + cancel | remove | + %% new v4 format + {once | auto, credit_mode()}. -type consumer_meta() :: #{ack => boolean(), username => binary(), prefetch => non_neg_integer(), args => list(), - %% set if and only if credit API v2 is in use - initial_delivery_count => rabbit_queue_type:delivery_count() + priority => non_neg_integer() }. %% static meta data associated with a consumer -type applied_mfa() :: {module(), atom(), list()}. % represents a partially applied module call --define(RELEASE_CURSOR_EVERY, 2048). --define(RELEASE_CURSOR_EVERY_MAX, 3_200_000). +-define(CHECK_MIN_INTERVAL_MS, 1000). +-define(CHECK_MIN_INDEXES, 4096). +-define(CHECK_MAX_INDEXES, 666_667). + -define(USE_AVG_HALF_LIFE, 10000.0). %% an average QQ without any message uses about 100KB so setting this limit %% to ~10 times that should be relatively safe. @@ -99,6 +112,7 @@ -define(LOW_LIMIT, 0.8). -define(DELIVERY_CHUNK_LIMIT_B, 128_000). +-type milliseconds() :: non_neg_integer(). -record(consumer_cfg, {meta = #{} :: consumer_meta(), pid :: pid(), @@ -107,15 +121,15 @@ %% simple_prefetch: credit is re-filled as deliveries are settled %% or returned. %% credited: credit can only be changed by receiving a consumer_credit - %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}' - credit_mode :: credit_mode(), % part of snapshot data + %% command: `{credit, ReceiverDeliveryCount, Credit}' + credit_mode :: credited | credit_mode(), lifetime = once :: once | auto, priority = 0 :: integer()}). 
-record(consumer, {cfg = #consumer_cfg{}, - status = up :: up | suspected_down | cancelled | waiting, - next_msg_id = 0 :: msg_id(), % part of snapshot data + status = up :: up | suspected_down | cancelled | quiescing, + next_msg_id = 0 :: msg_id(), checked_out = #{} :: #{msg_id() => msg()}, %% max number of messages that can be sent %% decremented for each delivery @@ -128,27 +142,25 @@ -type consumer_strategy() :: competing | single_active. --type milliseconds() :: non_neg_integer(). - -type dead_letter_handler() :: option({at_most_once, applied_mfa()} | at_least_once). -record(enqueuer, {next_seqno = 1 :: msg_seqno(), % out of order enqueues - sorted list - unused, + unused = ?NIL, status = up :: up | suspected_down, %% it is useful to have a record of when this was blocked %% so that we can retry sending the block effect if %% the publisher did not receive the initial one blocked :: option(ra:index()), - unused_1, - unused_2 + unused_1 = ?NIL, + unused_2 = ?NIL }). -record(cfg, {name :: atom(), resource :: rabbit_types:r('queue'), - release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), + unused_1 = ?NIL, dead_letter_handler :: dead_letter_handler(), become_leader_handler :: option(applied_mfa()), overflow_strategy = drop_head :: drop_head | reject_publish, @@ -160,18 +172,14 @@ delivery_limit :: option(non_neg_integer()), expires :: option(milliseconds()), msg_ttl :: option(milliseconds()), - unused_1, - unused_2 + unused_2 = ?NIL, + unused_3 = ?NIL }). --type prefix_msgs() :: {list(), list()} | - {non_neg_integer(), list(), - non_neg_integer(), list()}. 
- -record(rabbit_fifo, {cfg :: #cfg{}, % unassigned messages - messages = lqueue:new() :: lqueue:lqueue(msg()), + messages = rabbit_fifo_q:new() :: rabbit_fifo_q:state(), messages_total = 0 :: non_neg_integer(), % queue of returned msg_in_ids - when checking out it picks from returns = lqueue:new() :: lqueue:lqueue(term()), @@ -187,13 +195,9 @@ % index when there are large gaps but should be faster than gb_trees % for normal appending operations as it's backed by a map ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), - %% A release cursor is essentially a snapshot for a past raft index. - %% Working assumption: Messages are consumed in a FIFO-ish order because - %% the log is truncated only until the oldest message. - release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, - ra:index(), #rabbit_fifo{}}), + unused_1 = ?NIL, % consumers need to reflect consumer state at time of snapshot - consumers = #{} :: #{consumer_id() => consumer()}, + consumers = #{} :: #{consumer_key() => consumer()}, % consumers that require further service are queued here service_queue = priority_queue:new() :: priority_queue:q(), %% state for at-least-once dead-lettering @@ -202,24 +206,23 @@ msg_bytes_checkout = 0 :: non_neg_integer(), %% one is picked if active consumer is cancelled or dies %% used only when single active consumer is on - waiting_consumers = [] :: [{consumer_id(), consumer()}], + waiting_consumers = [] :: [{consumer_key(), consumer()}], last_active :: option(non_neg_integer()), msg_cache :: option({ra:index(), raw_msg()}), - unused_2 + unused_2 = ?NIL }). 
-type config() :: #{name := atom(), queue_resource := rabbit_types:r('queue'), dead_letter_handler => dead_letter_handler(), become_leader_handler => applied_mfa(), - release_cursor_interval => non_neg_integer(), + checkpoint_min_indexes => non_neg_integer(), + checkpoint_max_indexes => non_neg_integer(), max_length => non_neg_integer(), max_bytes => non_neg_integer(), - max_in_memory_length => non_neg_integer(), - max_in_memory_bytes => non_neg_integer(), overflow_strategy => drop_head | reject_publish, single_active_consumer_on => boolean(), - delivery_limit => non_neg_integer(), + delivery_limit => non_neg_integer() | -1, expires => non_neg_integer(), msg_ttl => non_neg_integer(), created => non_neg_integer() diff --git a/deps/rabbit/src/rabbit_fifo_client.erl b/deps/rabbit/src/rabbit_fifo_client.erl index 70ced853751e..20d57d89577f 100644 --- a/deps/rabbit/src/rabbit_fifo_client.erl +++ b/deps/rabbit/src/rabbit_fifo_client.erl @@ -14,14 +14,15 @@ -export([ init/1, init/2, - checkout/5, - cancel_checkout/2, + checkout/4, + cancel_checkout/3, enqueue/3, enqueue/4, dequeue/4, settle/3, return/3, discard/3, + modify/6, credit_v1/4, credit/5, handle_ra_event/4, @@ -38,13 +39,17 @@ -define(SOFT_LIMIT, 32). -define(TIMER_TIME, 10000). -define(COMMAND_TIMEOUT, 30000). +-define(UNLIMITED_PREFETCH_COUNT, 2000). %% something large for ra -type seq() :: non_neg_integer(). --record(consumer, {last_msg_id :: seq() | -1 | undefined, +-record(consumer, {key :: rabbit_fifo:consumer_key(), + % status = up :: up | cancelled, + last_msg_id :: seq() | -1 | undefined, ack = false :: boolean(), - %% Remove this field when feature flag credit_api_v2 becomes required. - delivery_count :: {credit_api_v1, rabbit_queue_type:delivery_count()} | credit_api_v2 + %% Remove this field when feature flag rabbitmq_4.0.0 becomes required. + delivery_count :: {credit_api_v1, rabbit_queue_type:delivery_count()} | + credit_api_v2 }). 
-record(cfg, {servers = [] :: [ra:server_id()], @@ -59,12 +64,11 @@ next_enqueue_seq = 1 :: seq(), %% indicates that we've exceeded the soft limit slow = false :: boolean(), - unsent_commands = #{} :: #{rabbit_fifo:consumer_id() => + unsent_commands = #{} :: #{rabbit_fifo:consumer_key() => {[seq()], [seq()], [seq()]}}, pending = #{} :: #{seq() => {term(), rabbit_fifo:command()}}, - consumer_deliveries = #{} :: #{rabbit_types:ctag() => - #consumer{}}, + consumers = #{} :: #{rabbit_types:ctag() => #consumer{}}, timer_state :: term() }). @@ -112,6 +116,9 @@ enqueue(QName, Correlation, Msg, cfg = #cfg{servers = Servers, timeout = Timeout}} = State0) -> %% the first publish, register and enqueuer for this process. + %% TODO: we _only_ need to pre-register an enqueuer to discover if the + %% queue overflow is `reject_publish` and the queue can accept new messages + %% if the queue does not have `reject_publish` set we can skip this step Reg = rabbit_fifo:make_register_enqueuer(self()), case ra:process_command(Servers, Reg, Timeout) of {ok, reject_publish, Leader} -> @@ -135,7 +142,7 @@ enqueue(_QName, _Correlation, _Msg, cfg = #cfg{}} = State) -> {reject_publish, State}; enqueue(QName, Correlation, Msg, - #state{slow = Slow, + #state{slow = WasSlow, pending = Pending, queue_status = go, next_seq = Seq, @@ -145,19 +152,15 @@ enqueue(QName, Correlation, Msg, % by default there is no correlation id Cmd = rabbit_fifo:make_enqueue(self(), EnqueueSeq, Msg), ok = ra:pipeline_command(ServerId, Cmd, Seq, low), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, + IsSlow = map_size(Pending) >= SftLmt, State = State0#state{pending = Pending#{Seq => {Correlation, Cmd}}, next_seq = Seq + 1, next_enqueue_seq = EnqueueSeq + 1, - slow = Tag == slow}, - case Tag of - slow when not Slow -> - {ok, set_timer(QName, State), [{block, cluster_name(State)}]}; - _ -> - {ok, State, []} + slow = IsSlow}, + if IsSlow andalso not WasSlow -> + {ok, set_timer(QName, 
State), [{block, cluster_name(State)}]}; + true -> + {ok, State, []} end. %% @doc Enqueues a message. @@ -194,6 +197,8 @@ enqueue(QName, Msg, State) -> dequeue(QueueName, ConsumerTag, Settlement, #state{cfg = #cfg{timeout = Timeout}} = State0) -> ServerId = pick_server(State0), + %% dequeue never really needs to assign a consumer key so we just use + %% the old ConsumerId format here ConsumerId = consumer_id(ConsumerTag), case ra:process_command(ServerId, rabbit_fifo:make_checkout(ConsumerId, @@ -203,14 +208,9 @@ dequeue(QueueName, ConsumerTag, Settlement, {ok, {dequeue, empty}, Leader} -> {empty, State0#state{leader = Leader}}; {ok, {dequeue, {MsgId, {MsgHeader, Msg0}}, MsgsReady}, Leader} -> - Count = case MsgHeader of - #{delivery_count := C} -> C; - _ -> 0 - end, - IsDelivered = Count > 0, - Msg = add_delivery_count_header(Msg0, Count), + {Msg, Redelivered} = add_delivery_count_header(Msg0, MsgHeader), {ok, MsgsReady, - {QueueName, qref(Leader), MsgId, IsDelivered, Msg}, + {QueueName, qref(Leader), MsgId, Redelivered, Msg}, State0#state{leader = Leader}}; {ok, {error, _} = Err, _Leader} -> Err; @@ -218,15 +218,25 @@ dequeue(QueueName, ConsumerTag, Settlement, Err end. -add_delivery_count_header(Msg, Count) -> - case mc:is(Msg) of - true when is_integer(Count) andalso - Count > 0 -> - mc:set_annotation(<<"x-delivery-count">>, Count, Msg); - _ -> - Msg - end. 
- +add_delivery_count_header(Msg0, #{acquired_count := AcqCount} = Header) + when is_integer(AcqCount) -> + Msg = case mc:is(Msg0) of + true -> + Msg1 = mc:set_annotation(<<"x-delivery-count">>, AcqCount, Msg0), + %% the "delivery-count" header in the AMQP spec does not include + %% returns (released outcomes) + rabbit_fifo:annotate_msg(Header, Msg1); + false -> + Msg0 + end, + Redelivered = AcqCount > 0, + {Msg, Redelivered}; +add_delivery_count_header(Msg, #{delivery_count := DC} = Header) -> + %% there was a delivery count but no acquired count, this means the message + %% was delivered from a quorum queue running v3 so we patch this up here + add_delivery_count_header(Msg, Header#{acquired_count => DC}); +add_delivery_count_header(Msg, _Header) -> + {Msg, false}. %% @doc Settle a message. Permanently removes message from the queue. %% @param ConsumerTag the tag uniquely identifying the consumer. @@ -236,15 +246,16 @@ add_delivery_count_header(Msg, Count) -> -spec settle(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. settle(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - Cmd = rabbit_fifo:make_settle(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_settle(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; settle(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> %% MsgIds has fewer elements than Settles. %% Therefore put it on the left side of the ++ operator. 
@@ -264,16 +275,16 @@ settle(ConsumerTag, [_|_] = MsgIds, -spec return(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. return(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - % TODO: make rabbit_fifo return support lists of message ids - Cmd = rabbit_fifo:make_return(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_return(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; return(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> {Settles, Returns ++ MsgIds, Discards} end, {[], MsgIds, []}, Unsent0), @@ -289,20 +300,35 @@ return(ConsumerTag, [_|_] = MsgIds, -spec discard(rabbit_types:ctag(), [rabbit_fifo:msg_id()], state()) -> {state(), list()}. 
discard(ConsumerTag, [_|_] = MsgIds, #state{slow = false} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), ServerId = pick_server(State0), - Cmd = rabbit_fifo:make_discard(consumer_id(ConsumerTag), MsgIds), + Cmd = rabbit_fifo:make_discard(ConsumerKey, MsgIds), {send_command(ServerId, undefined, Cmd, normal, State0), []}; discard(ConsumerTag, [_|_] = MsgIds, #state{unsent_commands = Unsent0} = State0) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State0), %% we've reached the soft limit so will stash the command to be %% sent once we have seen enough notifications - Unsent = maps:update_with(ConsumerId, + Unsent = maps:update_with(ConsumerKey, fun ({Settles, Returns, Discards}) -> {Settles, Returns, Discards ++ MsgIds} end, {[], [], MsgIds}, Unsent0), {State0#state{unsent_commands = Unsent}, []}. +-spec modify(rabbit_types:ctag(), [rabbit_fifo:msg_id()], + boolean(), boolean(), mc:annotations(), state()) -> + {state(), list()}. +modify(ConsumerTag, [_|_] = MsgIds, DelFailed, Undel, Anns, + #state{} = State0) -> + ConsumerKey = consumer_key(ConsumerTag, State0), + %% we need to send any pending settles, discards or returns before we + %% send the modify as this cannot be batched + %% as it contains message specific annotations + State1 = send_pending(ConsumerKey, State0), + ServerId = pick_server(State1), + Cmd = rabbit_fifo:make_modify(ConsumerKey, MsgIds, DelFailed, Undel, Anns), + {send_command(ServerId, undefined, Cmd, normal, State1), []}. + %% @doc Register with the rabbit_fifo queue to "checkout" messages as they %% become available. %% @@ -320,29 +346,45 @@ discard(ConsumerTag, [_|_] = MsgIds, %% %% @returns `{ok, State}' or `{error | timeout, term()}' -spec checkout(rabbit_types:ctag(), - NumUnsettled :: non_neg_integer(), CreditMode :: rabbit_fifo:credit_mode(), Meta :: rabbit_fifo:consumer_meta(), - state()) -> {ok, state()} | {error | timeout, term()}. 
-checkout(ConsumerTag, NumUnsettled, CreditMode, Meta, - #state{consumer_deliveries = CDels0} = State0) -> + state()) -> + {ok, ConsumerInfos :: map(), state()} | + {error | timeout, term()}. +checkout(ConsumerTag, CreditMode, #{} = Meta, + #state{consumers = CDels0} = State0) + when is_binary(ConsumerTag) andalso + is_tuple(CreditMode) -> Servers = sorted_servers(State0), - ConsumerId = {ConsumerTag, self()}, - Cmd = rabbit_fifo:make_checkout(ConsumerId, - {auto, NumUnsettled, CreditMode}, - Meta), + ConsumerId = consumer_id(ConsumerTag), + Spec = case rabbit_fifo:is_v4() of + true -> + case CreditMode of + {simple_prefetch, 0} -> + {auto, {simple_prefetch, + ?UNLIMITED_PREFETCH_COUNT}}; + _ -> + {auto, CreditMode} + end; + false -> + case CreditMode of + {credited, _} -> + {auto, 0, credited}; + {simple_prefetch, 0} -> + {auto, ?UNLIMITED_PREFETCH_COUNT, simple_prefetch}; + {simple_prefetch, Num} -> + {auto, Num, simple_prefetch} + end + end, + Cmd = rabbit_fifo:make_checkout(ConsumerId, Spec, Meta), %% ??? 
Ack = maps:get(ack, Meta, true), case try_process_command(Servers, Cmd, State0) of - {ok, Reply, Leader} -> + {ok, {ok, Reply}, Leader} -> LastMsgId = case Reply of - ok -> - %% this is the pre 3.11.1 / 3.10.9 - %% reply format - -1; - {ok, #{num_checked_out := NumChecked, - next_msg_id := NextMsgId}} -> + #{num_checked_out := NumChecked, + next_msg_id := NextMsgId} -> case NumChecked > 0 of true -> %% we cannot know if the pending messages @@ -356,19 +398,21 @@ checkout(ConsumerTag, NumUnsettled, CreditMode, Meta, NextMsgId - 1 end end, - DeliveryCount = case maps:is_key(initial_delivery_count, Meta) of + DeliveryCount = case rabbit_fifo:is_v4() of true -> credit_api_v2; false -> {credit_api_v1, 0} end, + ConsumerKey = maps:get(key, Reply, ConsumerId), SDels = maps:update_with( ConsumerTag, fun (C) -> C#consumer{ack = Ack} end, - #consumer{last_msg_id = LastMsgId, + #consumer{key = ConsumerKey, + last_msg_id = LastMsgId, ack = Ack, delivery_count = DeliveryCount}, CDels0), - {ok, State0#state{leader = Leader, - consumer_deliveries = SDels}}; + {ok, Reply, State0#state{leader = Leader, + consumers = SDels}}; Err -> Err end. @@ -392,7 +436,7 @@ query_single_active_consumer(#state{leader = Leader}) -> state()) -> {state(), rabbit_queue_type:actions()}. credit_v1(ConsumerTag, Credit, Drain, - State = #state{consumer_deliveries = CDels}) -> + #state{consumers = CDels} = State) -> #consumer{delivery_count = {credit_api_v1, Count}} = maps:get(ConsumerTag, CDels), credit(ConsumerTag, Count, Credit, Drain, State). @@ -412,12 +456,12 @@ credit_v1(ConsumerTag, Credit, Drain, state()) -> {state(), rabbit_queue_type:actions()}. 
credit(ConsumerTag, DeliveryCount, Credit, Drain, State) -> - ConsumerId = consumer_id(ConsumerTag), + ConsumerKey = consumer_key(ConsumerTag, State), ServerId = pick_server(State), - Cmd = rabbit_fifo:make_credit(ConsumerId, Credit, DeliveryCount, Drain), + Cmd = rabbit_fifo:make_credit(ConsumerKey, Credit, DeliveryCount, Drain), {send_command(ServerId, undefined, Cmd, normal, State), []}. -%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag +%% @doc Cancels a checkout with the rabbit_fifo queue for the consumer tag %% %% This is a synchronous call. I.e. the call will block until the command %% has been accepted by the ra process or it times out. @@ -426,18 +470,29 @@ credit(ConsumerTag, DeliveryCount, Credit, Drain, State) -> %% @param State The {@module} state. %% %% @returns `{ok, State}' or `{error | timeout, term()}' --spec cancel_checkout(rabbit_types:ctag(), state()) -> +-spec cancel_checkout(rabbit_types:ctag(), rabbit_queue_type:cancel_reason(), state()) -> {ok, state()} | {error | timeout, term()}. 
-cancel_checkout(ConsumerTag, #state{consumer_deliveries = CDels} = State0) -> - Servers = sorted_servers(State0), - ConsumerId = {ConsumerTag, self()}, - Cmd = rabbit_fifo:make_checkout(ConsumerId, cancel, #{}), - State = State0#state{consumer_deliveries = maps:remove(ConsumerTag, CDels)}, - case try_process_command(Servers, Cmd, State) of - {ok, _, Leader} -> - {ok, State#state{leader = Leader}}; - Err -> - Err +cancel_checkout(ConsumerTag, Reason, + #state{consumers = Consumers} = State0) + when is_atom(Reason) -> + case Consumers of + #{ConsumerTag := #consumer{key = Cid}} -> + Servers = sorted_servers(State0), + ConsumerId = {ConsumerTag, self()}, + State1 = send_pending(Cid, State0), + Cmd = rabbit_fifo:make_checkout(ConsumerId, Reason, #{}), + State = State1#state{consumers = maps:remove(ConsumerTag, Consumers)}, + case try_process_command(Servers, Cmd, State) of + {ok, _, Leader} -> + {ok, State#state{leader = Leader}}; + Err -> + Err + end; + _ -> + %% TODO: when we implement the `delete' checkout spec we could + %% fallback to that to make sure there is little chance a consumer + %% sticks around in the machine + {ok, State0} end. %% @doc Purges all the messages from a rabbit_fifo queue and returns the number @@ -549,7 +604,7 @@ handle_ra_event(QName, From, {applied, Seqs}, %% is sequence numer agnostic: it handles any correlation terms. [{settled, QName, Corrs} | Actions0] end, - case maps:size(State1#state.pending) < SftLmt of + case map_size(State1#state.pending) < SftLmt of true when State1#state.slow == true -> % we have exited soft limit state % send any unsent commands and cancel the time as @@ -666,7 +721,7 @@ seq_applied({Seq, Response}, when Response /= not_enqueued -> {[Corr | Corrs], Actions, State#state{pending = Pending}}; _ -> - {Corrs, Actions, State#state{}} + {Corrs, Actions, State} end; seq_applied(_Seq, Acc) -> Acc. 
@@ -681,7 +736,7 @@ maybe_add_action({multi, Actions}, Acc0, State0) -> end, {Acc0, State0}, Actions); maybe_add_action({send_drained, {Tag, Credit}}, Acc, State0) -> %% This function clause should be deleted when - %% feature flag credit_api_v2 becomes required. + %% feature flag rabbitmq_4.0.0 becomes required. State = add_delivery_count(Credit, Tag, State0), Action = {credit_reply_v1, Tag, Credit, _Avail = 0, _Drain = true}, {[Action | Acc], State}; @@ -713,7 +768,7 @@ maybe_auto_ack(false, {deliver, Tag, _Ack, Msgs} = Deliver, State0) -> {ok, State, [Deliver] ++ Actions}. handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, - #state{consumer_deliveries = CDels0} = State0) + #state{consumers = CDels0} = State0) when is_map_key(Tag, CDels0) -> QRef = qref(Leader), {LastId, _} = lists:last(IdMsgs), @@ -729,7 +784,7 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, %% In this case we can't reliably know what the next expected message %% id should be so have to accept whatever message comes next maybe_auto_ack(Ack, Del, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs), C, CDels0)}); @@ -749,7 +804,7 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, XDel = {deliver, Tag, Ack, transform_msgs(QName, QRef, Missing ++ IdMsgs)}, maybe_auto_ack(Ack, XDel, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs) + NumMissing, C, CDels0)}) @@ -765,14 +820,14 @@ handle_delivery(QName, Leader, {delivery, Tag, [{FstId, _} | _] = IdMsgs}, C when FstId =:= 0 -> % the very first delivery maybe_auto_ack(Ack, Del, - State0#state{consumer_deliveries = + State0#state{consumers = update_consumer(Tag, LastId, length(IdMsgs), C#consumer{last_msg_id = LastId}, CDels0)}) end; handle_delivery(_QName, _Leader, {delivery, Tag, [_ | _] = IdMsgs}, - #state{consumer_deliveries = CDels0} = State0) + 
#state{consumers = CDels0} = State0) when not is_map_key(Tag, CDels0) -> %% Note: %% https://github.com/rabbitmq/rabbitmq-server/issues/3729 @@ -785,13 +840,7 @@ handle_delivery(_QName, _Leader, {delivery, Tag, [_ | _] = IdMsgs}, transform_msgs(QName, QRef, Msgs) -> lists:map( fun({MsgId, {MsgHeader, Msg0}}) -> - {Msg, Redelivered} = case MsgHeader of - #{delivery_count := C} -> - {add_delivery_count_header(Msg0, C), true}; - _ -> - {Msg0, false} - end, - + {Msg, Redelivered} = add_delivery_count_header(Msg0, MsgHeader), {QName, QRef, MsgId, Redelivered, Msg} end, Msgs). @@ -805,17 +854,17 @@ update_consumer(Tag, LastId, DelCntIncr, Consumer, Consumers) -> delivery_count = D}, Consumers). -add_delivery_count(DelCntIncr, Tag, #state{consumer_deliveries = CDels0} = State) -> +add_delivery_count(DelCntIncr, Tag, #state{consumers = CDels0} = State) -> Con = #consumer{last_msg_id = LastMsgId} = maps:get(Tag, CDels0), CDels = update_consumer(Tag, LastMsgId, DelCntIncr, Con, CDels0), - State#state{consumer_deliveries = CDels}. + State#state{consumers = CDels}. get_missing_deliveries(State, From, To, ConsumerTag) -> %% find local server - ConsumerId = consumer_id(ConsumerTag), - rabbit_log:debug("get_missing_deliveries for ~w from ~b to ~b", - [ConsumerId, From, To]), - Cmd = {get_checked_out, ConsumerId, lists:seq(From, To)}, + ConsumerKey = consumer_key(ConsumerTag, State), + rabbit_log:debug("get_missing_deliveries for consumer '~s' from ~b to ~b", + [ConsumerTag, From, To]), + Cmd = {get_checked_out, ConsumerKey, lists:seq(From, To)}, ServerId = find_local_or_leader(State), case ra:aux_command(ServerId, Cmd) of {ok, Missing} -> @@ -843,35 +892,32 @@ sorted_servers(#state{leader = Leader, cfg = #cfg{servers = Servers}}) -> [Leader | lists:delete(Leader, Servers)]. 
-consumer_id(ConsumerTag) -> +consumer_key(ConsumerTag, #state{consumers = Consumers}) -> + case Consumers of + #{ConsumerTag := #consumer{key = Key}} -> + Key; + _ -> + %% if no consumer found fall back to using the ConsumerId + consumer_id(ConsumerTag) + end. + +consumer_id(ConsumerTag) when is_binary(ConsumerTag) -> {ConsumerTag, self()}. -send_command(Server, Correlation, Command, _Priority, - #state{pending = Pending, - next_seq = Seq, - cfg = #cfg{soft_limit = SftLmt}} = State) - when element(1, Command) == return -> - %% returns are sent to the aux machine for pre-evaluation - ok = ra:cast_aux_command(Server, {Command, Seq, self()}), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, - State#state{pending = Pending#{Seq => {Correlation, Command}}, - next_seq = Seq + 1, - slow = Tag == slow}; send_command(Server, Correlation, Command, Priority, #state{pending = Pending, next_seq = Seq, cfg = #cfg{soft_limit = SftLmt}} = State) -> - ok = ra:pipeline_command(Server, Command, Seq, Priority), - Tag = case map_size(Pending) >= SftLmt of - true -> slow; - false -> ok - end, + ok = case rabbit_fifo:is_return(Command) of + true -> + %% returns are sent to the aux machine for pre-evaluation + ra:cast_aux_command(Server, {Command, Seq, self()}); + _ -> + ra:pipeline_command(Server, Command, Seq, Priority) + end, State#state{pending = Pending#{Seq => {Correlation, Command}}, next_seq = Seq + 1, - slow = Tag == slow}. + slow = map_size(Pending) >= SftLmt}. resend_command(ServerId, Correlation, Command, #state{pending = Pending, @@ -940,3 +986,21 @@ qref(Ref) -> Ref. atom(). cluster_name(#state{cfg = #cfg{servers = [{Name, _Node} | _]}}) -> Name. 
+ +send_pending(Cid, #state{unsent_commands = Unsent} = State0) -> + Commands = case Unsent of + #{Cid := {Settled, Returns, Discards}} -> + add_command(Cid, settle, Settled, + add_command(Cid, return, Returns, + add_command(Cid, discard, + Discards, []))); + _ -> + [] + end, + ServerId = pick_server(State0), + %% send all the settlements, discards and returns + State1 = lists:foldl(fun (C, S0) -> + send_command(ServerId, undefined, C, + normal, S0) + end, State0, Commands), + State1#state{unsent_commands = maps:remove(Cid, Unsent)}. diff --git a/deps/rabbit/src/rabbit_fifo_dlx.erl b/deps/rabbit/src/rabbit_fifo_dlx.erl index 12326a13c490..4e787172d1a4 100644 --- a/deps/rabbit/src/rabbit_fifo_dlx.erl +++ b/deps/rabbit/src/rabbit_fifo_dlx.erl @@ -23,7 +23,6 @@ state_enter/4, handle_aux/6, dehydrate/1, - normalize/1, stat/1, update_config/4, smallest_raft_index/1 @@ -160,21 +159,20 @@ discard(Msgs0, Reason, {at_most_once, {Mod, Fun, Args}}, State) -> Lookup = maps:from_list(lists:zip(Idxs, Log)), Msgs = [begin Cmd = maps:get(Idx, Lookup), - rabbit_fifo:get_msg(Cmd) - end || ?MSG(Idx, _) <- Msgs0], + %% ensure header delivery count + %% is copied to the message container + annotate_msg(H, rabbit_fifo:get_msg(Cmd)) + end || ?MSG(Idx, H) <- Msgs0], [{mod_call, Mod, Fun, Args ++ [Reason, Msgs]}] end}, {State, [Effect]}; discard(Msgs, Reason, at_least_once, State0) when Reason =/= maxlen -> - State = lists:foldl(fun(?MSG(Idx, _) = Msg0, + State = lists:foldl(fun(?MSG(Idx, _) = Msg, #?MODULE{discards = D0, msg_bytes = B0, ra_indexes = I0} = S0) -> - MsgSize = size_in_bytes(Msg0), - %% Condense header to an integer representing the message size. - %% We need neither delivery_count nor expiry anymore. 
- Msg = ?MSG(Idx, MsgSize), + MsgSize = size_in_bytes(Msg), D = lqueue:in(?TUPLE(Reason, Msg), D0), B = B0 + MsgSize, I = rabbit_fifo_index:append(Idx, I0), @@ -192,8 +190,8 @@ checkout(at_least_once, #?MODULE{consumer = #dlx_consumer{}} = State) -> checkout(_, State) -> {State, []}. -checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, _)), State}, SendAcc) -> - DelMsg = {Idx, {Reason, MsgId}}, +checkout0({success, MsgId, ?TUPLE(Reason, ?MSG(Idx, H)), State}, SendAcc) -> + DelMsg = {Idx, {Reason, H, MsgId}}, checkout0(checkout_one(State), [DelMsg | SendAcc]); checkout0(#?MODULE{consumer = #dlx_consumer{pid = Pid}} = State, SendAcc) -> Effects = delivery_effects(Pid, SendAcc), @@ -233,9 +231,11 @@ delivery_effects(CPid, Msgs0) -> {RaftIdxs, RsnIds} = lists:unzip(Msgs1), [{log, RaftIdxs, fun(Log) -> - Msgs = lists:zipwith(fun (Cmd, {Reason, MsgId}) -> - {MsgId, {Reason, rabbit_fifo:get_msg(Cmd)}} - end, Log, RsnIds), + Msgs = lists:zipwith( + fun (Cmd, {Reason, H, MsgId}) -> + {MsgId, {Reason, + annotate_msg(H, rabbit_fifo:get_msg(Cmd))}} + end, Log, RsnIds), [{send_msg, CPid, {dlx_event, self(), {dlx_delivery, Msgs}}, [cast]}] end}]. @@ -357,14 +357,10 @@ handle_aux(_, _, Aux, _, _, _) -> dehydrate(State) -> State#?MODULE{ra_indexes = rabbit_fifo_index:empty()}. --spec normalize(state()) -> - state(). -normalize(#?MODULE{discards = Discards, - ra_indexes = Indexes} = State) -> - State#?MODULE{discards = lqueue:from_list(lqueue:to_list(Discards)), - ra_indexes = rabbit_fifo_index:normalize(Indexes)}. - -spec smallest_raft_index(state()) -> option(non_neg_integer()). smallest_raft_index(#?MODULE{ra_indexes = Indexes}) -> rabbit_fifo_index:smallest(Indexes). + +annotate_msg(H, Msg) -> + rabbit_fifo:annotate_msg(H, Msg). 
diff --git a/deps/rabbit/src/rabbit_fifo_index.erl b/deps/rabbit/src/rabbit_fifo_index.erl index b20604386b8d..8a8fbbdb9e07 100644 --- a/deps/rabbit/src/rabbit_fifo_index.erl +++ b/deps/rabbit/src/rabbit_fifo_index.erl @@ -7,8 +7,7 @@ delete/2, size/1, smallest/1, - map/2, - normalize/1 + map/2 ]). -compile({no_auto_import, [size/1]}). @@ -105,10 +104,6 @@ find_next(Next, Last, Map) -> find_next(Next+1, Last, Map) end. --spec normalize(state()) -> state(). -normalize(State) -> - State#?MODULE{largest = undefined}. - -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/deps/rabbit/src/rabbit_fifo_q.erl b/deps/rabbit/src/rabbit_fifo_q.erl new file mode 100644 index 000000000000..3ddf165a03bc --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_q.erl @@ -0,0 +1,152 @@ +-module(rabbit_fifo_q). + +-include("rabbit_fifo.hrl"). +-export([ + new/0, + in/3, + out/1, + get/1, + len/1, + from_lqueue/1, + get_lowest_index/1, + overview/1 + ]). + +-define(WEIGHT, 2). +-define(NON_EMPTY, {_, [_|_]}). +-define(EMPTY, {[], []}). + +%% a weighted priority queue with only two priorities + +-record(?MODULE, {hi = ?EMPTY :: {list(msg()), list(msg())}, %% high + no = ?EMPTY :: {list(msg()), list(msg())}, %% normal + len = 0 :: non_neg_integer(), + dequeue_counter = 0 :: non_neg_integer()}). + +-opaque state() :: #?MODULE{}. + +-export_type([state/0]). + +-spec new() -> state(). +new() -> + #?MODULE{}. + +-spec in(hi | no, msg(), state()) -> state(). +in(hi, Item, #?MODULE{hi = Hi, len = Len} = State) -> + State#?MODULE{hi = in(Item, Hi), + len = Len + 1}; +in(no, Item, #?MODULE{no = No, len = Len} = State) -> + State#?MODULE{no = in(Item, No), + len = Len + 1}. + +-spec out(state()) -> + empty | {msg(), state()}. 
+out(#?MODULE{len = 0}) -> + empty; +out(#?MODULE{hi = Hi0, + no = No0, + len = Len, + dequeue_counter = C0} = State) -> + C = case C0 of + ?WEIGHT -> + 0; + _ -> + C0 + 1 + end, + case next(State) of + {hi, Msg} -> + {Msg, State#?MODULE{hi = drop(Hi0), + dequeue_counter = C, + len = Len - 1}}; + {no, Msg} -> + {Msg, State#?MODULE{no = drop(No0), + dequeue_counter = C, + len = Len - 1}} + end. + +-spec get(state()) -> empty | msg(). +get(#?MODULE{len = 0}) -> + empty; +get(#?MODULE{} = State) -> + {_, Msg} = next(State), + Msg. + +-spec len(state()) -> non_neg_integer(). +len(#?MODULE{len = Len}) -> + Len. + +-spec from_lqueue(lqueue:lqueue(msg())) -> state(). +from_lqueue(LQ) -> + lqueue:fold(fun (Item, Acc) -> + in(no, Item, Acc) + end, new(), LQ). + +-spec get_lowest_index(state()) -> undefined | ra:index(). +get_lowest_index(#?MODULE{len = 0}) -> + undefined; +get_lowest_index(#?MODULE{hi = Hi, no = No}) -> + case peek(Hi) of + empty -> + ?MSG(NoIdx, _) = peek(No), + NoIdx; + ?MSG(HiIdx, _) -> + case peek(No) of + ?MSG(NoIdx, _) -> + min(HiIdx, NoIdx); + empty -> + HiIdx + end + end. + +-spec overview(state()) -> + #{len := non_neg_integer(), + num_hi := non_neg_integer(), + num_no := non_neg_integer(), + lowest_index := ra:index()}. +overview(#?MODULE{len = Len, + hi = {Hi1, Hi2}, + no = _} = State) -> + %% TODO: this could be very slow with large backlogs, + %% consider keeping a separate counter for 'hi', 'no' messages + NumHi = length(Hi1) + length(Hi2), + #{len => Len, + num_hi => NumHi, + num_no => Len - NumHi, + lowest_index => get_lowest_index(State)}. + +%% internals + +next(#?MODULE{hi = ?NON_EMPTY = Hi, + no = ?NON_EMPTY = No, + dequeue_counter = ?WEIGHT}) -> + ?MSG(HiIdx, _) = HiMsg = peek(Hi), + ?MSG(NoIdx, _) = NoMsg = peek(No), + %% always favour hi priority messages when it is safe to do so, + %% i.e. 
the index is lower than the next index for the 'no' queue + case HiIdx < NoIdx of + true -> + {hi, HiMsg}; + false -> + {no, NoMsg} + end; +next(#?MODULE{hi = ?NON_EMPTY = Hi}) -> + {hi, peek(Hi)}; +next(#?MODULE{no = No}) -> + {no, peek(No)}. + +%% invariant, if the queue is non empty so is the Out (right) list. +in(X, ?EMPTY) -> + {[], [X]}; +in(X, {In, Out}) -> + {[X | In], Out}. + +peek(?EMPTY) -> + empty; +peek({_, [H | _]}) -> + H. + +drop({In, [_]}) -> + %% the last Out one + {[], lists:reverse(In)}; +drop({In, [_ | Out]}) -> + {In, Out}. diff --git a/deps/rabbit/src/rabbit_fifo_v3.erl b/deps/rabbit/src/rabbit_fifo_v3.erl new file mode 100644 index 000000000000..60ee6be9dc4b --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v3.erl @@ -0,0 +1,2574 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +-module(rabbit_fifo_v3). + +-behaviour(ra_machine). + +-compile(inline_list_funcs). +-compile(inline). +-compile({no_auto_import, [apply/3]}). +-dialyzer(no_improper_lists). + +-include("rabbit_fifo_v3.hrl"). +-include_lib("rabbit_common/include/rabbit.hrl"). + +-define(STATE, rabbit_fifo). 
+ +-export([ + %% ra_machine callbacks + init/1, + apply/3, + state_enter/2, + tick/2, + overview/1, + + get_checked_out/4, + %% versioning + version/0, + which_module/1, + %% aux + init_aux/1, + handle_aux/6, + % queries + query_messages_ready/1, + query_messages_checked_out/1, + query_messages_total/1, + query_processes/1, + query_ra_indexes/1, + query_waiting_consumers/1, + query_consumer_count/1, + query_consumers/1, + query_stat/1, + query_stat_dlx/1, + query_single_active_consumer/1, + query_in_memory_usage/1, + query_peek/2, + query_notify_decorators_info/1, + usage/1, + + %% misc + dehydrate_state/1, + get_msg_header/1, + get_header/2, + get_msg/1, + + %% protocol helpers + make_enqueue/3, + make_register_enqueuer/1, + make_checkout/3, + make_settle/2, + make_return/2, + make_discard/2, + make_credit/4, + make_purge/0, + make_purge_nodes/1, + make_update_config/1, + make_garbage_collection/0, + convert_v1_to_v2/1, + convert_v2_to_v3/1, + + get_field/2 + ]). + +-ifdef(TEST). +-export([update_header/4, + chunk_disk_msgs/3]). +-endif. + +%% command records representing all the protocol actions that are supported +-record(enqueue, {pid :: option(pid()), + seq :: option(msg_seqno()), + msg :: raw_msg()}). +-record(requeue, {consumer_id :: consumer_id(), + msg_id :: msg_id(), + index :: ra:index(), + header :: msg_header(), + msg :: raw_msg()}). +-record(register_enqueuer, {pid :: pid()}). +-record(checkout, {consumer_id :: consumer_id(), + spec :: checkout_spec(), + meta :: consumer_meta()}). +-record(settle, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(return, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(discard, {consumer_id :: consumer_id(), + msg_ids :: [msg_id()]}). +-record(credit, {consumer_id :: consumer_id(), + credit :: non_neg_integer(), + delivery_count :: non_neg_integer(), + drain :: boolean()}). +-record(purge, {}). +-record(purge_nodes, {nodes :: [node()]}). 
+-record(update_config, {config :: config()}). +-record(garbage_collection, {}). + +-opaque protocol() :: + #enqueue{} | + #requeue{} | + #register_enqueuer{} | + #checkout{} | + #settle{} | + #return{} | + #discard{} | + #credit{} | + #purge{} | + #purge_nodes{} | + #update_config{} | + #garbage_collection{}. + +-type command() :: protocol() | + rabbit_fifo_dlx:protocol() | + ra_machine:builtin_command(). +%% all the command types supported by ra fifo + +-type client_msg() :: delivery(). +%% the messages `rabbit_fifo' can send to consumers. + +-opaque state() :: #?STATE{}. + +-export_type([protocol/0, + delivery/0, + command/0, + credit_mode/0, + consumer_tag/0, + consumer_meta/0, + consumer_id/0, + client_msg/0, + msg/0, + msg_id/0, + msg_seqno/0, + delivery_msg/0, + state/0, + config/0]). + +%% This function is never called since only rabbit_fifo_v0:init/1 is called. +%% See https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 +-spec init(config()) -> state(). +init(#{name := Name, + queue_resource := Resource} = Conf) -> + update_config(Conf, #?STATE{cfg = #cfg{name = Name, + resource = Resource}}). 
+ +update_config(Conf, State) -> + DLH = maps:get(dead_letter_handler, Conf, undefined), + BLH = maps:get(become_leader_handler, Conf, undefined), + RCI = maps:get(release_cursor_interval, Conf, ?RELEASE_CURSOR_EVERY), + Overflow = maps:get(overflow_strategy, Conf, drop_head), + MaxLength = maps:get(max_length, Conf, undefined), + MaxBytes = maps:get(max_bytes, Conf, undefined), + DeliveryLimit = maps:get(delivery_limit, Conf, undefined), + Expires = maps:get(expires, Conf, undefined), + MsgTTL = maps:get(msg_ttl, Conf, undefined), + ConsumerStrategy = case maps:get(single_active_consumer_on, Conf, false) of + true -> + single_active; + false -> + competing + end, + Cfg = State#?STATE.cfg, + RCISpec = {RCI, RCI}, + + LastActive = maps:get(created, Conf, undefined), + State#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCISpec, + dead_letter_handler = DLH, + become_leader_handler = BLH, + overflow_strategy = Overflow, + max_length = MaxLength, + max_bytes = MaxBytes, + consumer_strategy = ConsumerStrategy, + delivery_limit = DeliveryLimit, + expires = Expires, + msg_ttl = MsgTTL}, + last_active = LastActive}. + +% msg_ids are scoped per consumer +% ra_indexes holds all raft indexes for enqueues currently on queue +-spec apply(ra_machine:command_meta_data(), command(), state()) -> + {state(), Reply :: term(), ra_machine:effects()} | + {state(), Reply :: term()}. 
+apply(Meta, #enqueue{pid = From, seq = Seq, + msg = RawMsg}, State00) -> + apply_enqueue(Meta, From, Seq, RawMsg, State00); +apply(_Meta, #register_enqueuer{pid = Pid}, + #?STATE{enqueuers = Enqueuers0, + cfg = #cfg{overflow_strategy = Overflow}} = State0) -> + State = case maps:is_key(Pid, Enqueuers0) of + true -> + %% if the enqueuer exits just echo the overflow state + State0; + false -> + State0#?STATE{enqueuers = Enqueuers0#{Pid => #enqueuer{}}} + end, + Res = case is_over_limit(State) of + true when Overflow == reject_publish -> + reject_publish; + _ -> + ok + end, + {State, Res, [{monitor, process, Pid}]}; +apply(Meta, + #settle{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := Con0} -> + complete_and_checkout(Meta, MsgIds, ConsumerId, + Con0, [], State); + _ -> + {State, ok} + end; +apply(Meta, #discard{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons, + dlx = DlxState0, + cfg = #cfg{dead_letter_handler = DLH}} = State0) -> + case Cons of + #{ConsumerId := #consumer{checked_out = Checked} = Con} -> + % Publishing to dead-letter exchange must maintain same order as messages got rejected. 
+ DiscardMsgs = lists:filtermap(fun(Id) -> + case maps:get(Id, Checked, undefined) of + undefined -> + false; + Msg -> + {true, Msg} + end + end, MsgIds), + {DlxState, Effects} = rabbit_fifo_dlx:discard(DiscardMsgs, rejected, DLH, DlxState0), + State = State0#?STATE{dlx = DlxState}, + complete_and_checkout(Meta, MsgIds, ConsumerId, Con, Effects, State); + _ -> + {State0, ok} + end; +apply(Meta, #return{msg_ids = MsgIds, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0} = State) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0}} -> + Returned = maps:with(MsgIds, Checked0), + return(Meta, ConsumerId, Returned, [], State); + _ -> + {State, ok} + end; +apply(#{index := Idx} = Meta, + #requeue{consumer_id = ConsumerId, + msg_id = MsgId, + index = OldIdx, + header = Header0, + msg = _Msg}, + #?STATE{consumers = Cons0, + messages = Messages, + ra_indexes = Indexes0, + enqueue_count = EnqCount} = State00) -> + case Cons0 of + #{ConsumerId := #consumer{checked_out = Checked0} = Con0} + when is_map_key(MsgId, Checked0) -> + %% construct a message with the current raft index + %% and update delivery count before adding it to the message queue + Header = update_header(delivery_count, fun incr/1, 1, Header0), + State0 = add_bytes_return(Header, State00), + Con = Con0#consumer{checked_out = maps:remove(MsgId, Checked0), + credit = increase_credit(Meta, Con0, 1)}, + State1 = State0#?STATE{ra_indexes = rabbit_fifo_index:delete(OldIdx, Indexes0), + messages = lqueue:in(?MSG(Idx, Header), Messages), + enqueue_count = EnqCount + 1}, + State2 = update_or_remove_sub(Meta, ConsumerId, Con, State1), + {State, Ret, Effs} = checkout(Meta, State0, State2, []), + update_smallest_raft_index(Idx, Ret, + maybe_store_release_cursor(Idx, State), + Effs); + _ -> + {State00, ok, []} + end; +apply(Meta, #credit{credit = NewCredit, delivery_count = RemoteDelCnt, + drain = Drain, consumer_id = ConsumerId}, + #?STATE{consumers = Cons0, + service_queue = ServiceQueue0, + 
waiting_consumers = Waiting0} = State0) ->
+    case Cons0 of
+        #{ConsumerId := #consumer{delivery_count = DelCnt} = Con0} ->
+            %% this can go below 0 when credit is reduced
+            C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+            %% grant the credit
+            Con1 = Con0#consumer{credit = C},
+            ServiceQueue = maybe_queue_consumer(ConsumerId, Con1,
+                                                ServiceQueue0),
+            Cons = maps:put(ConsumerId, Con1, Cons0),
+            {State1, ok, Effects} =
+                checkout(Meta, State0,
+                         State0#?STATE{service_queue = ServiceQueue,
+                                       consumers = Cons}, []),
+            Response = {send_credit_reply, messages_ready(State1)},
+            %% by this point all checkouts for the updated credit value
+            %% should be processed so we can evaluate the drain
+            case Drain of
+                false ->
+                    %% just return the result of the checkout
+                    {State1, Response, Effects};
+                true ->
+                    Con = #consumer{credit = PostCred} =
+                        maps:get(ConsumerId, State1#?STATE.consumers),
+                    %% add the outstanding credit to the delivery count
+                    DeliveryCount = Con#consumer.delivery_count + PostCred,
+                    Consumers = maps:put(ConsumerId,
+                                         Con#consumer{delivery_count = DeliveryCount,
+                                                      credit = 0},
+                                         State1#?STATE.consumers),
+                    Drained = Con#consumer.credit,
+                    {CTag, _} = ConsumerId,
+                    {State1#?STATE{consumers = Consumers},
+                     %% returning a multi response with two client actions
+                     %% for the channel to execute
+                     {multi, [Response, {send_drained, {CTag, Drained}}]},
+                     Effects}
+            end;
+        _ when Waiting0 /= [] ->
+            %% there are waiting consumers
+            case lists:keytake(ConsumerId, 1, Waiting0) of
+                {value, {_, Con0 = #consumer{delivery_count = DelCnt}}, Waiting} ->
+                    %% the consumer is a waiting one
+                    %% grant the credit
+                    C = max(0, RemoteDelCnt + NewCredit - DelCnt),
+                    Con = Con0#consumer{credit = C},
+                    State = State0#?STATE{waiting_consumers =
+                                          [{ConsumerId, Con} | Waiting]},
+                    {State, {send_credit_reply, messages_ready(State)}};
+                false ->
+                    {State0, ok}
+            end;
+        _ ->
+            %% credit for unknown consumer - just ignore
+            {State0, ok}
+    end;
+apply(_, #checkout{spec = {dequeue, _}},
+      
#?STATE{cfg = #cfg{consumer_strategy = single_active}} = State0) -> + {State0, {error, {unsupported, single_active_consumer}}}; +apply(#{index := Index, + system_time := Ts, + from := From} = Meta, #checkout{spec = {dequeue, Settlement}, + meta = ConsumerMeta, + consumer_id = ConsumerId}, + #?STATE{consumers = Consumers} = State00) -> + %% dequeue always updates last_active + State0 = State00#?STATE{last_active = Ts}, + %% all dequeue operations result in keeping the queue from expiring + Exists = maps:is_key(ConsumerId, Consumers), + case messages_ready(State0) of + 0 -> + update_smallest_raft_index(Index, {dequeue, empty}, State0, []); + _ when Exists -> + %% a dequeue using the same consumer_id isn't possible at this point + {State0, {dequeue, empty}}; + _ -> + {_, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, + {once, 1, simple_prefetch}, 0, + State0), + case checkout_one(Meta, false, State1, []) of + {success, _, MsgId, ?MSG(RaftIdx, Header), ExpiredMsg, State2, Effects0} -> + {State4, Effects1} = case Settlement of + unsettled -> + {_, Pid} = ConsumerId, + {State2, [{monitor, process, Pid} | Effects0]}; + settled -> + %% immediately settle the checkout + {State3, _, SettleEffects} = + apply(Meta, make_settle(ConsumerId, [MsgId]), + State2), + {State3, SettleEffects ++ Effects0} + end, + Effects2 = [reply_log_effect(RaftIdx, MsgId, Header, messages_ready(State4), From) | Effects1], + {State, DroppedMsg, Effects} = evaluate_limit(Index, false, State0, State4, + Effects2), + Reply = '$ra_no_reply', + case {DroppedMsg, ExpiredMsg} of + {false, false} -> + {State, Reply, Effects}; + _ -> + update_smallest_raft_index(Index, Reply, State, Effects) + end; + {nochange, _ExpiredMsg = true, State2, Effects0} -> + %% All ready messages expired. 
+ State3 = State2#?STATE{consumers = maps:remove(ConsumerId, State2#?STATE.consumers)}, + {State, _, Effects} = evaluate_limit(Index, false, State0, State3, Effects0), + update_smallest_raft_index(Index, {dequeue, empty}, State, Effects) + end + end; +apply(#{index := Idx} = Meta, + #checkout{spec = cancel, + consumer_id = ConsumerId}, State0) -> + {State1, Effects1} = cancel_consumer(Meta, ConsumerId, State0, [], + consumer_cancel), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(Meta, #checkout{spec = Spec, meta = ConsumerMeta, + consumer_id = {_, Pid} = ConsumerId}, State0) -> + Priority = get_priority_from_args(ConsumerMeta), + {Consumer, State1} = update_consumer(Meta, ConsumerId, ConsumerMeta, + Spec, Priority, State0), + {State2, Effs} = activate_next_consumer(State1, []), + #consumer{checked_out = Checked, + credit = Credit, + delivery_count = DeliveryCount, + next_msg_id = NextMsgId} = Consumer, + + %% reply with a consumer summary + Reply = {ok, #{next_msg_id => NextMsgId, + credit => Credit, + delivery_count => DeliveryCount, + num_checked_out => map_size(Checked)}}, + checkout(Meta, State0, State2, [{monitor, process, Pid} | Effs], Reply); +apply(#{index := Index}, #purge{}, + #?STATE{messages_total = Total, + returns = Returns, + ra_indexes = Indexes0 + } = State0) -> + NumReady = messages_ready(State0), + Indexes = case Total of + NumReady -> + %% All messages are either in 'messages' queue or 'returns' queue. + %% No message is awaiting acknowledgement. + %% Optimization: empty all 'ra_indexes'. + rabbit_fifo_index:empty(); + _ -> + %% Some messages are checked out to consumers awaiting acknowledgement. + %% Therefore we cannot empty all 'ra_indexes'. + %% We only need to delete the indexes from the 'returns' queue because + %% messages of the 'messages' queue are not part of the 'ra_indexes'. 
+ lqueue:fold(fun(?MSG(I, _), Acc) -> + rabbit_fifo_index:delete(I, Acc) + end, Indexes0, Returns) + end, + State1 = State0#?STATE{ra_indexes = Indexes, + messages = lqueue:new(), + messages_total = Total - NumReady, + returns = lqueue:new(), + msg_bytes_enqueue = 0 + }, + Effects0 = [garbage_collection], + Reply = {purge, NumReady}, + {State, _, Effects} = evaluate_limit(Index, false, State0, + State1, Effects0), + update_smallest_raft_index(Index, Reply, State, Effects); +apply(#{index := Idx}, #garbage_collection{}, State) -> + update_smallest_raft_index(Idx, ok, State, [{aux, garbage_collection}]); +apply(Meta, {timeout, expire_msgs}, State) -> + checkout(Meta, State, State, []); +apply(#{system_time := Ts, machine_version := MachineVersion} = Meta, + {down, Pid, noconnection}, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0, + enqueuers = Enqs0} = State0) -> + Node = node(Pid), + %% if the pid refers to an active or cancelled consumer, + %% mark it as suspected and return it to the waiting queue + {State1, Effects0} = + maps:fold(fun({_, P} = Cid, C0, {S0, E0}) + when node(P) =:= Node -> + %% the consumer should be returned to waiting + %% and checked out messages should be returned + Effs = consumer_update_active_effects( + S0, Cid, C0, false, suspected_down, E0), + C1 = case MachineVersion of + V when V >= 3 -> + C0; + 2 -> + Checked = C0#consumer.checked_out, + Credit = increase_credit(Meta, C0, maps:size(Checked)), + C0#consumer{credit = Credit} + end, + {St, Effs1} = return_all(Meta, S0, Effs, Cid, C1), + %% if the consumer was cancelled there is a chance it got + %% removed when returning hence we need to be defensive here + Waiting = case St#?STATE.consumers of + #{Cid := C} -> + Waiting0 ++ [{Cid, C}]; + _ -> + Waiting0 + end, + {St#?STATE{consumers = maps:remove(Cid, St#?STATE.consumers), + waiting_consumers = Waiting, + last_active = Ts}, + Effs1}; + (_, _, S) -> + S + end, {State0, []}, 
Cons0),
+    WaitingConsumers = update_waiting_consumer_status(Node, State1,
+                                                      suspected_down),
+
+    %% select a new consumer from the waiting queue and run a checkout
+    State2 = State1#?STATE{waiting_consumers = WaitingConsumers},
+    {State, Effects1} = activate_next_consumer(State2, Effects0),
+
+    %% mark any enqueuers as suspected
+    Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+                            E#enqueuer{status = suspected_down};
+                       (_, E) -> E
+                    end, Enqs0),
+    Effects = [{monitor, node, Node} | Effects1],
+    checkout(Meta, State0, State#?STATE{enqueuers = Enqs}, Effects);
+apply(#{system_time := Ts, machine_version := MachineVersion} = Meta,
+      {down, Pid, noconnection},
+      #?STATE{consumers = Cons0,
+              enqueuers = Enqs0} = State0) ->
+    %% A node has been disconnected. This doesn't necessarily mean that
+    %% any processes on this node are down, they _may_ come back so here
+    %% we just mark them as suspected (effectively deactivated)
+    %% and return all checked out messages to the main queue for delivery to any
+    %% live consumers
+    %%
+    %% all pids for the disconnected node will be marked as suspected not just
+    %% the one we got the `down' command for
+    Node = node(Pid),
+
+    {State, Effects1} =
+        maps:fold(
+          fun({_, P} = Cid, #consumer{checked_out = Checked0,
+                                      status = up} = C0,
+              {St0, Eff}) when node(P) =:= Node ->
+                  C = case MachineVersion of
+                          V when V >= 3 ->
+                              C0#consumer{status = suspected_down};
+                          2 ->
+                              Credit = increase_credit(Meta, C0, map_size(Checked0)),
+                              C0#consumer{status = suspected_down,
+                                          credit = Credit}
+                      end,
+                  {St, Eff0} = return_all(Meta, St0, Eff, Cid, C),
+                  Eff1 = consumer_update_active_effects(St, Cid, C, false,
+                                                        suspected_down, Eff0),
+                  {St, Eff1};
+             (_, _, {St, Eff}) ->
+                  {St, Eff}
+          end, {State0, []}, Cons0),
+    Enqs = maps:map(fun(P, E) when node(P) =:= Node ->
+                            E#enqueuer{status = suspected_down};
+                       (_, E) -> E
+                    end, Enqs0),
+
+    % Monitor the node so that we can "unsuspect" these processes when the node
+    % comes back, then re-issue all 
monitors and discover the final fate of + % these processes + + Effects = [{monitor, node, Node} | Effects1], + checkout(Meta, State0, State#?STATE{enqueuers = Enqs, + last_active = Ts}, Effects); +apply(#{index := Idx} = Meta, {down, Pid, _Info}, State0) -> + {State1, Effects1} = handle_down(Meta, Pid, State0), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects1), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(Meta, {nodeup, Node}, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + service_queue = _SQ0} = State0) -> + %% A node we are monitoring has come back. + %% If we have suspected any processes of being + %% down we should now re-issue the monitors for them to detect if they're + %% actually down or not + Monitors = [{monitor, process, P} + || P <- suspected_pids_for(Node, State0)], + + Enqs1 = maps:map(fun(P, E) when node(P) =:= Node -> + E#enqueuer{status = up}; + (_, E) -> E + end, Enqs0), + ConsumerUpdateActiveFun = consumer_active_flag_update_function(State0), + %% mark all consumers as up + {State1, Effects1} = + maps:fold(fun({_, P} = ConsumerId, C, {SAcc, EAcc}) + when (node(P) =:= Node) and + (C#consumer.status =/= cancelled) -> + EAcc1 = ConsumerUpdateActiveFun(SAcc, ConsumerId, + C, true, up, EAcc), + {update_or_remove_sub(Meta, ConsumerId, + C#consumer{status = up}, + SAcc), EAcc1}; + (_, _, Acc) -> + Acc + end, {State0, Monitors}, Cons0), + Waiting = update_waiting_consumer_status(Node, State1, up), + State2 = State1#?STATE{enqueuers = Enqs1, + waiting_consumers = Waiting}, + {State, Effects} = activate_next_consumer(State2, Effects1), + checkout(Meta, State0, State, Effects); +apply(_, {nodedown, _Node}, State) -> + {State, ok}; +apply(#{index := Idx} = Meta, #purge_nodes{nodes = Nodes}, State0) -> + {State, Effects} = lists:foldl(fun(Node, {S, E}) -> + purge_node(Meta, Node, S, E) + end, {State0, []}, Nodes), + update_smallest_raft_index(Idx, ok, State, Effects); +apply(#{index := Idx} = Meta, + 
#update_config{config = #{dead_letter_handler := NewDLH} = Conf}, + #?STATE{cfg = #cfg{dead_letter_handler = OldDLH, + resource = QRes}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:update_config(OldDLH, NewDLH, QRes, DlxState0), + State1 = update_config(Conf, State0#?STATE{dlx = DlxState}), + {State, Reply, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(Idx, Reply, State, Effects); +apply(_Meta, {machine_version, FromVersion, ToVersion}, V0State) -> + State = convert(FromVersion, ToVersion, V0State), + {State, ok, [{aux, {dlx, setup}}]}; +apply(#{index := IncomingRaftIdx} = Meta, {dlx, _} = Cmd, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State0) -> + {DlxState, Effects0} = rabbit_fifo_dlx:apply(Meta, Cmd, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(IncomingRaftIdx, State, Effects); +apply(_Meta, Cmd, State) -> + %% handle unhandled commands gracefully + rabbit_log:debug("rabbit_fifo: unhandled command ~W", [Cmd, 10]), + {State, ok, []}. + +convert_msg({RaftIdx, {Header, empty}}) when is_integer(RaftIdx) -> + ?MSG(RaftIdx, Header); +convert_msg({RaftIdx, {Header, _Msg}}) when is_integer(RaftIdx) -> + ?MSG(RaftIdx, Header); +convert_msg({'$empty_msg', Header}) -> + %% dummy index + ?MSG(undefined, Header); +convert_msg({'$prefix_msg', Header}) -> + %% dummy index + ?MSG(undefined, Header); +convert_msg({Header, empty}) -> + convert_msg(Header); +convert_msg(Header) when ?IS_HEADER(Header) -> + ?MSG(undefined, Header). 
+ +convert_consumer_v1_to_v2({ConsumerTag, Pid}, CV1) -> + Meta = element(2, CV1), + CheckedOut = element(3, CV1), + NextMsgId = element(4, CV1), + Credit = element(5, CV1), + DeliveryCount = element(6, CV1), + CreditMode = element(7, CV1), + LifeTime = element(8, CV1), + Status = element(9, CV1), + Priority = element(10, CV1), + #consumer{cfg = #consumer_cfg{tag = ConsumerTag, + pid = Pid, + meta = Meta, + credit_mode = CreditMode, + lifetime = LifeTime, + priority = Priority}, + credit = Credit, + status = Status, + delivery_count = DeliveryCount, + next_msg_id = NextMsgId, + checked_out = maps:map( + fun (_, {Tag, _} = Msg) when is_atom(Tag) -> + convert_msg(Msg); + (_, {_Seq, Msg}) -> + convert_msg(Msg) + end, CheckedOut) + }. + +convert_v1_to_v2(V1State0) -> + V1State = rabbit_fifo_v1:enqueue_all_pending(V1State0), + IndexesV1 = rabbit_fifo_v1:get_field(ra_indexes, V1State), + ReturnsV1 = rabbit_fifo_v1:get_field(returns, V1State), + MessagesV1 = rabbit_fifo_v1:get_field(messages, V1State), + ConsumersV1 = rabbit_fifo_v1:get_field(consumers, V1State), + WaitingConsumersV1 = rabbit_fifo_v1:get_field(waiting_consumers, V1State), + %% remove all raft idx in messages from index + {_, PrefReturns, _, PrefMsgs} = rabbit_fifo_v1:get_field(prefix_msgs, V1State), + V2PrefMsgs = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefMsgs), + V2PrefReturns = lists:foldl(fun(Hdr, Acc) -> + lqueue:in(convert_msg(Hdr), Acc) + end, lqueue:new(), PrefReturns), + MessagesV2 = lqueue:fold(fun ({_, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, V2PrefMsgs, MessagesV1), + ReturnsV2 = lqueue:fold(fun ({_SeqId, Msg}, Acc) -> + lqueue:in(convert_msg(Msg), Acc) + end, V2PrefReturns, ReturnsV1), + ConsumersV2 = maps:map( + fun (ConsumerId, CV1) -> + convert_consumer_v1_to_v2(ConsumerId, CV1) + end, ConsumersV1), + WaitingConsumersV2 = lists:map( + fun ({ConsumerId, CV1}) -> + {ConsumerId, convert_consumer_v1_to_v2(ConsumerId, CV1)} + end, 
WaitingConsumersV1), + EnqueuersV1 = rabbit_fifo_v1:get_field(enqueuers, V1State), + EnqueuersV2 = maps:map(fun (_EnqPid, Enq) -> + Enq#enqueuer{unused = undefined} + end, EnqueuersV1), + + %% do after state conversion + %% The (old) format of dead_letter_handler in RMQ < v3.10 is: + %% {Module, Function, Args} + %% The (new) format of dead_letter_handler in RMQ >= v3.10 is: + %% undefined | {at_most_once, {Module, Function, Args}} | at_least_once + %% + %% Note that the conversion must convert both from old format to new format + %% as well as from new format to new format. The latter is because quorum queues + %% created in RMQ >= v3.10 are still initialised with rabbit_fifo_v0 as described in + %% https://github.com/rabbitmq/ra/blob/e0d1e6315a45f5d3c19875d66f9d7bfaf83a46e3/src/ra_machine.erl#L258-L265 + DLH = case rabbit_fifo_v1:get_cfg_field(dead_letter_handler, V1State) of + {_M, _F, _A = [_DLX = undefined|_]} -> + %% queue was declared in RMQ < v3.10 and no DLX configured + undefined; + {_M, _F, _A} = MFA -> + %% queue was declared in RMQ < v3.10 and DLX configured + {at_most_once, MFA}; + Other -> + Other + end, + + Cfg = #cfg{name = rabbit_fifo_v1:get_cfg_field(name, V1State), + resource = rabbit_fifo_v1:get_cfg_field(resource, V1State), + release_cursor_interval = rabbit_fifo_v1:get_cfg_field(release_cursor_interval, V1State), + dead_letter_handler = DLH, + become_leader_handler = rabbit_fifo_v1:get_cfg_field(become_leader_handler, V1State), + %% TODO: what if policy enabling reject_publish was applied before conversion? 
+ overflow_strategy = rabbit_fifo_v1:get_cfg_field(overflow_strategy, V1State), + max_length = rabbit_fifo_v1:get_cfg_field(max_length, V1State), + max_bytes = rabbit_fifo_v1:get_cfg_field(max_bytes, V1State), + consumer_strategy = rabbit_fifo_v1:get_cfg_field(consumer_strategy, V1State), + delivery_limit = rabbit_fifo_v1:get_cfg_field(delivery_limit, V1State), + expires = rabbit_fifo_v1:get_cfg_field(expires, V1State) + }, + + MessagesConsumersV2 = maps:fold(fun(_ConsumerId, #consumer{checked_out = Checked}, Acc) -> + Acc + maps:size(Checked) + end, 0, ConsumersV2), + MessagesWaitingConsumersV2 = lists:foldl(fun({_ConsumerId, #consumer{checked_out = Checked}}, Acc) -> + Acc + maps:size(Checked) + end, 0, WaitingConsumersV2), + MessagesTotal = lqueue:len(MessagesV2) + + lqueue:len(ReturnsV2) + + MessagesConsumersV2 + + MessagesWaitingConsumersV2, + + #?STATE{cfg = Cfg, + messages = MessagesV2, + messages_total = MessagesTotal, + returns = ReturnsV2, + enqueue_count = rabbit_fifo_v1:get_field(enqueue_count, V1State), + enqueuers = EnqueuersV2, + ra_indexes = IndexesV1, + release_cursors = rabbit_fifo_v1:get_field(release_cursors, V1State), + consumers = ConsumersV2, + service_queue = rabbit_fifo_v1:get_field(service_queue, V1State), + msg_bytes_enqueue = rabbit_fifo_v1:get_field(msg_bytes_enqueue, V1State), + msg_bytes_checkout = rabbit_fifo_v1:get_field(msg_bytes_checkout, V1State), + waiting_consumers = WaitingConsumersV2, + last_active = rabbit_fifo_v1:get_field(last_active, V1State) + }. + +convert_v2_to_v3(#rabbit_fifo{consumers = ConsumersV2} = StateV2) -> + ConsumersV3 = maps:map(fun(_, C) -> + convert_consumer_v2_to_v3(C) + end, ConsumersV2), + StateV2#rabbit_fifo{consumers = ConsumersV3}. + +get_field(Field, State) -> + Fields = record_info(fields, ?STATE), + Index = record_index_of(Field, Fields), + element(Index, State). + +record_index_of(F, Fields) -> + index_of(2, F, Fields). 
+
+index_of(_, F, []) ->
+    exit({field_not_found, F});
+index_of(N, F, [F | _]) ->
+    N;
+index_of(N, F, [_ | T]) ->
+    index_of(N+1, F, T).
+
+convert_consumer_v2_to_v3(C = #consumer{cfg = Cfg = #consumer_cfg{credit_mode = simple_prefetch,
+                                                                  meta = #{prefetch := Prefetch}}}) ->
+    C#consumer{cfg = Cfg#consumer_cfg{credit_mode = {simple_prefetch, Prefetch}}};
+convert_consumer_v2_to_v3(C) ->
+    C.
+
+purge_node(Meta, Node, State, Effects) ->
+    lists:foldl(fun(Pid, {S0, E0}) ->
+                        {S, E} = handle_down(Meta, Pid, S0),
+                        {S, E0 ++ E}
+                end, {State, Effects}, all_pids_for(Node, State)).
+
+%% any downs that are not noconnection
+handle_down(Meta, Pid, #?STATE{consumers = Cons0,
+                               enqueuers = Enqs0} = State0) ->
+    % Remove any enqueuer for the down pid
+    State1 = State0#?STATE{enqueuers = maps:remove(Pid, Enqs0)},
+    {Effects1, State2} = handle_waiting_consumer_down(Pid, State1),
+    % return checked out messages to main queue
+    % Find the consumers for the down pid
+    DownConsumers = maps:keys(
+                      maps:filter(fun({_, P}, _) -> P =:= Pid end, Cons0)),
+    lists:foldl(fun(ConsumerId, {S, E}) ->
+                        cancel_consumer(Meta, ConsumerId, S, E, down)
+                end, {State2, Effects1}, DownConsumers).
+
+consumer_active_flag_update_function(
+  #?STATE{cfg = #cfg{consumer_strategy = competing}}) ->
+    fun(State, ConsumerId, Consumer, Active, ActivityStatus, Effects) ->
+            consumer_update_active_effects(State, ConsumerId, Consumer, Active,
+                                           ActivityStatus, Effects)
+    end;
+consumer_active_flag_update_function(
+  #?STATE{cfg = #cfg{consumer_strategy = single_active}}) ->
+    fun(_, _, _, _, _, Effects) ->
+            Effects
+    end. 
+ +handle_waiting_consumer_down(_Pid, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State) -> + {[], State}; +handle_waiting_consumer_down(_Pid, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State) -> + {[], State}; +handle_waiting_consumer_down(Pid, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = WaitingConsumers0} = State0) -> + % get cancel effects for down waiting consumers + Down = lists:filter(fun({{_, P}, _}) -> P =:= Pid end, + WaitingConsumers0), + Effects = lists:foldl(fun ({ConsumerId, _}, Effects) -> + cancel_consumer_effects(ConsumerId, State0, + Effects) + end, [], Down), + % update state to have only up waiting consumers + StillUp = lists:filter(fun({{_, P}, _}) -> P =/= Pid end, + WaitingConsumers0), + State = State0#?STATE{waiting_consumers = StillUp}, + {Effects, State}. + +update_waiting_consumer_status(Node, + #?STATE{waiting_consumers = WaitingConsumers}, + Status) -> + [begin + case node(Pid) of + Node -> + {ConsumerId, Consumer#consumer{status = Status}}; + _ -> + {ConsumerId, Consumer} + end + end || {{_, Pid} = ConsumerId, Consumer} <- WaitingConsumers, + Consumer#consumer.status =/= cancelled]. + +-spec state_enter(ra_server:ra_state() | eol, state()) -> + ra_machine:effects(). +state_enter(RaState, #?STATE{cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}, + dlx = DlxState} = State) -> + Effects = rabbit_fifo_dlx:state_enter(RaState, QRes, DLH, DlxState), + state_enter0(RaState, State, Effects). 
+ +state_enter0(leader, #?STATE{consumers = Cons, + enqueuers = Enqs, + waiting_consumers = WaitingConsumers, + cfg = #cfg{name = Name, + resource = Resource, + become_leader_handler = BLH} + } = State, + Effects0) -> + TimerEffs = timer_effect(erlang:system_time(millisecond), State, Effects0), + % return effects to monitor all current consumers and enqueuers + Pids = lists:usort(maps:keys(Enqs) + ++ [P || {_, P} <- maps:keys(Cons)] + ++ [P || {{_, P}, _} <- WaitingConsumers]), + Mons = [{monitor, process, P} || P <- Pids], + Nots = [{send_msg, P, leader_change, ra_event} || P <- Pids], + NodeMons = lists:usort([{monitor, node, node(P)} || P <- Pids]), + FHReservation = [{mod_call, rabbit_quorum_queue, + file_handle_leader_reservation, [Resource]}], + NotifyDecs = notify_decorators_startup(Resource), + Effects = TimerEffs ++ Mons ++ Nots ++ NodeMons ++ FHReservation ++ [NotifyDecs], + case BLH of + undefined -> + Effects; + {Mod, Fun, Args} -> + [{mod_call, Mod, Fun, Args ++ [Name]} | Effects] + end; +state_enter0(eol, #?STATE{enqueuers = Enqs, + consumers = Custs0, + waiting_consumers = WaitingConsumers0}, + Effects) -> + Custs = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Custs0), + WaitingConsumers1 = lists:foldl(fun({{_, P}, V}, Acc) -> Acc#{P => V} end, + #{}, WaitingConsumers0), + AllConsumers = maps:merge(Custs, WaitingConsumers1), + [{send_msg, P, eol, ra_event} + || P <- maps:keys(maps:merge(Enqs, AllConsumers))] ++ + [{aux, eol}, + {mod_call, rabbit_quorum_queue, file_handle_release_reservation, []} | Effects]; +state_enter0(State, #?STATE{cfg = #cfg{resource = _Resource}}, Effects) + when State =/= leader -> + FHReservation = {mod_call, rabbit_quorum_queue, file_handle_other_reservation, []}, + [FHReservation | Effects]; +state_enter0(_, _, Effects) -> + %% catch all as not handling all states + Effects. + +-spec tick(non_neg_integer(), state()) -> ra_machine:effects(). 
+tick(Ts, #?STATE{cfg = #cfg{name = _Name, + resource = QName}} = State) -> + case is_expired(Ts, State) of + true -> + [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}]; + false -> + [{aux, {handle_tick, [QName, overview(State), all_nodes(State)]}}] + end. + +-spec overview(state()) -> map(). +overview(#?STATE{consumers = Cons, + enqueuers = Enqs, + release_cursors = Cursors, + enqueue_count = EnqCount, + msg_bytes_enqueue = EnqueueBytes, + msg_bytes_checkout = CheckoutBytes, + cfg = Cfg, + dlx = DlxState, + waiting_consumers = WaitingConsumers} = State) -> + Conf = #{name => Cfg#cfg.name, + resource => Cfg#cfg.resource, + release_cursor_interval => Cfg#cfg.release_cursor_interval, + dead_lettering_enabled => undefined =/= Cfg#cfg.dead_letter_handler, + max_length => Cfg#cfg.max_length, + max_bytes => Cfg#cfg.max_bytes, + consumer_strategy => Cfg#cfg.consumer_strategy, + expires => Cfg#cfg.expires, + msg_ttl => Cfg#cfg.msg_ttl, + delivery_limit => Cfg#cfg.delivery_limit + }, + SacOverview = case active_consumer(Cons) of + {SacConsumerId, _} -> + NumWaiting = length(WaitingConsumers), + #{single_active_consumer_id => SacConsumerId, + single_active_num_waiting_consumers => NumWaiting}; + _ -> + #{} + end, + Overview = #{type => ?STATE, + config => Conf, + num_consumers => map_size(Cons), + num_active_consumers => query_consumer_count(State), + num_checked_out => num_checked_out(State), + num_enqueuers => maps:size(Enqs), + num_ready_messages => messages_ready(State), + num_in_memory_ready_messages => 0, %% backwards compat + num_messages => messages_total(State), + num_release_cursors => lqueue:len(Cursors), + release_cursors => [I || {_, I, _} <- lqueue:to_list(Cursors)], + release_cursor_enqueue_counter => EnqCount, + enqueue_message_bytes => EnqueueBytes, + checkout_message_bytes => CheckoutBytes, + in_memory_message_bytes => 0, %% backwards compat + smallest_raft_index => smallest_raft_index(State) + }, + DlxOverview = rabbit_fifo_dlx:overview(DlxState), 
+ maps:merge(maps:merge(Overview, DlxOverview), SacOverview). + +-spec get_checked_out(consumer_id(), msg_id(), msg_id(), state()) -> + [delivery_msg()]. +get_checked_out(Cid, From, To, #?STATE{consumers = Consumers}) -> + case Consumers of + #{Cid := #consumer{checked_out = Checked}} -> + [begin + ?MSG(I, H) = maps:get(K, Checked), + {K, {I, H}} + end || K <- lists:seq(From, To), maps:is_key(K, Checked)]; + _ -> + [] + end. + +-spec version() -> pos_integer(). +version() -> 3. + +which_module(0) -> rabbit_fifo_v0; +which_module(1) -> rabbit_fifo_v1; +which_module(2) -> ?STATE; +which_module(3) -> ?STATE. + +-define(AUX, aux_v2). + +-record(aux_gc, {last_raft_idx = 0 :: ra:index()}). +-record(aux, {name :: atom(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}}). +-record(?AUX, {name :: atom(), + last_decorators_state :: term(), + capacity :: term(), + gc = #aux_gc{} :: #aux_gc{}, + tick_pid, + cache = #{} :: map()}). + +init_aux(Name) when is_atom(Name) -> + %% TODO: catch specific exception throw if table already exists + ok = ra_machine_ets:create_table(rabbit_fifo_usage, + [named_table, set, public, + {write_concurrency, true}]), + Now = erlang:monotonic_time(micro_seconds), + #?AUX{name = Name, + capacity = {inactive, Now, 1, 1.0}}. 
+ +handle_aux(RaftState, Tag, Cmd, #aux{name = Name, + capacity = Cap, + gc = Gc}, Log, MacState) -> + %% convert aux state to new version + Aux = #?AUX{name = Name, + capacity = Cap, + gc = Gc}, + handle_aux(RaftState, Tag, Cmd, Aux, Log, MacState); +handle_aux(leader, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; +handle_aux(follower, _, garbage_collection, Aux, Log, MacState) -> + {no_reply, force_eval_gc(Log, MacState, Aux), Log}; +handle_aux(_RaftState, cast, {#return{msg_ids = MsgIds, + consumer_id = ConsumerId}, Corr, Pid}, + Aux0, Log0, #?STATE{cfg = #cfg{delivery_limit = undefined}, + consumers = Consumers}) -> + case Consumers of + #{ConsumerId := #consumer{checked_out = Checked}} -> + {Log, ToReturn} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + %% it is possible this is not found if the consumer + %% crashed and the message got removed + case ra_log:fetch(Idx, L0) of + {{_, _, {_, _, Cmd, _}}, L} -> + Msg = get_msg(Cmd), + {L, [{MsgId, Idx, Header, Msg} | Acc]}; + {undefined, L} -> + {L, Acc} + end + end, {Log0, []}, maps:with(MsgIds, Checked)), + + Appends = make_requeue(ConsumerId, {notify, Corr, Pid}, + lists:sort(ToReturn), []), + {no_reply, Aux0, Log, Appends}; + _ -> + {no_reply, Aux0, Log0} + end; +handle_aux(leader, _, {handle_tick, [QName, Overview, Nodes]}, + #?AUX{tick_pid = Pid} = Aux, Log, _) -> + NewPid = + case process_is_alive(Pid) of + false -> + %% No active TICK pid + %% this function spawns and returns the tick process pid + rabbit_quorum_queue:handle_tick(QName, Overview, Nodes); + true -> + %% Active TICK pid, do nothing + Pid + end, + {no_reply, Aux#?AUX{tick_pid = NewPid}, Log}; +handle_aux(_, _, {get_checked_out, ConsumerId, MsgIds}, + Aux0, Log0, #?STATE{cfg = #cfg{}, + consumers = Consumers}) -> + case Consumers of + #{ConsumerId := #consumer{checked_out = Checked}} -> + {Log, IdMsgs} = + maps:fold( + fun (MsgId, ?MSG(Idx, Header), {L0, Acc}) -> + %% it is 
possible this is not found if the consumer + %% crashed and the message got removed + case ra_log:fetch(Idx, L0) of + {{_, _, {_, _, Cmd, _}}, L} -> + Msg = get_msg(Cmd), + {L, [{MsgId, {Header, Msg}} | Acc]}; + {undefined, L} -> + {L, Acc} + end + end, {Log0, []}, maps:with(MsgIds, Checked)), + {reply, {ok, IdMsgs}, Aux0, Log}; + _ -> + {reply, {error, consumer_not_found}, Aux0, Log0} + end; +handle_aux(leader, cast, {#return{} = Ret, Corr, Pid}, + Aux0, Log, #?STATE{}) -> + %% for returns with a delivery limit set we can just return as before + {no_reply, Aux0, Log, [{append, Ret, {notify, Corr, Pid}}]}; +handle_aux(leader, cast, eval, #?AUX{last_decorators_state = LastDec} = Aux0, + Log, #?STATE{cfg = #cfg{resource = QName}} = MacState) -> + %% this is called after each batch of commands have been applied + %% set timer for message expire + %% should really be the last applied index ts but this will have to do + Ts = erlang:system_time(millisecond), + Effects0 = timer_effect(Ts, MacState, []), + case query_notify_decorators_info(MacState) of + LastDec -> + {no_reply, Aux0, Log, Effects0}; + {MaxActivePriority, IsEmpty} = NewLast -> + Effects = [notify_decorators_effect(QName, MaxActivePriority, IsEmpty) + | Effects0], + {no_reply, Aux0#?AUX{last_decorators_state = NewLast}, Log, Effects} + end; +handle_aux(_RaftState, cast, eval, Aux0, Log, _MacState) -> + {no_reply, Aux0, Log}; +handle_aux(_RaState, cast, Cmd, #?AUX{capacity = Use0} = Aux0, + Log, _MacState) + when Cmd == active orelse Cmd == inactive -> + {no_reply, Aux0#?AUX{capacity = update_use(Use0, Cmd)}, Log}; +handle_aux(_RaState, cast, tick, #?AUX{name = Name, + capacity = Use0} = State0, + Log, MacState) -> + true = ets:insert(rabbit_fifo_usage, + {Name, capacity(Use0)}), + Aux = eval_gc(Log, MacState, State0), + {no_reply, Aux, Log}; +handle_aux(_RaState, cast, eol, #?AUX{name = Name} = Aux, Log, _) -> + ets:delete(rabbit_fifo_usage, Name), + {no_reply, Aux, Log}; +handle_aux(_RaState, {call, _From}, 
oldest_entry_timestamp, + #?AUX{cache = Cache} = Aux0, + Log0, #?STATE{} = State) -> + {CachedIdx, CachedTs} = maps:get(oldest_entry, Cache, {undefined, undefined}), + case smallest_raft_index(State) of + %% if there are no entries, we return current timestamp + %% so that any previously obtained entries are considered + %% older than this + undefined -> + Aux1 = Aux0#?AUX{cache = maps:remove(oldest_entry, Cache)}, + {reply, {ok, erlang:system_time(millisecond)}, Aux1, Log0}; + CachedIdx -> + %% cache hit + {reply, {ok, CachedTs}, Aux0, Log0}; + Idx when is_integer(Idx) -> + case ra_log:fetch(Idx, Log0) of + {{_, _, {_, #{ts := Timestamp}, _, _}}, Log1} -> + Aux1 = Aux0#?AUX{cache = Cache#{oldest_entry => + {Idx, Timestamp}}}, + {reply, {ok, Timestamp}, Aux1, Log1}; + {undefined, Log1} -> + %% fetch failed + {reply, {error, failed_to_get_timestamp}, Aux0, Log1} + end + end; +handle_aux(_RaState, {call, _From}, {peek, Pos}, Aux0, + Log0, MacState) -> + case query_peek(Pos, MacState) of + {ok, ?MSG(Idx, Header)} -> + %% need to re-hydrate from the log + {{_, _, {_, _, Cmd, _}}, Log} = ra_log:fetch(Idx, Log0), + Msg = get_msg(Cmd), + {reply, {ok, {Header, Msg}}, Aux0, Log}; + Err -> + {reply, Err, Aux0, Log0} + end; +handle_aux(RaState, _, {dlx, _} = Cmd, Aux0, Log, + #?STATE{dlx = DlxState, + cfg = #cfg{dead_letter_handler = DLH, + resource = QRes}}) -> + Aux = rabbit_fifo_dlx:handle_aux(RaState, Cmd, Aux0, QRes, DLH, DlxState), + {no_reply, Aux, Log}. + +eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}} = MacState, + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case messages_total(MacState) of + 0 when Idx > LastGcIdx andalso + Mem > ?GC_MEM_LIMIT_B -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~ts: full GC sweep complete. 
" + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; + _ -> + AuxState + end. + +force_eval_gc(Log, #?STATE{cfg = #cfg{resource = QR}}, + #?AUX{gc = #aux_gc{last_raft_idx = LastGcIdx} = Gc} = AuxState) -> + {Idx, _} = ra_log:last_index_term(Log), + {memory, Mem} = erlang:process_info(self(), memory), + case Idx > LastGcIdx of + true -> + garbage_collect(), + {memory, MemAfter} = erlang:process_info(self(), memory), + rabbit_log:debug("~ts: full GC sweep complete. " + "Process memory changed from ~.2fMB to ~.2fMB.", + [rabbit_misc:rs(QR), Mem/?MB, MemAfter/?MB]), + AuxState#?AUX{gc = Gc#aux_gc{last_raft_idx = Idx}}; + false -> + AuxState + end. + +process_is_alive(Pid) when is_pid(Pid) -> + is_process_alive(Pid); +process_is_alive(_) -> + false. +%%% Queries + +query_messages_ready(State) -> + messages_ready(State). + +query_messages_checked_out(#?STATE{consumers = Consumers}) -> + maps:fold(fun (_, #consumer{checked_out = C}, S) -> + maps:size(C) + S + end, 0, Consumers). + +query_messages_total(State) -> + messages_total(State). + +query_processes(#?STATE{enqueuers = Enqs, consumers = Cons0}) -> + Cons = maps:fold(fun({_, P}, V, S) -> S#{P => V} end, #{}, Cons0), + maps:keys(maps:merge(Enqs, Cons)). + + +query_ra_indexes(#?STATE{ra_indexes = RaIndexes}) -> + RaIndexes. + +query_waiting_consumers(#?STATE{waiting_consumers = WaitingConsumers}) -> + WaitingConsumers. + +query_consumer_count(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers}) -> + Up = maps:filter(fun(_ConsumerId, #consumer{status = Status}) -> + Status =/= suspected_down + end, Consumers), + maps:size(Up) + length(WaitingConsumers). 
+ +query_consumers(#?STATE{consumers = Consumers, + waiting_consumers = WaitingConsumers, + cfg = #cfg{consumer_strategy = ConsumerStrategy}} = State) -> + ActiveActivityStatusFun = + case ConsumerStrategy of + competing -> + fun(_ConsumerId, + #consumer{status = Status}) -> + case Status of + suspected_down -> + {false, Status}; + _ -> + {true, Status} + end + end; + single_active -> + SingleActiveConsumer = query_single_active_consumer(State), + fun({Tag, Pid} = _Consumer, _) -> + case SingleActiveConsumer of + {value, {Tag, Pid}} -> + {true, single_active}; + _ -> + {false, waiting} + end + end + end, + FromConsumers = + maps:fold(fun (_, #consumer{status = cancelled}, Acc) -> + Acc; + ({Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, Consumers), + FromWaitingConsumers = + lists:foldl(fun ({_, #consumer{status = cancelled}}, Acc) -> + Acc; + ({{Tag, Pid}, + #consumer{cfg = #consumer_cfg{meta = Meta}} = Consumer}, + Acc) -> + {Active, ActivityStatus} = + ActiveActivityStatusFun({Tag, Pid}, Consumer), + maps:put({Tag, Pid}, + {Pid, Tag, + maps:get(ack, Meta, undefined), + maps:get(prefetch, Meta, undefined), + Active, + ActivityStatus, + maps:get(args, Meta, []), + maps:get(username, Meta, undefined)}, + Acc) + end, #{}, WaitingConsumers), + maps:merge(FromConsumers, FromWaitingConsumers). + + +query_single_active_consumer( + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Consumers}) -> + case active_consumer(Consumers) of + undefined -> + {error, no_value}; + {ActiveCid, _} -> + {value, ActiveCid} + end; +query_single_active_consumer(_) -> + disabled. 
+ +query_stat(#?STATE{consumers = Consumers} = State) -> + {messages_ready(State), maps:size(Consumers)}. + +query_in_memory_usage(#?STATE{ }) -> + {0, 0}. + +query_stat_dlx(#?STATE{dlx = DlxState}) -> + rabbit_fifo_dlx:stat(DlxState). + +query_peek(Pos, State0) when Pos > 0 -> + case take_next_msg(State0) of + empty -> + {error, no_message_at_pos}; + {Msg, _State} + when Pos == 1 -> + {ok, Msg}; + {_Msg, State} -> + query_peek(Pos-1, State) + end. + +query_notify_decorators_info(#?STATE{consumers = Consumers} = State) -> + MaxActivePriority = maps:fold( + fun(_, #consumer{credit = C, + status = up, + cfg = #consumer_cfg{priority = P}}, + MaxP) when C > 0 -> + case MaxP of + empty -> P; + MaxP when MaxP > P -> MaxP; + _ -> P + end; + (_, _, MaxP) -> + MaxP + end, empty, Consumers), + IsEmpty = (messages_ready(State) == 0), + {MaxActivePriority, IsEmpty}. + +-spec usage(atom()) -> float(). +usage(Name) when is_atom(Name) -> + case ets:lookup(rabbit_fifo_usage, Name) of + [] -> 0.0; + [{_, Use}] -> Use + end. + +%%% Internal + +messages_ready(#?STATE{messages = M, + returns = R}) -> + lqueue:len(M) + lqueue:len(R). + +messages_total(#?STATE{messages_total = Total, + dlx = DlxState}) -> + {DlxTotal, _} = rabbit_fifo_dlx:stat(DlxState), + Total + DlxTotal. + +update_use({inactive, _, _, _} = CUInfo, inactive) -> + CUInfo; +update_use({active, _, _} = CUInfo, active) -> + CUInfo; +update_use({active, Since, Avg}, inactive) -> + Now = erlang:monotonic_time(micro_seconds), + {inactive, Now, Now - Since, Avg}; +update_use({inactive, Since, Active, Avg}, active) -> + Now = erlang:monotonic_time(micro_seconds), + {active, Now, use_avg(Active, Now - Since, Avg)}. + +capacity({active, Since, Avg}) -> + use_avg(erlang:monotonic_time(micro_seconds) - Since, 0, Avg); +capacity({inactive, _, 1, 1.0}) -> + 1.0; +capacity({inactive, Since, Active, Avg}) -> + use_avg(Active, erlang:monotonic_time(micro_seconds) - Since, Avg). 
+ +use_avg(0, 0, Avg) -> + Avg; +use_avg(Active, Inactive, Avg) -> + Time = Inactive + Active, + moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg). + +moving_average(_Time, _, Next, undefined) -> + Next; +moving_average(Time, HalfLife, Next, Current) -> + Weight = math:exp(Time * math:log(0.5) / HalfLife), + Next * (1 - Weight) + Current * Weight. + +num_checked_out(#?STATE{consumers = Cons}) -> + maps:fold(fun (_, #consumer{checked_out = C}, Acc) -> + maps:size(C) + Acc + end, 0, Cons). + +cancel_consumer(Meta, ConsumerId, + #?STATE{cfg = #cfg{consumer_strategy = competing}} = State, + Effects, Reason) -> + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = []} = State, + Effects, Reason) -> + %% single active consumer on, no consumers are waiting + cancel_consumer0(Meta, ConsumerId, State, Effects, Reason); +cancel_consumer(Meta, ConsumerId, + #?STATE{consumers = Cons0, + cfg = #cfg{consumer_strategy = single_active}, + waiting_consumers = Waiting0} = State0, + Effects0, Reason) -> + %% single active consumer on, consumers are waiting + case Cons0 of + #{ConsumerId := #consumer{status = _}} -> + % The active consumer is to be removed + {State1, Effects1} = cancel_consumer0(Meta, ConsumerId, State0, + Effects0, Reason), + activate_next_consumer(State1, Effects1); + _ -> + % The cancelled consumer is not active or cancelled + % Just remove it from idle_consumers + Waiting = lists:keydelete(ConsumerId, 1, Waiting0), + Effects = cancel_consumer_effects(ConsumerId, State0, Effects0), + % A waiting consumer isn't supposed to have any checked out messages, + % so nothing special to do here + {State0#?STATE{waiting_consumers = Waiting}, Effects} + end. 
+ +consumer_update_active_effects(#?STATE{cfg = #cfg{resource = QName}}, + ConsumerId, + #consumer{cfg = #consumer_cfg{meta = Meta}}, + Active, ActivityStatus, + Effects) -> + Ack = maps:get(ack, Meta, undefined), + Prefetch = maps:get(prefetch, Meta, undefined), + Args = maps:get(args, Meta, []), + [{mod_call, rabbit_quorum_queue, update_consumer_handler, + [QName, ConsumerId, false, Ack, Prefetch, Active, ActivityStatus, Args]} + | Effects]. + +cancel_consumer0(Meta, ConsumerId, + #?STATE{consumers = C0} = S0, Effects0, Reason) -> + case C0 of + #{ConsumerId := Consumer} -> + {S, Effects2} = maybe_return_all(Meta, ConsumerId, Consumer, + S0, Effects0, Reason), + + %% The effects are emitted before the consumer is actually removed + %% if the consumer has unacked messages. This is a bit weird but + %% in line with what classic queues do (from an external point of + %% view) + Effects = cancel_consumer_effects(ConsumerId, S, Effects2), + {S, Effects}; + _ -> + %% already removed: do nothing + {S0, Effects0} + end. 
+ +activate_next_consumer(#?STATE{cfg = #cfg{consumer_strategy = competing}} = State0, + Effects0) -> + {State0, Effects0}; +activate_next_consumer(#?STATE{consumers = Cons, + waiting_consumers = Waiting0} = State0, + Effects0) -> + case has_active_consumer(Cons) of + false -> + case lists:filter(fun ({_, #consumer{status = Status}}) -> + Status == up + end, Waiting0) of + [{NextConsumerId, #consumer{cfg = NextCCfg} = NextConsumer} | _] -> + Remaining = lists:keydelete(NextConsumerId, 1, Waiting0), + Consumer = case maps:get(NextConsumerId, Cons, undefined) of + undefined -> + NextConsumer; + Existing -> + %% there was an exisiting non-active consumer + %% just update the existing cancelled consumer + %% with the new config + Existing#consumer{cfg = NextCCfg} + end, + #?STATE{service_queue = ServiceQueue} = State0, + ServiceQueue1 = maybe_queue_consumer(NextConsumerId, + Consumer, + ServiceQueue), + State = State0#?STATE{consumers = Cons#{NextConsumerId => Consumer}, + service_queue = ServiceQueue1, + waiting_consumers = Remaining}, + Effects = consumer_update_active_effects(State, NextConsumerId, + Consumer, true, + single_active, Effects0), + {State, Effects}; + [] -> + {State0, Effects0} + end; + true -> + {State0, Effects0} + end. + +has_active_consumer(Consumers) -> + active_consumer(Consumers) /= undefined. + +active_consumer({Cid, #consumer{status = up} = Consumer, _I}) -> + {Cid, Consumer}; +active_consumer({_Cid, #consumer{status = _}, I}) -> + active_consumer(maps:next(I)); +active_consumer(none) -> + undefined; +active_consumer(M) when is_map(M) -> + I = maps:iterator(M), + active_consumer(maps:next(I)). 
+ +maybe_return_all(#{system_time := Ts} = Meta, ConsumerId, + #consumer{cfg = CCfg} = Consumer, S0, + Effects0, Reason) -> + case Reason of + consumer_cancel -> + {update_or_remove_sub( + Meta, ConsumerId, + Consumer#consumer{cfg = CCfg#consumer_cfg{lifetime = once}, + credit = 0, + status = cancelled}, + S0), Effects0}; + down -> + {S1, Effects1} = return_all(Meta, S0, Effects0, ConsumerId, Consumer), + {S1#?STATE{consumers = maps:remove(ConsumerId, S1#?STATE.consumers), + last_active = Ts}, + Effects1} + end. + +apply_enqueue(#{index := RaftIdx, + system_time := Ts} = Meta, From, Seq, RawMsg, State0) -> + case maybe_enqueue(RaftIdx, Ts, From, Seq, RawMsg, [], State0) of + {ok, State1, Effects1} -> + {State, ok, Effects} = checkout(Meta, State0, State1, Effects1), + {maybe_store_release_cursor(RaftIdx, State), ok, Effects}; + {out_of_sequence, State, Effects} -> + {State, not_enqueued, Effects}; + {duplicate, State, Effects} -> + {State, ok, Effects} + end. + +decr_total(#?STATE{messages_total = Tot} = State) -> + State#?STATE{messages_total = Tot - 1}. + +drop_head(#?STATE{ra_indexes = Indexes0} = State0, Effects) -> + case take_next_msg(State0) of + {?MSG(Idx, Header) = Msg, State1} -> + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State2 = State1#?STATE{ra_indexes = Indexes}, + State3 = decr_total(add_bytes_drop(Header, State2)), + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState} = State = State3, + {_, DlxEffects} = rabbit_fifo_dlx:discard([Msg], maxlen, DLH, DlxState), + {State, DlxEffects ++ Effects}; + empty -> + {State0, Effects} + end. 
+ +maybe_set_msg_ttl(#basic_message{content = #content{properties = none}}, + RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + update_expiry_header(RaCmdTs, PerQueueMsgTTL, Header); +maybe_set_msg_ttl(#basic_message{content = #content{properties = Props}}, + RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = PerQueueMsgTTL}}) -> + %% rabbit_quorum_queue will leave the properties decoded if and only if + %% per message message TTL is set. + %% We already check in the channel that expiration must be valid. + {ok, PerMsgMsgTTL} = rabbit_basic:parse_expiration(Props), + TTL = min(PerMsgMsgTTL, PerQueueMsgTTL), + update_expiry_header(RaCmdTs, TTL, Header); +maybe_set_msg_ttl(Msg, RaCmdTs, Header, + #?STATE{cfg = #cfg{msg_ttl = MsgTTL}}) -> + case mc:is(Msg) of + true -> + TTL = min(MsgTTL, mc:ttl(Msg)), + update_expiry_header(RaCmdTs, TTL, Header); + false -> + Header + end. + +update_expiry_header(_, undefined, Header) -> + Header; +update_expiry_header(RaCmdTs, 0, Header) -> + %% We do not comply exactly with the "TTL=0 models AMQP immediate flag" semantics + %% as done for classic queues where the message is discarded if it cannot be + %% consumed immediately. + %% Instead, we discard the message if it cannot be consumed within the same millisecond + %% when it got enqueued. This behaviour should be good enough. + update_expiry_header(RaCmdTs + 1, Header); +update_expiry_header(RaCmdTs, TTL, Header) -> + update_expiry_header(RaCmdTs + TTL, Header). + +update_expiry_header(ExpiryTs, Header) -> + update_header(expiry, fun(Ts) -> Ts end, ExpiryTs, Header). 
+ +maybe_store_release_cursor(RaftIdx, + #?STATE{cfg = #cfg{release_cursor_interval = {Base, C}} = Cfg, + enqueue_count = EC, + release_cursors = Cursors0} = State0) + when EC >= C -> + case messages_total(State0) of + 0 -> + %% message must have been immediately dropped + State0#?STATE{enqueue_count = 0}; + Total -> + Interval = case Base of + 0 -> 0; + _ -> + min(max(Total, Base), ?RELEASE_CURSOR_EVERY_MAX) + end, + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = + {Base, Interval}}}, + Dehydrated = dehydrate_state(State), + Cursor = {release_cursor, RaftIdx, Dehydrated}, + Cursors = lqueue:in(Cursor, Cursors0), + State#?STATE{enqueue_count = 0, + release_cursors = Cursors} + end; +maybe_store_release_cursor(_RaftIdx, State) -> + State. + +maybe_enqueue(RaftIdx, Ts, undefined, undefined, RawMsg, Effects, + #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + messages = Messages, + messages_total = Total} = State0) -> + % direct enqueue without tracking + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?MSG(RaftIdx, Header), + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = lqueue:in(Msg, Messages) + }, + {ok, State, Effects}; +maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, RawMsg, Effects0, + #?STATE{msg_bytes_enqueue = Enqueue, + enqueue_count = EnqCount, + enqueuers = Enqueuers0, + messages = Messages, + messages_total = Total} = State0) -> + + case maps:get(From, Enqueuers0, undefined) of + undefined -> + State1 = State0#?STATE{enqueuers = Enqueuers0#{From => #enqueuer{}}}, + {Res, State, Effects} = maybe_enqueue(RaftIdx, Ts, From, MsgSeqNo, + RawMsg, Effects0, State1), + {Res, State, [{monitor, process, From} | Effects]}; + #enqueuer{next_seqno = MsgSeqNo} = Enq0 -> + % it is the next expected seqno + Size = message_size(RawMsg), + Header = maybe_set_msg_ttl(RawMsg, Ts, Size, State0), + Msg = ?MSG(RaftIdx, 
Header), + Enq = Enq0#enqueuer{next_seqno = MsgSeqNo + 1}, + MsgCache = case can_immediately_deliver(State0) of + true -> + {RaftIdx, RawMsg}; + false -> + undefined + end, + State = State0#?STATE{msg_bytes_enqueue = Enqueue + Size, + enqueue_count = EnqCount + 1, + messages_total = Total + 1, + messages = lqueue:in(Msg, Messages), + enqueuers = Enqueuers0#{From => Enq}, + msg_cache = MsgCache + }, + {ok, State, Effects0}; + #enqueuer{next_seqno = Next} + when MsgSeqNo > Next -> + %% TODO: when can this happen? + {out_of_sequence, State0, Effects0}; + #enqueuer{next_seqno = Next} when MsgSeqNo =< Next -> + % duplicate delivery + {duplicate, State0, Effects0} + end. + +return(#{index := IncomingRaftIdx, machine_version := MachineVersion} = Meta, + ConsumerId, Returned, Effects0, State0) -> + {State1, Effects1} = maps:fold( + fun(MsgId, Msg, {S0, E0}) -> + return_one(Meta, MsgId, Msg, S0, E0, ConsumerId) + end, {State0, Effects0}, Returned), + State2 = + case State1#?STATE.consumers of + #{ConsumerId := Con} + when MachineVersion >= 3 -> + update_or_remove_sub(Meta, ConsumerId, Con, State1); + #{ConsumerId := Con0} + when MachineVersion =:= 2 -> + Credit = increase_credit(Meta, Con0, map_size(Returned)), + Con = Con0#consumer{credit = Credit}, + update_or_remove_sub(Meta, ConsumerId, Con, State1); + _ -> + State1 + end, + {State, ok, Effects} = checkout(Meta, State0, State2, Effects1), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). 
+ +% used to process messages that are finished +complete(Meta, ConsumerId, [DiscardedMsgId], + #consumer{checked_out = Checked0} = Con0, + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + case maps:take(DiscardedMsgId, Checked0) of + {?MSG(Idx, Hdr), Checked} -> + SettledSize = get_header(size, Hdr), + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, 1)}, + State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - 1}; + error -> + State0 + end; +complete(Meta, ConsumerId, DiscardedMsgIds, + #consumer{checked_out = Checked0} = Con0, + #?STATE{ra_indexes = Indexes0, + msg_bytes_checkout = BytesCheckout, + messages_total = Tot} = State0) -> + {SettledSize, Checked, Indexes} + = lists:foldl( + fun (MsgId, {S0, Ch0, Idxs}) -> + case maps:take(MsgId, Ch0) of + {?MSG(Idx, Hdr), Ch} -> + S = get_header(size, Hdr) + S0, + {S, Ch, rabbit_fifo_index:delete(Idx, Idxs)}; + error -> + {S0, Ch0, Idxs} + end + end, {0, Checked0, Indexes0}, DiscardedMsgIds), + Len = map_size(Checked0) - map_size(Checked), + Con = Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, Len)}, + State1 = update_or_remove_sub(Meta, ConsumerId, Con, State0), + State1#?STATE{ra_indexes = Indexes, + msg_bytes_checkout = BytesCheckout - SettledSize, + messages_total = Tot - Len}. 
+ +increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = once}, + credit = Credit}, _) -> + %% once consumers cannot increment credit + Credit; +increase_credit(_Meta, #consumer{cfg = #consumer_cfg{lifetime = auto, + credit_mode = credited}, + credit = Credit}, _) -> + %% credit_mode: `credited' also doesn't automatically increment credit + Credit; +increase_credit(#{machine_version := MachineVersion}, + #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, + credit = Current}, Credit) + when MachineVersion >= 3 andalso MaxCredit > 0 -> + min(MaxCredit, Current + Credit); +increase_credit(_Meta, #consumer{credit = Current}, Credit) -> + Current + Credit. + +complete_and_checkout(#{index := IncomingRaftIdx} = Meta, MsgIds, ConsumerId, + #consumer{} = Con0, + Effects0, State0) -> + State1 = complete(Meta, ConsumerId, MsgIds, Con0, State0), + {State, ok, Effects} = checkout(Meta, State0, State1, Effects0), + update_smallest_raft_index(IncomingRaftIdx, State, Effects). + +cancel_consumer_effects(ConsumerId, + #?STATE{cfg = #cfg{resource = QName}} = _State, + Effects) -> + [{mod_call, rabbit_quorum_queue, + cancel_consumer_handler, [QName, ConsumerId]} | Effects]. + +update_smallest_raft_index(Idx, State, Effects) -> + update_smallest_raft_index(Idx, ok, State, Effects). 
+ +update_smallest_raft_index(IncomingRaftIdx, Reply, + #?STATE{cfg = Cfg, + release_cursors = Cursors0} = State0, + Effects) -> + Total = messages_total(State0), + %% TODO: optimise + case smallest_raft_index(State0) of + undefined when Total == 0 -> + % there are no messages on queue anymore and no pending enqueues + % we can forward release_cursor all the way until + % the last received command, hooray + %% reset the release cursor interval + #cfg{release_cursor_interval = {Base, _}} = Cfg, + RCI = {Base, Base}, + State = State0#?STATE{cfg = Cfg#cfg{release_cursor_interval = RCI}, + release_cursors = lqueue:new(), + enqueue_count = 0}, + {State, Reply, Effects ++ [{release_cursor, IncomingRaftIdx, State}]}; + undefined -> + {State0, Reply, Effects}; + Smallest when is_integer(Smallest) -> + case find_next_cursor(Smallest, Cursors0) of + empty -> + {State0, Reply, Effects}; + {Cursor, Cursors} -> + %% we can emit a release cursor when we've passed the smallest + %% release cursor available. + {State0#?STATE{release_cursors = Cursors}, Reply, + Effects ++ [Cursor]} + end + end. + +find_next_cursor(Idx, Cursors) -> + find_next_cursor(Idx, Cursors, empty). + +find_next_cursor(Smallest, Cursors0, Potential) -> + case lqueue:out(Cursors0) of + {{value, {_, Idx, _} = Cursor}, Cursors} when Idx < Smallest -> + %% we found one but it may not be the largest one + find_next_cursor(Smallest, Cursors, Cursor); + _ when Potential == empty -> + empty; + _ -> + {Potential, Cursors0} + end. + +update_msg_header(Key, Fun, Def, ?MSG(Idx, Header)) -> + ?MSG(Idx, update_header(Key, Fun, Def, Header)). 
+ +update_header(expiry, _, Expiry, Size) + when is_integer(Size) -> + ?TUPLE(Size, Expiry); +update_header(Key, UpdateFun, Default, Size) + when is_integer(Size) -> + update_header(Key, UpdateFun, Default, #{size => Size}); +update_header(Key, UpdateFun, Default, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + update_header(Key, UpdateFun, Default, #{size => Size, + expiry => Expiry}); +update_header(Key, UpdateFun, Default, Header) + when is_map(Header), is_map_key(size, Header) -> + maps:update_with(Key, UpdateFun, Default, Header). + +get_msg_header(?MSG(_Idx, Header)) -> + Header. + +get_header(size, Size) + when is_integer(Size) -> + Size; +get_header(_Key, Size) + when is_integer(Size) -> + undefined; +get_header(size, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Size; +get_header(expiry, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + Expiry; +get_header(_Key, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + undefined; +get_header(Key, Header) + when is_map(Header) andalso is_map_key(size, Header) -> + maps:get(Key, Header, undefined). 
+ +return_one(#{machine_version := MachineVersion} = Meta, + MsgId, Msg0, + #?STATE{returns = Returns, + consumers = Consumers, + dlx = DlxState0, + cfg = #cfg{delivery_limit = DeliveryLimit, + dead_letter_handler = DLH}} = State0, + Effects0, ConsumerId) -> + #consumer{checked_out = Checked0} = Con0 = maps:get(ConsumerId, Consumers), + Msg = update_msg_header(delivery_count, fun incr/1, 1, Msg0), + Header = get_msg_header(Msg), + case get_header(delivery_count, Header) of + DeliveryCount when DeliveryCount > DeliveryLimit -> + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], delivery_limit, DLH, DlxState0), + State1 = State0#?STATE{dlx = DlxState}, + State = complete(Meta, ConsumerId, [MsgId], Con0, State1), + {State, DlxEffects ++ Effects0}; + _ -> + Checked = maps:remove(MsgId, Checked0), + Con = case MachineVersion of + V when V >= 3 -> + Con0#consumer{checked_out = Checked, + credit = increase_credit(Meta, Con0, 1)}; + 2 -> + Con0#consumer{checked_out = Checked} + end, + {add_bytes_return( + Header, + State0#?STATE{consumers = Consumers#{ConsumerId => Con}, + returns = lqueue:in(Msg, Returns)}), + Effects0} + end. + +return_all(Meta, #?STATE{consumers = Cons} = State0, Effects0, ConsumerId, + #consumer{checked_out = Checked} = Con) -> + State = State0#?STATE{consumers = Cons#{ConsumerId => Con}}, + lists:foldl(fun ({MsgId, Msg}, {S, E}) -> + return_one(Meta, MsgId, Msg, S, E, ConsumerId) + end, {State, Effects0}, lists:sort(maps:to_list(Checked))). + +checkout(Meta, OldState, State0, Effects0) -> + checkout(Meta, OldState, State0, Effects0, ok). 
+ +checkout(#{index := Index} = Meta, + #?STATE{cfg = #cfg{resource = _QName}} = OldState, + State0, Effects0, Reply) -> + {#?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0} = State1, ExpiredMsg, Effects1} = + checkout0(Meta, checkout_one(Meta, false, State0, Effects0), #{}), + {DlxState, DlxDeliveryEffects} = rabbit_fifo_dlx:checkout(DLH, DlxState0), + %% TODO: only update dlx state if it has changed? + State2 = State1#?STATE{msg_cache = undefined, %% by this time the cache should be used + dlx = DlxState}, + Effects2 = DlxDeliveryEffects ++ Effects1, + case evaluate_limit(Index, false, OldState, State2, Effects2) of + {State, false, Effects} when ExpiredMsg == false -> + {State, Reply, Effects}; + {State, _, Effects} -> + update_smallest_raft_index(Index, Reply, State, Effects) + end. + +checkout0(Meta, {success, ConsumerId, MsgId, + ?MSG(_RaftIdx, _Header) = Msg, ExpiredMsg, State, Effects}, + SendAcc0) -> + DelMsg = {MsgId, Msg}, + SendAcc = case maps:get(ConsumerId, SendAcc0, undefined) of + undefined -> + SendAcc0#{ConsumerId => [DelMsg]}; + LogMsgs -> + SendAcc0#{ConsumerId => [DelMsg | LogMsgs]} + end, + checkout0(Meta, checkout_one(Meta, ExpiredMsg, State, Effects), SendAcc); +checkout0(_Meta, {_Activity, ExpiredMsg, State0, Effects0}, SendAcc) -> + Effects = add_delivery_effects(Effects0, SendAcc, State0), + {State0, ExpiredMsg, lists:reverse(Effects)}. 
+ +evaluate_limit(_Index, Result, _BeforeState, + #?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}} = State, + Effects) -> + {State, Result, Effects}; +evaluate_limit(Index, Result, BeforeState, + #?STATE{cfg = #cfg{overflow_strategy = Strategy}, + enqueuers = Enqs0} = State0, + Effects0) -> + case is_over_limit(State0) of + true when Strategy == drop_head -> + {State, Effects} = drop_head(State0, Effects0), + evaluate_limit(Index, true, BeforeState, State, Effects); + true when Strategy == reject_publish -> + %% generate send_msg effect for each enqueuer to let them know + %% they need to block + {Enqs, Effects} = + maps:fold( + fun (P, #enqueuer{blocked = undefined} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = Index}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, reject_publish}, + [ra_event]} | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; + false when Strategy == reject_publish -> + %% TODO: optimise as this case gets called for every command + %% pretty much + Before = is_below_soft_limit(BeforeState), + case {Before, is_below_soft_limit(State0)} of + {false, true} -> + %% we have moved below the lower limit + {Enqs, Effects} = + maps:fold( + fun (P, #enqueuer{} = E0, {Enqs, Acc}) -> + E = E0#enqueuer{blocked = undefined}, + {Enqs#{P => E}, + [{send_msg, P, {queue_status, go}, [ra_event]} + | Acc]}; + (_P, _E, Acc) -> + Acc + end, {Enqs0, Effects0}, Enqs0), + {State0#?STATE{enqueuers = Enqs}, Result, Effects}; + _ -> + {State0, Result, Effects0} + end; + false -> + {State0, Result, Effects0} + end. 
+ + +%% [6,5,4,3,2,1] -> [[1,2],[3,4],[5,6]] +chunk_disk_msgs([], _Bytes, [[] | Chunks]) -> + Chunks; +chunk_disk_msgs([], _Bytes, Chunks) -> + Chunks; +chunk_disk_msgs([{_MsgId, ?MSG(_RaftIdx, Header)} = Msg | Rem], + Bytes, Chunks) + when Bytes >= ?DELIVERY_CHUNK_LIMIT_B -> + Size = get_header(size, Header), + chunk_disk_msgs(Rem, Size, [[Msg] | Chunks]); +chunk_disk_msgs([{_MsgId, ?MSG(_RaftIdx, Header)} = Msg | Rem], Bytes, + [CurChunk | Chunks]) -> + Size = get_header(size, Header), + chunk_disk_msgs(Rem, Bytes + Size, [[Msg | CurChunk] | Chunks]). + +add_delivery_effects(Effects0, AccMap, _State) + when map_size(AccMap) == 0 -> + %% does this ever happen? + Effects0; +add_delivery_effects(Effects0, AccMap, State) -> + maps:fold(fun (C, DiskMsgs, Efs) + when is_list(DiskMsgs) -> + lists:foldl( + fun (Msgs, E) -> + [delivery_effect(C, Msgs, State) | E] + end, Efs, chunk_disk_msgs(DiskMsgs, 0, [[]])) + end, Effects0, AccMap). + +take_next_msg(#?STATE{returns = Returns0, + messages = Messages0, + ra_indexes = Indexes0 + } = State) -> + case lqueue:out(Returns0) of + {{value, NextMsg}, Returns} -> + {NextMsg, State#?STATE{returns = Returns}}; + {empty, _} -> + case lqueue:out(Messages0) of + {empty, _} -> + empty; + {{value, ?MSG(RaftIdx, _) = Msg}, Messages} -> + %% add index here + Indexes = rabbit_fifo_index:append(RaftIdx, Indexes0), + {Msg, State#?STATE{messages = Messages, + ra_indexes = Indexes}} + end + end. + +get_next_msg(#?STATE{returns = Returns0, + messages = Messages0}) -> + case lqueue:get(Returns0, empty) of + empty -> + lqueue:get(Messages0, empty); + Msg -> + Msg + end. 
+ +delivery_effect({CTag, CPid}, [{MsgId, ?MSG(Idx, Header)}], + #?STATE{msg_cache = {Idx, RawMsg}}) -> + {send_msg, CPid, {delivery, CTag, [{MsgId, {Header, RawMsg}}]}, + [local, ra_event]}; +delivery_effect({CTag, CPid}, Msgs, _State) -> + RaftIdxs = lists:foldr(fun ({_, ?MSG(I, _)}, Acc) -> + [I | Acc] + end, [], Msgs), + {log, RaftIdxs, + fun(Log) -> + DelMsgs = lists:zipwith( + fun (Cmd, {MsgId, ?MSG(_Idx, Header)}) -> + {MsgId, {Header, get_msg(Cmd)}} + end, Log, Msgs), + [{send_msg, CPid, {delivery, CTag, DelMsgs}, [local, ra_event]}] + end, + {local, node(CPid)}}. + +reply_log_effect(RaftIdx, MsgId, Header, Ready, From) -> + {log, [RaftIdx], + fun ([Cmd]) -> + [{reply, From, {wrap_reply, + {dequeue, {MsgId, {Header, get_msg(Cmd)}}, Ready}}}] + end}. + +checkout_one(#{system_time := Ts} = Meta, ExpiredMsg0, InitState0, Effects0) -> + %% Before checking out any messsage to any consumer, + %% first remove all expired messages from the head of the queue. + {ExpiredMsg, #?STATE{service_queue = SQ0, + messages = Messages0, + msg_bytes_checkout = BytesCheckout, + msg_bytes_enqueue = BytesEnqueue, + consumers = Cons0} = InitState, Effects1} = + expire_msgs(Ts, ExpiredMsg0, InitState0, Effects0), + + case priority_queue:out(SQ0) of + {{value, ConsumerId}, SQ1} + when is_map_key(ConsumerId, Cons0) -> + case take_next_msg(InitState) of + {ConsumerMsg, State0} -> + %% there are consumers waiting to be serviced + %% process consumer checkout + case maps:get(ConsumerId, Cons0) of + #consumer{credit = 0} -> + %% no credit but was still on queue + %% can happen when draining + %% recurse without consumer on queue + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{status = cancelled} -> + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{status = suspected_down} -> + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + #consumer{checked_out = Checked0, 
+ next_msg_id = Next, + credit = Credit, + delivery_count = DelCnt} = Con0 -> + Checked = maps:put(Next, ConsumerMsg, Checked0), + Con = Con0#consumer{checked_out = Checked, + next_msg_id = Next + 1, + credit = Credit - 1, + delivery_count = DelCnt + 1}, + Size = get_header(size, get_msg_header(ConsumerMsg)), + State = update_or_remove_sub( + Meta, ConsumerId, Con, + State0#?STATE{service_queue = SQ1, + msg_bytes_checkout = BytesCheckout + Size, + msg_bytes_enqueue = BytesEnqueue - Size}), + {success, ConsumerId, Next, ConsumerMsg, ExpiredMsg, + State, Effects1} + end; + empty -> + {nochange, ExpiredMsg, InitState, Effects1} + end; + {{value, _ConsumerId}, SQ1} -> + %% consumer did not exist but was queued, recurse + checkout_one(Meta, ExpiredMsg, + InitState#?STATE{service_queue = SQ1}, Effects1); + {empty, _} -> + case lqueue:len(Messages0) of + 0 -> + {nochange, ExpiredMsg, InitState, Effects1}; + _ -> + {inactive, ExpiredMsg, InitState, Effects1} + end + end. + +%% dequeue all expired messages +expire_msgs(RaCmdTs, Result, State, Effects) -> + %% In the normal case, there are no expired messages. + %% Therefore, first lqueue:get/2 to check whether we need to lqueue:out/1 + %% because the latter can be much slower than the former. + case get_next_msg(State) of + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry), RaCmdTs >= Expiry -> + expire(RaCmdTs, State, Effects); + ?MSG(_, #{expiry := Expiry}) + when is_integer(Expiry), RaCmdTs >= Expiry -> + expire(RaCmdTs, State, Effects); + _ -> + {Result, State, Effects} + end. 
+ +expire(RaCmdTs, State0, Effects) -> + {?MSG(Idx, Header) = Msg, + #?STATE{cfg = #cfg{dead_letter_handler = DLH}, + dlx = DlxState0, + ra_indexes = Indexes0, + messages_total = Tot, + msg_bytes_enqueue = MsgBytesEnqueue} = State1} = take_next_msg(State0), + {DlxState, DlxEffects} = rabbit_fifo_dlx:discard([Msg], expired, DLH, DlxState0), + Indexes = rabbit_fifo_index:delete(Idx, Indexes0), + State = State1#?STATE{dlx = DlxState, + ra_indexes = Indexes, + messages_total = Tot - 1, + msg_bytes_enqueue = MsgBytesEnqueue - get_header(size, Header)}, + expire_msgs(RaCmdTs, true, State, DlxEffects ++ Effects). + +timer_effect(RaCmdTs, State, Effects) -> + T = case get_next_msg(State) of + ?MSG(_, ?TUPLE(Size, Expiry)) + when is_integer(Size), is_integer(Expiry) -> + %% Next message contains 'expiry' header. + %% (Re)set timer so that mesage will be dropped or dead-lettered on time. + max(0, Expiry - RaCmdTs); + ?MSG(_, #{expiry := Expiry}) + when is_integer(Expiry) -> + max(0, Expiry - RaCmdTs); + _ -> + %% Next message does not contain 'expiry' header. + %% Therefore, do not set timer or cancel timer if it was set. + infinity + end, + [{timer, expire_msgs, T} | Effects]. + +update_or_remove_sub(Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{lifetime = once}, + checked_out = Checked, + credit = 0} = Con, + #?STATE{consumers = Cons} = State) -> + case map_size(Checked) of + 0 -> + #{system_time := Ts} = Meta, + % we're done with this consumer + State#?STATE{consumers = maps:remove(ConsumerId, Cons), + last_active = Ts}; + _ -> + % there are unsettled items so need to keep around + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons)} + end; +update_or_remove_sub(_Meta, ConsumerId, + #consumer{cfg = #consumer_cfg{}} = Con, + #?STATE{consumers = Cons, + service_queue = ServiceQueue} = State) -> + State#?STATE{consumers = maps:put(ConsumerId, Con, Cons), + service_queue = uniq_queue_in(ConsumerId, Con, ServiceQueue)}. 
+ +uniq_queue_in(Key, #consumer{credit = Credit, + status = up, + cfg = #consumer_cfg{priority = P}}, ServiceQueue) + when Credit > 0 -> + % TODO: queue:member could surely be quite expensive, however the practical + % number of unique consumers may not be large enough for it to matter + case priority_queue:member(Key, ServiceQueue) of + true -> + ServiceQueue; + false -> + priority_queue:in(Key, P, ServiceQueue) + end; +uniq_queue_in(_Key, _Consumer, ServiceQueue) -> + ServiceQueue. + +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode0} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = competing}, + consumers = Cons0} = State0) -> + Consumer = case Cons0 of + #{ConsumerId := #consumer{} = Consumer0} -> + merge_consumer(Meta, Consumer0, ConsumerMeta, Spec, Priority); + _ -> + Mode = credit_mode(Meta, Credit, Mode0), + #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = ConsumerMeta, + priority = Priority, + credit_mode = Mode}, + credit = Credit} + end, + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; +update_consumer(Meta, {Tag, Pid} = ConsumerId, ConsumerMeta, + {Life, Credit, Mode0} = Spec, Priority, + #?STATE{cfg = #cfg{consumer_strategy = single_active}, + consumers = Cons0, + waiting_consumers = Waiting, + service_queue = _ServiceQueue0} = State0) -> + %% if it is the current active consumer, just update + %% if it is a cancelled active consumer, add to waiting unless it is the only + %% one, then merge + case active_consumer(Cons0) of + {ConsumerId, #consumer{status = up} = Consumer0} -> + Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority), + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + undefined when is_map_key(ConsumerId, Cons0) -> + %% there is no active consumer and the current consumer is in the + %% consumers map and thus must be cancelled, in this case we can just + %% merge and effectively make this the 
current active one + Consumer0 = maps:get(ConsumerId, Cons0), + Consumer = merge_consumer(Meta, Consumer0, ConsumerMeta, + Spec, Priority), + {Consumer, update_or_remove_sub(Meta, ConsumerId, Consumer, State0)}; + _ -> + %% add as a new waiting consumer + Mode = credit_mode(Meta, Credit, Mode0), + Consumer = #consumer{cfg = #consumer_cfg{tag = Tag, + pid = Pid, + lifetime = Life, + meta = ConsumerMeta, + priority = Priority, + credit_mode = Mode}, + credit = Credit}, + + {Consumer, + State0#?STATE{waiting_consumers = + Waiting ++ [{ConsumerId, Consumer}]}} + end. + +merge_consumer(Meta, #consumer{cfg = CCfg, checked_out = Checked} = Consumer, + ConsumerMeta, {Life, Credit, Mode0}, Priority) -> + NumChecked = map_size(Checked), + NewCredit = max(0, Credit - NumChecked), + Mode = credit_mode(Meta, Credit, Mode0), + Consumer#consumer{cfg = CCfg#consumer_cfg{priority = Priority, + meta = ConsumerMeta, + credit_mode = Mode, + lifetime = Life}, + status = up, + credit = NewCredit}. + +credit_mode(#{machine_version := Vsn}, Credit, simple_prefetch) + when Vsn >= 3 -> + {simple_prefetch, Credit}; +credit_mode(_, _, Mode) -> + Mode. + +maybe_queue_consumer(ConsumerId, #consumer{credit = Credit} = Con, + ServiceQueue0) -> + case Credit > 0 of + true -> + % consumer needs service - check if already on service queue + uniq_queue_in(ConsumerId, Con, ServiceQueue0); + false -> + ServiceQueue0 + end. + +%% creates a dehydrated version of the current state to be cached and +%% potentially used to for a snaphot at a later point +dehydrate_state(#?STATE{cfg = #cfg{}, + dlx = DlxState} = State) -> + % no messages are kept in memory, no need to + % overly mutate the current state apart from removing indexes and cursors + State#?STATE{ra_indexes = rabbit_fifo_index:empty(), + release_cursors = lqueue:new(), + enqueue_count = 0, + msg_cache = undefined, + dlx = rabbit_fifo_dlx:dehydrate(DlxState)}. 
+ +is_over_limit(#?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_over_limit(#?STATE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + (messages_ready(State) + NumDlx > MaxLength) orelse + (BytesEnq + BytesDlx > MaxBytes). + +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = undefined, + max_bytes = undefined}}) -> + false; +is_below_soft_limit(#?STATE{cfg = #cfg{max_length = MaxLength, + max_bytes = MaxBytes}, + msg_bytes_enqueue = BytesEnq, + dlx = DlxState} = State) -> + {NumDlx, BytesDlx} = rabbit_fifo_dlx:stat(DlxState), + is_below(MaxLength, messages_ready(State) + NumDlx) andalso + is_below(MaxBytes, BytesEnq + BytesDlx). + +is_below(undefined, _Num) -> + true; +is_below(Val, Num) when is_integer(Val) andalso is_integer(Num) -> + Num =< trunc(Val * ?LOW_LIMIT). + +-spec make_enqueue(option(pid()), option(msg_seqno()), raw_msg()) -> protocol(). +make_enqueue(Pid, Seq, Msg) -> + #enqueue{pid = Pid, seq = Seq, msg = Msg}. + +-spec make_register_enqueuer(pid()) -> protocol(). +make_register_enqueuer(Pid) -> + #register_enqueuer{pid = Pid}. + +-spec make_checkout(consumer_id(), + checkout_spec(), consumer_meta()) -> protocol(). +make_checkout({_, _} = ConsumerId, Spec, Meta) -> + #checkout{consumer_id = ConsumerId, + spec = Spec, meta = Meta}. + +-spec make_settle(consumer_id(), [msg_id()]) -> protocol(). +make_settle(ConsumerId, MsgIds) when is_list(MsgIds) -> + #settle{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_return(consumer_id(), [msg_id()]) -> protocol(). +make_return(ConsumerId, MsgIds) -> + #return{consumer_id = ConsumerId, msg_ids = MsgIds}. + +-spec make_discard(consumer_id(), [msg_id()]) -> protocol(). +make_discard(ConsumerId, MsgIds) -> + #discard{consumer_id = ConsumerId, msg_ids = MsgIds}. 
+ +-spec make_credit(consumer_id(), non_neg_integer(), non_neg_integer(), + boolean()) -> protocol(). +make_credit(ConsumerId, Credit, DeliveryCount, Drain) -> + #credit{consumer_id = ConsumerId, + credit = Credit, + delivery_count = DeliveryCount, + drain = Drain}. + +-spec make_purge() -> protocol(). +make_purge() -> #purge{}. + +-spec make_garbage_collection() -> protocol(). +make_garbage_collection() -> #garbage_collection{}. + +-spec make_purge_nodes([node()]) -> protocol(). +make_purge_nodes(Nodes) -> + #purge_nodes{nodes = Nodes}. + +-spec make_update_config(config()) -> protocol(). +make_update_config(Config) -> + #update_config{config = Config}. + +add_bytes_drop(Header, + #?STATE{msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?STATE{msg_bytes_enqueue = Enqueue - Size}. + + +add_bytes_return(Header, + #?STATE{msg_bytes_checkout = Checkout, + msg_bytes_enqueue = Enqueue} = State) -> + Size = get_header(size, Header), + State#?STATE{msg_bytes_checkout = Checkout - Size, + msg_bytes_enqueue = Enqueue + Size}. + +message_size(#basic_message{content = Content}) -> + #content{payload_fragments_rev = PFR} = Content, + iolist_size(PFR); +message_size(B) when is_binary(B) -> + byte_size(B); +message_size(Msg) -> + case mc:is(Msg) of + true -> + {_, PayloadSize} = mc:size(Msg), + PayloadSize; + false -> + %% probably only hit this for testing so ok to use erts_debug + erts_debug:size(Msg) + end. + + +all_nodes(#?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Nodes0 = maps:fold(fun({_, P}, _, Acc) -> + Acc#{node(P) => ok} + end, #{}, Cons0), + Nodes1 = maps:fold(fun(P, _, Acc) -> + Acc#{node(P) => ok} + end, Nodes0, Enqs0), + maps:keys( + lists:foldl(fun({{_, P}, _}, Acc) -> + Acc#{node(P) => ok} + end, Nodes1, WaitingConsumers0)). 
+ +all_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, _, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, _}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). + +suspected_pids_for(Node, #?STATE{consumers = Cons0, + enqueuers = Enqs0, + waiting_consumers = WaitingConsumers0}) -> + Cons = maps:fold(fun({_, P}, + #consumer{status = suspected_down}, + Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, [], Cons0), + Enqs = maps:fold(fun(P, #enqueuer{status = suspected_down}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, _, Acc) -> Acc + end, Cons, Enqs0), + lists:foldl(fun({{_, P}, + #consumer{status = suspected_down}}, Acc) + when node(P) =:= Node -> + [P | Acc]; + (_, Acc) -> Acc + end, Enqs, WaitingConsumers0). + +is_expired(Ts, #?STATE{cfg = #cfg{expires = Expires}, + last_active = LastActive, + consumers = Consumers}) + when is_number(LastActive) andalso is_number(Expires) -> + %% TODO: should it be active consumers? + Active = maps:filter(fun (_, #consumer{status = suspected_down}) -> + false; + (_, _) -> + true + end, Consumers), + + Ts > (LastActive + Expires) andalso maps:size(Active) == 0; +is_expired(_Ts, _State) -> + false. + +get_priority_from_args(#{args := Args}) -> + case rabbit_misc:table_lookup(Args, <<"x-priority">>) of + {_Key, Value} -> + Value; + _ -> 0 + end; +get_priority_from_args(_) -> + 0. + +notify_decorators_effect(QName, MaxActivePriority, IsEmpty) -> + {mod_call, rabbit_quorum_queue, spawn_notify_decorators, + [QName, consumer_state_changed, [MaxActivePriority, IsEmpty]]}. + +notify_decorators_startup(QName) -> + {mod_call, rabbit_quorum_queue, spawn_notify_decorators, + [QName, startup, []]}. 
+ +convert(To, To, State) -> + State; +convert(0, To, State) -> + convert(1, To, rabbit_fifo_v1:convert_v0_to_v1(State)); +convert(1, To, State) -> + convert(2, To, convert_v1_to_v2(State)); +convert(2, To, State) -> + convert(3, To, convert_v2_to_v3(State)). + +smallest_raft_index(#?STATE{messages = Messages, + ra_indexes = Indexes, + dlx = DlxState}) -> + SmallestDlxRaIdx = rabbit_fifo_dlx:smallest_raft_index(DlxState), + SmallestMsgsRaIdx = case lqueue:get(Messages, undefined) of + ?MSG(I, _) when is_integer(I) -> + I; + _ -> + undefined + end, + SmallestRaIdx = rabbit_fifo_index:smallest(Indexes), + lists:min([SmallestDlxRaIdx, SmallestMsgsRaIdx, SmallestRaIdx]). + +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg}], Acc) -> + lists:reverse([{append, + #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, + msg_id = MsgId, + msg = Msg}, + Notify} + | Acc]); +make_requeue(ConsumerId, Notify, [{MsgId, Idx, Header, Msg} | Rem], Acc) -> + make_requeue(ConsumerId, Notify, Rem, + [{append, + #requeue{consumer_id = ConsumerId, + index = Idx, + header = Header, + msg_id = MsgId, + msg = Msg}, + noreply} + | Acc]); +make_requeue(_ConsumerId, _Notify, [], []) -> + []. + +can_immediately_deliver(#?STATE{service_queue = SQ, + consumers = Consumers} = State) -> + case messages_ready(State) of + 0 when map_size(Consumers) > 0 -> + %% TODO: is is probably good enough but to be 100% we'd need to + %% scan all consumers and ensure at least one has credit + priority_queue:is_empty(SQ) == false; + _ -> + false + end. + +incr(I) -> + I + 1. + +get_msg(#enqueue{msg = M}) -> + M; +get_msg(#requeue{msg = M}) -> + M. diff --git a/deps/rabbit/src/rabbit_fifo_v3.hrl b/deps/rabbit/src/rabbit_fifo_v3.hrl new file mode 100644 index 000000000000..9b1078265dc6 --- /dev/null +++ b/deps/rabbit/src/rabbit_fifo_v3.hrl @@ -0,0 +1,226 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. 
If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% macros for memory optimised tuple structures +%% [A|B] saves 1 byte compared to {A,B} +-define(TUPLE(A, B), [A | B]). + +%% We only hold Raft index and message header in memory. +%% Raw message data is always stored on disk. +-define(MSG(Index, Header), ?TUPLE(Index, Header)). + +-define(IS_HEADER(H), + (is_integer(H) andalso H >= 0) orelse + is_list(H) orelse + (is_map(H) andalso is_map_key(size, H))). + +-type optimised_tuple(A, B) :: nonempty_improper_list(A, B). + +-type option(T) :: undefined | T. + +-type raw_msg() :: term(). +%% The raw message. It is opaque to rabbit_fifo. + +-type msg_id() :: non_neg_integer(). +%% A consumer-scoped monotonically incrementing integer included with a +%% {@link delivery/0.}. Used to settle deliveries using +%% {@link rabbit_fifo_client:settle/3.} + +-type msg_seqno() :: non_neg_integer(). +%% A sender process scoped monotonically incrementing integer included +%% in enqueue messages. Used to ensure ordering of messages send from the +%% same process + +-type msg_header() :: msg_size() | + optimised_tuple(msg_size(), Expiry :: milliseconds()) | + #{size := msg_size(), + delivery_count => non_neg_integer(), + expiry => milliseconds()}. +%% The message header: +%% size: The size of the message payload in bytes. +%% delivery_count: the number of unsuccessful delivery attempts. +%% A non-zero value indicates a previous attempt. +%% expiry: Epoch time in ms when a message expires. Set during enqueue. +%% Value is determined by per-queue or per-message message TTL. +%% If it contains only the size it can be condensed to an integer. +%% If it contains only the size and expiry it can be condensed to an improper list. + +-type msg_size() :: non_neg_integer(). 
+%% the size in bytes of the msg payload + +-type msg() :: optimised_tuple(option(ra:index()), msg_header()). + +-type delivery_msg() :: {msg_id(), {msg_header(), raw_msg()}}. +%% A tuple consisting of the message id, and the headered message. + +-type consumer_tag() :: binary(). +%% An arbitrary binary tag used to distinguish between different consumers +%% set up by the same process. See: {@link rabbit_fifo_client:checkout/3.} + +-type delivery() :: {delivery, consumer_tag(), [delivery_msg()]}. +%% Represents the delivery of one or more rabbit_fifo messages. + +-type consumer_id() :: {consumer_tag(), pid()}. +%% The entity that receives messages. Uniquely identifies a consumer. + +-type credit_mode() :: credited | + %% machine_version 2 + simple_prefetch | + %% machine_version 3 + {simple_prefetch, MaxCredit :: non_neg_integer()}. +%% determines how credit is replenished + +-type checkout_spec() :: {once | auto, Num :: non_neg_integer(), + credit_mode()} | + {dequeue, settled | unsettled} | + cancel. + +-type consumer_meta() :: #{ack => boolean(), + username => binary(), + prefetch => non_neg_integer(), + args => list()}. +%% static meta data associated with a consumer + +-type applied_mfa() :: {module(), atom(), list()}. +% represents a partially applied module call + +-define(RELEASE_CURSOR_EVERY, 2048). +-define(RELEASE_CURSOR_EVERY_MAX, 3_200_000). +-define(USE_AVG_HALF_LIFE, 10000.0). +%% an average QQ without any message uses about 100KB so setting this limit +%% to ~10 times that should be relatively safe. +-define(GC_MEM_LIMIT_B, 2_000_000). + +-define(MB, 1_048_576). +-define(LOW_LIMIT, 0.8). +-define(DELIVERY_CHUNK_LIMIT_B, 128_000). + +-record(consumer_cfg, + {meta = #{} :: consumer_meta(), + pid :: pid(), + tag :: consumer_tag(), + %% the mode of how credit is incremented + %% simple_prefetch: credit is re-filled as deliveries are settled + %% or returned. 
+ %% credited: credit can only be changed by receiving a consumer_credit + %% command: `{consumer_credit, ReceiverDeliveryCount, Credit}' + credit_mode :: credit_mode(), % part of snapshot data + lifetime = once :: once | auto, + priority = 0 :: non_neg_integer()}). + +-record(consumer, + {cfg = #consumer_cfg{}, + status = up :: up | suspected_down | cancelled | waiting, + next_msg_id = 0 :: msg_id(), % part of snapshot data + checked_out = #{} :: #{msg_id() => msg()}, + %% max number of messages that can be sent + %% decremented for each delivery + credit = 0 : non_neg_integer(), + %% total number of checked out messages - ever + %% incremented for each delivery + delivery_count = 0 :: non_neg_integer() + }). + +-type consumer() :: #consumer{}. + +-type consumer_strategy() :: competing | single_active. + +-type milliseconds() :: non_neg_integer(). + +-type dead_letter_handler() :: option({at_most_once, applied_mfa()} | at_least_once). + +-record(enqueuer, + {next_seqno = 1 :: msg_seqno(), + % out of order enqueues - sorted list + unused, + status = up :: up | suspected_down, + %% it is useful to have a record of when this was blocked + %% so that we can retry sending the block effect if + %% the publisher did not receive the initial one + blocked :: option(ra:index()), + unused_1, + unused_2 + }). 
+ +-record(cfg, + {name :: atom(), + resource :: rabbit_types:r('queue'), + release_cursor_interval :: option({non_neg_integer(), non_neg_integer()}), + dead_letter_handler :: dead_letter_handler(), + become_leader_handler :: option(applied_mfa()), + overflow_strategy = drop_head :: drop_head | reject_publish, + max_length :: option(non_neg_integer()), + max_bytes :: option(non_neg_integer()), + %% whether single active consumer is on or not for this queue + consumer_strategy = competing :: consumer_strategy(), + %% the maximum number of unsuccessful delivery attempts permitted + delivery_limit :: option(non_neg_integer()), + expires :: option(milliseconds()), + msg_ttl :: option(milliseconds()), + unused_1, + unused_2 + }). + +-type prefix_msgs() :: {list(), list()} | + {non_neg_integer(), list(), + non_neg_integer(), list()}. + +-record(rabbit_fifo, + {cfg :: #cfg{}, + % unassigned messages + messages = lqueue:new() :: lqueue:lqueue(msg()), + messages_total = 0 :: non_neg_integer(), + % queue of returned msg_in_ids - when checking out it picks from + returns = lqueue:new() :: lqueue:lqueue(term()), + % a counter of enqueues - used to trigger shadow copy points + % reset to 0 when release_cursor gets stored + enqueue_count = 0 :: non_neg_integer(), + % a map containing all the live processes that have ever enqueued + % a message to this queue + enqueuers = #{} :: #{pid() => #enqueuer{}}, + % index of all messages that have been delivered at least once + % used to work out the smallest live raft index + % rabbit_fifo_index can be slow when calculating the smallest + % index when there are large gaps but should be faster than gb_trees + % for normal appending operations as it's backed by a map + ra_indexes = rabbit_fifo_index:empty() :: rabbit_fifo_index:state(), + %% A release cursor is essentially a snapshot for a past raft index. + %% Working assumption: Messages are consumed in a FIFO-ish order because + %% the log is truncated only until the oldest message. 
+ release_cursors = lqueue:new() :: lqueue:lqueue({release_cursor, + ra:index(), #rabbit_fifo{}}), + % consumers need to reflect consumer state at time of snapshot + consumers = #{} :: #{consumer_id() => consumer()}, + % consumers that require further service are queued here + service_queue = priority_queue:new() :: priority_queue:q(), + %% state for at-least-once dead-lettering + dlx = rabbit_fifo_dlx:init() :: rabbit_fifo_dlx:state(), + msg_bytes_enqueue = 0 :: non_neg_integer(), + msg_bytes_checkout = 0 :: non_neg_integer(), + %% waiting consumers, one is picked active consumer is cancelled or dies + %% used only when single active consumer is on + waiting_consumers = [] :: [{consumer_id(), consumer()}], + last_active :: option(non_neg_integer()), + msg_cache :: option({ra:index(), raw_msg()}), + unused_2 + }). + +-type config() :: #{name := atom(), + queue_resource := rabbit_types:r('queue'), + dead_letter_handler => dead_letter_handler(), + become_leader_handler => applied_mfa(), + release_cursor_interval => non_neg_integer(), + max_length => non_neg_integer(), + max_bytes => non_neg_integer(), + max_in_memory_length => non_neg_integer(), + max_in_memory_bytes => non_neg_integer(), + overflow_strategy => drop_head | reject_publish, + single_active_consumer_on => boolean(), + delivery_limit => non_neg_integer(), + expires => non_neg_integer(), + msg_ttl => non_neg_integer(), + created => non_neg_integer() + }. 
diff --git a/deps/rabbit/src/rabbit_global_counters.erl b/deps/rabbit/src/rabbit_global_counters.erl index b5cdc5b627e1..f9239163d850 100644 --- a/deps/rabbit/src/rabbit_global_counters.erl +++ b/deps/rabbit/src/rabbit_global_counters.erl @@ -132,12 +132,14 @@ boot_step() -> [begin %% Protocol counters - init([{protocol, Proto}]), + Protocol = {protocol, Proto}, + init([Protocol]), + rabbit_msg_size_metrics:init(Proto), %% Protocol & Queue Type counters - init([{protocol, Proto}, {queue_type, rabbit_classic_queue}]), - init([{protocol, Proto}, {queue_type, rabbit_quorum_queue}]), - init([{protocol, Proto}, {queue_type, rabbit_stream_queue}]) + init([Protocol, {queue_type, rabbit_classic_queue}]), + init([Protocol, {queue_type, rabbit_quorum_queue}]), + init([Protocol, {queue_type, rabbit_stream_queue}]) end || Proto <- [amqp091, amqp10]], %% Dead Letter counters @@ -247,13 +249,13 @@ publisher_created(Protocol) -> counters:add(fetch(Protocol), ?PUBLISHERS, 1). publisher_deleted(Protocol) -> - counters:add(fetch(Protocol), ?PUBLISHERS, -1). + counters:sub(fetch(Protocol), ?PUBLISHERS, 1). consumer_created(Protocol) -> counters:add(fetch(Protocol), ?CONSUMERS, 1). consumer_deleted(Protocol) -> - counters:add(fetch(Protocol), ?CONSUMERS, -1). + counters:sub(fetch(Protocol), ?CONSUMERS, 1). messages_dead_lettered(Reason, QueueType, DeadLetterStrategy, Num) -> Index = case Reason of diff --git a/deps/rabbit/src/rabbit_health_check.erl b/deps/rabbit/src/rabbit_health_check.erl index 32223e1a43f5..9f959994828f 100644 --- a/deps/rabbit/src/rabbit_health_check.erl +++ b/deps/rabbit/src/rabbit_health_check.erl @@ -29,7 +29,7 @@ node(Node, Timeout) -> local() -> rabbit_log:warning("rabbitmqctl node_health_check and its HTTP API counterpart are DEPRECATED. 
" - "See https://www.rabbitmq.com/monitoring.html#health-checks for replacement options."), + "See https://www.rabbitmq.com/docs/monitoring#health-checks for replacement options."), run_checks([list_channels, list_queues, alarms, rabbit_node_monitor]). %%---------------------------------------------------------------------------- diff --git a/deps/rabbit/src/rabbit_khepri.erl b/deps/rabbit/src/rabbit_khepri.erl index fabeda694637..3f2d2921c0f6 100644 --- a/deps/rabbit/src/rabbit_khepri.erl +++ b/deps/rabbit/src/rabbit_khepri.erl @@ -87,6 +87,8 @@ -module(rabbit_khepri). +-feature(maybe_expr, enable). + -include_lib("kernel/include/logger.hrl"). -include_lib("stdlib/include/assert.hrl"). @@ -94,8 +96,12 @@ -include_lib("rabbit_common/include/logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). +-include("include/khepri.hrl"). + -export([setup/0, setup/1, + register_projections/0, + init/1, can_join_cluster/1, add_member/2, remove_member/1, @@ -118,6 +124,7 @@ get/1, get/2, + count/1, count/2, get_many/1, adv_get/1, adv_get_many/1, @@ -143,6 +150,7 @@ dir/0, info/0, + root_path/0, handle_async_ret/1, @@ -165,10 +173,6 @@ -export([cluster_status_from_khepri/0, cli_cluster_status/0]). -%% Path functions --export([if_has_data/1, - if_has_data_wildcard/0]). - -export([force_shrink_member_to_current_member/0]). %% Helpers for working with the Khepri API / types. 
@@ -256,25 +260,29 @@ setup(_) -> Timeout = application:get_env(rabbit, khepri_default_timeout, 30000), ok = application:set_env( [{khepri, [{default_timeout, Timeout}, - {default_store_id, ?STORE_ID}]}], + {default_store_id, ?STORE_ID}, + {default_ra_system, ?RA_SYSTEM}]}], [{persistent, true}]), RaServerConfig = #{cluster_name => ?RA_CLUSTER_NAME, friendly_name => ?RA_FRIENDLY_NAME}, case khepri:start(?RA_SYSTEM, RaServerConfig) of {ok, ?STORE_ID} -> - wait_for_leader(), - wait_for_register_projections(), - ?LOG_DEBUG( - "Khepri-based " ?RA_FRIENDLY_NAME " ready", - #{domain => ?RMQLOG_DOMAIN_GLOBAL}), - ok; + RetryTimeout = retry_timeout(), + case khepri_cluster:wait_for_leader(?STORE_ID, RetryTimeout) of + ok -> + ?LOG_DEBUG( + "Khepri-based " ?RA_FRIENDLY_NAME " ready", + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + ok; + {error, timeout} -> + exit(timeout_waiting_for_leader); + {error, _} = Error -> + exit(Error) + end; {error, _} = Error -> exit(Error) end. -wait_for_leader() -> - wait_for_leader(retry_timeout(), retry_limit()). - retry_timeout() -> case application:get_env(rabbit, khepri_leader_wait_retry_timeout) of {ok, T} -> T; @@ -287,38 +295,60 @@ retry_limit() -> undefined -> 10 end. -wait_for_leader(_Timeout, 0) -> - exit(timeout_waiting_for_leader); -wait_for_leader(Timeout, Retries) -> - rabbit_log:info("Waiting for Khepri leader for ~tp ms, ~tp retries left", - [Timeout, Retries - 1]), - Options = #{timeout => Timeout, - favor => low_latency}, - case khepri:exists(?STORE_ID, [], Options) of - Exists when is_boolean(Exists) -> - rabbit_log:info("Khepri leader elected"), - ok; - {error, timeout} -> %% Khepri >= 0.14.0 - wait_for_leader(Timeout, Retries -1); - {error, {timeout, _ServerId}} -> %% Khepri < 0.14.0 - wait_for_leader(Timeout, Retries -1); - {error, Reason} -> - throw(Reason) +%% @private + +-spec init(IsVirgin) -> Ret when + IsVirgin :: boolean(), + Ret :: ok | timeout_error(). 
+ +init(IsVirgin) -> + case members() of + [] -> + timer:sleep(1000), + init(IsVirgin); + Members -> + ?LOG_NOTICE( + "Found the following metadata store members: ~p", [Members], + #{domain => ?RMQLOG_DOMAIN_DB}), + maybe + ok ?= await_replication(), + ?LOG_DEBUG( + "local Khepri-based " ?RA_FRIENDLY_NAME " member is caught " + "up to the Raft cluster leader", [], + #{domain => ?RMQLOG_DOMAIN_DB}), + ok ?= case IsVirgin of + true -> + register_projections(); + false -> + ok + end, + %% Delete transient queues on init. + %% Note that we also do this in the + %% `rabbit_amqqueue:on_node_down/1' callback. We must try this + %% deletion during init because the cluster may have been in a + %% minority when this node went down. We wait for a majority + %% while registering projections above though so this deletion + %% is likely to succeed. + rabbit_amqqueue:delete_transient_queues_on_node(node()) + end end. -wait_for_register_projections() -> - wait_for_register_projections(retry_timeout(), retry_limit()). +await_replication() -> + await_replication(retry_timeout(), retry_limit()). -wait_for_register_projections(_Timeout, 0) -> - exit(timeout_waiting_for_khepri_projections); -wait_for_register_projections(Timeout, Retries) -> - rabbit_log:info("Waiting for Khepri projections for ~tp ms, ~tp retries left", - [Timeout, Retries - 1]), - try - register_projections() - catch - throw : timeout -> - wait_for_register_projections(Timeout, Retries -1) +await_replication(_Timeout, 0) -> + {error, timeout}; +await_replication(Timeout, Retries) -> + ?LOG_DEBUG( + "Khepri-based " ?RA_FRIENDLY_NAME " waiting to catch up on replication " + "to the Raft cluster leader. Waiting for ~tb ms, ~tb retries left", + [Timeout, Retries], + #{domain => ?RMQLOG_DOMAIN_DB}), + case fence(Timeout) of + ok -> + ok; + {error, timeout} -> + await_replication(Timeout, Retries -1) end. 
%% @private @@ -347,20 +377,42 @@ add_member(JoiningNode, JoinedNode) when is_atom(JoinedNode) -> JoiningNode, rabbit_khepri, do_join, [JoinedNode]), post_add_member(JoiningNode, JoinedNode, Ret); add_member(JoiningNode, [_ | _] = Cluster) -> - JoinedNode = pick_node_in_cluster(Cluster), - ?LOG_INFO( - "Khepri clustering: Attempt to add node ~p to cluster ~0p " - "through node ~p", - [JoiningNode, Cluster, JoinedNode], - #{domain => ?RMQLOG_DOMAIN_GLOBAL}), - %% Recurse with a single node taken in the `Cluster' list. - add_member(JoiningNode, JoinedNode). + case pick_node_in_cluster(Cluster) of + {ok, JoinedNode} -> + ?LOG_INFO( + "Khepri clustering: Attempt to add node ~p to cluster ~0p " + "through node ~p", + [JoiningNode, Cluster, JoinedNode], + #{domain => ?RMQLOG_DOMAIN_GLOBAL}), + %% Recurse with a single node taken in the `Cluster' list. + add_member(JoiningNode, JoinedNode); + {error, _} = Error -> + Error + end. -pick_node_in_cluster([_ | _] = Cluster) when is_list(Cluster) -> - ThisNode = node(), - case lists:member(ThisNode, Cluster) of - true -> ThisNode; - false -> hd(Cluster) +pick_node_in_cluster([_ | _] = Cluster) -> + RunningNodes = lists:filter( + fun(Node) -> + try + erpc:call( + Node, + khepri_cluster, is_store_running, + [?STORE_ID]) + catch + _:_ -> + false + end + end, Cluster), + case RunningNodes of + [_ | _] -> + ThisNode = node(), + SelectedNode = case lists:member(ThisNode, RunningNodes) of + true -> ThisNode; + false -> hd(RunningNodes) + end, + {ok, SelectedNode}; + [] -> + {error, {no_nodes_to_cluster_with, Cluster}} end. 
do_join(RemoteNode) when RemoteNode =/= node() -> @@ -486,6 +538,7 @@ remove_reachable_member(NodeToRemove) -> NodeToRemove, khepri_cluster, reset, [?RA_CLUSTER_NAME]), case Ret of ok -> + rabbit_amqqueue:forget_all_durable(NodeToRemove), ?LOG_DEBUG( "Node ~s removed from Khepri cluster \"~s\"", [NodeToRemove, ?RA_CLUSTER_NAME], @@ -507,6 +560,7 @@ remove_down_member(NodeToRemove) -> Ret = ra:remove_member(ServerRef, ServerId, Timeout), case Ret of {ok, _, _} -> + rabbit_amqqueue:forget_all_durable(NodeToRemove), ?LOG_DEBUG( "Node ~s removed from Khepri cluster \"~s\"", [NodeToRemove, ?RA_CLUSTER_NAME], @@ -817,10 +871,7 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> Error end; {_OTP, _Rabbit, {ok, Status}} -> - case rabbit_db_cluster:check_compatibility(Node) of - ok -> {ok, Status}; - Error -> Error - end + {ok, Status} end. remote_node_info(Node) -> @@ -865,6 +916,15 @@ cluster_status_from_khepri() -> {error, khepri_not_running} end. +-spec root_path() -> RootPath when + RootPath :: khepri_path:path(). +%% @doc Returns the path where RabbitMQ stores every metadata. +%% +%% This path must be prepended to all paths used by RabbitMQ subsystems. + +root_path() -> + ?KHEPRI_ROOT_PATH. + %% ------------------------------------------------------------------- %% "Proxy" functions to Khepri API. %% ------------------------------------------------------------------- @@ -892,50 +952,53 @@ cas(Path, Pattern, Data) -> ?STORE_ID, Path, Pattern, Data, ?DEFAULT_COMMAND_OPTIONS). fold(Path, Pred, Acc) -> - khepri:fold(?STORE_ID, Path, Pred, Acc, #{favor => low_latency}). + khepri:fold(?STORE_ID, Path, Pred, Acc). fold(Path, Pred, Acc, Options) -> - Options1 = Options#{favor => low_latency}, - khepri:fold(?STORE_ID, Path, Pred, Acc, Options1). + khepri:fold(?STORE_ID, Path, Pred, Acc, Options). foreach(Path, Pred) -> - khepri:foreach(?STORE_ID, Path, Pred, #{favor => low_latency}). + khepri:foreach(?STORE_ID, Path, Pred). 
filter(Path, Pred) -> - khepri:filter(?STORE_ID, Path, Pred, #{favor => low_latency}). + khepri:filter(?STORE_ID, Path, Pred). get(Path) -> - khepri:get(?STORE_ID, Path, #{favor => low_latency}). + khepri:get(?STORE_ID, Path). get(Path, Options) -> + khepri:get(?STORE_ID, Path, Options). + +count(PathPattern) -> + khepri:count(?STORE_ID, PathPattern, #{favor => low_latency}). + +count(Path, Options) -> Options1 = Options#{favor => low_latency}, - khepri:get(?STORE_ID, Path, Options1). + khepri:count(?STORE_ID, Path, Options1). get_many(PathPattern) -> - khepri:get_many(?STORE_ID, PathPattern, #{favor => low_latency}). + khepri:get_many(?STORE_ID, PathPattern). adv_get(Path) -> - khepri_adv:get(?STORE_ID, Path, #{favor => low_latency}). + khepri_adv:get(?STORE_ID, Path). adv_get_many(PathPattern) -> - khepri_adv:get_many(?STORE_ID, PathPattern, #{favor => low_latency}). + khepri_adv:get_many(?STORE_ID, PathPattern). match(Path) -> match(Path, #{}). match(Path, Options) -> - Options1 = Options#{favor => low_latency}, - khepri:get_many(?STORE_ID, Path, Options1). + khepri:get_many(?STORE_ID, Path, Options). -exists(Path) -> khepri:exists(?STORE_ID, Path, #{favor => low_latency}). +exists(Path) -> khepri:exists(?STORE_ID, Path). list(Path) -> khepri:get_many( - ?STORE_ID, Path ++ [?KHEPRI_WILDCARD_STAR], #{favor => low_latency}). + ?STORE_ID, Path ++ [?KHEPRI_WILDCARD_STAR]). list_child_nodes(Path) -> - Options = #{props_to_return => [child_names], - favor => low_latency}, + Options = #{props_to_return => [child_names]}, case khepri_adv:get_many(?STORE_ID, Path, Options) of {ok, Result} -> case maps:values(Result) of @@ -949,8 +1012,7 @@ list_child_nodes(Path) -> end. 
count_children(Path) -> - Options = #{props_to_return => [child_list_length], - favor => low_latency}, + Options = #{props_to_return => [child_list_length]}, case khepri_adv:get_many(?STORE_ID, Path, Options) of {ok, Map} -> lists:sum([L || #{child_list_length := L} <- maps:values(Map)]); @@ -1001,18 +1063,9 @@ transaction(Fun) -> transaction(Fun, ReadWrite) -> transaction(Fun, ReadWrite, #{}). -transaction(Fun, ReadWrite, Options0) -> - %% If the transaction is read-only, use the same default options we use - %% for most queries. - DefaultQueryOptions = case ReadWrite of - ro -> - #{favor => low_latency}; - _ -> - #{} - end, - Options1 = maps:merge(DefaultQueryOptions, Options0), - Options = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options1), - case khepri:transaction(?STORE_ID, Fun, ReadWrite, Options) of +transaction(Fun, ReadWrite, Options) -> + Options1 = maps:merge(?DEFAULT_COMMAND_OPTIONS, Options), + case khepri:transaction(?STORE_ID, Fun, ReadWrite, Options1) of ok -> ok; {ok, Result} -> Result; {error, Reason} -> throw({error, Reason}) @@ -1028,6 +1081,9 @@ info() -> handle_async_ret(RaEvent) -> khepri:handle_async_ret(?STORE_ID, RaEvent). +fence(Timeout) -> + khepri:fence(?STORE_ID, Timeout). + %% ------------------------------------------------------------------- %% collect_payloads(). %% ------------------------------------------------------------------- @@ -1070,105 +1126,123 @@ collect_payloads(Props, Acc0) when is_map(Props) andalso is_list(Acc0) -> Acc end, Acc0, Props). -%% ------------------------------------------------------------------- -%% if_has_data_wildcard(). -%% ------------------------------------------------------------------- - --spec if_has_data_wildcard() -> Condition when - Condition :: khepri_condition:condition(). - -if_has_data_wildcard() -> - if_has_data([?KHEPRI_WILDCARD_STAR_STAR]). - -%% ------------------------------------------------------------------- -%% if_has_data(). 
-%% ------------------------------------------------------------------- - --spec if_has_data(Conditions) -> Condition when - Conditions :: [Condition], - Condition :: khepri_condition:condition(). +-spec unregister_legacy_projections() -> Ret when + Ret :: ok | timeout_error(). +%% @doc Unregisters any projections which were registered in RabbitMQ 3.13.x +%% versions. +%% +%% In 3.13.x until 3.13.8 we mistakenly registered these projections even if +%% Khepri was not enabled. This function is used by the `khepri_db' enable +%% callback to remove those projections before we register the ones necessary +%% for 4.0.x. +%% +%% @private -if_has_data(Conditions) -> - #if_all{conditions = Conditions ++ [#if_has_data{has_data = true}]}. +unregister_legacy_projections() -> + %% Note that we don't use `all' since `khepri_mnesia_migration' also + %% creates a projection table which we don't want to unregister. Instead + %% we list all of the legacy projection names: + LegacyNames = [ + rabbit_khepri_exchange, + rabbit_khepri_queue, + rabbit_khepri_vhost, + rabbit_khepri_users, + rabbit_khepri_global_rtparams, + rabbit_khepri_per_vhost_rtparams, + rabbit_khepri_user_permissions, + rabbit_khepri_bindings, + rabbit_khepri_index_route, + rabbit_khepri_topic_trie + ], + khepri:unregister_projections(?STORE_ID, LegacyNames). register_projections() -> - RegisterFuns = [fun register_rabbit_exchange_projection/0, - fun register_rabbit_queue_projection/0, - fun register_rabbit_vhost_projection/0, - fun register_rabbit_users_projection/0, - fun register_rabbit_runtime_parameters_projection/0, - fun register_rabbit_user_permissions_projection/0, - fun register_rabbit_bindings_projection/0, - fun register_rabbit_index_route_projection/0, - fun register_rabbit_topic_graph_projection/0], - [case RegisterFun() of - ok -> - ok; - %% Before Khepri v0.13.0, `khepri:register_projection/1,2,3` would - %% return `{error, exists}` for projections which already exist. 
- {error, exists} -> - ok; - %% In v0.13.0+, Khepri returns a `?khepri_error(..)` instead. - {error, {khepri, projection_already_exists, _Info}} -> - ok; - {error, Error} -> - throw(Error) - end || RegisterFun <- RegisterFuns], - ok. + RegFuns = [fun register_rabbit_exchange_projection/0, + fun register_rabbit_queue_projection/0, + fun register_rabbit_vhost_projection/0, + fun register_rabbit_users_projection/0, + fun register_rabbit_global_runtime_parameters_projection/0, + fun register_rabbit_per_vhost_runtime_parameters_projection/0, + fun register_rabbit_user_permissions_projection/0, + fun register_rabbit_bindings_projection/0, + fun register_rabbit_index_route_projection/0, + fun register_rabbit_topic_graph_projection/0], + rabbit_misc:for_each_while_ok( + fun(RegisterFun) -> + case RegisterFun() of + ok -> + ok; + %% Before Khepri v0.13.0, `khepri:register_projection/1,2,3` + %% would return `{error, exists}` for projections which + %% already exist. + {error, exists} -> + ok; + %% In v0.13.0+, Khepri returns a `?khepri_error(..)` instead. + {error, {khepri, projection_already_exists, _Info}} -> + ok; + {error, _} = Error -> + Error + end + end, RegFuns). register_rabbit_exchange_projection() -> Name = rabbit_khepri_exchange, - PathPattern = [rabbit_db_exchange, - exchanges, - _VHost = ?KHEPRI_WILDCARD_STAR, - _Name = ?KHEPRI_WILDCARD_STAR], + PathPattern = rabbit_db_exchange:khepri_exchange_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _Name = ?KHEPRI_WILDCARD_STAR), KeyPos = #exchange.name, register_simple_projection(Name, PathPattern, KeyPos). register_rabbit_queue_projection() -> Name = rabbit_khepri_queue, - PathPattern = [rabbit_db_queue, - queues, - _VHost = ?KHEPRI_WILDCARD_STAR, - _Name = ?KHEPRI_WILDCARD_STAR], + PathPattern = rabbit_db_queue:khepri_queue_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _Name = ?KHEPRI_WILDCARD_STAR), KeyPos = 2, %% #amqqueue.name register_simple_projection(Name, PathPattern, KeyPos). 
register_rabbit_vhost_projection() -> Name = rabbit_khepri_vhost, - PathPattern = [rabbit_db_vhost, _VHost = ?KHEPRI_WILDCARD_STAR], + PathPattern = rabbit_db_vhost:khepri_vhost_path( + _VHost = ?KHEPRI_WILDCARD_STAR), KeyPos = 2, %% #vhost.virtual_host register_simple_projection(Name, PathPattern, KeyPos). register_rabbit_users_projection() -> - Name = rabbit_khepri_users, - PathPattern = [rabbit_db_user, - users, - _UserName = ?KHEPRI_WILDCARD_STAR], + Name = rabbit_khepri_user, + PathPattern = rabbit_db_user:khepri_user_path( + _UserName = ?KHEPRI_WILDCARD_STAR), KeyPos = 2, %% #internal_user.username register_simple_projection(Name, PathPattern, KeyPos). -register_rabbit_runtime_parameters_projection() -> - Name = rabbit_khepri_runtime_parameters, - PathPattern = [rabbit_db_rtparams, - ?KHEPRI_WILDCARD_STAR_STAR], +register_rabbit_global_runtime_parameters_projection() -> + Name = rabbit_khepri_global_rtparam, + PathPattern = rabbit_db_rtparams:khepri_global_rp_path( + _Key = ?KHEPRI_WILDCARD_STAR_STAR), + KeyPos = #runtime_parameters.key, + register_simple_projection(Name, PathPattern, KeyPos). + +register_rabbit_per_vhost_runtime_parameters_projection() -> + Name = rabbit_khepri_per_vhost_rtparam, + PathPattern = rabbit_db_rtparams:khepri_vhost_rp_path( + _VHost = ?KHEPRI_WILDCARD_STAR_STAR, + _Component = ?KHEPRI_WILDCARD_STAR_STAR, + _Name = ?KHEPRI_WILDCARD_STAR_STAR), KeyPos = #runtime_parameters.key, register_simple_projection(Name, PathPattern, KeyPos). register_rabbit_user_permissions_projection() -> - Name = rabbit_khepri_user_permissions, - PathPattern = [rabbit_db_user, - users, - _UserName = ?KHEPRI_WILDCARD_STAR, - user_permissions, - _VHost = ?KHEPRI_WILDCARD_STAR], + Name = rabbit_khepri_user_permission, + PathPattern = rabbit_db_user:khepri_user_permission_path( + _UserName = ?KHEPRI_WILDCARD_STAR, + _VHost = ?KHEPRI_WILDCARD_STAR), KeyPos = #user_permission.user_vhost, register_simple_projection(Name, PathPattern, KeyPos). 
register_simple_projection(Name, PathPattern, KeyPos) -> Options = #{keypos => KeyPos}, Projection = khepri_projection:new(Name, copy, Options), - khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + khepri:register_projection(?STORE_ID, PathPattern, Projection). register_rabbit_bindings_projection() -> MapFun = fun(_Path, Binding) -> @@ -1177,20 +1251,24 @@ register_rabbit_bindings_projection() -> ProjectionFun = projection_fun_for_sets(MapFun), Options = #{keypos => #route.binding}, Projection = khepri_projection:new( - rabbit_khepri_bindings, ProjectionFun, Options), - PathPattern = [rabbit_db_binding, - routes, - _VHost = ?KHEPRI_WILDCARD_STAR, - _ExchangeName = ?KHEPRI_WILDCARD_STAR, - _Kind = ?KHEPRI_WILDCARD_STAR, - _DstName = ?KHEPRI_WILDCARD_STAR, - _RoutingKey = ?KHEPRI_WILDCARD_STAR], - khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + rabbit_khepri_binding, ProjectionFun, Options), + PathPattern = rabbit_db_binding:khepri_route_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _ExchangeName = ?KHEPRI_WILDCARD_STAR, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR), + khepri:register_projection(?STORE_ID, PathPattern, Projection). 
register_rabbit_index_route_projection() -> MapFun = fun(Path, _) -> - [rabbit_db_binding, routes, VHost, ExchangeName, Kind, - DstName, RoutingKey] = Path, + { + VHost, + ExchangeName, + Kind, + DstName, + RoutingKey + } = rabbit_db_binding:khepri_route_path_to_args(Path), Exchange = rabbit_misc:r(VHost, exchange, ExchangeName), Destination = rabbit_misc:r(VHost, Kind, DstName), SourceKey = {Exchange, RoutingKey}, @@ -1201,18 +1279,18 @@ register_rabbit_index_route_projection() -> Options = #{type => bag, keypos => #index_route.source_key}, Projection = khepri_projection:new( rabbit_khepri_index_route, ProjectionFun, Options), - DirectOrFanout = #if_data_matches{pattern = #{type => '$1'}, - conditions = [{'andalso', - {'=/=', '$1', headers}, - {'=/=', '$1', topic}}]}, - PathPattern = [rabbit_db_binding, - routes, - _VHost = ?KHEPRI_WILDCARD_STAR, - _Exchange = DirectOrFanout, - _Kind = ?KHEPRI_WILDCARD_STAR, - _DstName = ?KHEPRI_WILDCARD_STAR, - _RoutingKey = ?KHEPRI_WILDCARD_STAR], - khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + DirectOrFanout = #if_data_matches{ + pattern = #exchange{type = '$1', _ = '_'}, + conditions = [{'andalso', + {'=/=', '$1', headers}, + {'=/=', '$1', topic}}]}, + PathPattern = rabbit_db_binding:khepri_route_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _Exchange = DirectOrFanout, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR), + khepri:register_projection(?STORE_ID, PathPattern, Projection). %% Routing information is stored in the Khepri store as a `set'. 
%% In order to turn these bindings into records in an ETS `bag', we use a @@ -1269,8 +1347,13 @@ register_rabbit_topic_graph_projection() -> #{should_process_function => ShouldProcessFun}}, ProjectionFun = fun(Table, Path, OldProps, NewProps) -> - [rabbit_db_binding, routes, - VHost, ExchangeName, _Kind, _DstName, RoutingKey] = Path, + { + VHost, + ExchangeName, + _Kind, + _DstName, + RoutingKey + } = rabbit_db_binding:khepri_route_path_to_args(Path), Exchange = rabbit_misc:r(VHost, exchange, ExchangeName), Words = rabbit_db_topic_exchange:split_topic_key_binary(RoutingKey), case {OldProps, NewProps} of @@ -1301,14 +1384,14 @@ register_rabbit_topic_graph_projection() -> end end, Projection = khepri_projection:new(Name, ProjectionFun, Options), - PathPattern = [rabbit_db_binding, - routes, - _VHost = ?KHEPRI_WILDCARD_STAR, - _Exchange = #if_data_matches{pattern = #{type => topic}}, - _Kind = ?KHEPRI_WILDCARD_STAR, - _DstName = ?KHEPRI_WILDCARD_STAR, - _RoutingKey = ?KHEPRI_WILDCARD_STAR], - khepri:register_projection(?RA_CLUSTER_NAME, PathPattern, Projection). + PathPattern = rabbit_db_binding:khepri_route_path( + _VHost = ?KHEPRI_WILDCARD_STAR, + _Exchange = #if_data_matches{ + pattern = #exchange{type = topic, _ = '_'}}, + _Kind = ?KHEPRI_WILDCARD_STAR, + _DstName = ?KHEPRI_WILDCARD_STAR, + _RoutingKey = ?KHEPRI_WILDCARD_STAR), + khepri:register_projection(?STORE_ID, PathPattern, Projection). 
-spec follow_down_update(Table, Exchange, Words, UpdateFn) -> Ret when Table :: ets:tid(), @@ -1486,9 +1569,19 @@ get_feature_state(Node) -> %% @private khepri_db_migration_enable(#{feature_name := FeatureName}) -> - case sync_cluster_membership_from_mnesia(FeatureName) of - ok -> migrate_mnesia_tables(FeatureName); - Error -> Error + maybe + ok ?= sync_cluster_membership_from_mnesia(FeatureName), + ?LOG_INFO( + "Feature flag `~s`: unregistering legacy projections", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + ok ?= unregister_legacy_projections(), + ?LOG_INFO( + "Feature flag `~s`: registering projections", + [FeatureName], + #{domain => ?RMQLOG_DOMAIN_DB}), + ok ?= register_projections(), + migrate_mnesia_tables(FeatureName) end. %% @private diff --git a/deps/rabbit/src/rabbit_logger_exchange_h.erl b/deps/rabbit/src/rabbit_logger_exchange_h.erl index 781e4ce6203a..f94e76e2e7b3 100644 --- a/deps/rabbit/src/rabbit_logger_exchange_h.erl +++ b/deps/rabbit/src/rabbit_logger_exchange_h.erl @@ -44,8 +44,18 @@ log(#{meta := #{mfa := {?MODULE, _, _}}}, _) -> ok; log(LogEvent, Config) -> case rabbit_boot_state:get() of - ready -> do_log(LogEvent, Config); - _ -> ok + ready -> + try + do_log(LogEvent, Config) + catch + C:R:S -> + %% don't let logging crash, because then OTP logger + %% removes the logger_exchange handler, which in + %% turn deletes the log exchange and its bindings + erlang:display({?MODULE, crashed, {C, R, S}}) + end, + ok; + _ -> ok end. do_log(LogEvent, #{config := #{exchange := Exchange}} = Config) -> @@ -100,12 +110,18 @@ make_headers(_, _) -> [{<<"node">>, longstr, Node}]. try_format_body(LogEvent, #{formatter := {Formatter, FormatterConfig}}) -> - Formatted = try_format_body(LogEvent, Formatter, FormatterConfig), - erlang:iolist_to_binary(Formatted). + try_format_body(LogEvent, Formatter, FormatterConfig). 
try_format_body(LogEvent, Formatter, FormatterConfig) -> try - Formatter:format(LogEvent, FormatterConfig) + Formatted = Formatter:format(LogEvent, FormatterConfig), + case unicode:characters_to_binary(Formatted) of + Binary when is_binary(Binary) -> + Binary; + Error -> + %% The formatter returned invalid or incomplete unicode + throw(Error) + end catch C:R:S -> case {?DEFAULT_FORMATTER, ?DEFAULT_FORMATTER_CONFIG} of diff --git a/deps/rabbit/src/rabbit_maintenance.erl b/deps/rabbit/src/rabbit_maintenance.erl index e21526bee337..5e22a8217bbf 100644 --- a/deps/rabbit/src/rabbit_maintenance.erl +++ b/deps/rabbit/src/rabbit_maintenance.erl @@ -291,24 +291,21 @@ random_nth(Nodes) -> revive_local_quorum_queue_replicas() -> Queues = rabbit_amqqueue:list_local_followers(), - [begin - Name = amqqueue:get_name(Q), - rabbit_log:debug("Will trigger a leader election for local quorum queue ~ts", - [rabbit_misc:rs(Name)]), - %% start local QQ replica (Ra server) of this queue - {Prefix, _Node} = amqqueue:get_pid(Q), - RaServer = {Prefix, node()}, - rabbit_log:debug("Will start Ra server ~tp", [RaServer]), - case rabbit_quorum_queue:restart_server(RaServer) of - ok -> - rabbit_log:debug("Successfully restarted Ra server ~tp", [RaServer]); - {error, {already_started, _Pid}} -> - rabbit_log:debug("Ra server ~tp is already running", [RaServer]); - {error, nodedown} -> - rabbit_log:error("Failed to restart Ra server ~tp: target node was reported as down") - end - end || Q <- Queues], - rabbit_log:info("Restart of local quorum queue replicas is complete"). + %% NB: this function ignores the first argument so we can just pass the + %% empty binary as the vhost name. 
+ {Recovered, Failed} = rabbit_quorum_queue:recover(<<>>, Queues), + rabbit_log:debug("Successfully revived ~b quorum queue replicas", + [length(Recovered)]), + case length(Failed) of + 0 -> + ok; + NumFailed -> + rabbit_log:error("Failed to revive ~b quorum queue replicas", + [NumFailed]) + end, + + rabbit_log:info("Restart of local quorum queue replicas is complete"), + ok. %% %% Implementation diff --git a/deps/rabbit/src/rabbit_message_interceptor.erl b/deps/rabbit/src/rabbit_message_interceptor.erl index 1158f89874d1..436284e5454a 100644 --- a/deps/rabbit/src/rabbit_message_interceptor.erl +++ b/deps/rabbit/src/rabbit_message_interceptor.erl @@ -27,9 +27,9 @@ intercept(Msg, set_header_routing_node, Overwrite) -> Node = atom_to_binary(node()), set_annotation(Msg, ?HEADER_ROUTING_NODE, Node, Overwrite); intercept(Msg0, set_header_timestamp, Overwrite) -> - Millis = os:system_time(millisecond), - Msg = set_annotation(Msg0, ?HEADER_TIMESTAMP, Millis, Overwrite), - set_timestamp(Msg, Millis, Overwrite). + Ts = mc:get_annotation(?ANN_RECEIVED_AT_TIMESTAMP, Msg0), + Msg = set_annotation(Msg0, ?HEADER_TIMESTAMP, Ts, Overwrite), + set_timestamp(Msg, Ts, Overwrite). -spec set_annotation(mc:state(), mc:ann_key(), mc:ann_value(), boolean()) -> mc:state(). set_annotation(Msg, Key, Value, Overwrite) -> diff --git a/deps/rabbit/src/rabbit_mirror_queue_misc.erl b/deps/rabbit/src/rabbit_mirror_queue_misc.erl index 14f9f3d884ef..40caca897ae1 100644 --- a/deps/rabbit/src/rabbit_mirror_queue_misc.erl +++ b/deps/rabbit/src/rabbit_mirror_queue_misc.erl @@ -75,7 +75,7 @@ are_cmqs_used(_) -> %% may be unavailable. For instance, Mnesia needs another %% replica on another node before it considers it to be %% available. 
-            rabbit_table:wait(
+            rabbit_table:wait_silent(
               [rabbit_runtime_parameters], _Retry = true),
             are_cmqs_used1();
         false ->
diff --git a/deps/rabbit/src/rabbit_mnesia.erl b/deps/rabbit/src/rabbit_mnesia.erl
index 0aa4ae5360b5..ffa87ba131e5 100644
--- a/deps/rabbit/src/rabbit_mnesia.erl
+++ b/deps/rabbit/src/rabbit_mnesia.erl
@@ -407,7 +407,24 @@ cluster_nodes(WhichNodes) -> cluster_status(WhichNodes).
 cluster_status_from_mnesia() ->
     case is_running() of
         false ->
-            {error, mnesia_not_running};
+            case rabbit_khepri:get_feature_state() of
+                enabled ->
+                    %% To keep this API compatible with older remote nodes who
+                    %% don't know about Khepri, we take the cluster status
+                    %% from `rabbit_khepri' and reformat the return value to
+                    %% resemble the one from this module.
+                    %%
+                    %% Both nodes won't be compatible, but let's leave that
+                    %% decision to the Feature flags subsystem.
+                    case rabbit_khepri:cluster_status_from_khepri() of
+                        {ok, {All, Running}} ->
+                            {ok, {All, All, Running}};
+                        {error, _} = Error ->
+                            Error
+                    end;
+                _ ->
+                    {error, mnesia_not_running}
+            end;
         true ->
             %% If the tables are not present, it means that
             %% `init_db/3' hasn't been run yet. In other words, either
@@ -475,8 +492,23 @@ members() ->
     end.

 node_info() ->
+    %% Once Khepri is enabled, the Mnesia protocol is obviously irrelevant.
+    %%
+    %% That said, older remote nodes who don't know about Khepri will request
+    %% this information anyway as part of calling `node_info/0'. Here, we
+    %% simply return `unsupported' as the Mnesia protocol. Older versions of
+    %% RabbitMQ will skip the protocol negotiation and use other ways.
+    %%
+    %% The goal is mostly to let older nodes which check Mnesia before feature
+    %% flags reach the feature flags check. This one will correctly
+    %% indicate that they are incompatible. That's why we return `unsupported'
+    %% here, even if we could return the actual Mnesia protocol.
+ MnesiaProtocol = case rabbit_khepri:get_feature_state() of + enabled -> unsupported; + _ -> mnesia:system_info(protocol_version) + end, {rabbit_misc:otp_release(), rabbit_misc:version(), - mnesia:system_info(protocol_version), + MnesiaProtocol, cluster_status_from_mnesia()}. -spec node_type() -> rabbit_db_cluster:node_type(). @@ -694,10 +726,7 @@ check_cluster_consistency(Node, CheckNodesConsistency) -> Error end; {_OTP, _Rabbit, _Protocol, {ok, Status}} -> - case rabbit_db_cluster:check_compatibility(Node) of - ok -> {ok, Status}; - Error -> Error - end + {ok, Status} end. remote_node_info(Node) -> diff --git a/deps/rabbit/src/rabbit_msg_size_metrics.erl b/deps/rabbit/src/rabbit_msg_size_metrics.erl new file mode 100644 index 000000000000..1faaa311a515 --- /dev/null +++ b/deps/rabbit/src/rabbit_msg_size_metrics.erl @@ -0,0 +1,143 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +%% This module tracks received message size distribution as histogram. +%% (A histogram is represented by a set of counters, one for each bucket.) +-module(rabbit_msg_size_metrics). + +-export([init/1, + observe/2, + prometheus_format/0]). + +%% Integration tests. +-export([raw_buckets/1, + diff_raw_buckets/2]). + +-ifdef(TEST). +-export([cleanup/1]). +-endif. + +-define(BUCKET_1, 100). +-define(BUCKET_2, 1_000). +-define(BUCKET_3, 10_000). +-define(BUCKET_4, 100_000). +-define(BUCKET_5, 1_000_000). +-define(BUCKET_6, 10_000_000). +%% rabbit.max_message_size up to RabbitMQ 3.13 was 128 MiB. +%% rabbit.max_message_size since RabbitMQ 4.0 is 16 MiB. +%% To help finding an appropriate rabbit.max_message_size we also add a bucket for 50 MB. +-define(BUCKET_7, 50_000_000). 
+-define(BUCKET_8, 100_000_000). +%% 'infinity' means practically 512 MiB as hard limited in +%% https://github.com/rabbitmq/rabbitmq-server/blob/v4.0.2/deps/rabbit_common/include/rabbit.hrl#L254-L257 +-define(BUCKET_9, 'infinity'). + +-define(MSG_SIZE_BUCKETS, + [{1, ?BUCKET_1}, + {2, ?BUCKET_2}, + {3, ?BUCKET_3}, + {4, ?BUCKET_4}, + {5, ?BUCKET_5}, + {6, ?BUCKET_6}, + {7, ?BUCKET_7}, + {8, ?BUCKET_8}, + {9, ?BUCKET_9}]). + +-define(POS_MSG_SIZE_SUM, 10). + +-type raw_buckets() :: [{BucketUpperBound :: non_neg_integer(), + NumObservations :: non_neg_integer()}]. + +-spec init(atom()) -> ok. +init(Protocol) -> + Size = ?POS_MSG_SIZE_SUM, + Counters = counters:new(Size, [write_concurrency]), + put_counters(Protocol, Counters). + +-spec observe(atom(), non_neg_integer()) -> ok. +observe(Protocol, MessageSize) -> + BucketPos = find_bucket_pos(MessageSize), + Counters = get_counters(Protocol), + counters:add(Counters, BucketPos, 1), + counters:add(Counters, ?POS_MSG_SIZE_SUM, MessageSize). + +-spec prometheus_format() -> #{atom() => map()}. +prometheus_format() -> + Values = [prometheus_values(Counters) || Counters <- get_labels_counters()], + #{message_size_bytes => #{type => histogram, + help => "Size of messages received from publishers", + values => Values}}. + +find_bucket_pos(Size) when Size =< ?BUCKET_1 -> 1; +find_bucket_pos(Size) when Size =< ?BUCKET_2 -> 2; +find_bucket_pos(Size) when Size =< ?BUCKET_3 -> 3; +find_bucket_pos(Size) when Size =< ?BUCKET_4 -> 4; +find_bucket_pos(Size) when Size =< ?BUCKET_5 -> 5; +find_bucket_pos(Size) when Size =< ?BUCKET_6 -> 6; +find_bucket_pos(Size) when Size =< ?BUCKET_7 -> 7; +find_bucket_pos(Size) when Size =< ?BUCKET_8 -> 8; +find_bucket_pos(_Size) -> 9. + +raw_buckets(Protocol) + when is_atom(Protocol) -> + Counters = get_counters(Protocol), + raw_buckets(Counters); +raw_buckets(Counters) -> + [{UpperBound, counters:get(Counters, Pos)} + || {Pos, UpperBound} <- ?MSG_SIZE_BUCKETS]. 
+ +-spec diff_raw_buckets(raw_buckets(), raw_buckets()) -> raw_buckets(). +diff_raw_buckets(After, Before) -> + diff_raw_buckets(After, Before, []). + +diff_raw_buckets([], [], Acc) -> + lists:reverse(Acc); +diff_raw_buckets([{UpperBound, CounterAfter} | After], + [{UpperBound, CounterBefore} | Before], + Acc) -> + case CounterAfter - CounterBefore of + 0 -> + diff_raw_buckets(After, Before, Acc); + Diff -> + diff_raw_buckets(After, Before, [{UpperBound, Diff} | Acc]) + end. + +%% "If you have looked at a /metrics for a histogram, you probably noticed that the buckets +%% aren’t just a count of events that fall into them. The buckets also include a count of +%% events in all the smaller buckets, all the way up to the +Inf, bucket which is the total +%% number of events. This is known as a cumulative histogram, and why the bucket label +%% is called le, standing for less than or equal to. +%% This is in addition to buckets being counters, so Prometheus histograms are cumula‐ +%% tive in two different ways." +%% [Prometheus: Up & Running] +prometheus_values({Labels, Counters}) -> + {Buckets, Count} = lists:mapfoldl( + fun({UpperBound, NumObservations}, Acc0) -> + Acc = Acc0 + NumObservations, + {{UpperBound, Acc}, Acc} + end, 0, raw_buckets(Counters)), + Sum = counters:get(Counters, ?POS_MSG_SIZE_SUM), + {Labels, Buckets, Count, Sum}. + +put_counters(Protocol, Counters) -> + persistent_term:put({?MODULE, Protocol}, Counters). + +get_counters(Protocol) -> + persistent_term:get({?MODULE, Protocol}). + +get_labels_counters() -> + [{[{protocol, Protocol}], Counters} + || {{?MODULE, Protocol}, Counters} <- persistent_term:get()]. + +-ifdef(TEST). +%% "Counters are not tied to the current process and are automatically +%% garbage collected when they are no longer referenced." +-spec cleanup(atom()) -> ok. +cleanup(Protocol) -> + persistent_term:erase({?MODULE, Protocol}), + ok. +-endif. 
diff --git a/deps/rabbit/src/rabbit_msg_store.erl b/deps/rabbit/src/rabbit_msg_store.erl index c5b02f6eb9c4..b28506ab2ab8 100644 --- a/deps/rabbit/src/rabbit_msg_store.erl +++ b/deps/rabbit/src/rabbit_msg_store.erl @@ -1050,7 +1050,7 @@ internal_sync(State = #msstate { current_file_handle = CurHdl, flying_write(Key, #msstate { flying_ets = FlyingEts }) -> case ets:lookup(FlyingEts, Key) of [{_, ?FLYING_WRITE}] -> - ets:update_counter(FlyingEts, Key, ?FLYING_WRITE_DONE), + _ = ets:update_counter(FlyingEts, Key, ?FLYING_WRITE_DONE), %% We only remove the object if it hasn't changed %% (a remove may be sent while we were processing the write). true = ets:delete_object(FlyingEts, {Key, ?FLYING_IS_WRITTEN}), @@ -1318,7 +1318,7 @@ update_msg_cache(CacheEts, MsgId, Msg) -> %% but without the debug log that we don't want as the update is %% more likely to fail following recent reworkings. try - ets:update_counter(CacheEts, MsgId, {3, +1}), + _ = ets:update_counter(CacheEts, MsgId, {3, +1}), ok catch error:badarg -> %% The entry must have been removed between diff --git a/deps/rabbit/src/rabbit_networking.erl b/deps/rabbit/src/rabbit_networking.erl index 508e0a0e2b9f..827c4f666e7b 100644 --- a/deps/rabbit/src/rabbit_networking.erl +++ b/deps/rabbit/src/rabbit_networking.erl @@ -25,9 +25,9 @@ node_listeners/1, node_client_listeners/1, register_connection/1, unregister_connection/1, register_non_amqp_connection/1, unregister_non_amqp_connection/1, - connections/0, non_amqp_connections/0, connection_info_keys/0, - connection_info/1, connection_info/2, - connection_info_all/0, connection_info_all/1, + connections/0, non_amqp_connections/0, + connection_info/2, + connection_info_all/1, emit_connection_info_all/4, emit_connection_info_local/3, close_connection/2, close_connections/2, close_all_connections/1, close_all_user_connections/2, @@ -482,23 +482,11 @@ non_amqp_connections() -> local_non_amqp_connections() -> pg_local:get_members(rabbit_non_amqp_connections). 
--spec connection_info_keys() -> rabbit_types:info_keys(). - -connection_info_keys() -> rabbit_reader:info_keys(). - --spec connection_info(rabbit_types:connection()) -> rabbit_types:infos(). - -connection_info(Pid) -> rabbit_reader:info(Pid). - -spec connection_info(rabbit_types:connection(), rabbit_types:info_keys()) -> rabbit_types:infos(). connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items). --spec connection_info_all() -> [rabbit_types:infos()]. - -connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end). - -spec connection_info_all(rabbit_types:info_keys()) -> [rabbit_types:infos()]. @@ -531,9 +519,8 @@ close_connections(Pids, Explanation) -> -spec close_all_user_connections(rabbit_types:username(), string()) -> 'ok'. close_all_user_connections(Username, Explanation) -> - Pids = [Pid || #tracked_connection{pid = Pid} <- rabbit_connection_tracking:list_of_user(Username)], - [close_connection(Pid, Explanation) || Pid <- Pids], - ok. + Tracked = rabbit_connection_tracking:list_of_user(Username), + rabbit_connection_tracking:close_connections(Tracked, Explanation, 0). %% Meant to be used by tests only -spec close_all_connections(string()) -> 'ok'. @@ -560,7 +547,7 @@ failed_to_recv_proxy_header(Ref, Error) -> end, rabbit_log:debug(Msg, [Error]), % The following call will clean up resources then exit - _ = catch ranch:handshake(Ref), + _ = ranch:handshake(Ref), exit({shutdown, failed_to_recv_proxy_header}). 
handshake(Ref, ProxyProtocolEnabled) -> @@ -572,22 +559,14 @@ handshake(Ref, ProxyProtocolEnabled) -> {error, protocol_error, Error} -> failed_to_recv_proxy_header(Ref, Error); {ok, ProxyInfo} -> - case catch ranch:handshake(Ref) of - {'EXIT', normal} -> - {error, handshake_failed}; - {ok, Sock} -> - ok = tune_buffer_size(Sock), - {ok, {rabbit_proxy_socket, Sock, ProxyInfo}} - end + {ok, Sock} = ranch:handshake(Ref), + ok = tune_buffer_size(Sock), + {ok, {rabbit_proxy_socket, Sock, ProxyInfo}} end; false -> - case catch ranch:handshake(Ref) of - {'EXIT', normal} -> - {error, handshake_failed}; - {ok, Sock} -> - ok = tune_buffer_size(Sock), - {ok, Sock} - end + {ok, Sock} = ranch:handshake(Ref), + ok = tune_buffer_size(Sock), + {ok, Sock} end. tune_buffer_size(Sock) -> diff --git a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl index 3c46f36e2384..6aa50602c673 100644 --- a/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl +++ b/deps/rabbit/src/rabbit_peer_discovery_classic_config.erl @@ -21,8 +21,12 @@ list_nodes() -> case application:get_env(rabbit, cluster_nodes, {[], disc}) of {Nodes, NodeType} -> + check_local_node(Nodes), + check_duplicates(Nodes), {ok, {add_this_node(Nodes), NodeType}}; Nodes when is_list(Nodes) -> + check_local_node(Nodes), + check_duplicates(Nodes), {ok, {add_this_node(Nodes), disc}} end. @@ -33,6 +37,26 @@ add_this_node(Nodes) -> false -> [ThisNode | Nodes] end. +check_duplicates(Nodes) -> + case (length(lists:usort(Nodes)) == length(Nodes)) of + true -> + ok; + false -> + rabbit_log:warning("Classic peer discovery backend: list of " + "nodes contains duplicates ~0tp", + [Nodes]) + end. + +check_local_node(Nodes) -> + case lists:member(node(), Nodes) of + true -> + ok; + false -> + rabbit_log:warning("Classic peer discovery backend: list of " + "nodes does not contain the local node ~0tp", + [Nodes]) + end. 
+ -spec lock(Nodes :: [node()]) -> {ok, {{ResourceId :: string(), LockRequesterId :: node()}, Nodes :: [node()]}} | {error, Reason :: string()}. diff --git a/deps/rabbit/src/rabbit_policies.erl b/deps/rabbit/src/rabbit_policies.erl index 66224ce6aa1b..5637e9e46251 100644 --- a/deps/rabbit/src/rabbit_policies.erl +++ b/deps/rabbit/src/rabbit_policies.erl @@ -165,7 +165,7 @@ validate_policy0(<<"overflow">>, Value) -> {error, "~tp is not a valid overflow value", [Value]}; validate_policy0(<<"delivery-limit">>, Value) - when is_integer(Value), Value >= 0 -> + when is_integer(Value) -> ok; validate_policy0(<<"delivery-limit">>, Value) -> {error, "~tp is not a valid delivery limit", [Value]}; @@ -208,14 +208,35 @@ validate_policy0(<<"stream-filter-size-bytes">>, Value) validate_policy0(<<"stream-filter-size-bytes">>, Value) -> {error, "~tp is not a valid filter size. Valid range is 16-255", [Value]}. -merge_policy_value(<<"message-ttl">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"max-length">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"expires">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"delivery-limit">>, Val, OpVal) -> min(Val, OpVal); -merge_policy_value(<<"queue-version">>, _Val, OpVal) -> OpVal; -merge_policy_value(<<"overflow">>, _Val, OpVal) -> OpVal; -%% use operator policy value for booleans -merge_policy_value(_Key, Val, OpVal) when is_boolean(Val) andalso is_boolean(OpVal) -> OpVal. 
+merge_policy_value(<<"message-ttl">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"max-length">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"max-length-bytes">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"max-in-memory-length">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"max-in-memory-bytes">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"expires">>, Val, OpVal) -> + min(Val, OpVal); +merge_policy_value(<<"delivery-limit">>, Val, OpVal) -> + case (is_integer(Val) andalso Val < 0) orelse + (is_integer(OpVal) andalso OpVal < 0) of + true -> + %% one of the policies define an unlimited delivery-limit (negative value) + %% choose the more conservative value + max(Val, OpVal); + false -> + %% else choose the lower value + min(Val, OpVal) + end; +merge_policy_value(<<"queue-version">>, _Val, OpVal) -> + OpVal; +merge_policy_value(<<"overflow">>, _Val, OpVal) -> + OpVal; +merge_policy_value(_Key, Val, OpVal) + when is_boolean(Val) andalso + is_boolean(OpVal) -> + %% use operator policy value for booleans + OpVal. diff --git a/deps/rabbit/src/rabbit_queue_consumers.erl b/deps/rabbit/src/rabbit_queue_consumers.erl index 62ae7bd20c20..a36efe3cb94c 100644 --- a/deps/rabbit/src/rabbit_queue_consumers.erl +++ b/deps/rabbit/src/rabbit_queue_consumers.erl @@ -22,7 +22,8 @@ -define(QUEUE, lqueue). --define(UNSENT_MESSAGE_LIMIT, 200). +-define(KEY_UNSENT_MESSAGE_LIMIT, classic_queue_consumer_unsent_message_limit). +-define(DEFAULT_UNSENT_MESSAGE_LIMIT, 200). %% Utilisation average calculations are all in μs. -define(USE_AVG_HALF_LIFE, 1000000.0). @@ -32,7 +33,7 @@ -record(consumer, {tag, ack_required, prefetch, args, user}). %% AMQP 1.0 link flow control state, see §2.6.7 -%% Delete atom credit_api_v1 when feature flag credit_api_v2 becomes required. +%% Delete atom credit_api_v1 when feature flag rabbitmq_4.0.0 becomes required. 
-record(link_state, {delivery_count :: rabbit_queue_type:delivery_count() | credit_api_v1, credit :: rabbit_queue_type:credit()}). @@ -72,10 +73,15 @@ -spec new() -> state(). -new() -> #state{consumers = priority_queue:new(), - use = {active, - erlang:monotonic_time(micro_seconds), - 1.0}}. +new() -> + Val = application:get_env(rabbit, + ?KEY_UNSENT_MESSAGE_LIMIT, + ?DEFAULT_UNSENT_MESSAGE_LIMIT), + persistent_term:put(?KEY_UNSENT_MESSAGE_LIMIT, Val), + #state{consumers = priority_queue:new(), + use = {active, + erlang:monotonic_time(microsecond), + 1.0}}. -spec max_active_priority(state()) -> integer() | 'infinity' | 'empty'. @@ -286,7 +292,6 @@ deliver_to_consumer(FetchFun, E = {ChPid, Consumer = #consumer{tag = CTag}}, QName) -> C = #cr{link_states = LinkStates} = lookup_ch(ChPid), - ChBlocked = is_ch_blocked(C), case LinkStates of #{CTag := #link_state{delivery_count = DeliveryCount0, credit = Credit} = LinkState0} -> @@ -308,22 +313,24 @@ deliver_to_consumer(FetchFun, block_consumer(C, E), undelivered end; - _ when ChBlocked -> - %% not a link credit consumer, use credit flow - block_consumer(C, E), - undelivered; _ -> %% not a link credit consumer, use credit flow - case rabbit_limiter:can_send(C#cr.limiter, - Consumer#consumer.ack_required, - CTag) of - {suspend, Limiter} -> - block_consumer(C#cr{limiter = Limiter}, E), + case is_ch_blocked(C) of + true -> + block_consumer(C, E), undelivered; - {continue, Limiter} -> - {delivered, deliver_to_consumer( - FetchFun, Consumer, - C#cr{limiter = Limiter}, QName)} + false -> + case rabbit_limiter:can_send(C#cr.limiter, + Consumer#consumer.ack_required, + CTag) of + {suspend, Limiter} -> + block_consumer(C#cr{limiter = Limiter}, E), + undelivered; + {continue, Limiter} -> + {delivered, deliver_to_consumer( + FetchFun, Consumer, + C#cr{limiter = Limiter}, QName)} + end end end. 
@@ -589,7 +596,7 @@ parse_credit_mode({simple_prefetch, Prefetch}, _Args) -> parse_credit_mode({credited, InitialDeliveryCount}, _Args) -> {InitialDeliveryCount, manual}; %% credit API v1 -%% i.e. below function clause should be deleted when feature flag credit_api_v2 becomes required: +%% i.e. below function clause should be deleted when feature flag rabbitmq_4.0.0 becomes required: parse_credit_mode(Prefetch, Args) when is_integer(Prefetch) -> case rabbit_misc:table_lookup(Args, <<"x-credit">>) of @@ -653,7 +660,8 @@ block_consumer(C = #cr{blocked_consumers = Blocked}, QEntry) -> update_ch_record(C#cr{blocked_consumers = add_consumer(QEntry, Blocked)}). is_ch_blocked(#cr{unsent_message_count = Count, limiter = Limiter}) -> - Count >= ?UNSENT_MESSAGE_LIMIT orelse rabbit_limiter:is_suspended(Limiter). + UnsentMessageLimit = persistent_term:get(?KEY_UNSENT_MESSAGE_LIMIT), + Count >= UnsentMessageLimit orelse rabbit_limiter:is_suspended(Limiter). tags(CList) -> [CTag || {_P, {_ChPid, #consumer{tag = CTag}}} <- CList]. diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl index 4f3db9a3231c..938588da6662 100644 --- a/deps/rabbit/src/rabbit_queue_type.erl +++ b/deps/rabbit/src/rabbit_queue_type.erl @@ -114,8 +114,9 @@ -opaque state() :: #?STATE{}. -%% Delete atom 'credit_api_v1' when feature flag credit_api_v2 becomes required. --type consume_mode() :: {simple_prefetch, non_neg_integer()} | {credited, Initial :: delivery_count() | credit_api_v1}. +%% Delete atom 'credit_api_v1' when feature flag rabbitmq_4.0.0 becomes required. +-type consume_mode() :: {simple_prefetch, Prefetch :: non_neg_integer()} | + {credited, Initial :: delivery_count() | credit_api_v1}. -type consume_spec() :: #{no_ack := boolean(), channel_pid := pid(), limiter_pid => pid() | none, @@ -135,7 +136,13 @@ -type delivery_options() :: #{correlation => correlation(), atom() => term()}. --type settle_op() :: 'complete' | 'requeue' | 'discard'. 
+-type settle_op() :: complete | + requeue | + discard | + {modify, + DeliveryFailed :: boolean(), + UndeliverableHere :: boolean(), + Annotations :: mc:annotations()}. -export_type([state/0, consume_mode/0, @@ -189,7 +196,8 @@ -callback is_stateful() -> boolean(). %% intitialise and return a queue type specific session context --callback init(amqqueue:amqqueue()) -> {ok, queue_state()} | {error, Reason :: term()}. +-callback init(amqqueue:amqqueue()) -> + {ok, queue_state()} | {error, Reason :: term()}. -callback close(queue_state()) -> ok. %% update the queue type state from amqqrecord @@ -225,7 +233,7 @@ {queue_state(), actions()} | {'protocol_error', Type :: atom(), Reason :: string(), Args :: term()}. -%% Delete this callback when feature flag credit_api_v2 becomes required. +%% Delete this callback when feature flag rabbitmq_4.0.0 becomes required. -callback credit_v1(queue_name(), rabbit_types:ctag(), credit(), Drain :: boolean(), queue_state()) -> {queue_state(), actions()}. @@ -292,14 +300,23 @@ short_alias_of(<<"rabbit_quorum_queue">>) -> <<"quorum">>; short_alias_of(rabbit_quorum_queue) -> <<"quorum">>; +%% AMQP 1.0 management client +short_alias_of({utf8, <<"quorum">>}) -> + <<"quorum">>; short_alias_of(<<"rabbit_classic_queue">>) -> <<"classic">>; short_alias_of(rabbit_classic_queue) -> <<"classic">>; +%% AMQP 1.0 management client +short_alias_of({utf8, <<"classic">>}) -> + <<"classic">>; short_alias_of(<<"rabbit_stream_queue">>) -> <<"stream">>; short_alias_of(rabbit_stream_queue) -> <<"stream">>; +%% AMQP 1.0 management client +short_alias_of({utf8, <<"stream">>}) -> + <<"stream">>; short_alias_of(_Other) -> undefined. @@ -366,6 +383,7 @@ declare(Q0, Node) -> boolean(), rabbit_types:username()) -> rabbit_types:ok(non_neg_integer()) | rabbit_types:error(in_use | not_empty) | + rabbit_types:error(timeout) | {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. 
delete(Q, IfUnused, IfEmpty, ActingUser) -> Mod = amqqueue:get_type(Q), @@ -707,7 +725,7 @@ settle(#resource{kind = queue} = QRef, Op, CTag, MsgIds, Ctxs) -> end end. -%% Delete this function when feature flag credit_api_v2 becomes required. +%% Delete this function when feature flag rabbitmq_4.0.0 becomes required. -spec credit_v1(queue_name(), rabbit_types:ctag(), credit(), boolean(), state()) -> {ok, state(), actions()}. credit_v1(QName, CTag, LinkCreditSnd, Drain, Ctxs) -> diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index a6020b0e02b5..f936891d0560 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -76,8 +76,14 @@ -export([force_shrink_member_to_current_member/2, force_all_queues_shrink_member_to_current_member/0]). +%% for backwards compatibility +-export([file_handle_leader_reservation/1, + file_handle_other_reservation/0, + file_handle_release_reservation/0]). + -ifdef(TEST). --export([filter_promotable/2]). +-export([filter_promotable/2, + ra_machine_config/1]). -endif. -import(rabbit_queue_type_util, [args_policy_lookup/3, @@ -98,6 +104,8 @@ -define(RA_SYSTEM, quorum_queues). -define(RA_WAL_NAME, ra_log_wal). +-define(DEFAULT_DELIVERY_LIMIT, 20). + -define(INFO(Str, Args), rabbit_log:info("[~s:~s/~b] " Str, [?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY | Args])). @@ -129,11 +137,12 @@ -define(RPC_TIMEOUT, 1000). -define(START_CLUSTER_TIMEOUT, 5000). -define(START_CLUSTER_RPC_TIMEOUT, 60_000). %% needs to be longer than START_CLUSTER_TIMEOUT --define(TICK_TIMEOUT, 5000). %% the ra server tick time +-define(TICK_INTERVAL, 5000). %% the ra server tick time -define(DELETE_TIMEOUT, 5000). -define(MEMBER_CHANGE_TIMEOUT, 20_000). -define(SNAPSHOT_INTERVAL, 8192). %% the ra default is 4096 --define(UNLIMITED_PREFETCH_COUNT, 2000). %% something large for ra +% -define(UNLIMITED_PREFETCH_COUNT, 2000). %% something large for ra +-define(MIN_CHECKPOINT_INTERVAL, 8192). 
%% the ra default is 16384 %%----------- QQ policies --------------------------------------------------- @@ -180,7 +189,7 @@ is_compatible(_, _, _) -> init(Q) when ?is_amqqueue(Q) -> {ok, SoftLimit} = application:get_env(rabbit, quorum_commands_soft_limit), {Name, _} = MaybeLeader = amqqueue:get_pid(Q), - Leader = case ra_leaderboard:lookup_leader(Name) of + Leader = case find_leader(Q) of undefined -> %% leader from queue record will have to suffice MaybeLeader; @@ -289,7 +298,12 @@ start_cluster(Q) -> declare_queue_error(Error, NewQ, LeaderNode, ActingUser) end; {existing, _} = Ex -> - Ex + Ex; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare quorum ~ts on node '~ts' because the metadata " + "store operation timed out", + [rabbit_misc:rs(QName), node()]} end. declare_queue_error(Error, Queue, Leader, ActingUser) -> @@ -306,12 +320,18 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> {Name, _} = amqqueue:get_pid(Q), %% take the minimum value of the policy and the queue arg if present MaxLength = args_policy_lookup(<<"max-length">>, fun min/2, Q), - OverflowBin = args_policy_lookup(<<"overflow">>, fun policyHasPrecedence/2, Q), + OverflowBin = args_policy_lookup(<<"overflow">>, fun policy_has_precedence/2, Q), Overflow = overflow(OverflowBin, drop_head, QName), MaxBytes = args_policy_lookup(<<"max-length-bytes">>, fun min/2, Q), - MaxMemoryLength = args_policy_lookup(<<"max-in-memory-length">>, fun min/2, Q), - MaxMemoryBytes = args_policy_lookup(<<"max-in-memory-bytes">>, fun min/2, Q), - DeliveryLimit = args_policy_lookup(<<"delivery-limit">>, fun min/2, Q), + DeliveryLimit = case args_policy_lookup(<<"delivery-limit">>, + fun resolve_delivery_limit/2, Q) of + undefined -> + rabbit_log:info("~ts: delivery_limit not set, defaulting to ~b", + [rabbit_misc:rs(QName), ?DEFAULT_DELIVERY_LIMIT]), + ?DEFAULT_DELIVERY_LIMIT; + DL -> + DL + end, Expires = args_policy_lookup(<<"expires">>, fun min/2, Q), MsgTTL = 
args_policy_lookup(<<"message-ttl">>, fun min/2, Q), #{name => Name, @@ -320,8 +340,6 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> become_leader_handler => {?MODULE, become_leader, [QName]}, max_length => MaxLength, max_bytes => MaxBytes, - max_in_memory_length => MaxMemoryLength, - max_in_memory_bytes => MaxMemoryBytes, single_active_consumer_on => single_active_consumer_on(Q), delivery_limit => DeliveryLimit, overflow_strategy => Overflow, @@ -330,9 +348,16 @@ ra_machine_config(Q) when ?is_amqqueue(Q) -> msg_ttl => MsgTTL }. -policyHasPrecedence(Policy, _QueueArg) -> +resolve_delivery_limit(PolVal, ArgVal) + when PolVal < 0 orelse ArgVal < 0 -> + max(PolVal, ArgVal); +resolve_delivery_limit(PolVal, ArgVal) -> + min(PolVal, ArgVal). + +policy_has_precedence(Policy, _QueueArg) -> Policy. -queueArgHasPrecedence(_Policy, QueueArg) -> + +queue_arg_has_precedence(_Policy, QueueArg) -> QueueArg. single_active_consumer_on(Q) -> @@ -501,11 +526,12 @@ spawn_notify_decorators(QName, Fun, Args) -> catch notify_decorators(QName, Fun, Args). 
handle_tick(QName, - #{config := #{name := Name}, + #{config := #{name := Name} = Cfg, num_active_consumers := NumConsumers, num_checked_out := NumCheckedOut, num_ready_messages := NumReadyMsgs, num_messages := NumMessages, + num_enqueuers := NumEnqueuers, enqueue_message_bytes := EnqueueBytes, checkout_message_bytes := CheckoutBytes, num_discarded := NumDiscarded, @@ -529,6 +555,7 @@ handle_tick(QName, 0 -> 0; _ -> rabbit_fifo:usage(Name) end, + Keys = ?STATISTICS_KEYS -- [leader, consumers, messages_dlx, @@ -538,9 +565,20 @@ handle_tick(QName, ], {SacTag, SacPid} = maps:get(single_active_consumer_id, Overview, {'', ''}), + Infos0 = maps:fold( + fun(num_ready_messages_high, V, Acc) -> + [{messages_ready_high, V} | Acc]; + (num_ready_messages_normal, V, Acc) -> + [{messages_ready_normal, V} | Acc]; + (num_ready_messages_return, V, Acc) -> + [{messages_ready_returned, V} | Acc]; + (_, _, Acc) -> + Acc + end, info(Q, Keys), Overview), MsgBytesDiscarded = DiscardBytes + DiscardCheckoutBytes, MsgBytes = EnqueueBytes + CheckoutBytes + MsgBytesDiscarded, Infos = [{consumers, NumConsumers}, + {publishers, NumEnqueuers}, {consumer_capacity, Util}, {consumer_utilisation, Util}, {message_bytes_ready, EnqueueBytes}, @@ -552,8 +590,15 @@ handle_tick(QName, {message_bytes_dlx, MsgBytesDiscarded}, {single_active_consumer_tag, SacTag}, {single_active_consumer_pid, SacPid}, - {leader, node()} - | info(Q, Keys)], + {leader, node()}, + {delivery_limit, case maps:get(delivery_limit, Cfg, + undefined) of + undefined -> + unlimited; + Limit -> + Limit + end} + | Infos0], rabbit_core_metrics:queue_stats(QName, Infos), ok = repair_leader_record(Q, Self), case repair_amqqueue_nodes(Q) of @@ -569,12 +614,12 @@ handle_tick(QName, Stale when length(ExpectedNodes) > 0 -> %% rabbit_nodes:list_members/0 returns [] when there %% is an error so we need to handle that case - rabbit_log:debug("~ts: stale nodes detected. 
Purging ~w", + rabbit_log:debug("~ts: stale nodes detected in quorum " + "queue state. Purging ~w", [rabbit_misc:rs(QName), Stale]), %% pipeline purge command ok = ra:pipeline_command(amqqueue:get_pid(Q), rabbit_fifo:make_purge_nodes(Stale)), - ok; _ -> ok @@ -761,16 +806,23 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) -> MRef = erlang:monitor(process, Leader), receive {'DOWN', MRef, process, _, _} -> + %% leader is down, + %% force delete remaining members + ok = force_delete_queue(lists:delete(Leader, Servers)), ok after Timeout -> erlang:demonitor(MRef, [flush]), ok = force_delete_queue(Servers) end, notify_decorators(QName, shutdown), - ok = delete_queue_data(Q, ActingUser), - _ = erpc:call(LeaderNode, rabbit_core_metrics, queue_deleted, [QName], - ?RPC_TIMEOUT), - {ok, ReadyMsgs}; + case delete_queue_data(Q, ActingUser) of + ok -> + _ = erpc:call(LeaderNode, rabbit_core_metrics, queue_deleted, [QName], + ?RPC_TIMEOUT), + {ok, ReadyMsgs}; + {error, timeout} = Err -> + Err + end; {error, {no_more_servers_to_try, Errs}} -> case lists:all(fun({{error, noproc}, _}) -> true; (_) -> false @@ -778,8 +830,7 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) -> true -> %% If all ra nodes were already down, the delete %% has succeed - delete_queue_data(Q, ActingUser), - {ok, ReadyMsgs}; + ok; false -> %% attempt forced deletion of all servers rabbit_log:warning( @@ -788,9 +839,13 @@ delete(Q, _IfUnused, _IfEmpty, ActingUser) when ?amqqueue_is_quorum(Q) -> " Attempting force delete.", [rabbit_misc:rs(QName), Errs]), ok = force_delete_queue(Servers), - notify_decorators(QName, shutdown), - delete_queue_data(Q, ActingUser), - {ok, ReadyMsgs} + notify_decorators(QName, shutdown) + end, + case delete_queue_data(Q, ActingUser) of + ok -> + {ok, ReadyMsgs}; + {error, timeout} = Err -> + Err end end. @@ -808,9 +863,13 @@ force_delete_queue(Servers) -> end || S <- Servers], ok. 
+-spec delete_queue_data(Queue, ActingUser) -> Ret when + Queue :: amqqueue:amqqueue(), + ActingUser :: rabbit_types:username(), + Ret :: ok | {error, timeout}. + delete_queue_data(Queue, ActingUser) -> - _ = rabbit_amqqueue:internal_delete(Queue, ActingUser), - ok. + rabbit_amqqueue:internal_delete(Queue, ActingUser). delete_immediately(Queue) -> @@ -824,7 +883,10 @@ settle(_QName, complete, CTag, MsgIds, QState) -> settle(_QName, requeue, CTag, MsgIds, QState) -> rabbit_fifo_client:return(quorum_ctag(CTag), MsgIds, QState); settle(_QName, discard, CTag, MsgIds, QState) -> - rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState). + rabbit_fifo_client:discard(quorum_ctag(CTag), MsgIds, QState); +settle(_QName, {modify, DelFailed, Undel, Anns}, CTag, MsgIds, QState) -> + rabbit_fifo_client:modify(quorum_ctag(CTag), MsgIds, DelFailed, Undel, + Anns, QState). credit_v1(_QName, CTag, Credit, Drain, QState) -> rabbit_fifo_client:credit_v1(quorum_ctag(CTag), Credit, Drain, QState). @@ -871,31 +933,26 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> ConsumerTag = quorum_ctag(ConsumerTag0), %% consumer info is used to describe the consumer properties AckRequired = not NoAck, - {CreditMode, EffectivePrefetch, DeclaredPrefetch, ConsumerMeta0} = - case Mode of - {credited, C} -> - Meta = if C =:= credit_api_v1 -> - #{}; - is_integer(C) -> - #{initial_delivery_count => C} - end, - {credited, 0, 0, Meta}; - {simple_prefetch = M, Declared} -> - Effective = case Declared of - 0 -> ?UNLIMITED_PREFETCH_COUNT; - _ -> Declared - end, - {M, Effective, Declared, #{}} - end, - ConsumerMeta = maps:merge( - ConsumerMeta0, - #{ack => AckRequired, - prefetch => DeclaredPrefetch, - args => Args, - username => ActingUser}), - {ok, QState} = rabbit_fifo_client:checkout(ConsumerTag, EffectivePrefetch, - CreditMode, ConsumerMeta, - QState0), + Prefetch = case Mode of + {simple_prefetch, Declared} -> + Declared; + _ -> + 0 + end, + Priority = case rabbit_misc:table_lookup(Args, 
<<"x-priority">>) of + {_Key, Value} -> + Value; + _ -> + 0 + end, + ConsumerMeta = #{ack => AckRequired, + prefetch => Prefetch, + args => Args, + username => ActingUser, + priority => Priority}, + {ok, _Infos, QState} = rabbit_fifo_client:checkout(ConsumerTag, + Mode, ConsumerMeta, + QState0), case single_active_consumer_on(Q) of true -> %% get the leader from state @@ -910,10 +967,10 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> rabbit_core_metrics:consumer_created( ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, - DeclaredPrefetch, ActivityStatus == single_active, %% Active + Prefetch, ActivityStatus == single_active, %% Active ActivityStatus, Args), emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, - AckRequired, QName, DeclaredPrefetch, + AckRequired, QName, Prefetch, Args, none, ActingUser), {ok, QState}; {error, Error} -> @@ -925,17 +982,18 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> rabbit_core_metrics:consumer_created( ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName, - DeclaredPrefetch, true, %% Active + Prefetch, true, %% Active up, Args), emit_consumer_created(ChPid, ConsumerTag, ExclusiveConsume, - AckRequired, QName, DeclaredPrefetch, + AckRequired, QName, Prefetch, Args, none, ActingUser), {ok, QState} end. cancel(_Q, #{consumer_tag := ConsumerTag} = Spec, State) -> maybe_send_reply(self(), maps:get(ok_msg, Spec, undefined)), - rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), State). + Reason = maps:get(reason, Spec, cancel), + rabbit_fifo_client:cancel_checkout(quorum_ctag(ConsumerTag), Reason, State). 
emit_consumer_created(ChPid, CTag, Exclusive, AckRequired, QName, PrefetchCount, Args, Ref, ActingUser) -> rabbit_event:notify(consumer_created, @@ -1349,6 +1407,23 @@ shrink_all(Node) -> case delete_member(Q, Node) of ok -> {QName, {ok, Size-1}}; + {error, cluster_change_not_permitted} -> + %% this could be timing related and due to a new leader just being + %% elected but it's noop command not been committed yet. + %% lets sleep and retry once + rabbit_log:info("~ts: failed to remove member (replica) on node ~w " + "as cluster change is not permitted. " + "retrying once in 500ms", + [rabbit_misc:rs(QName), Node]), + timer:sleep(500), + case delete_member(Q, Node) of + ok -> + {QName, {ok, Size-1}}; + {error, Err} -> + rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", + [rabbit_misc:rs(QName), Node, Err]), + {QName, {error, Size, Err}} + end; {error, Err} -> rabbit_log:warning("~ts: failed to remove member (replica) on node ~w, error: ~w", [rabbit_misc:rs(QName), Node, Err]), @@ -1454,9 +1529,9 @@ reclaim_memory(Vhost, QueueName) -> %%---------------------------------------------------------------------------- dead_letter_handler(Q, Overflow) -> - Exchange = args_policy_lookup(<<"dead-letter-exchange">>, fun queueArgHasPrecedence/2, Q), - RoutingKey = args_policy_lookup(<<"dead-letter-routing-key">>, fun queueArgHasPrecedence/2, Q), - Strategy = args_policy_lookup(<<"dead-letter-strategy">>, fun queueArgHasPrecedence/2, Q), + Exchange = args_policy_lookup(<<"dead-letter-exchange">>, fun queue_arg_has_precedence/2, Q), + RoutingKey = args_policy_lookup(<<"dead-letter-routing-key">>, fun queue_arg_has_precedence/2, Q), + Strategy = args_policy_lookup(<<"dead-letter-strategy">>, fun queue_arg_has_precedence/2, Q), QName = amqqueue:get_name(Q), dlh(Exchange, RoutingKey, Strategy, Overflow, QName). @@ -1663,10 +1738,16 @@ open_files(Name) -> end. 
leader(Q) when ?is_amqqueue(Q) -> - {Name, Leader} = amqqueue:get_pid(Q), - case is_process_alive(Name, Leader) of - true -> Leader; - false -> '' + case find_leader(Q) of + undefined -> + ''; + {Name, LeaderNode} -> + case is_process_alive(Name, LeaderNode) of + true -> + LeaderNode; + false -> + '' + end end. peek(Vhost, Queue, Pos) -> @@ -1742,12 +1823,6 @@ format(Q, Ctx) when ?is_amqqueue(Q) -> {leader, LeaderNode}, {online, Online}]. -is_process_alive(Name, Node) -> - %% don't attempt rpc if node is not already connected - %% as this function is used for metrics and stats and the additional - %% latency isn't warranted - erlang:is_pid(erpc_call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)). - -spec quorum_messages(rabbit_amqqueue:name()) -> non_neg_integer(). quorum_messages(QName) -> @@ -1783,18 +1858,27 @@ make_ra_conf(Q, ServerId) -> make_ra_conf(Q, ServerId, Membership) -> TickTimeout = application:get_env(rabbit, quorum_tick_interval, - ?TICK_TIMEOUT), + ?TICK_INTERVAL), SnapshotInterval = application:get_env(rabbit, quorum_snapshot_interval, ?SNAPSHOT_INTERVAL), - make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval, Membership). - -make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval, Membership) -> + CheckpointInterval = application:get_env(rabbit, + quorum_min_checkpoint_interval, + ?MIN_CHECKPOINT_INTERVAL), + make_ra_conf(Q, ServerId, TickTimeout, + SnapshotInterval, CheckpointInterval, Membership). 
+ +make_ra_conf(Q, ServerId, TickTimeout, + SnapshotInterval, CheckpointInterval, Membership) -> QName = amqqueue:get_name(Q), RaMachine = ra_machine(Q), [{ClusterName, _} | _] = Members = members(Q), UId = ra:new_uid(ra_lib:to_binary(ClusterName)), FName = rabbit_misc:rs(QName), Formatter = {?MODULE, format_ra_event, [QName]}, + LogCfg = #{uid => UId, + snapshot_interval => SnapshotInterval, + min_checkpoint_interval => CheckpointInterval, + max_checkpoints => 3}, rabbit_misc:maps_put_truthy(membership, Membership, #{cluster_name => ClusterName, id => ServerId, @@ -1802,8 +1886,7 @@ make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval, Membership) -> friendly_name => FName, metrics_key => QName, initial_members => Members, - log_init_args => #{uid => UId, - snapshot_interval => SnapshotInterval}, + log_init_args => LogCfg, tick_timeout => TickTimeout, machine => RaMachine, ra_event_formatter => Formatter}). @@ -1811,13 +1894,11 @@ make_ra_conf(Q, ServerId, TickTimeout, SnapshotInterval, Membership) -> make_mutable_config(Q) -> QName = amqqueue:get_name(Q), TickTimeout = application:get_env(rabbit, quorum_tick_interval, - ?TICK_TIMEOUT), + ?TICK_INTERVAL), Formatter = {?MODULE, format_ra_event, [QName]}, #{tick_timeout => TickTimeout, ra_event_formatter => Formatter}. - - get_nodes(Q) when ?is_amqqueue(Q) -> #{nodes := Nodes} = amqqueue:get_type_state(Q), Nodes. @@ -1930,3 +2011,40 @@ wait_for_projections(Node, QName, N) -> timer:sleep(100), wait_for_projections(Node, QName, N - 1) end. 
+ +find_leader(Q) when ?is_amqqueue(Q) -> + %% the get_pid field in the queue record is updated async after a leader + %% change, so is likely to be the more stale than the leaderboard + {Name, _Node} = MaybeLeader = amqqueue:get_pid(Q), + Leaders = case ra_leaderboard:lookup_leader(Name) of + undefined -> + %% leader from queue record will have to suffice + [MaybeLeader]; + LikelyLeader -> + [LikelyLeader, MaybeLeader] + end, + Nodes = [node() | nodes()], + case lists:search(fun ({_Nm, Nd}) -> + lists:member(Nd, Nodes) + end, Leaders) of + {value, Leader} -> + Leader; + false -> + undefined + end. + +is_process_alive(Name, Node) -> + %% don't attempt rpc if node is not already connected + %% as this function is used for metrics and stats and the additional + %% latency isn't warranted + erlang:is_pid(erpc_call(Node, erlang, whereis, [Name], ?RPC_TIMEOUT)). + +%% backwards compat +file_handle_leader_reservation(_QName) -> + ok. + +file_handle_other_reservation() -> + ok. + +file_handle_release_reservation() -> + ok. diff --git a/deps/rabbit/src/rabbit_reader.erl b/deps/rabbit/src/rabbit_reader.erl index 9b805502741d..42e7e70a75fe 100644 --- a/deps/rabbit/src/rabbit_reader.erl +++ b/deps/rabbit/src/rabbit_reader.erl @@ -43,7 +43,7 @@ -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). --export([start_link/2, info_keys/0, info/1, info/2, force_event_refresh/2, +-export([start_link/2, info/2, force_event_refresh/2, shutdown/2]). -export([system_continue/3, system_terminate/4, system_code_change/4]). @@ -60,6 +60,8 @@ %% from connection storms and DoS. -define(SILENT_CLOSE_DELAY, 3). -define(CHANNEL_MIN, 1). +%% AMQP 1.0 §5.3 +-define(PROTOCOL_ID_SASL, 3). %%-------------------------------------------------------------------------- @@ -114,10 +116,6 @@ connection_blocked_message_sent }). 
--define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt, - send_pend, state, channels, reductions, - garbage_collection]). - -define(SIMPLE_METRICS, [pid, recv_oct, send_oct, reductions]). -define(OTHER_METRICS, [recv_cnt, send_cnt, send_pend, state, channels, garbage_collection]). @@ -130,8 +128,6 @@ timeout, frame_max, channel_max, client_properties, connected_at, node, user_who_performed_action]). --define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]). - -define(AUTH_NOTIFICATION_INFO_KEYS, [host, name, peer_host, peer_port, protocol, auth_mechanism, ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject, @@ -162,10 +158,6 @@ shutdown(Pid, Explanation) -> no_return(). init(Parent, HelperSups, Ref) -> ?LG_PROCESS_TYPE(reader), - %% Note: - %% This function could return an error if the handshake times out. - %% It is less likely to happen here as compared to MQTT, so - %% crashing with a `badmatch` seems appropriate. {ok, Sock} = rabbit_networking:handshake(Ref, application:get_env(rabbit, proxy_protocol, false)), Deb = sys:debug_options([]), @@ -186,15 +178,6 @@ system_terminate(Reason, _Parent, _Deb, _State) -> system_code_change(Misc, _Module, _OldVsn, _Extra) -> {ok, Misc}. --spec info_keys() -> rabbit_types:info_keys(). - -info_keys() -> ?INFO_KEYS. - --spec info(pid()) -> rabbit_types:infos(). - -info(Pid) -> - gen_server:call(Pid, info, infinity). - -spec info(pid(), rabbit_types:info_keys()) -> rabbit_types:infos(). 
info(Pid, Items) -> @@ -316,6 +299,7 @@ start_connection(Parent, HelperSups, RanchRef, Deb, Sock) -> {PeerHost, PeerPort, Host, Port} = socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end), ?store_proc_name(Name), + ConnectedAt = os:system_time(milli_seconds), State = #v1{parent = Parent, ranch_ref = RanchRef, sock = RealSocket, @@ -335,8 +319,7 @@ start_connection(Parent, HelperSups, RanchRef, Deb, Sock) -> capabilities = [], auth_mechanism = none, auth_state = none, - connected_at = os:system_time( - milli_seconds)}, + connected_at = ConnectedAt}, callback = uninitialized_callback, recv_len = 0, pending_recv = false, @@ -360,17 +343,23 @@ start_connection(Parent, HelperSups, RanchRef, Deb, Sock) -> handshake, 8)]}) of %% connection was closed cleanly by the client #v1{connection = #connection{user = #user{username = Username}, - vhost = VHost}} -> - rabbit_log_connection:info("closing AMQP connection (~ts, vhost: '~ts', user: '~ts')", - [dynamic_connection_name(Name), VHost, Username]); + vhost = VHost, + connected_at = ConnectedAt0}} -> + ConnName = dynamic_connection_name(Name), + ConnDuration = connection_duration(ConnectedAt0), + rabbit_log_connection:info("closing AMQP connection (~ts, vhost: '~ts', user: '~ts', duration: '~ts')", + [ConnName, VHost, Username, ConnDuration]); %% just to be more defensive _ -> - rabbit_log_connection:info("closing AMQP connection (~ts)", - [dynamic_connection_name(Name)]) - end + ConnName = dynamic_connection_name(Name), + ConnDuration = connection_duration(ConnectedAt), + rabbit_log_connection:info("closing AMQP connection (~ts, duration: '~ts')", + [ConnName, ConnDuration]) + end catch Ex -> - log_connection_exception(dynamic_connection_name(Name), Ex) + ConnNameEx = dynamic_connection_name(Name), + log_connection_exception(ConnNameEx, ConnectedAt, Ex) after %% We don't call gen_tcp:close/1 here since it waits for %% pending output to be sent, which results in unnecessary @@ -398,50 +387,67 @@ 
start_connection(Parent, HelperSups, RanchRef, Deb, Sock) -> end, done. -log_connection_exception(Name, Ex) -> +log_connection_exception(Name, ConnectedAt, Ex) -> Severity = case Ex of connection_closed_with_no_data_received -> debug; {connection_closed_abruptly, _} -> warning; connection_closed_abruptly -> warning; _ -> error end, - log_connection_exception(Severity, Name, Ex). + log_connection_exception(Severity, Name, ConnectedAt, Ex). -log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) -> +log_connection_exception(Severity, Name, ConnectedAt, {heartbeat_timeout, TimeoutSec}) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts, duration: '~ts'):~n" + "missed heartbeats from client, timeout: ~ps", %% Long line to avoid extra spaces and line breaks in log - log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts):~n" - "missed heartbeats from client, timeout: ~ps", - [self(), Name, TimeoutSec]); -log_connection_exception(Severity, Name, {connection_closed_abruptly, - #v1{connection = #connection{user = #user{username = Username}, - vhost = VHost}}}) -> - log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts, vhost: '~ts', user: '~ts'):~nclient unexpectedly closed TCP connection", - [self(), Name, VHost, Username]); + log_connection_exception_with_severity(Severity, Fmt, + [self(), Name, ConnDuration, TimeoutSec]); +log_connection_exception(Severity, Name, _ConnectedAt, + {connection_closed_abruptly, + #v1{connection = #connection{user = #user{username = Username}, + vhost = VHost, + connected_at = ConnectedAt}}}) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts, vhost: '~ts', user: '~ts', duration: '~ts'):~n" + "client unexpectedly closed TCP connection", + log_connection_exception_with_severity(Severity, Fmt, + [self(), Name, VHost, Username, ConnDuration]); %% when client abruptly closes 
connection before connection.open/authentication/authorization %% succeeded, don't log username and vhost as 'none' -log_connection_exception(Severity, Name, {connection_closed_abruptly, _}) -> - log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts):~nclient unexpectedly closed TCP connection", - [self(), Name]); +log_connection_exception(Severity, Name, ConnectedAt, {connection_closed_abruptly, _}) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts, duration: '~ts'):~n" + "client unexpectedly closed TCP connection", + log_connection_exception_with_severity(Severity, Fmt, + [self(), Name, ConnDuration]); %% failed connection.tune negotiations -log_connection_exception(Severity, Name, {handshake_error, tuning, _Channel, - {exit, #amqp_error{explanation = Explanation}, - _Method, _Stacktrace}}) -> - log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts):~nfailed to negotiate connection parameters: ~ts", - [self(), Name, Explanation]); +log_connection_exception(Severity, Name, ConnectedAt, {handshake_error, tuning, + {exit, #amqp_error{explanation = Explanation}, + _Method, _Stacktrace}}) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts):~n" + "failed to negotiate connection parameters: ~ts", + log_connection_exception_with_severity(Severity, Fmt, [self(), Name, ConnDuration, Explanation]); +log_connection_exception(Severity, Name, ConnectedAt, {sasl_required, ProtocolId}) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP 1.0 connection (~ts, duration: '~ts'): RabbitMQ requires SASL " + "security layer (expected protocol ID 3, but client sent protocol ID ~b)", + log_connection_exception_with_severity(Severity, Fmt, + [Name, ConnDuration, ProtocolId]); %% old exception structure -log_connection_exception(Severity, Name, connection_closed_abruptly) -> - 
log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts):~n" - "client unexpectedly closed TCP connection", - [self(), Name]); -log_connection_exception(Severity, Name, Ex) -> - log_connection_exception_with_severity(Severity, - "closing AMQP connection ~tp (~ts):~n~tp", - [self(), Name, Ex]). +log_connection_exception(Severity, Name, ConnectedAt, connection_closed_abruptly) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts, duration: '~ts'):~n" + "client unexpectedly closed TCP connection", + log_connection_exception_with_severity(Severity, Fmt, + [self(), Name, ConnDuration]); +log_connection_exception(Severity, Name, ConnectedAt, Ex) -> + ConnDuration = connection_duration(ConnectedAt), + Fmt = "closing AMQP connection ~tp (~ts, duration: '~ts'):~n" + "~tp", + log_connection_exception_with_severity(Severity, Fmt, + [self(), Name, ConnDuration, Ex]). log_connection_exception_with_severity(Severity, Fmt, Args) -> case Severity of @@ -608,9 +614,6 @@ handle_other({'$gen_call', From, {shutdown, Explanation}}, State) -> force -> stop; normal -> NewState end; -handle_other({'$gen_call', From, info}, State) -> - gen_server:reply(From, infos(?INFO_KEYS, State)), - State; handle_other({'$gen_call', From, {info, Items}}, State) -> gen_server:reply(From, try {ok, infos(Items, State)} catch Error -> {error, Error} @@ -848,11 +851,11 @@ handle_exception(State = #v1{connection = #connection{protocol = Protocol, " user: '~ts', state: ~tp):~n~ts", [self(), ConnName, User#user.username, tuning, ErrMsg]), send_error_on_channel0_and_close(Channel, Protocol, Reason, State); -handle_exception(State, Channel, Reason) -> +handle_exception(State, _Channel, Reason) -> %% We don't trust the client at this point - force them to wait %% for a bit so they can't DOS us with repeated failed logins etc. timer:sleep(?SILENT_CLOSE_DELAY * 1000), - throw({handshake_error, State#v1.connection_state, Channel, Reason}). 
+ throw({handshake_error, State#v1.connection_state, Reason}). %% we've "lost sync" with the client and hence must not accept any %% more input @@ -1086,8 +1089,11 @@ handle_input(Callback, Data, _State) -> throw({bad_input, Callback, Data}). %% AMQP 1.0 §2.2 -version_negotiation({Id, 1, 0, 0}, State) -> - become_10(Id, State); +version_negotiation({?PROTOCOL_ID_SASL, 1, 0, 0}, State) -> + become_10(State); +version_negotiation({ProtocolId, 1, 0, 0}, #v1{sock = Sock}) -> + %% AMQP 1.0 figure 2.13: We require SASL security layer. + refuse_connection(Sock, {sasl_required, ProtocolId}); version_negotiation({0, 0, 9, 1}, State) -> start_091_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State); version_negotiation({1, 1, 0, 9}, State) -> @@ -1126,14 +1132,13 @@ start_091_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision}, -spec refuse_connection(rabbit_net:socket(), any()) -> no_return(). refuse_connection(Sock, Exception) -> - refuse_connection(Sock, Exception, {0, 1, 0, 0}). + refuse_connection(Sock, Exception, {?PROTOCOL_ID_SASL, 1, 0, 0}). -spec refuse_connection(_, _, _) -> no_return(). refuse_connection(Sock, Exception, {A, B, C, D}) -> ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end), throw(Exception). - ensure_stats_timer(State = #v1{connection_state = running}) -> rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats); ensure_stats_timer(State) -> @@ -1600,6 +1605,7 @@ ic(client_properties, #connection{client_properties = CP}) -> CP; ic(auth_mechanism, #connection{auth_mechanism = none}) -> none; ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name; ic(connected_at, #connection{connected_at = T}) -> T; +ic(container_id, _) -> ''; % AMQP 1.0 specific field ic(Item, #connection{}) -> throw({bad_argument, Item}). socket_info(Get, Select, #v1{sock = Sock}) -> @@ -1626,24 +1632,14 @@ emit_stats(State) -> State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer), ensure_stats_timer(State1). 
-%% 1.0 stub --spec become_10(non_neg_integer(), #v1{}) -> no_return(). -become_10(Id, State = #v1{sock = Sock}) -> - Mode = case Id of - 0 -> amqp; - 3 -> sasl; - _ -> refuse_connection( - Sock, {unsupported_amqp1_0_protocol_id, Id}, - {3, 1, 0, 0}) - end, - F = fun (_Deb, Buf, BufLen, State0) -> - {rabbit_amqp_reader, init, - [Mode, pack_for_1_0(Buf, BufLen, State0)]} - end, - State#v1{connection_state = {become, F}}. +become_10(State) -> + Fun = fun(_Deb, Buf, BufLen, State0) -> + {rabbit_amqp_reader, init, + [pack_for_1_0(Buf, BufLen, State0)]} + end, + State#v1{connection_state = {become, Fun}}. pack_for_1_0(Buf, BufLen, #v1{sock = Sock, - recv_len = RecvLen, pending_recv = PendingRecv, helper_sup = {_HelperSup091, HelperSup10}, proxy_socket = ProxySocket, @@ -1654,7 +1650,7 @@ pack_for_1_0(Buf, BufLen, #v1{sock = Sock, port = Port, peer_port = PeerPort, connected_at = ConnectedAt}}) -> - {Sock, RecvLen, PendingRecv, HelperSup10, Buf, BufLen, ProxySocket, + {Sock, PendingRecv, HelperSup10, Buf, BufLen, ProxySocket, Name, Host, PeerHost, Port, PeerPort, ConnectedAt}. respond_and_close(State, Channel, Protocol, Reason, LogErr) -> @@ -1828,3 +1824,23 @@ get_client_value_detail(channel_max, 0) -> " (no limit)"; get_client_value_detail(_Field, _ClientValue) -> "". + +connection_duration(ConnectedAt) -> + Now = os:system_time(milli_seconds), + DurationMillis = Now - ConnectedAt, + if + DurationMillis >= 1000 -> + DurationSecs = DurationMillis div 1000, + case calendar:seconds_to_daystime(DurationSecs) of + {0, {0, 0, Seconds}} -> + io_lib:format("~Bs", [Seconds]); + {0, {0, Minutes, Seconds}} -> + io_lib:format("~BM, ~Bs", [Minutes, Seconds]); + {0, {Hours, Minutes, Seconds}} -> + io_lib:format("~BH, ~BM, ~Bs", [Hours, Minutes, Seconds]); + {Days, {Hours, Minutes, Seconds}} -> + io_lib:format("~BD, ~BH, ~BM, ~Bs", [Days, Hours, Minutes, Seconds]) + end; + true -> + io_lib:format("~Bms", [DurationMillis]) + end. 
diff --git a/deps/rabbit/src/rabbit_ssl.erl b/deps/rabbit/src/rabbit_ssl.erl index ffb56cd08c7b..bf4048e09c54 100644 --- a/deps/rabbit/src/rabbit_ssl.erl +++ b/deps/rabbit/src/rabbit_ssl.erl @@ -10,7 +10,7 @@ -include_lib("public_key/include/public_key.hrl"). -export([peer_cert_issuer/1, peer_cert_subject/1, peer_cert_validity/1]). --export([peer_cert_subject_items/2, peer_cert_auth_name/1]). +-export([peer_cert_subject_items/2, peer_cert_auth_name/1, peer_cert_auth_name/2]). -export([cipher_suites_erlang/2, cipher_suites_erlang/1, cipher_suites_openssl/2, cipher_suites_openssl/1, cipher_suites/1]). @@ -18,7 +18,7 @@ %%-------------------------------------------------------------------------- --export_type([certificate/0]). +-export_type([certificate/0, ssl_cert_login_type/0]). % Due to API differences between OTP releases. -dialyzer(no_missing_calls). @@ -109,28 +109,51 @@ peer_cert_subject_alternative_names(Cert, Type) -> peer_cert_validity(Cert) -> rabbit_cert_info:validity(Cert). +-type ssl_cert_login_type() :: + {subject_alternative_name | subject_alt_name, atom(), integer()} | + {distinguished_name | common_name, undefined, undefined }. + +-spec extract_ssl_cert_login_settings() -> none | ssl_cert_login_type(). +extract_ssl_cert_login_settings() -> + case application:get_env(rabbit, ssl_cert_login_from) of + {ok, Mode} -> + case Mode of + subject_alternative_name -> extract_san_login_type(Mode); + subject_alt_name -> extract_san_login_type(Mode); + _ -> {Mode, undefined, undefined} + end; + undefined -> none + end. + +extract_san_login_type(Mode) -> + {Mode, + application:get_env(rabbit, ssl_cert_login_san_type, dns), + application:get_env(rabbit, ssl_cert_login_san_index, 0) + }. + %% Extract a username from the certificate -spec peer_cert_auth_name(certificate()) -> binary() | 'not_found' | 'unsafe'. peer_cert_auth_name(Cert) -> - {ok, Mode} = application:get_env(rabbit, ssl_cert_login_from), - peer_cert_auth_name(Mode, Cert). 
+ case extract_ssl_cert_login_settings() of + none -> 'not_found'; + Settings -> peer_cert_auth_name(Settings, Cert) + end. --spec peer_cert_auth_name(atom(), certificate()) -> binary() | 'not_found' | 'unsafe'. -peer_cert_auth_name(distinguished_name, Cert) -> +-spec peer_cert_auth_name(ssl_cert_login_type(), certificate()) -> binary() | 'not_found' | 'unsafe'. +peer_cert_auth_name({distinguished_name, _, _}, Cert) -> case auth_config_sane() of true -> iolist_to_binary(peer_cert_subject(Cert)); false -> unsafe end; -peer_cert_auth_name(subject_alt_name, Cert) -> - peer_cert_auth_name(subject_alternative_name, Cert); +peer_cert_auth_name({subject_alt_name, Type, Index0}, Cert) -> + peer_cert_auth_name({subject_alternative_name, Type, Index0}, Cert); -peer_cert_auth_name(subject_alternative_name, Cert) -> +peer_cert_auth_name({subject_alternative_name, Type, Index0}, Cert) -> case auth_config_sane() of true -> - Type = application:get_env(rabbit, ssl_cert_login_san_type, dns), %% lists:nth/2 is 1-based - Index = application:get_env(rabbit, ssl_cert_login_san_index, 0) + 1, + Index = Index0 + 1, OfType = peer_cert_subject_alternative_names(Cert, otp_san_type(Type)), rabbit_log:debug("Peer certificate SANs of type ~ts: ~tp, index to use with lists:nth/2: ~b", [Type, OfType, Index]), case length(OfType) of @@ -152,7 +175,7 @@ peer_cert_auth_name(subject_alternative_name, Cert) -> false -> unsafe end; -peer_cert_auth_name(common_name, Cert) -> +peer_cert_auth_name({common_name, _, _}, Cert) -> %% If there is more than one CN then we join them with "," in a %% vaguely DN-like way. But this is more just so we do something %% more intelligent than crashing, if you actually want to escape @@ -171,7 +194,7 @@ auth_config_sane() -> verify_peer -> true; V -> rabbit_log:warning("TLS peer verification (authentication) is " "disabled, ssl_options.verify value used: ~tp. 
" - "See https://www.rabbitmq.com/ssl.html#peer-verification to learn more.", [V]), + "See https://www.rabbitmq.com/docs/ssl#peer-verification to learn more.", [V]), false end. diff --git a/deps/rabbit/src/rabbit_stream_coordinator.erl b/deps/rabbit/src/rabbit_stream_coordinator.erl index 954030b98581..6eac47fc781e 100644 --- a/deps/rabbit/src/rabbit_stream_coordinator.erl +++ b/deps/rabbit/src/rabbit_stream_coordinator.erl @@ -189,8 +189,12 @@ delete_stream(Q, ActingUser) #{name := StreamId} = amqqueue:get_type_state(Q), case process_command({delete_stream, StreamId, #{}}) of {ok, ok, _} -> - _ = rabbit_amqqueue:internal_delete(Q, ActingUser), - {ok, {ok, 0}}; + case rabbit_amqqueue:internal_delete(Q, ActingUser) of + ok -> + {ok, {ok, 0}}; + {error, timeout} = Err -> + Err + end; Err -> Err end. @@ -1231,7 +1235,7 @@ phase_update_mnesia(StreamId, Args, #{reference := QName, #{name := S} when S == StreamId -> rabbit_log:debug("~ts: initializing queue record for stream id ~ts", [?MODULE, StreamId]), - _ = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)), + ok = rabbit_amqqueue:ensure_rabbit_queue_record_is_initialized(Fun(Q)), ok; _ -> ok @@ -1747,6 +1751,12 @@ eval_listener({P, member}, {ListNode, ListMPid0}, {Lsts0, Effs0}, {queue_event, QRef, {stream_local_member_change, MemberPid}}, cast} | Efs]}; + (_MNode, #member{state = {running, _, MemberPid}, + role = {replica, _}, + target = deleted}, {_, Efs}) -> + {MemberPid, [{send_msg, P, + {queue_event, QRef, deleted_replica}, + cast} | Efs]}; (_N, _M, Acc) -> %% not a replica, nothing to do Acc diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl index 37f3b52e2e42..a7aa3a5a18cc 100644 --- a/deps/rabbit/src/rabbit_stream_queue.erl +++ b/deps/rabbit/src/rabbit_stream_queue.erl @@ -92,7 +92,8 @@ leader :: pid(), local_pid :: undefined | pid(), next_seq = 1 :: non_neg_integer(), - correlation = #{} :: #{appender_seq() => {rabbit_queue_type:correlation(), 
msg()}}, + correlation = #{} :: #{appender_seq() => + {rabbit_queue_type:correlation(), msg()}}, soft_limit :: non_neg_integer(), slow = false :: boolean(), readers = #{} :: #{rabbit_types:ctag() => #stream{}}, @@ -177,37 +178,52 @@ create_stream(Q0) -> case rabbit_stream_coordinator:new_stream(Q, Leader) of {ok, {ok, LeaderPid}, _} -> %% update record with leader pid - set_leader_pid(LeaderPid, amqqueue:get_name(Q)), - rabbit_event:notify(queue_created, - [{name, QName}, - {durable, true}, - {auto_delete, false}, - {arguments, Arguments}, - {type, amqqueue:get_type(Q1)}, - {user_who_performed_action, - ActingUser}]), - {new, Q}; + case set_leader_pid(LeaderPid, amqqueue:get_name(Q)) of + ok -> + rabbit_event:notify(queue_created, + [{name, QName}, + {durable, true}, + {auto_delete, false}, + {arguments, Arguments}, + {type, amqqueue:get_type(Q1)}, + {user_who_performed_action, + ActingUser}]), + {new, Q}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not set leader PID for ~ts on node '~ts' " + "because the metadata store operation timed out", + [rabbit_misc:rs(QName), node()]} + end; Error -> _ = rabbit_amqqueue:internal_delete(Q, ActingUser), - {protocol_error, internal_error, "Cannot declare a queue '~ts' on node '~ts': ~255p", + {protocol_error, internal_error, "Cannot declare ~ts on node '~ts': ~255p", [rabbit_misc:rs(QName), node(), Error]} end; {existing, Q} -> {existing, Q}; {absent, Q, Reason} -> - {absent, Q, Reason} + {absent, Q, Reason}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare ~ts on node '~ts' because the metadata store " + "operation timed out", + [rabbit_misc:rs(QName), node()]} end. -spec delete(amqqueue:amqqueue(), boolean(), boolean(), rabbit_types:username()) -> rabbit_types:ok(non_neg_integer()) | - rabbit_types:error(in_use | not_empty). + rabbit_types:error(timeout) | + {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. 
delete(Q, _IfUnused, _IfEmpty, ActingUser) -> case rabbit_stream_coordinator:delete_stream(Q, ActingUser) of {ok, Reply} -> Reply; + {error, timeout} = Err -> + Err; Error -> - {protocol_error, internal_error, "Cannot delete queue '~ts' on node '~ts': ~255p ", + {protocol_error, internal_error, "Cannot delete ~ts on node '~ts': ~255p ", [rabbit_misc:rs(amqqueue:get_name(Q)), node(), Error]} end. @@ -509,7 +525,7 @@ deliver(QSs, Msg, Options) -> {[{Q, S} | Qs], Actions} end, {[], []}, QSs). -deliver0(MsgId, Msg, +deliver0(Corr, Msg, #stream_client{name = Name, leader = LeaderPid, writer_id = WriterId, @@ -519,11 +535,11 @@ deliver0(MsgId, Msg, slow = Slow0} = State, Actions0) -> ok = osiris:write(LeaderPid, WriterId, Seq, stream_message(Msg)), - Correlation = case MsgId of + Correlation = case Corr of undefined -> Correlation0; _ -> - Correlation0#{Seq => {MsgId, Msg}} + Correlation0#{Seq => {Corr, Msg}} end, {Slow, Actions} = case maps:size(Correlation) >= SftLmt of true when not Slow0 -> @@ -614,7 +630,9 @@ handle_event(_QName, {stream_local_member_change, Pid}, end, #{}, Readers0), {ok, State#stream_client{local_pid = Pid, readers = Readers1}, []}; handle_event(_QName, eol, #stream_client{name = Name}) -> - {eol, [{unblock, Name}]}. + {eol, [{unblock, Name}]}; +handle_event(QName, deleted_replica, State) -> + {ok, State, [{queue_down, QName}]}. is_recoverable(Q) -> Node = node(), @@ -1279,7 +1297,7 @@ notify_decorators(Q) when ?is_amqqueue(Q) -> resend_all(#stream_client{leader = LeaderPid, writer_id = WriterId, correlation = Corrs} = State) -> - Msgs = lists:sort(maps:values(Corrs)), + Msgs = lists:sort(maps:to_list(Corrs)), case Msgs of [] -> ok; [{Seq, _} | _] -> @@ -1288,9 +1306,14 @@ resend_all(#stream_client{leader = LeaderPid, end, [begin ok = osiris:write(LeaderPid, WriterId, Seq, stream_message(Msg)) - end || {Seq, Msg} <- Msgs], + end || {Seq, {_Corr, Msg}} <- Msgs], State. 
+-spec set_leader_pid(Pid, QName) -> Ret when + Pid :: pid(), + QName :: rabbit_amqqueue:name(), + Ret :: ok | {error, timeout}. + set_leader_pid(Pid, QName) -> %% TODO this should probably be a single khepri transaction for better performance. Fun = fun (Q) -> diff --git a/deps/rabbit/src/rabbit_table.erl b/deps/rabbit/src/rabbit_table.erl index 22b39bb30c64..1febbb76265e 100644 --- a/deps/rabbit/src/rabbit_table.erl +++ b/deps/rabbit/src/rabbit_table.erl @@ -9,7 +9,7 @@ -export([ create/0, create/2, ensure_local_copies/1, ensure_table_copy/3, - wait_for_replicated/1, wait/1, wait/2, + wait_for_replicated/1, wait/1, wait/2, wait_silent/2, force_load/0, is_present/0, is_empty/0, needs_default_data/0, check_schema_integrity/1, clear_ram_only_tables/0, maybe_clear_ram_only_tables/0, @@ -109,19 +109,40 @@ wait(TableNames, Retry) -> {Timeout, Retries} = retry_timeout(Retry), wait(TableNames, Timeout, Retries). +wait_silent(TableNames, Retry) -> + %% The check to validate if the deprecated feature + %% Classic Mirrored Queues is in use, calls this wait + %% for tables to ensure `rabbit_runtime_parameters` are + %% ready. This happens every time a user clicks on any + %% tab on the management UI (to warn about deprecated ff + %% in use), which generates some suspicious + %% `Waiting for Mnesia tables...` log messages. + %% They're normal, but better to avoid them as it might + %% confuse users, wondering if there is any issue with Mnesia. + {Timeout, Retries} = retry_timeout(Retry), + wait(TableNames, Timeout, Retries, _Silent = true). + wait(TableNames, Timeout, Retries) -> + wait(TableNames, Timeout, Retries, _Silent = false). + +wait(TableNames, Timeout, Retries, Silent) -> %% Wait for tables must only wait for tables that have already been declared. 
%% Otherwise, node boot returns a timeout when the Khepri ff is enabled from the start ExistingTables = mnesia:system_info(tables), MissingTables = TableNames -- ExistingTables, TablesToMigrate = TableNames -- MissingTables, - wait1(TablesToMigrate, Timeout, Retries). + wait1(TablesToMigrate, Timeout, Retries, Silent). -wait1(TableNames, Timeout, Retries) -> +wait1(TableNames, Timeout, Retries, Silent) -> %% We might be in ctl here for offline ops, in which case we can't %% get_env() for the rabbit app. - rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left", - [Timeout, Retries - 1]), + case Silent of + true -> + ok; + false -> + rabbit_log:info("Waiting for Mnesia tables for ~tp ms, ~tp retries left", + [Timeout, Retries - 1]) + end, Result = case mnesia:wait_for_tables(TableNames, Timeout) of ok -> ok; @@ -134,13 +155,23 @@ wait1(TableNames, Timeout, Retries) -> end, case {Retries, Result} of {_, ok} -> - rabbit_log:info("Successfully synced tables from a peer"), - ok; + case Silent of + true -> + ok; + false -> + rabbit_log:info("Successfully synced tables from a peer"), + ok + end; {1, {error, _} = Error} -> throw(Error); {_, {error, Error}} -> - rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error]), - wait1(TableNames, Timeout, Retries - 1) + case Silent of + true -> + ok; + false -> + rabbit_log:warning("Error while waiting for Mnesia tables: ~tp", [Error]) + end, + wait1(TableNames, Timeout, Retries - 1, Silent) end. retry_timeout(_Retry = false) -> @@ -183,15 +214,8 @@ needs_default_data() -> end. needs_default_data_in_khepri() -> - Paths = [rabbit_db_vhost:khepri_vhosts_path(), - rabbit_db_user:khepri_users_path()], - lists:all( - fun(Path) -> - case rabbit_khepri:list(Path) of - {ok, List} when is_map(List) andalso List =:= #{} -> true; - _ -> false - end - end, Paths). + rabbit_db_user:count_all() =:= {ok, 0} orelse + rabbit_db_vhost:count_all() =:= {ok, 0}. 
needs_default_data_in_mnesia() -> is_empty([rabbit_user, rabbit_user_permission, diff --git a/deps/rabbit/src/rabbit_time_travel_dbg.erl b/deps/rabbit/src/rabbit_time_travel_dbg.erl index 4ab6674514de..7d8b480e5ac9 100644 --- a/deps/rabbit/src/rabbit_time_travel_dbg.erl +++ b/deps/rabbit/src/rabbit_time_travel_dbg.erl @@ -28,7 +28,7 @@ start(Pid, Apps) -> TracerPid = spawn_link(?MODULE, init, []), {ok, _} = dbg:tracer(process, {fun (Msg, _) -> TracerPid ! Msg end, []}), _ = [dbg:tpl(M, []) || M <- Mods], - dbg:p(Pid, [c]), + _ = dbg:p(Pid, [c]), ok. apps_to_mods([], Acc) -> diff --git a/deps/rabbit/src/rabbit_upgrade_preparation.erl b/deps/rabbit/src/rabbit_upgrade_preparation.erl index 3de38740b1da..89614af53f0e 100644 --- a/deps/rabbit/src/rabbit_upgrade_preparation.erl +++ b/deps/rabbit/src/rabbit_upgrade_preparation.erl @@ -15,6 +15,7 @@ %% -define(SAMPLING_INTERVAL, 200). +-define(LOGGING_FREQUENCY, ?SAMPLING_INTERVAL * 100). await_online_quorum_plus_one(Timeout) -> Iterations = ceil(Timeout / ?SAMPLING_INTERVAL), @@ -30,7 +31,11 @@ online_members(Component) -> erlang, whereis, [Component])). endangered_critical_components() -> - CriticalComponents = [rabbit_stream_coordinator], + CriticalComponents = [rabbit_stream_coordinator] ++ + case rabbit_feature_flags:is_enabled(khepri_db) of + true -> [rabbitmq_metadata]; + false -> [] + end, Nodes = rabbit_nodes:list_members(), lists:filter(fun (Component) -> NumAlive = length(online_members(Component)), @@ -57,6 +62,21 @@ do_await_safe_online_quorum(IterationsLeft) -> case EndangeredQueues =:= [] andalso endangered_critical_components() =:= [] of true -> true; false -> + case IterationsLeft rem ?LOGGING_FREQUENCY of + 0 -> + case length(EndangeredQueues) of + 0 -> ok; + N -> rabbit_log:info("Waiting for ~p queues and streams to have quorum+1 replicas online. 
" + "You can list them with `rabbitmq-diagnostics check_if_node_is_quorum_critical`", [N]) + end, + case endangered_critical_components() of + [] -> ok; + _ -> rabbit_log:info("Waiting for the following critical components to have quorum+1 replicas online: ~p.", + [endangered_critical_components()]) + end; + _ -> + ok + end, timer:sleep(?SAMPLING_INTERVAL), do_await_safe_online_quorum(IterationsLeft - 1) end. @@ -70,6 +90,6 @@ list_with_minimum_quorum_for_cli() -> [#{ <<"readable_name">> => C, <<"name">> => C, - <<"virtual_host">> => "-", + <<"virtual_host">> => <<"(not applicable)">>, <<"type">> => process } || C <- endangered_critical_components()]. diff --git a/deps/rabbit/src/rabbit_vhost.erl b/deps/rabbit/src/rabbit_vhost.erl index 0f3da8fdd14c..00c148e275ea 100644 --- a/deps/rabbit/src/rabbit_vhost.erl +++ b/deps/rabbit/src/rabbit_vhost.erl @@ -299,8 +299,7 @@ delete(VHost, ActingUser) -> assert_benign(rabbit_amqqueue:with(Name, QDelFun), ActingUser) end || Q <- rabbit_amqqueue:list(VHost)], rabbit_log:info("Deleting exchanges in vhost '~ts' because it's being deleted", [VHost]), - [ok = rabbit_exchange:ensure_deleted(Name, false, ActingUser) || - #exchange{name = Name} <- rabbit_exchange:list(VHost)], + ok = rabbit_exchange:delete_all(VHost, ActingUser), rabbit_log:info("Clearing policies and runtime parameters in vhost '~ts' because it's being deleted", [VHost]), _ = rabbit_runtime_parameters:clear_vhost(VHost, ActingUser), rabbit_log:debug("Removing vhost '~ts' from the metadata storage because it's being deleted", [VHost]), @@ -467,7 +466,7 @@ assert_benign({error, not_found}, _) -> ok; assert_benign({error, {absent, Q, _}}, ActingUser) -> %% Removing the database entries here is safe. If/when the down node %% restarts, it will clear out the on-disk storage of the queue. - rabbit_amqqueue:internal_delete(Q, ActingUser). + ok = rabbit_amqqueue:internal_delete(Q, ActingUser). -spec exists(vhost:name()) -> boolean(). 
diff --git a/deps/rabbit/test/amqp_address_SUITE.erl b/deps/rabbit/test/amqp_address_SUITE.erl index eaa0ffaf0b3d..910e1068eeed 100644 --- a/deps/rabbit/test/amqp_address_SUITE.erl +++ b/deps/rabbit/test/amqp_address_SUITE.erl @@ -54,7 +54,8 @@ common_tests() -> target_per_message_queue, target_per_message_unset_to_address, target_per_message_bad_to_address, - target_per_message_exchange_absent, + target_per_message_exchange_absent_settled, + target_per_message_exchange_absent_unsettled, target_bad_address, source_bad_address ]. @@ -393,16 +394,15 @@ target_per_message_unset_to_address(Config) -> %% Send message with 'to' unset. DTag = <<1>>, ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag, <<0>>)), - ok = wait_for_settled(released, DTag), - receive {amqp10_event, - {link, Sender, - {detached, - #'v1_0.error'{ - condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, - description = {utf8, <<"anonymous terminus requires 'to' address to be set">>}}}}} -> ok - after 5000 -> ct:fail("server did not close our outgoing link") + ExpectedError = #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"anonymous terminus requires 'to' address to be set">>}}, + ok = wait_for_settled({rejected, ExpectedError}, DTag), + + ok = amqp10_client:detach_link(Sender), + receive {amqp10_event, {link, Sender, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) end, - ok = amqp10_client:end_session(Session), ok = amqp10_client:close_connection(Connection). @@ -449,34 +449,32 @@ bad_v2_addresses() -> %% Test v2 target address 'null' with an invalid 'to' addresses. target_per_message_bad_to_address(Config) -> - lists:foreach(fun(Addr) -> - ok = target_per_message_bad_to_address0(Addr, Config) - end, bad_v2_addresses()). 
- -target_per_message_bad_to_address0(Address, Config) -> OpnConf = connection_config(Config), {ok, Connection} = amqp10_client:open_connection(OpnConf), {ok, Session} = amqp10_client:begin_session_sync(Connection), {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), ok = wait_for_credit(Sender), - DTag = <<255>>, - Msg = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag, <<0>>)), - ok = amqp10_client:send_msg(Sender, Msg), - ok = wait_for_settled(released, DTag), - receive {amqp10_event, - {link, Sender, - {detached, - #'v1_0.error'{ - condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, - description = {utf8, <<"bad 'to' address", _Rest/binary>>}}}}} -> ok - after 5000 -> ct:fail("server did not close our outgoing link") - end, + lists:foreach( + fun(Addr) -> + DTag = <<"some delivery tag">>, + Msg = amqp10_msg:set_properties(#{to => Addr}, amqp10_msg:new(DTag, <<0>>, false)), + ok = amqp10_client:send_msg(Sender, Msg), + receive + {amqp10_disposition, {{rejected, Error}, DTag}} -> + ?assertMatch(#'v1_0.error'{condition = ?V_1_0_AMQP_ERROR_PRECONDITION_FAILED, + description = {utf8, <<"bad 'to' address", _Rest/binary>>}}, + Error) + after 5000 -> + flush(missing_disposition), + ct:fail(missing_disposition) + end + end, bad_v2_addresses()), ok = amqp10_client:end_session(Session), ok = amqp10_client:close_connection(Connection). 
-target_per_message_exchange_absent(Config) -> +target_per_message_exchange_absent_settled(Config) -> Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), XName = <<"🎈"/utf8>>, Address = rabbitmq_amqp_address:exchange(XName), @@ -492,20 +490,59 @@ target_per_message_exchange_absent(Config) -> ok = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), DTag2 = <<2>>, - Msg2 = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag2, <<"m2">>)), + Msg2 = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag2, <<"m2">>, true)), ok = amqp10_client:send_msg(Sender, Msg2), - ok = wait_for_settled(released, DTag2), + + %% "the routing node MUST detach the link over which the message was sent with an error. + %% [...] Additionally the info field of error MUST contain an entry with symbolic key delivery-tag + %% and binary value of the delivery-tag of the message which caused the failure." + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-routingerrors receive {amqp10_event, {link, Sender, {detached, Error}}} -> ?assertEqual( #'v1_0.error'{ condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, - description = {utf8, <<"no exchange '", XName/binary, "' in vhost '/'">>}}, + description = {utf8, <<"no exchange '", XName/binary, "' in vhost '/'">>}, + info = {map, [{{symbol, <<"delivery-tag">>}, {binary, DTag2}}]} + }, Error) after 5000 -> ct:fail("server did not close our outgoing link") end, ok = cleanup(Init). 
+target_per_message_exchange_absent_unsettled(Config) -> + Init = {_, LinkPair = #link_pair{session = Session}} = init(Config), + XName = <<"🎈"/utf8>>, + Address = rabbitmq_amqp_address:exchange(XName), + ok = rabbitmq_amqp_client:declare_exchange(LinkPair, XName, #{}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, null), + ok = wait_for_credit(Sender), + + DTag1 = <<"my tag">>, + Msg1 = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag1, <<"hey">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = wait_for_settled(released, DTag1), + + ok = rabbitmq_amqp_client:delete_exchange(LinkPair, XName), + + %% "If the source of the link supports the rejected outcome, and the message has not + %% already been settled by the sender, then the routing node MUST reject the message. + %% In this case the error field of rejected MUST contain the error which would have been communicated + %% in the detach which would have be sent if a link to the same address had been attempted." + %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-routingerrors + %% We test here multiple rejections implicilty checking that link flow control works correctly. + ExpectedError = #'v1_0.error'{ + condition = ?V_1_0_AMQP_ERROR_NOT_FOUND, + description = {utf8, <<"no exchange '", XName/binary, "' in vhost '/'">>}}, + [begin + DTag = Body = integer_to_binary(N), + Msg = amqp10_msg:set_properties(#{to => Address}, amqp10_msg:new(DTag, Body, false)), + ok = amqp10_client:send_msg(Sender, Msg), + ok = wait_for_settled({rejected, ExpectedError}, DTag) + end || N <- lists:seq(1, 300)], + + ok = cleanup(Init). 
+ target_bad_address(Config) -> %% bad v1 and bad v2 target address TargetAddr = <<"/qqq/🎈"/utf8>>, diff --git a/deps/rabbit/test/amqp_auth_SUITE.erl b/deps/rabbit/test/amqp_auth_SUITE.erl index 0ff70bf0c520..920f779172d4 100644 --- a/deps/rabbit/test/amqp_auth_SUITE.erl +++ b/deps/rabbit/test/amqp_auth_SUITE.erl @@ -58,11 +58,10 @@ groups() -> %% authn authn_failure_event, sasl_anonymous_success, - sasl_none_success, sasl_plain_success, sasl_anonymous_failure, - sasl_none_failure, sasl_plain_failure, + sasl_none_failure, vhost_absent, %% limits @@ -531,7 +530,7 @@ target_per_message_internal_exchange(Config) -> ExpectedErr = error_unauthorized( <<"forbidden to publish to internal exchange '", XName/binary, "' in vhost 'test vhost'">>), receive {amqp10_event, {session, Session1, {ended, ExpectedErr}}} -> ok - after 5000 -> flush(aaa), + after 5000 -> flush(missing_event), ct:fail({missing_event, ?LINE}) end, ok = close_connection_sync(Conn1), @@ -609,10 +608,6 @@ sasl_anonymous_success(Config) -> Mechanism = anon, ok = sasl_success(Mechanism, Config). -sasl_none_success(Config) -> - Mechanism = none, - ok = sasl_success(Mechanism, Config). - sasl_plain_success(Config) -> Mechanism = {plain, <<"guest">>, <<"guest">>}, ok = sasl_success(Mechanism, Config). @@ -627,38 +622,40 @@ sasl_success(Mechanism, Config) -> ok = amqp10_client:close_connection(Connection). sasl_anonymous_failure(Config) -> - Mechanism = anon, - ?assertEqual( - {sasl_not_supported, Mechanism}, - sasl_failure(Mechanism, Config) - ). - -sasl_none_failure(Config) -> - Mechanism = none, - sasl_failure(Mechanism, Config). - -sasl_plain_failure(Config) -> - Mechanism = {plain, <<"guest">>, <<"wrong password">>}, - ?assertEqual( - sasl_auth_failure, - sasl_failure(Mechanism, Config) - ). - -sasl_failure(Mechanism, Config) -> App = rabbit, - Par = amqp1_0_default_user, + Par = anonymous_login_user, {ok, Default} = rpc(Config, application, get_env, [App, Par]), + %% Prohibit anonymous login. 
ok = rpc(Config, application, set_env, [App, Par, none]), + Mechanism = anon, OpnConf0 = connection_config(Config, <<"/">>), OpnConf = OpnConf0#{sasl := Mechanism}, {ok, Connection} = amqp10_client:open_connection(OpnConf), - Reason = receive {amqp10_event, {connection, Connection, {closed, Reason0}}} -> Reason0 - after 5000 -> ct:fail(missing_closed) - end, + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual({sasl_not_supported, Mechanism}, Reason) + after 5000 -> ct:fail(missing_closed) + end, + + ok = rpc(Config, application, set_env, [App, Par, Default]). - ok = rpc(Config, application, set_env, [App, Par, Default]), - Reason. +sasl_plain_failure(Config) -> + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := {plain, <<"guest">>, <<"wrong password">>}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual(sasl_auth_failure, Reason) + after 5000 -> ct:fail(missing_closed) + end. + +%% Skipping SASL is disallowed in RabbitMQ. +sasl_none_failure(Config) -> + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl := none}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, _Reason}}} -> ok + after 5000 -> ct:fail(missing_closed) + end. 
vhost_absent(Config) -> OpnConf = connection_config(Config, <<"this vhost does not exist">>), diff --git a/deps/rabbit/test/amqp_client_SUITE.erl b/deps/rabbit/test/amqp_client_SUITE.erl index f48c6dcc8862..acc4dd004cd8 100644 --- a/deps/rabbit/test/amqp_client_SUITE.erl +++ b/deps/rabbit/test/amqp_client_SUITE.erl @@ -44,6 +44,7 @@ groups() -> sender_settle_mode_unsettled, sender_settle_mode_unsettled_fanout, sender_settle_mode_mixed, + invalid_transfer_settled_flag, quorum_queue_rejects, receiver_settle_mode_first, publishing_to_non_existing_queue_should_settle_with_released, @@ -61,7 +62,8 @@ groups() -> server_closes_link_classic_queue, server_closes_link_quorum_queue, server_closes_link_stream, - server_closes_link_exchange, + server_closes_link_exchange_settled, + server_closes_link_exchange_unsettled, link_target_classic_queue_deleted, link_target_quorum_queue_deleted, target_queues_deleted_accepted, @@ -81,10 +83,15 @@ groups() -> stop_classic_queue, stop_quorum_queue, stop_stream, + priority_classic_queue, + priority_quorum_queue, consumer_priority_classic_queue, consumer_priority_quorum_queue, single_active_consumer_classic_queue, single_active_consumer_quorum_queue, + single_active_consumer_priority_quorum_queue, + single_active_consumer_drain_classic_queue, + single_active_consumer_drain_quorum_queue, detach_requeues_one_session_classic_queue, detach_requeues_one_session_quorum_queue, detach_requeues_drop_head_classic_queue, @@ -105,11 +112,12 @@ groups() -> idle_time_out_on_server, idle_time_out_on_client, idle_time_out_too_short, - rabbit_status_connection_count, handshake_timeout, credential_expires, attach_to_exclusive_queue, - classic_priority_queue, + modified_classic_queue, + modified_quorum_queue, + modified_dead_letter_headers_exchange, dead_letter_headers_exchange, dead_letter_reject, dead_letter_reject_message_order_classic_queue, @@ -133,7 +141,9 @@ groups() -> incoming_window_closed_rabbitmq_internal_flow_classic_queue, 
incoming_window_closed_rabbitmq_internal_flow_quorum_queue, tcp_back_pressure_rabbitmq_internal_flow_classic_queue, - tcp_back_pressure_rabbitmq_internal_flow_quorum_queue + tcp_back_pressure_rabbitmq_internal_flow_quorum_queue, + session_max_per_connection, + link_max_per_session ]}, {cluster_size_3, [shuffle], @@ -154,6 +164,12 @@ groups() -> quorum_queue_on_old_node, quorum_queue_on_new_node, maintenance, + leader_transfer_quorum_queue_credit_single, + leader_transfer_quorum_queue_credit_batches, + leader_transfer_stream_credit_single, + leader_transfer_stream_credit_batches, + leader_transfer_quorum_queue_send, + leader_transfer_stream_send, list_connections, detach_requeues_two_connections_classic_queue, detach_requeues_two_connections_quorum_queue @@ -205,13 +221,21 @@ init_per_testcase(T, Config) T =:= drain_many_quorum_queue orelse T =:= timed_get_quorum_queue orelse T =:= available_messages_quorum_queue -> - case rpc(Config, rabbit_feature_flags, is_enabled, [credit_api_v2]) of + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of true -> rabbit_ct_helpers:testcase_started(Config, T); false -> {skip, "Receiving with drain from quorum queues in credit API v1 have a known " "bug that they reply with send_drained before delivering the message."} end; +init_per_testcase(single_active_consumer_drain_quorum_queue = T, Config) -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Draining a SAC inactive quorum queue consumer with credit API v1 " + "is known to be unsupported."} + end; init_per_testcase(T, Config) when T =:= incoming_window_closed_close_link orelse T =:= incoming_window_closed_rabbitmq_internal_flow_classic_queue orelse @@ -221,40 +245,57 @@ init_per_testcase(T, Config) %% The new RabbitMQ internal flow control %% writer proc <- session proc <- queue proc %% is only available with credit API v2. 
- case rpc(Config, rabbit_feature_flags, is_enabled, [credit_api_v2]) of + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of true -> rabbit_ct_helpers:testcase_started(Config, T); false -> - {skip, "Feature flag credit_api_v2 is disabled"} + {skip, "Feature flag rabbitmq_4.0.0 is disabled"} end; init_per_testcase(T, Config) - when T =:= detach_requeues_one_session_classic_queue orelse - T =:= detach_requeues_one_session_quorum_queue orelse - T =:= detach_requeues_drop_head_classic_queue orelse - T =:= detach_requeues_two_connections_classic_queue orelse - T =:= detach_requeues_two_connections_quorum_queue orelse - T =:= single_active_consumer_classic_queue orelse - T =:= single_active_consumer_quorum_queue -> - %% Cancel API v2 reuses feature flag credit_api_v2. + when T =:= modified_quorum_queue orelse + T =:= modified_dead_letter_headers_exchange -> + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "Feature flag rabbitmq_4.0.0 is disabled, but needed for " + "the new #modify{} command being sent to quorum queues."} + end; +init_per_testcase(T, Config) + when T =:= detach_requeues_one_session_classic_queue orelse + T =:= detach_requeues_drop_head_classic_queue orelse + T =:= detach_requeues_two_connections_classic_queue orelse + T =:= single_active_consumer_classic_queue -> + %% Cancel API v2 reuses feature flag rabbitmq_4.0.0. %% In 3.13, with cancel API v1, when a receiver detaches with unacked messages, these messages %% will remain unacked and unacked message state will be left behind in the server session %% process state. %% In contrast, cancel API v2 in 4.x will requeue any unacked messages if the receiver detaches. %% We skip the single active consumer tests because these test cases assume that detaching a %% receiver link will requeue unacked messages. 
- case rpc(Config, rabbit_feature_flags, is_enabled, [credit_api_v2]) of + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of true -> rabbit_ct_helpers:testcase_started(Config, T); false -> - {skip, "Cancel API v2 is disabled due to feature flag credit_api_v2 being disabled."} + {skip, "Cancel API v2 is disabled due to feature flag rabbitmq_4.0.0 being disabled."} + end; +init_per_testcase(T, Config) + when T =:= detach_requeues_one_session_quorum_queue orelse + T =:= single_active_consumer_quorum_queue orelse + T =:= detach_requeues_two_connections_quorum_queue -> + case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of + ok -> + rabbit_ct_helpers:testcase_started(Config, T); + {skip, _} -> + {skip, "Feature flag rabbitmq_4.0.0 enables the consumer removal API"} end; init_per_testcase(T = immutable_bare_message, Config) -> - case rpc(Config, rabbit_feature_flags, is_enabled, [message_containers_store_amqp_v1]) of + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of true -> rabbit_ct_helpers:testcase_started(Config, T); false -> {skip, "RabbitMQ is known to wrongfully modify the bare message with feature " - "flag message_containers_store_amqp_v1 disabled"} + "flag rabbitmq_4.0.0 disabled"} end; init_per_testcase(T = dead_letter_into_stream, Config) -> case rpc(Config, rabbit_feature_flags, is_enabled, [message_containers_deaths_v2]) of @@ -272,6 +313,19 @@ init_per_testcase(T = dead_letter_reject, Config) -> {skip, "This test is known to fail with feature flag message_containers_deaths_v2 disabled " "due bug https://github.com/rabbitmq/rabbitmq-server/issues/11159"} end; +init_per_testcase(T, Config) + when T =:= leader_transfer_quorum_queue_credit_single orelse + T =:= leader_transfer_quorum_queue_credit_batches orelse + T =:= leader_transfer_stream_credit_single orelse + T =:= leader_transfer_stream_credit_batches orelse + T =:= leader_transfer_quorum_queue_send orelse + T =:= 
leader_transfer_stream_send -> + case rpc(Config, rabbit_feature_flags, is_supported, ['rabbitmq_4.0.0']) of + true -> + rabbit_ct_helpers:testcase_started(Config, T); + false -> + {skip, "This test requires the AMQP management extension of RabbitMQ 4.0"} + end; init_per_testcase(T, Config) when T =:= classic_queue_on_new_node orelse T =:= quorum_queue_on_new_node -> @@ -303,7 +357,7 @@ reliable_send_receive_with_outcomes(QType, Config) -> Outcomes = [ accepted, modified, - {modified, true, false, #{<<"fruit">> => <<"banana">>}}, + {modified, true, false, #{<<"x-fruit">> => <<"banana">>}}, {modified, false, true, #{}}, rejected, released @@ -368,6 +422,234 @@ reliable_send_receive(QType, Outcome, Config) -> ok = end_session_sync(Session2), ok = amqp10_client:close_connection(Connection2). +%% We test the modified outcome with classic queues. +%% We expect that classic queues implement field undeliverable-here incorrectly +%% by discarding (if true) or requeueing (if false). +%% Fields delivery-failed and message-annotations are not implemented. 
+modified_classic_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + {ok, #{type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, QName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + Msg1 = amqp10_msg:new(<<"tag1">>, <<"m1">>, true), + Msg2 = amqp10_msg:new(<<"tag2">>, <<"m2">>, true), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = amqp10_client:detach_link(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + + {ok, M1} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(M1)), + ok = amqp10_client:settle_msg(Receiver, M1, {modified, false, true, #{}}), + + {ok, M2a} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2a)), + ok = amqp10_client:settle_msg(Receiver, M2a, + {modified, false, false, #{}}), + + {ok, M2b} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2b)), + ok = amqp10_client:settle_msg(Receiver, M2b, + {modified, true, false, #{<<"x-opt-key">> => <<"val">>}}), + + {ok, M2c} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2c)), + ok = amqp10_client:settle_msg(Receiver, M2c, modified), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 1}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% We test the modified outcome with quorum queues. 
+%% We expect that quorum queues implement field +%% * delivery-failed correctly +%% * undeliverable-here incorrectly by discarding (if true) or requeueing (if false) +%% * message-annotations correctly +modified_quorum_queue(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, QName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + Msg1 = amqp10_msg:new(<<"tag1">>, <<"m1">>, true), + Msg2 = amqp10_msg:new(<<"tag2">>, <<"m2">>, true), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), + ok = amqp10_client:detach_link(Sender), + + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"receiver">>, Address, unsettled), + + {ok, M1} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m1">>], amqp10_msg:body(M1)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(M1)), + ok = amqp10_client:settle_msg(Receiver, M1, {modified, false, true, #{}}), + + {ok, M2a} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2a)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(M2a)), + ok = amqp10_client:settle_msg(Receiver, M2a, {modified, false, false, #{}}), + + {ok, M2b} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2b)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(M2b)), + ok = amqp10_client:settle_msg(Receiver, M2b, {modified, true, false, #{}}), + + {ok, M2c} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2c)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(M2c)), + ok = 
amqp10_client:settle_msg(Receiver, M2c, + {modified, true, false, + #{<<"x-opt-key">> => <<"val 1">>}}), + + {ok, M2d} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2d)), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, + amqp10_msg:headers(M2d)), + ?assertMatch(#{<<"x-opt-key">> := <<"val 1">>}, amqp10_msg:message_annotations(M2d)), + ok = amqp10_client:settle_msg(Receiver, M2d, + {modified, false, false, + #{<<"x-opt-key">> => <<"val 2">>, + <<"x-other">> => 99}}), + + {ok, M2e} = amqp10_client:get_msg(Receiver), + ?assertEqual([<<"m2">>], amqp10_msg:body(M2e)), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, + amqp10_msg:headers(M2e)), + ?assertMatch(#{<<"x-opt-key">> := <<"val 2">>, + <<"x-other">> := 99}, amqp10_msg:message_annotations(M2e)), + ok = amqp10_client:settle_msg(Receiver, M2e, modified), + + ok = amqp10_client:detach_link(Receiver), + ?assertMatch({ok, #{message_count := 1}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + +%% Test that a message can be routed based on the message-annotations +%% provided in the modified outcome. 
+modified_dead_letter_headers_exchange(Config) -> + {Connection, Session, LinkPair} = init(Config), + SourceQName = <<"source quorum queue">>, + AppleQName = <<"dead letter classic queue receiving apples">>, + BananaQName = <<"dead letter quorum queue receiving bananas">>, + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + SourceQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, + <<"x-overflow">> => {utf8, <<"reject-publish">>}, + <<"x-dead-letter-strategy">> => {utf8, <<"at-least-once">>}, + <<"x-dead-letter-exchange">> => {utf8, <<"amq.headers">>}}}), + {ok, #{type := <<"classic">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + AppleQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"classic">>}}}), + {ok, #{type := <<"quorum">>}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + BananaQName, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}}}), + ok = rabbitmq_amqp_client:bind_queue( + LinkPair, AppleQName, <<"amq.headers">>, <<>>, + #{<<"x-fruit">> => {utf8, <<"apple">>}, + <<"x-match">> => {utf8, <<"any-with-x">>}}), + ok = rabbitmq_amqp_client:bind_queue( + LinkPair, BananaQName, <<"amq.headers">>, <<>>, + #{<<"x-fruit">> => {utf8, <<"banana">>}, + <<"x-match">> => {utf8, <<"any-with-x">>}}), + + {ok, Sender} = amqp10_client:attach_sender_link( + Session, <<"test-sender">>, rabbitmq_amqp_address:queue(SourceQName)), + wait_for_credit(Sender), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, rabbitmq_amqp_address:queue(SourceQName), unsettled), + {ok, ReceiverApple} = amqp10_client:attach_receiver_link( + Session, <<"receiver apple">>, rabbitmq_amqp_address:queue(AppleQName), unsettled), + {ok, ReceiverBanana} = amqp10_client:attach_receiver_link( + Session, <<"receiver banana">>, rabbitmq_amqp_address:queue(BananaQName), unsettled), + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"t1">>, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, 
amqp10_msg:new(<<"t2">>, <<"m2">>)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_message_annotations( + #{"x-fruit" => <<"apple">>}, + amqp10_msg:new(<<"t3">>, <<"m3">>))), + ok = amqp10_client:send_msg(Sender, amqp10_msg:set_message_annotations( + #{"x-fruit" => <<"apple">>}, + amqp10_msg:new(<<"t4">>, <<"m4">>))), + ok = wait_for_accepts(3), + + {ok, Msg1} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 0, + first_acquirer := true}, + amqp10_msg:headers(Msg1)), + ok = amqp10_client:settle_msg(Receiver, Msg1, {modified, true, true, #{<<"x-fruit">> => <<"banana">>}}), + {ok, MsgBanana1} = amqp10_client:get_msg(ReceiverBanana), + ?assertEqual([<<"m1">>], amqp10_msg:body(MsgBanana1)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(MsgBanana1)), + ok = amqp10_client:accept_msg(ReceiverBanana, MsgBanana1), + + {ok, Msg2} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg2, {modified, true, true, #{<<"x-fruit">> => <<"apple">>}}), + {ok, MsgApple1} = amqp10_client:get_msg(ReceiverApple), + ?assertEqual([<<"m2">>], amqp10_msg:body(MsgApple1)), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, + amqp10_msg:headers(MsgApple1)), + ok = amqp10_client:accept_msg(ReceiverApple, MsgApple1), + + {ok, Msg3} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg3, {modified, false, true, #{}}), + {ok, MsgApple2} = amqp10_client:get_msg(ReceiverApple), + ?assertEqual([<<"m3">>], amqp10_msg:body(MsgApple2)), + ?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(MsgApple2)), + ok = amqp10_client:accept_msg(ReceiverApple, MsgApple2), + + {ok, Msg4} = amqp10_client:get_msg(Receiver), + ok = amqp10_client:settle_msg(Receiver, Msg4, {modified, false, true, #{<<"x-fruit">> => <<"banana">>}}), + {ok, MsgBanana2} = amqp10_client:get_msg(ReceiverBanana), + ?assertEqual([<<"m4">>], amqp10_msg:body(MsgBanana2)), + 
?assertMatch(#{delivery_count := 0, + first_acquirer := false}, + amqp10_msg:headers(MsgBanana2)), + ok = amqp10_client:accept_msg(ReceiverBanana, MsgBanana2), + + ok = detach_link_sync(Sender), + ok = detach_link_sync(Receiver), + ok = detach_link_sync(ReceiverApple), + ok = detach_link_sync(ReceiverBanana), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, SourceQName), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, AppleQName), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, BananaQName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + %% Tests that confirmations are returned correctly %% when sending many messages async to a quorum queue. sender_settle_mode_unsettled(Config) -> @@ -477,6 +759,51 @@ sender_settle_mode_mixed(Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). 
+invalid_transfer_settled_flag(Config) -> + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session1} = amqp10_client:begin_session(Connection), + {ok, Session2} = amqp10_client:begin_session(Connection), + TargetAddr = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, SenderSettled} = amqp10_client:attach_sender_link_sync( + Session1, <<"link 1">>, TargetAddr, settled), + {ok, SenderUnsettled} = amqp10_client:attach_sender_link_sync( + Session2, <<"link 2">>, TargetAddr, unsettled), + ok = wait_for_credit(SenderSettled), + ok = wait_for_credit(SenderUnsettled), + + ok = amqp10_client:send_msg(SenderSettled, amqp10_msg:new(<<"tag1">>, <<"m1">>, false)), + receive + {amqp10_event, + {session, Session1, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + description = {utf8, Description1}}}}} -> + ?assertEqual( + <<"sender settle mode is 'settled' but transfer settled flag is interpreted as being 'false'">>, + Description1) + after 5000 -> flush(missing_ended), + ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:send_msg(SenderUnsettled, amqp10_msg:new(<<"tag2">>, <<"m2">>, true)), + receive + {amqp10_event, + {session, Session2, + {ended, + #'v1_0.error'{ + condition = ?V_1_0_CONNECTION_ERROR_FRAMING_ERROR, + description = {utf8, Description2}}}}} -> + ?assertEqual( + <<"sender settle mode is 'unsettled' but transfer settled flag is interpreted as being 'true'">>, + Description2) + after 5000 -> flush(missing_ended), + ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:close_connection(Connection). 
+ quorum_queue_rejects(Config) -> {Connection, Session, LinkPair} = init(Config), QName = atom_to_binary(?FUNCTION_NAME), @@ -844,7 +1171,7 @@ amqp_amqpl(QType, Config) -> #{"my int" => -2}, amqp10_msg:new(<<>>, Body1, true)))), %% Send with footer - Footer = #'v1_0.footer'{content = [{{symbol, <<"my footer">>}, {ubyte, 255}}]}, + Footer = #'v1_0.footer'{content = [{{symbol, <<"x-my footer">>}, {ubyte, 255}}]}, ok = amqp10_client:send_msg( Sender, amqp10_msg:from_amqp_records( @@ -1187,7 +1514,13 @@ server_closes_link(QType, Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). -server_closes_link_exchange(Config) -> +server_closes_link_exchange_settled(Config) -> + server_closes_link_exchange(true, Config). + +server_closes_link_exchange_unsettled(Config) -> + server_closes_link_exchange(false, Config). + +server_closes_link_exchange(Settled, Config) -> XName = atom_to_binary(?FUNCTION_NAME), QName = <<"my queue">>, RoutingKey = <<"my routing key">>, @@ -1217,8 +1550,13 @@ server_closes_link_exchange(Config) -> %% When we publish the next message, we expect: %% 1. that the message is released because the exchange doesn't exist anymore, and DTag2 = <<255>>, - ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, false)), - ok = wait_for_settlement(DTag2, released), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(DTag2, <<"m2">>, Settled)), + case Settled of + true -> + ok; + false -> + ok = wait_for_settlement(DTag2, released) + end, %% 2. that the server closes the link, i.e. sends us a DETACH frame. 
receive {amqp10_event, {link, Sender, @@ -1382,18 +1720,19 @@ events(Config) -> Protocol = {protocol, {1, 0}}, AuthProps = [{name, <<"guest">>}, - {auth_mechanism, <<"PLAIN">>}, - {ssl, false}, - Protocol], + {auth_mechanism, <<"PLAIN">>}, + {ssl, false}, + Protocol], ?assertMatch( - {value, _}, - find_event(user_authentication_success, AuthProps, Events)), + {value, _}, + find_event(user_authentication_success, AuthProps, Events)), Node = get_node_config(Config, 0, nodename), ConnectionCreatedProps = [Protocol, {node, Node}, {vhost, <<"/">>}, {user, <<"guest">>}, + {container_id, <<"my container">>}, {type, network}], {value, ConnectionCreatedEvent} = find_event( connection_created, @@ -1414,8 +1753,8 @@ events(Config) -> Pid, ClientProperties], ?assertMatch( - {value, _}, - find_event(connection_closed, ConnectionClosedProps, Events)), + {value, _}, + find_event(connection_closed, ConnectionClosedProps, Events)), ok. sync_get_unsettled_classic_queue(Config) -> @@ -1932,12 +2271,147 @@ consumer_priority(QType, Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). +single_active_consumer_priority_quorum_queue(Config) -> + QType = <<"quorum">>, + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session1, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-single-active-consumer">> => true}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + %% Send 6 messages. + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session1, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + NumMsgs = 6, + [begin + Bin = integer_to_binary(N), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(Bin, Bin, true)) + end || N <- lists:seq(1, NumMsgs)], + ok = amqp10_client:detach_link(Sender), + + %% The 1st consumer (with default prio 0) will become active. 
+ {ok, Recv1} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 1">>, Address, unsettled), + receive {amqp10_event, {link, Recv1, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, Msg1} = amqp10_client:get_msg(Recv1), + ?assertEqual([<<"1">>], amqp10_msg:body(Msg1)), + + %% The 2nd consumer should take over thanks to higher prio. + {ok, Recv2} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 2">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv2, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + flush("attched receiver 2"), + + %% To ensure in-order processing and to avoid interrupting the 1st consumer during + %% its long running task processing, neither of the 2 consumers should receive more + %% messages until the 1st consumer settles all outstanding messages. + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv1, 5)), + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv2, 5)), + ok = amqp10_client:accept_msg(Recv1, Msg1), + receive {amqp10_msg, R1, Msg2} -> + ?assertEqual([<<"2">>], amqp10_msg:body(Msg2)), + ?assertEqual(Recv2, R1), + ok = amqp10_client:accept_msg(Recv2, Msg2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Attaching with same prio should not take over. 
+ {ok, Session2} = amqp10_client:begin_session_sync(Connection), + {ok, Recv3} = amqp10_client:attach_receiver_link( + Session2, <<"receiver 3">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv3, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ?assertEqual({error, timeout}, amqp10_client:get_msg(Recv3, 5)), + ok = end_session_sync(Session2), + + {ok, Recv4} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 4">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv4, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + {ok, Recv5} = amqp10_client:attach_receiver_link( + Session1, <<"receiver 5">>, Address, unsettled, none, #{}, + #{<<"rabbitmq:priority">> => {int, 1}}), + receive {amqp10_event, {link, Recv5, attached}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + flush("attched receivers 4 and 5"), + + ok = amqp10_client:flow_link_credit(Recv4, 1, never), + ok = amqp10_client:flow_link_credit(Recv5, 2, never), + + %% Stop the active consumer. + ok = amqp10_client:detach_link(Recv2), + receive {amqp10_event, {link, Recv2, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The 5th consumer should become the active one because it is up, + %% has highest prio (1), and most credits (2). + receive {amqp10_msg, R2, Msg3} -> + ?assertEqual([<<"3">>], amqp10_msg:body(Msg3)), + ?assertEqual(Recv5, R2), + ok = amqp10_client:accept_msg(Recv5, Msg3) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, R3, Msg4} -> + ?assertEqual([<<"4">>], amqp10_msg:body(Msg4)), + ?assertEqual(Recv5, R3), + ok = amqp10_client:accept_msg(Recv5, Msg4) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Stop the active consumer. 
+ ok = amqp10_client:detach_link(Recv5), + receive {amqp10_event, {link, Recv5, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The 4th consumer should become the active one because it is up, + %% has highest prio (1), and most credits (1). + receive {amqp10_msg, R4, Msg5} -> + ?assertEqual([<<"5">>], amqp10_msg:body(Msg5)), + ?assertEqual(Recv4, R4), + ok = amqp10_client:accept_msg(Recv4, Msg5) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + %% Stop the active consumer. + ok = amqp10_client:detach_link(Recv4), + receive {amqp10_event, {link, Recv4, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% The only up consumer left is the 1st one (prio 0) which still has 1 credit. + receive {amqp10_msg, R5, Msg6} -> + ?assertEqual([<<"6">>], amqp10_msg:body(Msg6)), + ?assertEqual(Recv1, R5), + ok = amqp10_client:accept_msg(Recv1, Msg6) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + + ok = amqp10_client:detach_link(Recv1), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session1), + ok = amqp10_client:close_connection(Connection). + single_active_consumer_classic_queue(Config) -> single_active_consumer(<<"classic">>, Config). -single_active_consumer_quorum_queue(_Config) -> - % single_active_consumer(<<"quorum">>, Config). - {skip, "TODO: unskip when qq-v4 branch is merged"}. +single_active_consumer_quorum_queue(Config) -> + single_active_consumer(<<"quorum">>, Config). single_active_consumer(QType, Config) -> QName = atom_to_binary(?FUNCTION_NAME), @@ -2044,6 +2518,123 @@ single_active_consumer(QType, Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). +single_active_consumer_drain_classic_queue(Config) -> + single_active_consumer_drain(<<"classic">>, Config). 
+ +single_active_consumer_drain_quorum_queue(Config) -> + single_active_consumer_drain(<<"quorum">>, Config). + +single_active_consumer_drain(QType, Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + {Connection, Session, LinkPair} = init(Config), + QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-single-active-consumer">> => true}}, + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + + %% Attach 1 sender and 2 receivers to the queue. + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + + %% The 1st consumer will become active. + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-1">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver1, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + %% The 2nd consumer will become inactive. + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, + <<"test-receiver-2">>, + Address, + unsettled), + receive {amqp10_event, {link, Receiver2, attached}} -> ok + after 5000 -> ct:fail("missing attached") + end, + flush(attached), + + %% Drain both active and inactive consumer for the 1st time. + ok = amqp10_client:flow_link_credit(Receiver1, 100, never, true), + ok = amqp10_client:flow_link_credit(Receiver2, 100, never, true), + receive {amqp10_event, {link, Receiver1, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Send 2 messages. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag1">>, <<"m1">>)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag2">>, <<"m2">>)), + ok = wait_for_accepts(2), + + %% No consumer should receive a message since both should have 0 credits. 
+ receive Unexpected0 -> ct:fail("received unexpected ~p", [Unexpected0]) + after 10 -> ok + end, + + %% Drain both active and inactive consumer for the 2nd time. + ok = amqp10_client:flow_link_credit(Receiver1, 200, never, true), + ok = amqp10_client:flow_link_credit(Receiver2, 200, never, true), + + %% Only the active consumer should receive messages. + receive {amqp10_msg, Receiver1, Msg1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(Msg1)), + ok = amqp10_client:accept_msg(Receiver1, Msg1) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver1, Msg2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(Msg2)), + ok = amqp10_client:accept_msg(Receiver1, Msg2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver1, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Cancelling the active consumer should cause the inactive to become active. + ok = amqp10_client:detach_link(Receiver1), + receive {amqp10_event, {link, Receiver1, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + %% Send 1 more message. + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"dtag3">>, <<"m3">>)), + ok = wait_for_accepted(<<"dtag3">>), + + %% Our 2nd (now active) consumer should have 0 credits. + receive Unexpected1 -> ct:fail("received unexpected ~p", [Unexpected1]) + after 10 -> ok + end, + + %% Drain for the 3rd time. 
+ ok = amqp10_client:flow_link_credit(Receiver2, 300, never, true), + + receive {amqp10_msg, Receiver2, Msg3} -> + ?assertEqual([<<"m3">>], amqp10_msg:body(Msg3)), + ok = amqp10_client:accept_msg(Receiver2, Msg3) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_event, {link, Receiver2, credit_exhausted}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + + ok = amqp10_client:detach_link(Receiver2), + receive {amqp10_event, {link, Receiver2, {detached, normal}}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + ?assertMatch({ok, #{message_count := 0}}, + rabbitmq_amqp_client:delete_queue(LinkPair, QName)), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = amqp10_client:close_connection(Connection). + %% "A session endpoint can choose to unmap its output handle for a link. In this case, the endpoint MUST %% send a detach frame to inform the remote peer that the handle is no longer attached to the link endpoint. %% If both endpoints do this, the link MAY return to a fully detached state. Note that in this case the @@ -2061,16 +2652,17 @@ single_active_consumer(QType, Config) -> %% In addition to consumer cancellation, detaching a link therefore causes in flight deliveries to be requeued. %% That's okay given that AMQP receivers can stop a link (figure 2.46) before detaching. %% -%% Note that this behaviour is different from merely consumer cancellation in AMQP legacy: -%% "After a consumer is cancelled there will be no future deliveries dispatched to it. Note that there can -%% still be "in flight" deliveries dispatched previously. Cancelling a consumer will neither discard nor requeue them." -%% [https://www.rabbitmq.com/consumers.html#unsubscribing] +%% Note that this behaviour is different from merely consumer cancellation in +%% AMQP legacy: +%% "After a consumer is cancelled there will be no future deliveries dispatched to it. 
+%% Note that there can still be "in flight" deliveries dispatched previously. +%% Cancelling a consumer will neither discard nor requeue them." +%% [https://www.rabbitmq.com/docs/consumers#unsubscribing] detach_requeues_one_session_classic_queue(Config) -> detach_requeue_one_session(<<"classic">>, Config). -detach_requeues_one_session_quorum_queue(_Config) -> - % detach_requeue_one_session(<<"quorum">>, Config). - {skip, "TODO: unskip when qq-v4 branch is merged"}. +detach_requeues_one_session_quorum_queue(Config) -> + detach_requeue_one_session(<<"quorum">>, Config). detach_requeue_one_session(QType, Config) -> QName = atom_to_binary(?FUNCTION_NAME), @@ -2219,9 +2811,8 @@ detach_requeues_drop_head_classic_queue(Config) -> detach_requeues_two_connections_classic_queue(Config) -> detach_requeues_two_connections(<<"classic">>, Config). -detach_requeues_two_connections_quorum_queue(_Config) -> - % detach_requeues_two_connections(<<"quorum">>, Config). - {skip, "TODO: unskip when qq-v4 branch is merged"}. +detach_requeues_two_connections_quorum_queue(Config) -> + detach_requeues_two_connections(<<"quorum">>, Config). detach_requeues_two_connections(QType, Config) -> QName = atom_to_binary(?FUNCTION_NAME), @@ -2240,22 +2831,28 @@ detach_requeues_two_connections(QType, Config) -> {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session1, <<"my link pair">>), QProps = #{arguments => #{<<"x-queue-type">> => {utf8, QType}}}, {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, QProps), + flush(link_pair_attached), %% Attach 1 sender and 2 receivers. 
{ok, Sender} = amqp10_client:attach_sender_link(Session0, <<"sender">>, Address, settled), ok = wait_for_credit(Sender), + {ok, Receiver0} = amqp10_client:attach_receiver_link(Session0, <<"receiver 0">>, Address, unsettled), receive {amqp10_event, {link, Receiver0, attached}} -> ok after 5000 -> ct:fail({missing_event, ?LINE}) end, + ok = gen_statem:cast(Session0, {flow_session, #'v1_0.flow'{incoming_window = {uint, 1}}}), + ok = amqp10_client:flow_link_credit(Receiver0, 50, never), + %% Wait for credit being applied to the queue. + timer:sleep(10), + {ok, Receiver1} = amqp10_client:attach_receiver_link(Session1, <<"receiver 1">>, Address, unsettled), receive {amqp10_event, {link, Receiver1, attached}} -> ok after 5000 -> ct:fail({missing_event, ?LINE}) end, - ok = gen_statem:cast(Session0, {flow_session, #'v1_0.flow'{incoming_window = {uint, 1}}}), - ok = amqp10_client:flow_link_credit(Receiver0, 50, never), - ok = amqp10_client:flow_link_credit(Receiver1, 50, never), - flush(attached), + ok = amqp10_client:flow_link_credit(Receiver1, 40, never), + %% Wait for credit being applied to the queue. + timer:sleep(10), NumMsgs = 6, [begin @@ -2767,7 +3364,7 @@ async_notify_settled_stream(Config) -> async_notify(settled, <<"stream">>, Config). async_notify_unsettled_classic_queue(Config) -> - case rabbit_ct_broker_helpers:enable_feature_flag(Config, credit_api_v2) of + case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of ok -> async_notify(unsettled, <<"classic">>, Config); {skip, _} -> @@ -2812,17 +3409,7 @@ async_notify(SenderSettleMode, QType, Config) -> flush(settled), ok = detach_link_sync(Sender), - case QType of - <<"stream">> -> - %% If it is a stream we need to wait until there is a local member - %% on the node we want to subscibe from before proceeding. 
- rabbit_ct_helpers:await_condition( - fun() -> rpc(Config, 0, ?MODULE, has_local_member, - [rabbit_misc:r(<<"/">>, queue, QName)]) - end, 30_000); - _ -> - ok - end, + ok = wait_for_local_member(QType, QName, Config), Filter = consume_from_first(QType), {ok, Receiver} = amqp10_client:attach_receiver_link( Session, <<"test-receiver">>, Address, @@ -2948,7 +3535,14 @@ quorum_queue_on_old_node(Config) -> queue_and_client_different_nodes(1, 0, <<"quorum">>, Config). quorum_queue_on_new_node(Config) -> - queue_and_client_different_nodes(0, 1, <<"quorum">>, Config). + Versions = rabbit_ct_broker_helpers:rpc_all(Config, rabbit_fifo, version, []), + case lists:usort(Versions) of + [_] -> + %% all are one version, go ahead with the test + queue_and_client_different_nodes(0, 1, <<"quorum">>, Config); + _ -> + {skip, "this test cannot pass with mixed QQ machine versions"} + end. %% In mixed version tests, run the queue leader with old code %% and queue client with new code, or vice versa. @@ -2999,7 +3593,7 @@ queue_and_client_different_nodes(QueueLeaderNode, ClientNode, QueueType, Config) true, accepted), - case rpc(Config, rabbit_feature_flags, is_enabled, [credit_api_v2]) of + case rpc(Config, rabbit_feature_flags, is_enabled, ['rabbitmq_4.0.0']) of true -> %% Send another message and drain. Tag = <<"tag">>, @@ -3050,6 +3644,110 @@ maintenance(Config) -> ok = close_connection_sync(C0). +%% https://github.com/rabbitmq/rabbitmq-server/issues/11841 +leader_transfer_quorum_queue_credit_single(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_credit(QName, <<"quorum">>, 1, Config). + +leader_transfer_quorum_queue_credit_batches(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_credit(QName, <<"quorum">>, 3, Config). + +leader_transfer_stream_credit_single(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_credit(QName, <<"stream">>, 1, Config). 
+ +leader_transfer_stream_credit_batches(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_credit(QName, <<"stream">>, 3, Config). + +leader_transfer_credit(QName, QType, Credit, Config) -> + %% Create queue with leader on node 1. + {Connection1, Session1, LinkPair1} = init(1, Config), + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue( + LinkPair1, + QName, + #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-queue-leader-locator">> => {utf8, <<"client-local">>}}}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = end_session_sync(Session1), + ok = close_connection_sync(Connection1), + + %% Consume from a follower. + OpnConf = connection_config(0, Config), + {ok, Connection0} = amqp10_client:open_connection(OpnConf), + {ok, Session0} = amqp10_client:begin_session_sync(Connection0), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link( + Session0, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + + NumMsgs = 30, + ok = send_messages(Sender, NumMsgs, false), + ok = wait_for_accepts(NumMsgs), + ok = detach_link_sync(Sender), + + ok = wait_for_local_member(QType, QName, Config), + Filter = consume_from_first(QType), + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session0, <<"receiver">>, Address, + settled, configuration, Filter), + flush(receiver_attached), + %% Top up credits very often during the leader change. + ok = amqp10_client:flow_link_credit(Receiver, Credit, Credit), + + %% After receiving the 1st message, let's move the leader away from node 1. + receive_messages(Receiver, 1), + ok = drain_node(Config, 1), + %% We expect to receive all remaining messages. + receive_messages(Receiver, NumMsgs - 1), + + ok = revive_node(Config, 1), + ok = amqp10_client:detach_link(Receiver), + ok = delete_queue(Session0, QName), + ok = end_session_sync(Session0), + ok = amqp10_client:close_connection(Connection0). 
+ +leader_transfer_quorum_queue_send(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_send(QName, <<"quorum">>, Config). + +leader_transfer_stream_send(Config) -> + QName = atom_to_binary(?FUNCTION_NAME), + leader_transfer_send(QName, <<"stream">>, Config). + +%% Test a leader transfer while we send to the queue. +leader_transfer_send(QName, QType, Config) -> + %% Create queue with leader on node 1. + {Connection1, Session1, LinkPair1} = init(1, Config), + {ok, #{type := QType}} = rabbitmq_amqp_client:declare_queue( + LinkPair1, + QName, + #{arguments => #{<<"x-queue-type">> => {utf8, QType}, + <<"x-queue-leader-locator">> => {utf8, <<"client-local">>}}}), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair1), + ok = end_session_sync(Session1), + ok = close_connection_sync(Connection1), + + %% Send from a follower. + OpnConf = connection_config(0, Config), + {ok, Connection0} = amqp10_client:open_connection(OpnConf), + {ok, Session0} = amqp10_client:begin_session_sync(Connection0), + Address = rabbitmq_amqp_address:queue(QName), + {ok, Sender} = amqp10_client:attach_sender_link(Session0, <<"test-sender">>, Address), + ok = wait_for_credit(Sender), + + NumMsgs = 500, + ok = send_messages(Sender, NumMsgs, false), + ok = rabbit_ct_broker_helpers:kill_node(Config, 1), + ok = wait_for_accepts(NumMsgs), + + ok = rabbit_ct_broker_helpers:start_node(Config, 1), + ok = detach_link_sync(Sender), + ok = delete_queue(Session0, QName), + ok = end_session_sync(Session0), + ok = amqp10_client:close_connection(Connection0). + %% rabbitmqctl list_connections %% should list both AMQP 1.0 and AMQP 0.9.1 connections. 
list_connections(Config) -> @@ -3057,8 +3755,12 @@ list_connections(Config) -> [ok = rabbit_ct_client_helpers:close_channels_and_connection(Config, Node) || Node <- [0, 1, 2]], Connection091 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), - {ok, C0} = amqp10_client:open_connection(connection_config(0, Config)), - {ok, C2} = amqp10_client:open_connection(connection_config(2, Config)), + ContainerId0 = <<"ID 0">>, + ContainerId2 = <<"ID 2">>, + Cfg0 = maps:put(container_id, ContainerId0, connection_config(0, Config)), + Cfg2 = maps:put(container_id, ContainerId2, connection_config(2, Config)), + {ok, C0} = amqp10_client:open_connection(Cfg0), + {ok, C2} = amqp10_client:open_connection(Cfg2), receive {amqp10_event, {connection, C0, opened}} -> ok after 5000 -> ct:fail({missing_event, ?LINE}) end, @@ -3066,8 +3768,8 @@ list_connections(Config) -> after 5000 -> ct:fail({missing_event, ?LINE}) end, - {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["list_connections", "--silent", "protocol"]), - Protocols0 = re:split(StdOut, <<"\n">>, [trim]), + {ok, StdOut0} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["list_connections", "--silent", "protocol"]), + Protocols0 = re:split(StdOut0, <<"\n">>, [trim]), %% Remove any whitespaces. Protocols1 = [binary:replace(Subject, <<" ">>, <<>>, [global]) || Subject <- Protocols0], Protocols = lists:sort(Protocols1), @@ -3076,6 +3778,13 @@ list_connections(Config) -> <<"{1,0}">>], Protocols), + %% CLI should list AMQP 1.0 container-id + {ok, StdOut1} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["list_connections", "--silent", "container_id"]), + ContainerIds0 = re:split(StdOut1, <<"\n">>, [trim]), + ContainerIds = lists:sort(ContainerIds0), + ?assertEqual([<<>>, ContainerId0, ContainerId2], + ContainerIds), + ok = rabbit_ct_client_helpers:close_connection(Connection091), ok = close_connection_sync(C0), ok = close_connection_sync(C2). 
@@ -3561,7 +4270,7 @@ trace(Config) -> <<"connection">> := <<"127.0.0.1:", _/binary>>, <<"node">> := Node, <<"vhost">> := <<"/">>, - <<"channel">> := 1, + <<"channel">> := 0, <<"user">> := <<"guest">>, <<"properties">> := #{<<"correlation_id">> := CorrelationId}, <<"routed_queues">> := [Q]}, @@ -3576,7 +4285,7 @@ trace(Config) -> <<"connection">> := <<"127.0.0.1:", _/binary>>, <<"node">> := Node, <<"vhost">> := <<"/">>, - <<"channel">> := 2, + <<"channel">> := 1, <<"user">> := <<"guest">>, <<"properties">> := #{<<"correlation_id">> := CorrelationId}, <<"redelivered">> := 0}, @@ -3596,7 +4305,7 @@ trace(Config) -> ok = end_session_sync(SessionReceiver), ok = amqp10_client:close_connection(Connection). -%% https://www.rabbitmq.com/validated-user-id.html +%% https://www.rabbitmq.com/docs/validated-user-id user_id(Config) -> OpnConf = connection_config(Config), {ok, Connection} = amqp10_client:open_connection(OpnConf), @@ -3762,21 +4471,6 @@ idle_time_out_too_short(Config) -> after 5000 -> ct:fail({missing_event, ?LINE}) end. -rabbit_status_connection_count(Config) -> - %% Close any open AMQP 0.9.1 connections from previous test cases. - ok = rabbit_ct_client_helpers:close_channels_and_connection(Config, 0), - - OpnConf = connection_config(Config), - {ok, Connection} = amqp10_client:open_connection(OpnConf), - receive {amqp10_event, {connection, Connection, opened}} -> ok - after 5000 -> ct:fail({missing_event, ?LINE}) - end, - - {ok, String} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, ["status"]), - ?assertNotEqual(nomatch, string:find(String, "Connection count: 1")), - - ok = amqp10_client:close_connection(Connection). - handshake_timeout(Config) -> App = rabbit, Par = ?FUNCTION_NAME, @@ -3842,31 +4536,43 @@ attach_to_exclusive_queue(Config) -> #'queue.delete_ok'{} = amqp_channel:call(Ch, #'queue.delete'{queue = QName}), ok = rabbit_ct_client_helpers:close_channel(Ch). 
-classic_priority_queue(Config) -> +priority_classic_queue(Config) -> + QArgs = #{<<"x-queue-type">> => {utf8, <<"classic">>}, + <<"x-max-priority">> => {ulong, 10}}, + priority(QArgs, Config). + +priority_quorum_queue(Config) -> + QArgs = #{<<"x-queue-type">> => {utf8, <<"quorum">>}}, + priority(QArgs, Config). + +priority(QArgs, Config) -> + {Connection, Session, LinkPair} = init(Config), QName = atom_to_binary(?FUNCTION_NAME), Address = rabbitmq_amqp_address:queue(QName), - Ch = rabbit_ct_client_helpers:open_channel(Config), - #'queue.declare_ok'{} = amqp_channel:call( - Ch, #'queue.declare'{ - queue = QName, - durable = true, - arguments = [{<<"x-max-priority">>, long, 10}]}), - OpnConf = connection_config(Config), - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{arguments => QArgs}), {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"test-sender">>, Address), wait_for_credit(Sender), - Out1 = amqp10_msg:set_headers(#{priority => 3, - durable => true}, amqp10_msg:new(<<"t1">>, <<"low prio">>, false)), - Out2 = amqp10_msg:set_headers(#{priority => 5, - durable => true}, amqp10_msg:new(<<"t2">>, <<"high prio">>, false)), - ok = amqp10_client:send_msg(Sender, Out1), - ok = amqp10_client:send_msg(Sender, Out2), + %% We don't set a priority on Msg1. + %% According to the AMQP spec, the default priority is 4. + Msg1 = amqp10_msg:set_headers( + #{durable => true}, + amqp10_msg:new(<<"t1">>, <<"low prio">>)), + %% Quorum queues implement 2 distinct priority levels. + %% "if 2 distinct priorities are implemented, then levels 0 to 4 are equivalent, + %% and levels 5 to 9 are equivalent and levels 4 and 5 are distinct." [§3.2.1] + %% Therefore, when we set a priority of 5 on Msg2, Msg2 will have a higher priority + %% than the default priority 4 of Msg1. 
+ Msg2 = amqp10_msg:set_headers( + #{priority => 5, + durable => true}, + amqp10_msg:new(<<"t2">>, <<"high prio">>)), + ok = amqp10_client:send_msg(Sender, Msg1), + ok = amqp10_client:send_msg(Sender, Msg2), ok = wait_for_accepts(2), flush(accepted), - %% The high prio message should be delivered first. + %% The high prio Msg2 should overtake the low prio Msg1 and therefore be delivered first. {ok, Receiver1} = amqp10_client:attach_receiver_link(Session, <<"receiver 1">>, Address, unsettled), {ok, In1} = amqp10_client:get_msg(Receiver1), ?assertEqual([<<"high prio">>], amqp10_msg:body(In1)), @@ -3877,13 +4583,13 @@ classic_priority_queue(Config) -> {ok, Receiver2} = amqp10_client:attach_receiver_link(Session, <<"receiver 2">>, Address, settled), {ok, In2} = amqp10_client:get_msg(Receiver2), ?assertEqual([<<"low prio">>], amqp10_msg:body(In2)), - ?assertEqual(3, amqp10_msg:header(priority, In2)), ?assert(amqp10_msg:header(durable, In2)), ok = amqp10_client:detach_link(Receiver1), ok = amqp10_client:detach_link(Receiver2), ok = amqp10_client:detach_link(Sender), - ok = delete_queue(Session, QName), + {ok, #{message_count := 0}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). 
@@ -3994,6 +4700,8 @@ dead_letter_reject(Config) -> QName1, #{arguments => #{<<"x-queue-type">> => {utf8, <<"quorum">>}, <<"x-message-ttl">> => {ulong, 20}, + <<"x-overflow">> => {utf8, <<"reject-publish">>}, + <<"x-dead-letter-strategy">> => {utf8, <<"at-least-once">>}, <<"x-dead-letter-exchange">> => {utf8, <<>>}, <<"x-dead-letter-routing-key">> => {utf8, QName2} }}), @@ -4024,15 +4732,24 @@ dead_letter_reject(Config) -> ok = wait_for_accepted(Tag), {ok, Msg1} = amqp10_client:get_msg(Receiver), + ?assertMatch(#{delivery_count := 0}, amqp10_msg:headers(Msg1)), ok = amqp10_client:settle_msg(Receiver, Msg1, rejected), + {ok, Msg2} = amqp10_client:get_msg(Receiver), - ok = amqp10_client:settle_msg(Receiver, Msg2, rejected), + ?assertMatch(#{delivery_count := 1, + first_acquirer := false}, amqp10_msg:headers(Msg2)), + ok = amqp10_client:settle_msg(Receiver, Msg2, + {modified, true, true, + #{<<"x-opt-thekey">> => <<"val">>}}), + {ok, Msg3} = amqp10_client:get_msg(Receiver), - ok = amqp10_client:settle_msg(Receiver, Msg3, accepted), + ?assertMatch(#{delivery_count := 2, + first_acquirer := false}, amqp10_msg:headers(Msg3)), ?assertEqual(Body, amqp10_msg:body_bin(Msg3)), Annotations = amqp10_msg:message_annotations(Msg3), ?assertMatch( - #{<<"x-first-death-queue">> := QName1, + #{<<"x-opt-thekey">> := <<"val">>, + <<"x-first-death-queue">> := QName1, <<"x-first-death-exchange">> := <<>>, <<"x-first-death-reason">> := <<"expired">>, <<"x-last-death-queue">> := QName1, @@ -4070,6 +4787,7 @@ dead_letter_reject(Config) -> ]} = D3, ?assertEqual([Ts1, Ts3, Ts5, Ts4, Ts6, Ts2], lists:sort([Ts1, Ts2, Ts3, Ts4, Ts5, Ts6])), + ok = amqp10_client:settle_msg(Receiver, Msg3, accepted), ok = amqp10_client:detach_link(Receiver), ok = amqp10_client:detach_link(Sender), @@ -4101,7 +4819,7 @@ dead_letter_reject_message_order(QType, Config) -> {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), {ok, Sender} = amqp10_client:attach_sender_link( - Session, <<"sender">>, 
rabbitmq_amqp_address:queue(QName1), unsettled), + Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), settled), wait_for_credit(Sender), {ok, Receiver1} = amqp10_client:attach_receiver_link( Session, <<"receiver 1">>, rabbitmq_amqp_address:queue(QName1), unsettled), @@ -4192,7 +4910,7 @@ dead_letter_reject_many_message_order(QType, Config) -> {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName2, #{}), {ok, Sender} = amqp10_client:attach_sender_link( - Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), unsettled), + Session, <<"sender">>, rabbitmq_amqp_address:queue(QName1), settled), wait_for_credit(Sender), {ok, Receiver1} = amqp10_client:attach_receiver_link( Session, <<"receiver 1">>, rabbitmq_amqp_address:queue(QName1), unsettled), @@ -4481,7 +5199,7 @@ footer_checksum(FooterOpt, Config) -> SndAttachArgs = #{name => <<"my sender">>, role => {sender, #{address => Addr, durable => configuration}}, - snd_settle_mode => settled, + snd_settle_mode => mixed, rcv_settle_mode => first, footer_opt => FooterOpt}, {ok, Receiver} = amqp10_client:attach_link(Session, RecvAttachArgs), @@ -4495,7 +5213,7 @@ footer_checksum(FooterOpt, Config) -> priority => 7, ttl => 100_000}, amqp10_msg:set_delivery_annotations( - #{"a" => "b"}, + #{"x-a" => "b"}, amqp10_msg:set_message_annotations( #{"x-string" => "string-value", "x-int" => 3, @@ -4990,6 +5708,57 @@ tcp_back_pressure_rabbitmq_internal_flow(QType, Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). +session_max_per_connection(Config) -> + App = rabbit, + Par = session_max_per_connection, + {ok, Default} = rpc(Config, application, get_env, [App, Par]), + %% Let's allow only 1 session per connection. 
+ ok = rpc(Config, application, set_env, [App, Par, 1]), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + %% The 1st session should succeed. + {ok, _Session1} = amqp10_client:begin_session_sync(Connection), + %% The 2nd session should fail. + {ok, _Session2} = amqp10_client:begin_session(Connection), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual( + {framing_error, <<"channel number (1) exceeds maximum channel number (0)">>}, + Reason) + after 5000 -> ct:fail(missing_closed) + end, + + ok = rpc(Config, application, set_env, [App, Par, Default]). + +link_max_per_session(Config) -> + App = rabbit, + Par = link_max_per_session, + {ok, Default} = rpc(Config, application, get_env, [App, Par]), + %% Let's allow only 1 link per session. + ok = rpc(Config, application, set_env, [App, Par, 1]), + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(opened_timeout) + end, + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address1 = rabbitmq_amqp_address:exchange(<<"amq.direct">>, <<"k1">>), + Address2 = rabbitmq_amqp_address:exchange(<<"amq.direct">>, <<"k2">>), + %% The 1st link should succeed. + {ok, Link1} = amqp10_client:attach_sender_link_sync(Session, <<"link-1">>, Address1), + ok = wait_for_credit(Link1), + %% Since the 2nd link should fail, we expect our session process to die. + ?assert(is_process_alive(Session)), + {ok, _Link2} = amqp10_client:attach_sender_link(Session, <<"link-2">>, Address2), + eventually(?_assertNot(is_process_alive(Session))), + + flush(test_succeeded), + ok = rpc(Config, application, set_env, [App, Par, Default]). 
+ %% internal %% @@ -5235,7 +6004,7 @@ assert_messages(QNameBin, NumTotalMsgs, NumUnackedMsgs, Config, Node) -> Infos = rpc(Config, Node, rabbit_amqqueue, info, [Q, [messages, messages_unacknowledged]]), lists:sort(Infos) end - ), 500, 5). + ), 500, 10). serial_number_increment(S) -> case S + 1 of @@ -5287,6 +6056,16 @@ ready_messages(QName, Config) ra_name(Q) -> binary_to_atom(<<"%2F_", Q/binary>>). +wait_for_local_member(<<"stream">>, QName, Config) -> + %% If it is a stream we need to wait until there is a local member + %% on the node we want to subscribe from before proceeding. + rabbit_ct_helpers:await_condition( + fun() -> rpc(Config, 0, ?MODULE, has_local_member, + [rabbit_misc:r(<<"/">>, queue, QName)]) + end, 30_000); +wait_for_local_member(_, _, _) -> + ok. + has_local_member(QName) -> case rabbit_amqqueue:lookup(QName) of {ok, Q} -> @@ -5312,8 +6091,8 @@ find_event(Type, Props, Events) when is_list(Props), is_list(Events) -> fun(#event{type = EventType, props = EventProps}) -> Type =:= EventType andalso lists:all( - fun({Key, _Value}) -> - lists:keymember(Key, 1, EventProps) + fun(Prop) -> + lists:member(Prop, EventProps) end, Props) end, Events). diff --git a/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl b/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl index 669eb54348e9..ba465e396fa3 100644 --- a/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl +++ b/deps/rabbit/test/amqp_credit_api_v2_SUITE.erl @@ -48,19 +48,12 @@ end_per_group(_Group, Config) -> rabbit_ct_client_helpers:teardown_steps() ++ rabbit_ct_broker_helpers:teardown_steps()). -init_per_testcase(TestCase, Config) -> - case rabbit_ct_broker_helpers:is_feature_flag_supported(Config, TestCase) of - true -> - ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, TestCase)), - Config; - false -> - {skip, io_lib:format("feature flag ~s is unsupported", [TestCase])} - end. - -end_per_testcase(_TestCase, Config) -> - Config. 
credit_api_v2(Config) -> + %% Feature flag rabbitmq_4.0.0 enables credit API v2. + FeatureFlag = 'rabbitmq_4.0.0', + ?assertNot(rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, FeatureFlag)), + CQ = <<"classic queue">>, QQ = <<"quorum queue">>, CQAddr = rabbitmq_amqp_address:queue(CQ), @@ -104,7 +97,7 @@ credit_api_v2(Config) -> ok = amqp10_client:detach_link(QQSender), %% Consume with credit API v1 - CQAttachArgs = #{handle => 300, + CQAttachArgs = #{handle => 100, name => <<"cq receiver 1">>, role => {receiver, #{address => CQAddr, durable => configuration}, self()}, @@ -112,7 +105,7 @@ credit_api_v2(Config) -> rcv_settle_mode => first, filter => #{}}, {ok, CQReceiver1} = amqp10_client:attach_link(Session, CQAttachArgs), - QQAttachArgs = #{handle => 400, + QQAttachArgs = #{handle => 200, name => <<"qq receiver 1">>, role => {receiver, #{address => QQAddr, durable => configuration}, self()}, @@ -124,8 +117,7 @@ credit_api_v2(Config) -> ok = consume_and_accept(10, CQReceiver1), ok = consume_and_accept(10, QQReceiver1), - ?assertEqual(ok, - rabbit_ct_broker_helpers:enable_feature_flag(Config, ?FUNCTION_NAME)), + ?assertEqual(ok, rabbit_ct_broker_helpers:enable_feature_flag(Config, FeatureFlag)), flush(enabled_feature_flag), %% Consume with credit API v2 diff --git a/deps/rabbit/test/amqp_system_SUITE.erl b/deps/rabbit/test/amqp_system_SUITE.erl index 9b3ed61e84a0..e1bf5abea72b 100644 --- a/deps/rabbit/test/amqp_system_SUITE.erl +++ b/deps/rabbit/test/amqp_system_SUITE.erl @@ -34,6 +34,7 @@ groups() -> %% TODO at_most_once, reject, redelivery, + released, routing, invalid_routes, auth_failure, @@ -68,11 +69,13 @@ init_per_group(Group, Config) -> dotnet -> fun build_dotnet_test_project/1; java -> fun build_maven_test_project/1 end, - rabbit_ct_helpers:run_setup_steps(Config1, [ - GroupSetupStep - ] ++ - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()). 
+ Config2 = rabbit_ct_helpers:run_setup_steps( + Config1, + [GroupSetupStep] ++ + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + ok = rabbit_ct_broker_helpers:enable_feature_flag(Config2, 'rabbitmq_4.0.0'), + Config2. end_per_group(_, Config) -> rabbit_ct_helpers:run_teardown_steps(Config, @@ -115,22 +118,20 @@ build_maven_test_project(Config) -> %% ------------------------------------------------------------------- roundtrip(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "quorum"), run(Config, [{dotnet, "roundtrip"}, {java, "RoundTripTest"}]). streams(Config) -> - _ = rabbit_ct_broker_helpers:enable_feature_flag(Config, - message_containers_store_amqp_v1), - Ch = rabbit_ct_client_helpers:open_channel(Config), - amqp_channel:call(Ch, #'queue.declare'{queue = <<"stream_q2">>, - durable = true, - arguments = [{<<"x-queue-type">>, longstr, "stream"}]}), + declare_queue(Config, ?FUNCTION_NAME, "stream"), run(Config, [{dotnet, "streams"}]). roundtrip_to_amqp_091(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "roundtrip_to_amqp_091"}]). default_outcome(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "default_outcome"}]). no_routes_is_released(Config) -> @@ -140,28 +141,41 @@ no_routes_is_released(Config) -> run(Config, [{dotnet, "no_routes_is_released"}]). outcomes(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "outcomes"}]). fragmentation(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "fragmentation"}]). message_annotations(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "message_annotations"}]). footer(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "footer"}]). data_types(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "data_types"}]). 
reject(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "classic"), run(Config, [{dotnet, "reject"}]). redelivery(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "quorum"), run(Config, [{dotnet, "redelivery"}]). +released(Config) -> + declare_queue(Config, ?FUNCTION_NAME, "quorum"), + run(Config, [{dotnet, "released"}]). + routing(Config) -> Ch = rabbit_ct_client_helpers:open_channel(Config), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"test">>, + durable = true}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"transient_q">>, durable = false}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"durable_q">>, @@ -174,6 +188,18 @@ routing(Config) -> arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), amqp_channel:call(Ch, #'queue.declare'{queue = <<"autodel_q">>, auto_delete = true}), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"fanout_q">>, + durable = false}), + amqp_channel:call(Ch, #'queue.bind'{queue = <<"fanout_q">>, + exchange = <<"amq.fanout">> + }), + amqp_channel:call(Ch, #'queue.declare'{queue = <<"direct_q">>, + durable = false}), + amqp_channel:call(Ch, #'queue.bind'{queue = <<"direct_q">>, + exchange = <<"amq.direct">>, + routing_key = <<"direct_q">> + }), + run(Config, [ {dotnet, "routing"} ]). @@ -227,6 +253,7 @@ run_dotnet_test(Config, Method) -> [ {cd, TestProjectDir} ]), + ct:pal("~s: result ~p", [?FUNCTION_NAME, Ret]), {ok, _} = Ret. run_java_test(Config, Class) -> @@ -239,3 +266,13 @@ run_java_test(Config, Class) -> ], [{cd, TestProjectDir}]), {ok, _} = Ret. + +declare_queue(Config, Name, Type) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + #'queue.declare_ok'{} = + amqp_channel:call(Ch, #'queue.declare'{queue = atom_to_binary(Name, utf8), + durable = true, + arguments = [{<<"x-queue-type">>, + longstr, Type}]}), + rabbit_ct_client_helpers:close_channel(Ch), + ok. 
diff --git a/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs index 7ed91f388f70..5a1a0aaa5392 100755 --- a/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs +++ b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/Program.fs @@ -48,8 +48,13 @@ module AmqpClient = let s = Session c { Conn = c; Session = s } - let connectWithOpen uri opn = - let c = Connection(Address uri, null, opn, null) + let connectAnon uri = + let c = Connection(Address uri, SaslProfile.Anonymous, null, null) + let s = Session c + { Conn = c; Session = s } + + let connectAnonWithOpen uri opn = + let c = Connection(Address uri, SaslProfile.Anonymous, opn, null) let s = Session c { Conn = c; Session = s } @@ -114,7 +119,7 @@ module Test = ] let testOutcome uri (attach: Attach) (cond: string) = - use ac = connect uri + use ac = connectAnon uri let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -135,8 +140,8 @@ module Test = let no_routes_is_released uri = // tests that a message sent to an exchange that resolves no routes for the // binding key returns the Released outcome, rather than Accepted - use ac = connect uri - let address = "/exchange/no_routes_is_released" + use ac = connectAnon uri + let address = "/exchanges/no_routes_is_released" let sender = SenderLink(ac.Session, "released-sender", address) let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -160,8 +165,8 @@ module Test = () let roundtrip uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "roundtrip-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/roundtrip" for body in sampleTypes do let corr = "correlation" new Message(body, @@ -175,9 +180,9 @@ module Test = () let streams uri = - use c = connect uri + use c = connectAnon uri let name = "streams-test" - let address = "/amq/queue/stream_q2" + let address = "/queues/streams" let 
sender = SenderLink(c.Session, name + "-sender" , address) //for body in sampleTypes do let body = "hi"B :> obj @@ -216,10 +221,11 @@ module Test = open RabbitMQ.Client let roundtrip_to_amqp_091 uri = - use c = connect uri - let q = "roundtrip-091-q" + use c = connectAnon uri + let q = "roundtrip_to_amqp_091" + let target = "/queues/roundtrip_to_amqp_091" let corr = "correlation" - let sender = SenderLink(c.Session, q + "-sender" , q) + let sender = SenderLink(c.Session, q + "-sender" , target) new Message("hi"B, Header = Header(), Properties = new Properties(CorrelationId = corr)) @@ -242,13 +248,13 @@ module Test = assertEqual id corr () - let defaultOutcome uri = + let default_outcome uri = for (defOut, cond, defObj) in ["amqp:accepted:list", null, Accepted() :> Outcome "amqp:rejected:list", null, Rejected() :> Outcome "amqp:released:list", null, Released() :> Outcome] do - let source = new Source(Address = "default_outcome_q", + let source = new Source(Address = "/queues/default_outcome", DefaultOutcome = defObj) let attach = new Attach (Source = source, Target = Target()) @@ -263,7 +269,7 @@ module Test = "amqp:modified:list", null "amqp:madeup:list", "amqp:not-implemented"] do - let source = new Source(Address = "outcomes_q", + let source = new Source(Address = "/queues/outcomes", Outcomes = [| Symbol outcome |]) let attach = new Attach (Source = source, Target = Target()) @@ -281,21 +287,21 @@ module Test = let opn = Open(ContainerId = Guid.NewGuid().ToString(), HostName = addr.Host, ChannelMax = 256us, MaxFrameSize = frameSize) - use c = connectWithOpen uri opn - let sender, receiver = senderReceiver c "test" "framentation-q" + use c = connectAnonWithOpen uri opn + let sender, receiver = senderReceiver c "test" "/queues/fragmentation" let m = new Message(String.replicate size "a") sender.Send m let m' = receive receiver assertEqual (m.Body) (m'.Body) - let messageAnnotations uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" 
"annotations-q" + let message_annotations uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/message_annotations" let ann = MessageAnnotations() - let k1 = Symbol "key1" - let k2 = Symbol "key2" - ann.[Symbol "key1"] <- "value1" - ann.[Symbol "key2"] <- "value2" + let k1 = Symbol "x-key1" + let k2 = Symbol "x-key2" + ann.[Symbol "x-key1"] <- "value1" + ann.[Symbol "x-key2"] <- "value2" let m = new Message("testing annotations", MessageAnnotations = ann) sender.Send m let m' = receive receiver @@ -308,8 +314,8 @@ module Test = assertTrue (m.MessageAnnotations.[k2] = m'.MessageAnnotations.[k2]) let footer uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "footer-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/footer" let footer = Footer() let k1 = Symbol "key1" let k2 = Symbol "key2" @@ -325,9 +331,9 @@ module Test = assertTrue (m.Footer.[k1] = m'.Footer.[k1]) assertTrue (m.Footer.[k2] = m'.Footer.[k2]) - let datatypes uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "datatypes-q" + let data_types uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/data_types" let aSeq = amqpSequence sampleTypes (new Message(aSeq)) |> sender.Send let rtd = receive receiver @@ -336,87 +342,81 @@ module Test = List.exists ((=) a) sampleTypes |> assertTrue let reject uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "reject-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/reject" new Message "testing reject" |> sender.Send let m = receiver.Receive() receiver.Reject(m) assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) let redelivery uri = - use c = connect uri - let sender, receiver = senderReceiver c "test" "redelivery-q" + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/redelivery" new Message "testing 
redelivery" |> sender.Send let m = receiver.Receive() assertTrue (m.Header.FirstAcquirer) - receiver.Close() c.Session.Close() + let session = Session(c.Conn) - let receiver = ReceiverLink(session, "test-receiver", "redelivery-q") + let receiver = ReceiverLink(session, "test-receiver", "/queues/redelivery") let m' = receive receiver assertEqual (m.Body :?> string) (m'.Body :?> string) assertTrue (not m'.Header.FirstAcquirer) + assertEqual 1u (m'.Header.DeliveryCount) assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) session.Close() + let released uri = + use c = connectAnon uri + let sender, receiver = senderReceiver c "test" "/queues/released" + new Message "testing released" |> sender.Send + let m = receiver.Receive() + assertTrue (m.Header.FirstAcquirer) + receiver.SetCredit(0, false) + receiver.Release m + + let m' = receive receiver + assertEqual (m.Body :?> string) (m'.Body :?> string) + assertTrue (not m'.Header.FirstAcquirer) + assertEqual 0u (m'.Header.DeliveryCount) + assertEqual null (receiver.Receive(TimeSpan.FromMilliseconds 100.)) + c.Session.Close() + let routing uri = - for target, source, routingKey, succeed in - ["/queue/test", "test", "", true - "test", "/queue/test", "", true - "test", "test", "", true - - "/topic/a.b.c.d", "/topic/#.c.*", "", true - "/exchange/amq.topic", "/topic/#.c.*", "a.b.c.d", true - "/topic/w.x.y.z", "/exchange/amq.topic/#.y.*", "", true - "/exchange/amq.topic", "/exchange/amq.topic/#.y.*", "w.x.y.z", true - - "/exchange/amq.fanout", "/exchange/amq.fanout/", "", true - "/exchange/amq.direct", "/exchange/amq.direct/", "", true - "/exchange/amq.direct", "/exchange/amq.direct/a", "a", true - "/queue", "/queue/b", "b", true - - (* FIXME: The following three tests rely on the queue "test" - * created by previous tests in this function. 
*) - "/queue/test", "/amq/queue/test", "", true - "/amq/queue/test", "/queue/test", "", true - "/amq/queue/test", "/amq/queue/test", "", true - - (* The following tests verify that a queue created out-of-band - * in AMQP is reachable from the AMQP 1.0 world. Queues are created - * from the common_test suite. *) - "/amq/queue/transient_q", "/amq/queue/transient_q", "", true - "/amq/queue/durable_q", "/amq/queue/durable_q", "", true - "/amq/queue/quorum_q", "/amq/queue/quorum_q", "", true - "/amq/queue/stream_q", "/amq/queue/stream_q", "", true - "/amq/queue/autodel_q", "/amq/queue/autodel_q", "", true] do + for target, source, toProp in + [ + "/queues/test", "/queues/test", "" + "/exchanges/amq.fanout", "/queues/fanout_q", "" + "/exchanges/amq.direct/direct_q", "/queues/direct_q", "" + null, "/queues/direct_q", "/exchanges/amq.direct/direct_q" + "/queues/transient_q", "/queues/transient_q", "" + "/queues/durable_q", "/queues/durable_q", "" + "/queues/quorum_q", "/queues/quorum_q", "" + "/queues/stream_q", "/queues/stream_q", "" + "/queues/autodel_q", "/queues/autodel_q", ""] do let rnd = Random() - use c = connect uri + use c = connectAnon uri let sender = SenderLink(c.Session, "test-sender", target) let receiver = ReceiverLink(c.Session, "test-receiver", source) receiver.SetCredit(100, true) - use m = new Message(rnd.Next(10000), Properties = Properties(Subject = routingKey)) + use m = new Message(rnd.Next(10000), + Properties = Properties(To = toProp)) sender.Send m - (* printfn "%s %s %s %A" target source routingKey succeed *) - - if succeed then - let m' = receiver.Receive(TimeSpan.FromMilliseconds 3000.) - receiver.Accept m' - assertTrue (m' <> null) - assertEqual (m.Body :?> int) (m'.Body :?> int) - else - use m' = receiver.Receive(TimeSpan.FromMilliseconds 100.) - assertEqual null m' - + (* printfn "%s %s %s %A" target source routingKey *) + let m' = receiver.Receive(TimeSpan.FromMilliseconds 3000.) 
+ receiver.Accept m' + assertTrue (m' <> null) + assertEqual (m.Body :?> int) (m'.Body :?> int) let invalidRoutes uri = for dest, cond in - ["/exchange/missing", "amqp:not-found" + ["/exchanges/missing", "amqp:not-found" "/fruit/orange", "amqp:invalid-field"] do - use ac = connect uri + use ac = connectAnon uri let trySet (mre: AutoResetEvent) = try mre.Set() |> ignore with _ -> () @@ -454,7 +454,7 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" ac.Session.add_Closed ( new ClosedCallback (fun _ err -> printfn "session err %A" err.Condition )) @@ -471,7 +471,7 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" let receiver = ReceiverLink(ac.Session, "test-receiver", dest) receiver.Close() failwith "expected exception not received" @@ -485,7 +485,7 @@ module Test = let u = Uri uri let uri = sprintf "amqp://access_failure_not_allowed:boo@%s:%i" u.Host u.Port use ac = connect uri - let dest = "/amq/queue/test" + let dest = "/queues/test" let receiver = ReceiverLink(ac.Session, "test-receiver", dest) receiver.Close() failwith "expected exception not received" @@ -521,10 +521,10 @@ let main argv = roundtrip_to_amqp_091 uri 0 | [AsLower "data_types"; uri] -> - datatypes uri + data_types uri 0 | [AsLower "default_outcome"; uri] -> - defaultOutcome uri + default_outcome uri 0 | [AsLower "outcomes"; uri] -> outcomes uri @@ -533,7 +533,7 @@ let main argv = fragmentation uri 0 | [AsLower "message_annotations"; uri] -> - messageAnnotations uri + message_annotations uri 0 | [AsLower "footer"; uri] -> footer uri @@ -544,6 +544,9 @@ let main argv = | [AsLower "redelivery"; uri] -> redelivery uri 0 + | [AsLower "released"; uri] -> + released uri + 0 | [AsLower "routing"; uri] -> routing uri 0 diff --git 
a/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj index bd832eaac890..5c576b399c91 100755 --- a/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj +++ b/deps/rabbit/test/amqp_system_SUITE_data/fsharp-tests/fsharp-tests.fsproj @@ -1,7 +1,7 @@  Exe - net6.0 + net8.0 diff --git a/deps/rabbit/test/backing_queue_SUITE.erl b/deps/rabbit/test/backing_queue_SUITE.erl index 10129201b9dc..2b4ce444c991 100644 --- a/deps/rabbit/test/backing_queue_SUITE.erl +++ b/deps/rabbit/test/backing_queue_SUITE.erl @@ -517,6 +517,7 @@ msg_store_file_scan1(Config) -> Expected = gen_result(Blocks), Path = gen_msg_file(Config, Blocks), Result = rabbit_msg_store:scan_file_for_valid_messages(Path), + ok = file:delete(Path), case Result of Expected -> ok; _ -> {expected, Expected, got, Result} diff --git a/deps/rabbit/test/classic_queue_SUITE.erl b/deps/rabbit/test/classic_queue_SUITE.erl index 09c427f67664..5b54d7150fb0 100644 --- a/deps/rabbit/test/classic_queue_SUITE.erl +++ b/deps/rabbit/test/classic_queue_SUITE.erl @@ -61,7 +61,7 @@ end_per_group(_, Config) -> rabbit_ct_broker_helpers:teardown_steps()). init_per_testcase(T, Config) -> - case rabbit_ct_broker_helpers:enable_feature_flag(Config, classic_queue_leader_locator) of + case rabbit_ct_broker_helpers:enable_feature_flag(Config, 'rabbitmq_4.0.0') of ok -> rabbit_ct_helpers:testcase_started(Config, T); Skip -> diff --git a/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl b/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl index 6a8293c66409..8ceb2825ea30 100644 --- a/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl +++ b/deps/rabbit/test/cli_forget_cluster_node_SUITE.erl @@ -12,6 +12,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). 
-import(clustering_utils, [ @@ -36,7 +37,8 @@ groups() -> forget_cluster_node_with_all_last_streams, forget_cluster_node_with_quorum_queues_and_streams, forget_cluster_node_with_one_last_quorum_member_and_streams, - forget_cluster_node_with_one_last_stream_and_quorum_queues + forget_cluster_node_with_one_last_stream_and_quorum_queues, + forget_cluster_node_with_one_classic_queue ]} ]. @@ -353,6 +355,30 @@ forget_cluster_node_with_one_last_stream_and_quorum_queues(Config) -> ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ1), 30000), ?awaitMatch(Members when length(Members) == 2, get_quorum_members(Rabbit, QQ2), 30000). +forget_cluster_node_with_one_classic_queue(Config) -> + [Rabbit, Hare, Bunny] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + assert_clustered([Rabbit, Hare, Bunny]), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Bunny), + CQ1 = <<"classic-queue-1">>, + declare(Ch, CQ1, [{<<"x-queue-type">>, longstr, <<"classic">>}]), + + ?awaitMatch([_], rabbit_ct_broker_helpers:rabbitmqctl_list( + Config, Rabbit, + ["list_queues", "name", "--no-table-headers"]), + 30000), + + ?assertEqual(ok, rabbit_control_helper:command(stop_app, Bunny)), + ?assertEqual(ok, forget_cluster_node(Rabbit, Bunny)), + + assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Rabbit, Hare]}, + [Rabbit, Hare]), + ?awaitMatch([], rabbit_ct_broker_helpers:rabbitmqctl_list( + Config, Rabbit, + ["list_queues", "name", "--no-table-headers"]), + 30000). + forget_cluster_node(Node, Removee) -> rabbit_control_helper:command(forget_cluster_node, Node, [atom_to_list(Removee)], []). 
diff --git a/deps/rabbit/test/cluster_limit_SUITE.erl b/deps/rabbit/test/cluster_limit_SUITE.erl index c8aa31614587..22d5c24e0d65 100644 --- a/deps/rabbit/test/cluster_limit_SUITE.erl +++ b/deps/rabbit/test/cluster_limit_SUITE.erl @@ -54,8 +54,7 @@ init_per_group(Group, Config) -> [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - rabbit_ct_helpers:run_steps(Config1b, + rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()) end. diff --git a/deps/rabbit/test/cluster_minority_SUITE.erl b/deps/rabbit/test/cluster_minority_SUITE.erl index a6a8f4759ba4..93ce3b72f29c 100644 --- a/deps/rabbit/test/cluster_minority_SUITE.erl +++ b/deps/rabbit/test/cluster_minority_SUITE.erl @@ -28,6 +28,7 @@ groups() -> declare_binding, delete_binding, declare_queue, + delete_queue, publish_to_exchange, publish_and_consume_to_local_classic_queue, consume_from_queue, @@ -97,6 +98,16 @@ init_per_group(Group, Config0) when Group == client_operations; %% To be used in consume_from_queue #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue">>, arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}), + %% To be used in consume_from_queue + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue-delete-classic">>, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"classic">>}]}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue-delete-stream">>, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"stream">>}]}), + #'queue.declare_ok'{} = amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue-delete-quorum">>, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}), %% To be used in delete_binding #'exchange.bind_ok'{} = amqp_channel:call(Ch, #'exchange.bind'{destination = 
<<"amq.fanout">>, source = <<"amq.direct">>, @@ -188,6 +199,22 @@ declare_queue(Config) -> ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, amqp_channel:call(Ch, #'queue.declare'{queue = <<"test-queue-2">>})). +delete_queue(Config) -> + [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A), + {ok, Ch1} = amqp_connection:open_channel(Conn1), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch1, #'queue.delete'{queue = <<"test-queue-delete-classic">>})), + Conn2 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A), + {ok, Ch2} = amqp_connection:open_channel(Conn2), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch2, #'queue.delete'{queue = <<"test-queue-delete-stream">>})), + Conn3 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, A), + {ok, Ch3} = amqp_connection:open_channel(Conn3), + ?assertExit({{shutdown, {connection_closing, {server_initiated_close, 541, _}}}, _}, + amqp_channel:call(Ch3, #'queue.delete'{queue = <<"test-queue-delete-quorum">>})), + ok. + publish_to_exchange(Config) -> [A | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), {_, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, A), @@ -218,7 +245,11 @@ update_vhost(Config) -> [<<"/">>, [carrots], <<"user">>])). delete_vhost(Config) -> - ?assertMatch({'EXIT', _}, rabbit_ct_broker_helpers:delete_vhost(Config, <<"vhost1">>)). + ?assertError( + {erpc, timeout}, + rabbit_ct_broker_helpers:rpc( + Config, 0, + rabbit_vhost, delete, [<<"vhost1">>, <<"acting-user">>], 1_000)). 
add_user(Config) -> ?assertMatch({error, timeout}, diff --git a/deps/rabbit/test/cluster_upgrade_SUITE.erl b/deps/rabbit/test/cluster_upgrade_SUITE.erl new file mode 100644 index 000000000000..2b78f119c904 --- /dev/null +++ b/deps/rabbit/test/cluster_upgrade_SUITE.erl @@ -0,0 +1,158 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(cluster_upgrade_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-compile([export_all, nowarn_export_all]). + +all() -> + [ + {group, all_tests} + ]. + +groups() -> + [ + {all_tests, [], all_tests()} + ]. + +all_tests() -> + [ + queue_upgrade + ]. + +%% ------------------------------------------------------------------- +%% Test suite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config); + false -> + {skip, "cluster upgrade tests must be run in mixed versions " + "testing only"} + end. + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, Testcase}, + {rmq_nodes_count, 3}, + {force_secondary_umbrella, true} + ]), + Config2 = rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + rabbit_ct_helpers:testcase_started(Config2, Testcase). + +end_per_testcase(Testcase, Config) -> + Config1 = rabbit_ct_helpers:run_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()), + rabbit_ct_helpers:testcase_finished(Config1, Testcase). + +%% --------------------------------------------------------------------------- +%% Test Cases +%% --------------------------------------------------------------------------- + +queue_upgrade(Config) -> + ok = print_cluster_versions(Config), + + %% Declare some resources before upgrading. + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config), + ClassicQName = <<"classic-q">>, + QQName = <<"quorum-q">>, + StreamQName = <<"stream-q">>, + declare(Ch, ClassicQName, [{<<"x-queue-type">>, longstr, <<"classic">>}]), + declare(Ch, QQName, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + declare(Ch, StreamQName, [{<<"x-queue-type">>, longstr, <<"stream">>}]), + [begin + #'queue.bind_ok'{} = amqp_channel:call( + Ch, + #'queue.bind'{queue = Name, + exchange = <<"amq.fanout">>, + routing_key = Name}) + end || Name <- [ClassicQName, QQName, StreamQName]], + Msgs = [<<"msg">>, <<"msg">>, <<"msg">>], + publish_confirm(Ch, <<"amq.fanout">>, <<>>, Msgs), + ok = rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch), + + %% Restart the servers + Config1 = upgrade_cluster(Config), + ok = print_cluster_versions(Config1), + + %% Check that the resources are still there + queue_utils:wait_for_messages(Config, [[ClassicQName, <<"3">>, <<"3">>, <<"0">>], + [QQName, <<"3">>, <<"3">>, <<"0">>], + [StreamQName, <<"3">>, <<"3">>, <<"0">>]]), + + 
ok. + +%% ---------------------------------------------------------------------------- +%% Internal utils +%% ---------------------------------------------------------------------------- + +declare(Ch, Q, Args) -> + #'queue.declare_ok'{} = amqp_channel:call( + Ch, #'queue.declare'{queue = Q, + durable = true, + auto_delete = false, + arguments = Args}). + +publish(Ch, X, RK, Msg) -> + ok = amqp_channel:cast(Ch, + #'basic.publish'{exchange = X, + routing_key = RK}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}, + payload = Msg}). + +publish_confirm(Ch, X, RK, Msgs) -> + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + amqp_channel:register_confirm_handler(Ch, self()), + [publish(Ch, X, RK, Msg) || Msg <- Msgs], + amqp_channel:wait_for_confirms(Ch, 5). + +cluster_members(Config) -> + rabbit_ct_broker_helpers:get_node_configs(Config, nodename). + +upgrade_cluster(Config) -> + Cluster = cluster_members(Config), + ct:pal(?LOW_IMPORTANCE, "Stopping cluster ~p", [Cluster]), + [ok = rabbit_ct_broker_helpers:stop_node(Config, N) + || N <- Cluster], + ct:pal(?LOW_IMPORTANCE, "Restarting cluster ~p", [Cluster]), + Config1 = rabbit_ct_helpers:set_config( + Config, {force_secondary_umbrella, false}), + [ok = rabbit_ct_broker_helpers:async_start_node(Config1, N) + || N <- Cluster], + [ok = rabbit_ct_broker_helpers:wait_for_async_start_node(N) + || N <- Cluster], + Config1. + +print_cluster_versions(Config) -> + Cluster = cluster_members(Config), + Versions = [begin + Version = rabbit_ct_broker_helpers:rpc( + Config, N, + rabbit, product_version, []), + {N, Version} + end || N <- Cluster], + ct:pal("Cluster versions: ~p", [Versions]), + ok. 
diff --git a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets index 247dd0f92f14..ec706686466b 100644 --- a/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets +++ b/deps/rabbit/test/config_schema_SUITE_data/rabbit.snippets @@ -182,7 +182,7 @@ ssl_options.fail_if_no_peer_cert = true", [{rabbit, [{default_users, [ {<<"a">>, [{<<"vhost_pattern">>, "banana"}, {<<"tags">>, [administrator, operator]}, - {<<"password">>, "SECRET"}, + {<<"password">>, <<"SECRET">>}, {<<"read">>, ".*"}]}]}]}], []}, @@ -220,6 +220,8 @@ ssl_options.fail_if_no_peer_cert = true", {default_user_settings, "default_user = guest default_pass = guest +anonymous_login_user = guest +anonymous_login_pass = guest default_user_tags.administrator = true default_permissions.configure = .* default_permissions.read = .* @@ -227,9 +229,30 @@ default_permissions.write = .*", [{rabbit, [{default_user,<<"guest">>}, {default_pass,<<"guest">>}, + {anonymous_login_user,<<"guest">>}, + {anonymous_login_pass,<<"guest">>}, {default_user_tags,[administrator]}, {default_permissions,[<<".*">>,<<".*">>,<<".*">>]}]}], []}, + {anonymous_login_user, + "anonymous_login_user = none", + [{rabbit, + [{anonymous_login_user, none}]}], + []}, + + {auth_mechanisms_ordered, + "auth_mechanisms.1 = PLAIN +auth_mechanisms.2 = AMQPLAIN +auth_mechanisms.3 = ANONYMOUS", + [], + [{rabbit, + %% We expect the mechanisms in the order as declared. 
+ [{auth_mechanisms, ['PLAIN', 'AMQPLAIN', 'ANONYMOUS']}] + }], + [], + nosort + }, + {cluster_formation, "cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config cluster_formation.classic_config.nodes.peer1 = rabbit@hostname1 @@ -349,6 +372,7 @@ tcp_listen_options.exit_on_close = false", vm_memory_high_watermark.relative = 0.4", [{rabbit,[{vm_memory_high_watermark,{absolute,1073741824}}]}], []}, + %% DEPRECATED; just for backwards compatibility {vm_memory_watermark_paging_ratio, "vm_memory_high_watermark_paging_ratio = 0.75 vm_memory_high_watermark.relative = 0.4", @@ -356,6 +380,7 @@ tcp_listen_options.exit_on_close = false", [{vm_memory_high_watermark_paging_ratio,0.75}, {vm_memory_high_watermark,0.4}]}], []}, + %% DEPRECATED; just for backwards compatibility {memory_monitor_interval, "memory_monitor_interval = 5000", [{rabbit, [{memory_monitor_interval, 5000}]}], @@ -404,6 +429,22 @@ tcp_listen_options.exit_on_close = false", "channel_max_per_node = infinity", [{rabbit,[{channel_max_per_node, infinity}]}], []}, + {session_max_per_connection_1, + "session_max_per_connection = 1", + [{rabbit,[{session_max_per_connection, 1}]}], + []}, + {session_max_per_connection, + "session_max_per_connection = 65000", + [{rabbit,[{session_max_per_connection, 65_000}]}], + []}, + {link_max_per_session_1, + "link_max_per_session = 1", + [{rabbit,[{link_max_per_session, 1}]}], + []}, + {link_max_per_session, + "link_max_per_session = 4200000000", + [{rabbit,[{link_max_per_session, 4_200_000_000}]}], + []}, {consumer_max_per_channel, "consumer_max_per_channel = 16", [{rabbit,[{consumer_max_per_channel, 16}]}], @@ -510,7 +551,7 @@ tcp_listen_options.exit_on_close = false", [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {password,"t0p$3kRe7"}]}]}], + {password,<<"t0p$3kRe7">>}]}]}], []}, {ssl_options_tls_ver_old, 
"listeners.ssl.1 = 5671 @@ -747,22 +788,6 @@ tcp_listen_options.exit_on_close = false", [{rabbit,[{tcp_listen_options,[{linger,{false,100}}]}]}], []}, - {cluster_formation_randomized_startup_delay_both_values, - "cluster_formation.randomized_startup_delay_range.min = 10 - cluster_formation.randomized_startup_delay_range.max = 30", - [], - []}, - - {cluster_formation_randomized_startup_delay_min_only, - "cluster_formation.randomized_startup_delay_range.min = 10", - [], - []}, - - {cluster_formation_randomized_startup_delay_max_only, - "cluster_formation.randomized_startup_delay_range.max = 30", - [], - []}, - {cluster_formation_internal_lock_retries, "cluster_formation.internal_lock_retries = 10", [{rabbit,[{cluster_formation,[{internal_lock_retries,10}]}]}], diff --git a/deps/rabbit/test/dead_lettering_SUITE.erl b/deps/rabbit/test/dead_lettering_SUITE.erl index 853f8fa59c64..6d0ad63b13d8 100644 --- a/deps/rabbit/test/dead_lettering_SUITE.erl +++ b/deps/rabbit/test/dead_lettering_SUITE.erl @@ -4,7 +4,7 @@ %% %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -%% For the full spec see: https://www.rabbitmq.com/dlx.html +%% For the full spec see: https://www.rabbitmq.com/docs/dlx %% -module(dead_lettering_SUITE). diff --git a/deps/rabbit/test/deprecated_features_SUITE.erl b/deps/rabbit/test/deprecated_features_SUITE.erl index 6d8ead9d371a..3f4ea21eba8c 100644 --- a/deps/rabbit/test/deprecated_features_SUITE.erl +++ b/deps/rabbit/test/deprecated_features_SUITE.erl @@ -85,9 +85,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_setup_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_setup_steps(Config, []). end_per_suite(Config) -> Config. 
diff --git a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl index b44c6de1440f..92bf9aedd8cc 100644 --- a/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl +++ b/deps/rabbit/test/disconnect_detected_during_alarm_SUITE.erl @@ -96,7 +96,7 @@ disconnect_detected_during_alarm(Config) -> ListConnections = fun() -> - rpc:call(A, rabbit_networking, connection_info_all, []) + rpc:call(A, rabbit_networking, connection_info_all, [[state]]) end, %% We've already disconnected, but blocked connection still should still linger on. diff --git a/deps/rabbit/test/dynamic_qq_SUITE.erl b/deps/rabbit/test/dynamic_qq_SUITE.erl index e13237703fa8..e87f51c79c46 100644 --- a/deps/rabbit/test/dynamic_qq_SUITE.erl +++ b/deps/rabbit/test/dynamic_qq_SUITE.erl @@ -28,7 +28,7 @@ groups() -> {cluster_size_3, [], [ vhost_deletion, quorum_unaffected_after_vhost_failure, - recover_follower_after_standalone_restart, + forget_cluster_node, force_delete_if_no_consensus, takeover_on_failure, takeover_on_shutdown @@ -219,7 +219,7 @@ quorum_unaffected_after_vhost_failure(Config) -> end, 60000). -recover_follower_after_standalone_restart(Config) -> +forget_cluster_node(Config) -> %% Tests that quorum queues shrink when forget_cluster_node %% operations are issues. [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -243,10 +243,10 @@ recover_follower_after_standalone_restart(Config) -> rabbit_ct_client_helpers:close_channel(Ch), %% Restart one follower - forget_cluster_node(Config, B, C), - wait_for_messages_ready([B], Name, 15), - forget_cluster_node(Config, B, A), - wait_for_messages_ready([B], Name, 15), + forget_cluster_node(Config, C, B), + wait_for_messages_ready([C], Name, 15), + forget_cluster_node(Config, C, A), + wait_for_messages_ready([C], Name, 15), ok. 
diff --git a/deps/rabbit/test/feature_flags_SUITE.erl b/deps/rabbit/test/feature_flags_SUITE.erl index 55a469209202..cf1ff3e2e7eb 100644 --- a/deps/rabbit/test/feature_flags_SUITE.erl +++ b/deps/rabbit/test/feature_flags_SUITE.erl @@ -64,6 +64,7 @@ groups() -> [ enable_feature_flag_in_a_healthy_situation, enable_unsupported_feature_flag_in_a_healthy_situation, + enable_feature_flag_when_ff_file_is_unwritable, required_feature_flag_enabled_by_default, required_plugin_feature_flag_enabled_by_default, required_plugin_feature_flag_enabled_after_activation, @@ -73,6 +74,7 @@ groups() -> [ enable_feature_flag_in_a_healthy_situation, enable_unsupported_feature_flag_in_a_healthy_situation, + enable_feature_flag_when_ff_file_is_unwritable, enable_feature_flag_with_a_network_partition, mark_feature_flag_as_enabled_with_a_network_partition, required_feature_flag_enabled_by_default, @@ -122,9 +124,7 @@ end_per_suite(Config) -> init_per_group(registry, Config) -> logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]); + rabbit_ct_helpers:run_steps(Config, []); init_per_group(feature_flags_v2, Config) -> %% `feature_flags_v2' is now required and won't work in mixed-version %% clusters if the other version doesn't support it. @@ -655,6 +655,7 @@ enable_unsupported_feature_flag_in_a_healthy_situation(Config) -> False, is_feature_flag_enabled(Config, FeatureName)). +%% This test case must run as an unprivileged user. 
enable_feature_flag_when_ff_file_is_unwritable(Config) -> Supported = rabbit_ct_broker_helpers:is_feature_flag_supported( Config, ff_from_testsuite), diff --git a/deps/rabbit/test/feature_flags_v2_SUITE.erl b/deps/rabbit/test/feature_flags_v2_SUITE.erl index 8678d7a2d877..534c5cbdd651 100644 --- a/deps/rabbit/test/feature_flags_v2_SUITE.erl +++ b/deps/rabbit/test/feature_flags_v2_SUITE.erl @@ -49,6 +49,7 @@ failed_enable_feature_flag_with_post_enable/1, have_required_feature_flag_in_cluster_and_add_member_with_it_disabled/1, have_required_feature_flag_in_cluster_and_add_member_without_it/1, + have_unknown_feature_flag_in_cluster_and_add_member_with_it_enabled/1, error_during_migration_after_initial_success/1, controller_waits_for_own_task_to_finish_before_exiting/1, controller_waits_for_remote_task_to_finish_before_exiting/1 @@ -98,6 +99,7 @@ groups() -> failed_enable_feature_flag_with_post_enable, have_required_feature_flag_in_cluster_and_add_member_with_it_disabled, have_required_feature_flag_in_cluster_and_add_member_without_it, + have_unknown_feature_flag_in_cluster_and_add_member_with_it_enabled, error_during_migration_after_initial_success, controller_waits_for_own_task_to_finish_before_exiting, controller_waits_for_remote_task_to_finish_before_exiting @@ -114,9 +116,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_steps(Config, []). end_per_suite(Config) -> Config. 
@@ -169,7 +169,15 @@ start_slave_node(Parent, Config, Testcase, N) -> Name = list_to_atom( rabbit_misc:format("~ts-~b", [Testcase, N])), ct:pal("- Starting slave node `~ts@...`", [Name]), - {ok, Node} = slave:start(net_adm:localhost(), Name), + {ok, NodePid, Node} = peer:start(#{ + name => Name, + connection => standard_io, + shutdown => close + }), + peer:call(NodePid, net_kernel, set_net_ticktime, [5]), + + persistent_term:put({?MODULE, Node}, NodePid), + ct:pal("- Slave node `~ts` started", [Node]), TestCodePath = filename:dirname(code:which(?MODULE)), @@ -185,8 +193,16 @@ stop_slave_nodes(Config) -> rabbit_ct_helpers:delete_config(Config, nodes). stop_slave_node(Node) -> - ct:pal("- Stopping slave node `~ts`...", [Node]), - ok = slave:stop(Node). + case persistent_term:get({?MODULE, Node}, undefined) of + undefined -> + %% Node was already stopped (e.g. by the test case). + ok; + NodePid -> + persistent_term:erase({?MODULE, Node}), + + ct:pal("- Stopping slave node `~ts`...", [Node]), + ok = peer:stop(NodePid) + end. connect_nodes([FirstNode | OtherNodes] = Nodes) -> lists:foreach( @@ -1492,6 +1508,53 @@ have_required_feature_flag_in_cluster_and_add_member_without_it( || Node <- AllNodes], ok. 
+have_unknown_feature_flag_in_cluster_and_add_member_with_it_enabled( + Config) -> + [NewNode | [FirstNode | _] = Nodes] = ?config(nodes, Config), + connect_nodes(Nodes), + override_running_nodes([NewNode]), + override_running_nodes(Nodes), + + FeatureName = ?FUNCTION_NAME, + FeatureFlags = #{FeatureName => + #{provided_by => rabbit, + stability => stable}}, + ?assertEqual(ok, inject_on_nodes([NewNode], FeatureFlags)), + + ct:pal( + "Checking the feature flag is unsupported on the cluster but enabled on " + "the standalone node"), + ok = run_on_node( + NewNode, + fun() -> + ?assertEqual(ok, rabbit_feature_flags:enable(FeatureName)), + ?assert(rabbit_feature_flags:is_enabled(FeatureName)), + ok + end, + []), + _ = [ok = + run_on_node( + Node, + fun() -> + ?assertNot(rabbit_feature_flags:is_supported(FeatureName)), + ?assertNot(rabbit_feature_flags:is_enabled(FeatureName)), + ok + end, + []) + || Node <- Nodes], + + %% Check compatibility between NewNodes and Nodes. + ok = run_on_node( + NewNode, + fun() -> + ?assertEqual( + ok, + rabbit_feature_flags:check_node_compatibility( + FirstNode, true)), + ok + end, []), + ok. + error_during_migration_after_initial_success(Config) -> AllNodes = [NewNode | [FirstNode | _] = Nodes] = ?config(nodes, Config), connect_nodes(Nodes), diff --git a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl b/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl deleted file mode 100644 index d8b627da39d4..000000000000 --- a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl +++ /dev/null @@ -1,72 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
-%% - --module(feature_flags_with_unpriveleged_user_SUITE). - --include_lib("eunit/include/eunit.hrl"). - --export([suite/0, - all/0, - groups/0, - init_per_suite/1, - end_per_suite/1, - init_per_group/2, - end_per_group/2, - init_per_testcase/2, - end_per_testcase/2, - - enable_feature_flag_when_ff_file_is_unwritable/1 - ]). - -suite() -> - [{timetrap, {minutes, 5}}]. - -all() -> - [ - {group, enabling_on_single_node}, - {group, enabling_in_cluster} - ]. - -groups() -> - [ - {enabling_on_single_node, [], - [ - enable_feature_flag_when_ff_file_is_unwritable - ]}, - {enabling_in_cluster, [], - [ - enable_feature_flag_when_ff_file_is_unwritable - ]} - ]. - -%% This suite exists to allow running a portion of the feature_flags_SUITE -%% under separate conditions in ci - -init_per_suite(Config) -> - feature_flags_SUITE:init_per_suite(Config). - -end_per_suite(Config) -> - feature_flags_SUITE:end_per_suite(Config). - -init_per_group(Group, Config) -> - feature_flags_SUITE:init_per_group(Group, Config). - -end_per_group(Group, Config) -> - feature_flags_SUITE:end_per_group(Group, Config). - -init_per_testcase(Testcase, Config) -> - feature_flags_SUITE:init_per_testcase(Testcase, Config). - -end_per_testcase(Testcase, Config) -> - feature_flags_SUITE:end_per_testcase(Testcase, Config). - -%% ------------------------------------------------------------------- -%% Testcases. -%% ------------------------------------------------------------------- - -enable_feature_flag_when_ff_file_is_unwritable(Config) -> - feature_flags_SUITE:enable_feature_flag_when_ff_file_is_unwritable(Config). 
diff --git a/deps/rabbit/test/logging_SUITE.erl b/deps/rabbit/test/logging_SUITE.erl index 0d2ecc8db510..2f7b0aad868c 100644 --- a/deps/rabbit/test/logging_SUITE.erl +++ b/deps/rabbit/test/logging_SUITE.erl @@ -1029,6 +1029,11 @@ logging_to_exchange_works(Config) -> #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1), rabbit_ct_helpers:await_condition(ContainsLogEntry4, 30_000), + ContainsLogEntryUnicode = + ping_log(rmq_1_exchange, info, "unicode 257 is ā", + #{domain => ?RMQLOG_DOMAIN_UPGRADE}, Config1), + rabbit_ct_helpers:await_condition(ContainsLogEntryUnicode, 30_000), + %% increase log level ok = rabbit_ct_broker_helpers:rpc( Config, 0, @@ -1179,14 +1184,17 @@ ping_log(Id, Level, Metadata, Config) -> 32, "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ"), - ct:log("Logging \"~ts\" at level ~ts (~tp)", [RandomMsg, Level, Metadata]), + ping_log(Id, Level, RandomMsg, Metadata, Config). + +ping_log(Id, Level, Msg, Metadata, Config) -> + ct:log("Logging \"~ts\" at level ~ts (~tp)", [Msg, Level, Metadata]), case need_rpc(Config) of - false -> logger:log(Level, RandomMsg, Metadata); + false -> logger:log(Level, Msg, Metadata); true -> rabbit_ct_broker_helpers:rpc( Config, 0, - logger, log, [Level, RandomMsg, Metadata]) + logger, log, [Level, Msg, Metadata]) end, - check_log(Id, Level, RandomMsg, Config). + check_log(Id, Level, Msg, Config). 
need_rpc(Config) -> rabbit_ct_helpers:get_config( @@ -1216,7 +1224,7 @@ check_log1(#{id := Id, end, fun() -> {ok, Content} = file:read_file(Filename), - ReOpts = [{capture, none}, multiline], + ReOpts = [{capture, none}, multiline, unicode], match =:= re:run(Content, RandomMsg ++ "$", ReOpts) end; check_log1(#{module := Mod, @@ -1227,7 +1235,7 @@ check_log1(#{module := Mod, when ?IS_STD_H_COMPAT(Mod) andalso ?IS_STDDEV(Stddev) -> Filename = html_report_filename(Config), {ColorStart, ColorEnd} = get_color_config(Handler, Level), - ReOpts = [{capture, none}, multiline], + ReOpts = [{capture, none}, multiline, unicode], fun() -> {ok, Content} = file:read_file(Filename), Regex = @@ -1239,7 +1247,7 @@ check_log1(#{module := rabbit_logger_exchange_h}, RandomMsg, Config) -> {Chan, QName} = ?config(test_channel_and_queue, Config), - ReOpts = [{capture, none}, multiline], + ReOpts = [{capture, none}, multiline, unicode], fun() -> Ret = amqp_channel:call( Chan, #'basic.get'{queue = QName, no_ack = false}), @@ -1257,7 +1265,7 @@ check_log1(#{module := syslog_logger_h}, _Level, RandomMsg, Config) -> - ReOpts = [{capture, none}, multiline], + ReOpts = [{capture, none}, multiline, unicode], fun() -> Buffer = get_syslogd_messages(Config), match =:= re:run(Buffer, RandomMsg ++ "$", ReOpts) diff --git a/deps/rabbit/test/maintenance_mode_SUITE.erl b/deps/rabbit/test/maintenance_mode_SUITE.erl index 116c39205598..f02a5878455f 100644 --- a/deps/rabbit/test/maintenance_mode_SUITE.erl +++ b/deps/rabbit/test/maintenance_mode_SUITE.erl @@ -12,6 +12,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). +-compile(nowarn_export_all). -compile(export_all). 
all() -> diff --git a/deps/rabbit/test/mc_unit_SUITE.erl b/deps/rabbit/test/mc_unit_SUITE.erl index d7fc929005f0..529ffe072c28 100644 --- a/deps/rabbit/test/mc_unit_SUITE.erl +++ b/deps/rabbit/test/mc_unit_SUITE.erl @@ -100,7 +100,14 @@ amqpl_compat(_Config) -> XName= <<"exch">>, RoutingKey = <<"apple">>, - {ok, Msg} = rabbit_basic:message_no_id(XName, RoutingKey, Content), + {ok, Msg00} = rabbit_basic:message_no_id(XName, RoutingKey, Content), + + %% Quorum queues set the AMQP 1.0 specific annotation delivery_count. + %% This should be a no-op for mc_compat. + Msg0 = mc:set_annotation(delivery_count, 1, Msg00), + %% However, annotation x-delivery-count has a meaning for mc_compat messages. + Msg = mc:set_annotation(<<"x-delivery-count">>, 2, Msg0), + ?assertEqual({long, 2}, mc:x_header(<<"x-delivery-count">>, Msg)), ?assertEqual(98, mc:priority(Msg)), ?assertEqual(false, mc:is_persistent(Msg)), @@ -524,8 +531,6 @@ amqp_amqpl(_Config) -> durable = true}, MAC = [ {{symbol, <<"x-stream-filter">>}, {utf8, <<"apple">>}}, - thead2(list, [utf8(<<"l">>)]), - thead2(map, [{utf8(<<"k">>), utf8(<<"v">>)}]), thead2('x-list', list, [utf8(<<"l">>)]), thead2('x-map', map, [{utf8(<<"k">>), utf8(<<"v">>)}]) ], @@ -591,9 +596,6 @@ amqp_amqpl(_Config) -> ?assertMatch(#'P_basic'{expiration = <<"20000">>}, Props), ?assertMatch({_, longstr, <<"apple">>}, header(<<"x-stream-filter">>, HL)), - %% these are not coverted as not x- headers - ?assertEqual(undefined, header(<<"list">>, HL)), - ?assertEqual(undefined, header(<<"map">>, HL)), ?assertMatch({_ ,array, [{longstr,<<"l">>}]}, header(<<"x-list">>, HL)), ?assertMatch({_, table, [{<<"k">>,longstr,<<"v">>}]}, header(<<"x-map">>, HL)), diff --git a/deps/rabbit/test/metadata_store_clustering_SUITE.erl b/deps/rabbit/test/metadata_store_clustering_SUITE.erl index e9bf9584d56b..a33241d263cb 100644 --- a/deps/rabbit/test/metadata_store_clustering_SUITE.erl +++ b/deps/rabbit/test/metadata_store_clustering_SUITE.erl @@ -10,7 +10,40 @@ 
-include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). --compile([nowarn_export_all, export_all]). +-include_lib("rabbitmq_ct_helpers/include/rabbit_assert.hrl"). + +-export([suite/0, + all/0, + groups/0, + init_per_suite/1, + end_per_suite/1, + init_per_group/2, + end_per_group/2, + init_per_testcase/2, + end_per_testcase/2, + + join_khepri_khepri_cluster/1, + join_mnesia_khepri_cluster/1, + join_mnesia_khepri_cluster_reverse/1, + join_khepri_mnesia_cluster/1, + join_khepri_mnesia_cluster_reverse/1, + + join_khepri_khepri_khepri_cluster/1, + join_mnesia_khepri_khepri_cluster/1, + join_mnesia_khepri_khepri_cluster_reverse/1, + join_khepri_mnesia_khepri_cluster/1, + join_khepri_mnesia_khepri_cluster_reverse/1, + join_khepri_khepri_mnesia_cluster/1, + join_khepri_khepri_mnesia_cluster_reverse/1, + join_mnesia_mnesia_khepri_cluster/1, + join_mnesia_mnesia_khepri_cluster_reverse/1, + join_mnesia_khepri_mnesia_cluster/1, + join_mnesia_khepri_mnesia_cluster_reverse/1, + join_khepri_mnesia_mnesia_cluster/1, + join_khepri_mnesia_mnesia_cluster_reverse/1, + + join_khepri_while_in_minority/1 + ]). suite() -> [{timetrap, 5 * 60_000}]. @@ -23,7 +56,8 @@ all() -> groups() -> [ {unclustered, [], [{cluster_size_2, [], cluster_size_2_tests()}, - {cluster_size_3, [], cluster_size_3_tests()}]} + {cluster_size_3, [], cluster_size_3_tests()}, + {cluster_size_5, [], cluster_size_5_tests()}]} ]. cluster_size_2_tests() -> @@ -52,6 +86,11 @@ cluster_size_3_tests() -> join_khepri_mnesia_mnesia_cluster_reverse ]. +cluster_size_5_tests() -> + [ + join_khepri_while_in_minority + ]. + %% ------------------------------------------------------------------- %% Testsuite setup/teardown. 
%% ------------------------------------------------------------------- @@ -73,12 +112,13 @@ end_per_suite(Config) -> init_per_group(unclustered, Config) -> rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}, {rmq_nodes_clustered, false}, - {tcp_ports_base}, - {net_ticktime, 10}]); + {tcp_ports_base}]); init_per_group(cluster_size_2, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); init_per_group(cluster_size_3, Config) -> - rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]). + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]); +init_per_group(cluster_size_5, Config) -> + rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]). end_per_group(_, Config) -> Config. @@ -343,3 +383,121 @@ declare(Ch, Q) -> durable = true, auto_delete = false, arguments = []}). + +join_khepri_while_in_minority(Config) -> + [Node1 | ClusteredNodes] = rabbit_ct_broker_helpers:get_node_configs( + Config, nodename), + [NodeToJoin | OtherNodes] = ClusteredNodes, + + %% Cluster nodes 2 to 5. + ct:pal("Cluster nodes ~p", [ClusteredNodes]), + lists:foreach( + fun(Node) -> + ?assertEqual( + ok, + rabbit_control_helper:command( + join_cluster, Node, [atom_to_list(NodeToJoin)], [])) + end, OtherNodes), + lists:foreach( + fun(Node) -> + ?awaitMatch( + ClusteredNodes, + lists:sort( + rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_nodes, list_members, [])), + 30000) + end, ClusteredNodes), + + %% Enable Khepri on all nodes. Only `NodeToJoin' is given here because it is + %% clustered with `OtherNodes'. 
+ ct:pal("Enable `khepri_db` on nodes ~0p and ~0p", [Node1, NodeToJoin]), + Ret1 = rabbit_ct_broker_helpers:enable_feature_flag( + Config, [Node1, NodeToJoin], khepri_db), + case Ret1 of + ok -> + StoreId = rabbit_khepri:get_store_id(), + LeaderId = rabbit_ct_broker_helpers:rpc( + Config, NodeToJoin, + ra_leaderboard, lookup_leader, [StoreId]), + {StoreId, LeaderNode} = LeaderId, + + %% Stop all clustered nodes except one follower to create a + %% minority. In other words, we stop two followers, then the + %% leader. + %% + %% Using `lists:reverse/1', we keep the last running follower only + %% to see how clustering works if the first nodes in the cluster + %% are down. + Followers = ClusteredNodes -- [LeaderNode], + [FollowerToKeep | FollowersToStop] = lists:reverse(Followers), + + lists:foreach( + fun(Node) -> + ct:pal("Stop node ~0p", [Node]), + ok = rabbit_ct_broker_helpers:stop_node(Config, Node) + end, FollowersToStop ++ [LeaderNode]), + + %% Try and fail to cluster `Node1' with the others. + ct:pal("Try to cluster node ~0p with ~0p", [Node1, FollowerToKeep]), + Ret2 = rabbit_control_helper:command( + join_cluster, Node1, [atom_to_list(FollowerToKeep)], []), + ?assertMatch({error, 75, _}, Ret2), + {error, _, Msg} = Ret2, + ?assertEqual( + match, + re:run( + Msg, "Khepri cluster could be in minority", + [{capture, none}])), + + %% `Node1' should still be up and running correctly. 
+ ct:pal("Open a connection + channel to node ~0p", [Node1]), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel( + Config, Node1), + + QName = atom_to_binary(?FUNCTION_NAME), + QArgs = [{<<"x-queue-type">>, longstr, <<"quorum">>}], + ct:pal("Declare queue ~0p", [QName]), + amqp_channel:call( + Ch, #'queue.declare'{durable = true, + queue = QName, + arguments = QArgs}), + + ct:pal("Enable publish confirms"), + amqp_channel:call(Ch, #'confirm.select'{}), + + ct:pal("Publish a message to queue ~0p", [QName]), + amqp_channel:cast( + Ch, + #'basic.publish'{routing_key = QName}, + #amqp_msg{props = #'P_basic'{delivery_mode = 2}}), + amqp_channel:wait_for_confirms(Ch), + + ct:pal("Subscribe to queue ~0p", [QName]), + CTag = <<"ctag">>, + amqp_channel:subscribe( + Ch, + #'basic.consume'{queue = QName, + consumer_tag = CTag}, + self()), + receive + #'basic.consume_ok'{consumer_tag = CTag} -> + ok + after 10000 -> + exit(consume_ok_timeout) + end, + + ct:pal("Consume a message from queue ~0p", [QName]), + receive + {#'basic.deliver'{consumer_tag = <<"ctag">>}, _} -> + ok + after 10000 -> + exit(deliver_timeout) + end, + + ct:pal("Close channel + connection"), + rabbit_ct_client_helpers:close_connection_and_channel(Conn, Ch), + + ok; + {skip, _} = Skip -> + Skip + end. diff --git a/deps/rabbit/test/metadata_store_phase1_SUITE.erl b/deps/rabbit/test/metadata_store_phase1_SUITE.erl index af5b8aca6ebe..cf080d170ce1 100644 --- a/deps/rabbit/test/metadata_store_phase1_SUITE.erl +++ b/deps/rabbit/test/metadata_store_phase1_SUITE.erl @@ -192,6 +192,7 @@ setup_khepri(Config) -> %% Configure Khepri. It takes care of configuring Ra system & cluster. It %% uses the Mnesia directory to store files. ok = rabbit_khepri:setup(undefined), + ok = rabbit_khepri:register_projections(), ct:pal("Khepri info below:"), rabbit_khepri:info(), @@ -293,15 +294,6 @@ init_feature_flags(Config) -> %% This simply avoids compiler warnings. -define(with(T), fun(_With) -> T end). 
--define(vhost_path(V), - [rabbit_db_vhost, V]). --define(user_path(U), - [rabbit_db_user, users, U]). --define(user_perm_path(U, V), - [rabbit_db_user, users, U, user_permissions, V]). --define(topic_perm_path(U, V, E), - [rabbit_db_user, users, U, topic_permissions, V, E]). - %% %% Virtual hosts. %% @@ -330,8 +322,8 @@ write_non_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [VHost]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -371,8 +363,8 @@ write_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [VHost]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -404,8 +396,8 @@ check_vhost_exists(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [VHost]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -447,9 +439,9 @@ list_vhost_names(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [VHostA, VHostB]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostNameA) => VHostA, - ?vhost_path(VHostNameB) => VHostB}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostNameA) => VHostA, + rabbit_db_vhost:khepri_vhost_path(VHostNameB) => VHostB}}])) ], ?assertEqual( @@ -491,9 +483,9 @@ list_vhost_objects(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [VHostA, VHostB]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostNameA) => VHostA, - ?vhost_path(VHostNameB) => VHostB}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostNameA) => VHostA, + rabbit_db_vhost:khepri_vhost_path(VHostNameB) => VHostB}}])) ], ?assertEqual( @@ -530,8 +522,7 @@ update_non_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, 
rabbit_vhost, []}, - {khepri, [rabbit_db_vhost], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -571,8 +562,8 @@ update_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [UpdatedVHost]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => UpdatedVHost}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => UpdatedVHost}}])) ], ?assertEqual( @@ -601,8 +592,7 @@ update_non_existing_vhost_desc_and_tags(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, []}, - {khepri, [rabbit_db_vhost], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -647,8 +637,8 @@ update_existing_vhost_desc_and_tags(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, [UpdatedVHost]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => UpdatedVHost}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => UpdatedVHost}}])) ], ?assertEqual( @@ -675,8 +665,7 @@ delete_non_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, []}, - {khepri, [rabbit_db_vhost], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -713,8 +702,7 @@ delete_existing_vhost(_) -> ?with(check_storage( _With, [{mnesia, rabbit_vhost, []}, - {khepri, [rabbit_db_vhost], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -746,8 +734,8 @@ write_non_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, [User]}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -781,8 +769,8 @@ write_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, [User]}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -812,9 +800,9 @@ list_users(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, [UserA, UserB]}, - {khepri, [rabbit_db_user], - #{?user_path(UsernameA) => 
UserA, - ?user_path(UsernameB) => UserB}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(UsernameA) => UserA, + rabbit_db_user:khepri_user_path(UsernameB) => UserB}}])) ], ?assertEqual( @@ -846,8 +834,7 @@ update_non_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, []}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -882,8 +869,8 @@ update_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, [UpdatedUser]}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => UpdatedUser}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => UpdatedUser}}])) ], ?assertEqual( @@ -910,8 +897,7 @@ delete_non_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, []}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -942,8 +928,7 @@ delete_existing_user(_) -> ?with(check_storage( _With, [{mnesia, rabbit_user, []}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, #{}}])) ], ?assertEqual( @@ -987,10 +972,8 @@ write_user_permission_for_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1036,10 +1019,8 @@ write_user_permission_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1091,11 +1072,10 @@ write_user_permission_for_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, [UserPermission]}, - {khepri, 
[rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User, - ?user_perm_path(Username, VHostName) => UserPermission}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost, + rabbit_db_user:khepri_user_path(Username) => User, + rabbit_db_user:khepri_user_permission_path(Username, VHostName) => UserPermission}}])) ], ?assertEqual( @@ -1175,9 +1155,8 @@ list_user_permissions_on_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1217,8 +1196,8 @@ list_user_permissions_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1320,17 +1299,16 @@ list_user_permissions(_) -> {mnesia, rabbit_user_permission, [UserPermissionA1, UserPermissionA2, UserPermissionB1]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostNameA) => VHostA, - ?vhost_path(VHostNameB) => VHostB}}, - {khepri, [rabbit_db_user], - #{?user_path(UsernameA) => UserA, - ?user_path(UsernameB) => UserB, - ?user_perm_path(UsernameA, VHostNameA) => + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostNameA) => VHostA, + rabbit_db_vhost:khepri_vhost_path(VHostNameB) => VHostB, + rabbit_db_user:khepri_user_path(UsernameA) => UserA, + rabbit_db_user:khepri_user_path(UsernameB) => UserB, + rabbit_db_user:khepri_user_permission_path(UsernameA, VHostNameA) => UserPermissionA1, - ?user_perm_path(UsernameA, VHostNameB) => + rabbit_db_user:khepri_user_permission_path(UsernameA, VHostNameB) => 
UserPermissionA2, - ?user_perm_path(UsernameB, VHostNameA) => + rabbit_db_user:khepri_user_permission_path(UsernameB, VHostNameA) => UserPermissionB1}}])) ], @@ -1363,10 +1341,8 @@ clear_user_permission_for_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1404,10 +1380,8 @@ clear_user_permission_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1462,10 +1436,9 @@ clear_user_permission(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost, + rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1524,10 +1497,8 @@ delete_user_and_check_resource_access(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1581,10 +1552,8 @@ delete_vhost_and_check_resource_access(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, [UserPermission]}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => 
User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], %% In mnesia the permissions have to be deleted explicitly @@ -1657,10 +1626,8 @@ write_topic_permission_for_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1718,10 +1685,8 @@ write_topic_permission_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1786,11 +1751,10 @@ write_topic_permission_for_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, [TopicPermission]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User, - ?topic_perm_path(Username, VHostName, Exchange) => + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost, + rabbit_db_user:khepri_user_path(Username) => User, + rabbit_db_user:khepri_topic_permission_path(Username, VHostName, Exchange) => TopicPermission}}])) ], @@ -1823,10 +1787,8 @@ list_topic_permissions_on_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -1866,8 +1828,8 @@ list_topic_permissions_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, 
rabbit_user, []}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -1980,17 +1942,16 @@ list_topic_permissions(_) -> {mnesia, rabbit_topic_permission, [TopicPermissionA1, TopicPermissionA2, TopicPermissionB1]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostNameA) => VHostA, - ?vhost_path(VHostNameB) => VHostB}}, - {khepri, [rabbit_db_user], - #{?user_path(UsernameA) => UserA, - ?user_path(UsernameB) => UserB, - ?topic_perm_path(UsernameA, VHostNameA, ExchangeA) => + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostNameA) => VHostA, + rabbit_db_vhost:khepri_vhost_path(VHostNameB) => VHostB, + rabbit_db_user:khepri_user_path(UsernameA) => UserA, + rabbit_db_user:khepri_user_path(UsernameB) => UserB, + rabbit_db_user:khepri_topic_permission_path(UsernameA, VHostNameA, ExchangeA) => TopicPermissionA1, - ?topic_perm_path(UsernameA, VHostNameB, ExchangeB) => + rabbit_db_user:khepri_topic_permission_path(UsernameA, VHostNameB, ExchangeB) => TopicPermissionA2, - ?topic_perm_path(UsernameB, VHostNameA, ExchangeA) => + rabbit_db_user:khepri_topic_permission_path(UsernameB, VHostNameA, ExchangeA) => TopicPermissionB1}}])) ], @@ -2031,10 +1992,8 @@ clear_specific_topic_permission_for_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -2080,10 +2039,8 @@ clear_specific_topic_permission_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], 
- #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -2186,11 +2143,10 @@ clear_specific_topic_permission(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, [TopicPermissionB]}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User, - ?topic_perm_path(Username, VHostName, ExchangeB) => + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost, + rabbit_db_user:khepri_user_path(Username) => User, + rabbit_db_user:khepri_topic_permission_path(Username, VHostName, ExchangeB) => TopicPermissionB}}])) ], @@ -2231,10 +2187,8 @@ clear_all_topic_permission_for_non_existing_vhost(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -2280,10 +2234,8 @@ clear_all_topic_permission_for_non_existing_user(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_user_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -2388,10 +2340,9 @@ clear_all_topic_permissions(_) -> [{mnesia, rabbit_vhost, [VHost]}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost, + rabbit_db_user:khepri_user_path(Username) => User}}])) ], ?assertEqual( @@ -2464,10 +2415,8 @@ delete_user_and_check_topic_access(_) -> [{mnesia, 
rabbit_vhost, [VHost]}, {mnesia, rabbit_user, []}, {mnesia, rabbit_topic_permission, []}, - {khepri, [rabbit_db_vhost], - #{?vhost_path(VHostName) => VHost}}, - {khepri, [rabbit_db_user], - #{}}])) + {khepri, none, + #{rabbit_db_vhost:khepri_vhost_path(VHostName) => VHost}}])) ], ?assertEqual( @@ -2530,10 +2479,8 @@ delete_vhost_and_check_topic_access(_) -> [{mnesia, rabbit_vhost, []}, {mnesia, rabbit_user, [User]}, {mnesia, rabbit_topic_permission, [TopicPermission]}, - {khepri, [rabbit_db_vhost], - #{}}, - {khepri, [rabbit_db_user], - #{?user_path(Username) => User}}])) + {khepri, none, + #{rabbit_db_user:khepri_user_path(Username) => User}}])) ], %% In mnesia the permissions have to be deleted explicitly @@ -2768,8 +2715,8 @@ check_storage(_, []) -> check_storage(mnesia, Table, Content) -> ?assertEqual(Content, lists:sort(ets:tab2list(Table))); -check_storage(khepri, Path, Content) -> +check_storage(khepri, none, Content) -> rabbit_khepri:info(), - Path1 = Path ++ [#if_all{conditions = [?KHEPRI_WILDCARD_STAR_STAR, - #if_has_data{has_data = true}]}], - ?assertEqual({ok, Content}, rabbit_khepri:match(Path1)). + Path = [#if_all{conditions = [?KHEPRI_WILDCARD_STAR_STAR, + #if_has_data{}]}], + ?assertEqual({ok, Content}, rabbit_khepri:match(Path)). diff --git a/deps/rabbit/test/msg_size_metrics_SUITE.erl b/deps/rabbit/test/msg_size_metrics_SUITE.erl new file mode 100644 index 000000000000..0b33ecf1a36b --- /dev/null +++ b/deps/rabbit/test/msg_size_metrics_SUITE.erl @@ -0,0 +1,154 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(msg_size_metrics_SUITE). + +-compile([export_all, nowarn_export_all]). +-include_lib("common_test/include/ct.hrl"). 
+-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). + +-import(rabbit_ct_broker_helpers, + [rpc/4]). + +all() -> + [ + {group, tests} + ]. + +groups() -> + [ + {tests, [shuffle], + [message_size, + over_max_message_size]} + ]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config). + +init_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_Group, Config) -> + rabbit_ct_helpers:run_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). 
+ +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- + +message_size(Config) -> + AmqplBefore = get_msg_size_metrics(amqp091, Config), + AmqpBefore = get_msg_size_metrics(amqp10, Config), + + Binary2B = <<"12">>, + Binary200K = binary:copy(<<"x">>, 200_000), + Payloads = [Binary2B, Binary200K, Binary2B], + + {AmqplConn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config), + [amqp_channel:call(Ch, + #'basic.publish'{routing_key = <<"nowhere">>}, + #amqp_msg{payload = Payload}) + || Payload <- Payloads], + + OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = rabbitmq_amqp_address:exchange(<<"amq.fanout">>), + {ok, Sender} = amqp10_client:attach_sender_link_sync(Session, <<"sender">>, Address), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 5000 -> ct:fail(credited_timeout) + end, + + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag1">>, Binary2B)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag2">>, Binary200K)), + ok = amqp10_client:send_msg(Sender, amqp10_msg:new(<<"tag3">>, Binary2B)), + + ok = wait_for_settlement(released, <<"tag1">>), + ok = wait_for_settlement(released, <<"tag2">>), + ok = wait_for_settlement(released, <<"tag3">>), + + AmqplAfter = get_msg_size_metrics(amqp091, Config), + AmqpAfter = get_msg_size_metrics(amqp10, Config), + + ExpectedDiff = [{100, 2}, + {1_000_000, 1}], + ?assertEqual(ExpectedDiff, + rabbit_msg_size_metrics:diff_raw_buckets(AmqplAfter, AmqplBefore)), + ?assertEqual(ExpectedDiff, + rabbit_msg_size_metrics:diff_raw_buckets(AmqpAfter, AmqpBefore)), + + ok = amqp10_client:close_connection(Connection), + ok = rabbit_ct_client_helpers:close_connection_and_channel(AmqplConn, Ch). 
+ +over_max_message_size(Config) -> + DefaultMaxMessageSize = rpc(Config, persistent_term, get, [max_message_size]), + %% Limit the server to only accept messages up to 2KB. + MaxMessageSize = 2_000, + ok = rpc(Config, persistent_term, put, [max_message_size, MaxMessageSize]), + + Before = get_msg_size_metrics(amqp091, Config), + {Conn, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + MonitorRef = erlang:monitor(process, Ch), + MessageTooLarge = binary:copy(<<"x">>, MaxMessageSize + 1), + amqp_channel:call(Ch, + #'basic.publish'{routing_key = <<"none">>}, + #amqp_msg{payload = MessageTooLarge}), + receive {'DOWN', MonitorRef, process, Ch, Info} -> + ?assertEqual({shutdown, + {server_initiated_close, + 406, + <<"PRECONDITION_FAILED - message size 2001 is larger than configured max size 2000">>}}, + Info) + after 2000 -> ct:fail(expected_channel_closed) + end, + + After = get_msg_size_metrics(amqp091, Config), + %% No metrics should be increased if client sent message that is too large. + ?assertEqual(Before, After), + + ok = rabbit_ct_client_helpers:close_connection(Conn), + ok = rpc(Config, persistent_term, put, [max_message_size, DefaultMaxMessageSize]). + +get_msg_size_metrics(Protocol, Config) -> + rpc(Config, rabbit_msg_size_metrics, raw_buckets, [Protocol]). + +connection_config(Config) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => anon}. + +wait_for_settlement(State, Tag) -> + receive + {amqp10_disposition, {State, Tag}} -> + ok + after 5000 -> + ct:fail({disposition_timeout, Tag}) + end. 
diff --git a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl index 1e18f808ceef..8862ddd3dd7a 100644 --- a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl +++ b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl @@ -105,9 +105,8 @@ init_per_multinode_group(_Group, Config, NodeCount) -> rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). -end_per_group(Group, Config) when Group == tests; - Group == khepri_migration -> - % The broker is managed by {init,end}_per_testcase(). +end_per_group(Group, Config) when Group == tests -> + % The broker is managed by sub-groups. Config; end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, diff --git a/deps/rabbit/test/policy_SUITE.erl b/deps/rabbit/test/policy_SUITE.erl index c95175b377a1..68ab85912d7f 100644 --- a/deps/rabbit/test/policy_SUITE.erl +++ b/deps/rabbit/test/policy_SUITE.erl @@ -268,7 +268,7 @@ overflow_policies(Config) -> passed. -%% See supported policies in https://www.rabbitmq.com/parameters.html#operator-policies +%% See supported policies in https://www.rabbitmq.com/docs/parameters#operator-policies %% This test applies all supported operator policies to all queue types, %% and later verifies the effective policy definitions. %% Just those supported by each queue type should be present. diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl index 2b4c4735bcd6..5ee1c3232639 100644 --- a/deps/rabbit/test/queue_parallel_SUITE.erl +++ b/deps/rabbit/test/queue_parallel_SUITE.erl @@ -646,7 +646,11 @@ delete_immediately_by_resource(Config) -> ok. cc_header_non_array_should_close_channel(Config) -> - {C, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + %% We use an unmanaged connection to avoid issues with + %% tests running in parallel: in this test we expect the + %% channel to close, but that channel is reused in other tests. 
+ C = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), + {ok, Ch} = amqp_connection:open_channel(C), Name0 = ?FUNCTION_NAME, Name = atom_to_binary(Name0), QName = <<"queue_cc_header_non_array", Name/binary>>, diff --git a/deps/rabbit/test/queue_type_SUITE.erl b/deps/rabbit/test/queue_type_SUITE.erl index d89859e4703b..28352212dfb1 100644 --- a/deps/rabbit/test/queue_type_SUITE.erl +++ b/deps/rabbit/test/queue_type_SUITE.erl @@ -56,8 +56,7 @@ init_per_group(Group, Config) -> {tcp_ports_base, {skip_n_nodes, ClusterSize}} ]), Config1b = rabbit_ct_helpers:set_config(Config1, - [{queue_type, atom_to_binary(Group, utf8)}, - {net_ticktime, 5} + [{queue_type, atom_to_binary(Group, utf8)} ]), Config2 = rabbit_ct_helpers:run_steps(Config1b, [fun merge_app_env/1 ] ++ diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 15b75fac4a69..0643842bf511 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -90,7 +90,9 @@ groups() -> leader_locator_policy, status, format, - add_member_2 + add_member_2, + single_active_consumer_priority_take_over, + single_active_consumer_priority ] ++ all_tests()}, {cluster_size_5, [], [start_queue, @@ -142,13 +144,16 @@ all_tests() -> server_system_recover, vhost_with_quorum_queue_is_deleted, vhost_with_default_queue_type_declares_quorum_queue, + node_wide_default_queue_type_declares_quorum_queue, delete_immediately_by_resource, consume_redelivery_count, subscribe_redelivery_count, message_bytes_metrics, queue_length_limit_drop_head, queue_length_limit_reject_publish, + queue_length_limit_policy_cleared, subscribe_redelivery_limit, + subscribe_redelivery_limit_disable, subscribe_redelivery_limit_many, subscribe_redelivery_policy, subscribe_redelivery_limit_with_dead_letter, @@ -171,7 +176,11 @@ all_tests() -> cancel_consumer_gh_3729, cancel_and_consume_with_same_tag, validate_messages_on_queue, - amqpl_headers + amqpl_headers, + 
priority_queue_fifo, + priority_queue_2_1_ratio, + requeue_multiple_true, + requeue_multiple_false ]. memory_tests() -> @@ -236,6 +245,9 @@ init_per_group(Group, Config) -> {skip, _} -> Ret; Config2 -> + Res = rabbit_ct_broker_helpers:enable_feature_flag( + Config2, 'rabbitmq_4.0.0'), + ct:pal("rabbitmq_4.0.0 enable result ~p", [Res]), ok = rabbit_ct_broker_helpers:rpc( Config2, 0, application, set_env, [rabbit, channel_tick_interval, 100]), @@ -595,7 +607,7 @@ start_queue_concurrent(Config) -> quorum_cluster_size_3(Config) -> case rabbit_ct_helpers:is_mixed_versions() of true -> - {skip, "quorum_cluster_size_3 tests isn't mixed version reliable"}; + {skip, "quorum_cluster_size_3 test isn't mixed version reliable"}; false -> quorum_cluster_size_x(Config, 3, 3) end. @@ -820,6 +832,40 @@ vhost_with_default_queue_type_declares_quorum_queue(Config) -> amqp_connection:close(Conn), ok. +node_wide_default_queue_type_declares_quorum_queue(Config) -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + {skip, "node_wide_default_queue_type_declares_quorum_queue test isn't mixed version compatible"}; + false -> + node_wide_default_queue_type_declares_quorum_queue0(Config) + end. 
+ +node_wide_default_queue_type_declares_quorum_queue0(Config) -> + Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + rpc:call(Node, application, set_env, [rabbit, default_queue_type, rabbit_quorum_queue]), + VHost = atom_to_binary(?FUNCTION_NAME, utf8), + QName = atom_to_binary(?FUNCTION_NAME, utf8), + User = ?config(rmq_username, Config), + + AddVhostArgs = [VHost, #{}, User], + ok = rabbit_ct_broker_helpers:rpc(Config, Node, rabbit_vhost, add, + AddVhostArgs), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost), + Conn = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Node, VHost), + {ok, Ch} = amqp_connection:open_channel(Conn), + ?assertEqual({'queue.declare_ok', QName, 0, 0}, declare(Ch, QName, [])), + assert_queue_type(Node, VHost, QName, rabbit_quorum_queue), + ?assertEqual({'queue.declare_ok', QName, 0, 0}, declare(Ch, QName, [])), + ?assertEqual({'queue.declare_ok', QName, 0, 0}, + declare(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?assertEqual({'queue.declare_ok', QName, 0, 0}, declare_passive(Ch, QName, [])), + ?assertEqual({'queue.declare_ok', QName, 0, 0}, + declare_passive(Ch, QName, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + amqp_connection:close(Conn), + + rpc:call(Node, application, set_env, [rabbit, default_queue_type, rabbit_classic_queue]), + ok. + restart_all_types(Config) -> %% Test the node restart with both types of queues (quorum and classic) to %% ensure there are no regressions @@ -943,6 +989,7 @@ publish_confirm(Ch, QName, Timeout) -> ct:pal("NOT CONFIRMED! ~ts", [QName]), fail after Timeout -> + flush(1), exit(confirm_timeout) end. @@ -990,6 +1037,185 @@ consume_in_minority(Config) -> rabbit_quorum_queue:restart_server({RaName, Server2}), ok. 
+single_active_consumer_priority_take_over(Config) -> + check_quorum_queues_v4_compat(Config), + + [Server0, Server1, _Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server0), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server1), + QName = ?config(queue_name, Config), + Q1 = <<QName/binary, "_1">>, + RaNameQ1 = binary_to_atom(<<"%2F", "_", Q1/binary>>, utf8), + QueryFun = fun rabbit_fifo:query_single_active_consumer/1, + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true}], + ?assertEqual({'queue.declare_ok', Q1, 0, 0}, declare(Ch1, Q1, Args)), + ok = subscribe(Ch1, Q1, false, <<"ch1-ctag1">>, [{"x-priority", byte, 1}]), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + #'confirm.select_ok'{} = amqp_channel:call(Ch2, #'confirm.select'{}), + publish_confirm(Ch2, Q1), + %% higher priority consumer attaches + ok = subscribe(Ch2, Q1, false, <<"ch2-ctag1">>, [{"x-priority", byte, 3}]), + + %% Q1 should still have Ch1 as consumer as it has pending messages + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, + [RaNameQ1, QueryFun])), + + %% ack the message + receive + {#'basic.deliver'{consumer_tag = <<"ch1-ctag1">>, + delivery_tag = DeliveryTag}, _} -> + amqp_channel:cast(Ch1, #'basic.ack'{delivery_tag = DeliveryTag, + multiple = false}) + after 5000 -> + flush(1), + exit(basic_deliver_timeout) + end, + + ?awaitMatch({ok, {_, {value, {<<"ch2-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun]), + ?DEFAULT_AWAIT), + ok. 
+ +single_active_consumer_priority(Config) -> + check_quorum_queues_v4_compat(Config), + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server0), + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server1), + Ch3 = rabbit_ct_client_helpers:open_channel(Config, Server2), + QName = ?config(queue_name, Config), + Q1 = <<QName/binary, "_1">>, + Q2 = <<QName/binary, "_2">>, + Q3 = <<QName/binary, "_3">>, + Args = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true}], + ?assertEqual({'queue.declare_ok', Q1, 0, 0}, declare(Ch1, Q1, Args)), + ?assertEqual({'queue.declare_ok', Q2, 0, 0}, declare(Ch2, Q2, Args)), + ?assertEqual({'queue.declare_ok', Q3, 0, 0}, declare(Ch3, Q3, Args)), + + ok = subscribe(Ch1, Q1, false, <<"ch1-ctag1">>, [{"x-priority", byte, 3}]), + ok = subscribe(Ch1, Q2, false, <<"ch1-ctag2">>, [{"x-priority", byte, 2}]), + ok = subscribe(Ch1, Q3, false, <<"ch1-ctag3">>, [{"x-priority", byte, 1}]), + + + ok = subscribe(Ch2, Q1, false, <<"ch2-ctag1">>, [{"x-priority", byte, 1}]), + ok = subscribe(Ch2, Q2, false, <<"ch2-ctag2">>, [{"x-priority", byte, 3}]), + ok = subscribe(Ch2, Q3, false, <<"ch2-ctag3">>, [{"x-priority", byte, 2}]), + + ok = subscribe(Ch3, Q1, false, <<"ch3-ctag1">>, [{"x-priority", byte, 2}]), + ok = subscribe(Ch3, Q2, false, <<"ch3-ctag2">>, [{"x-priority", byte, 1}]), + ok = subscribe(Ch3, Q3, false, <<"ch3-ctag3">>, [{"x-priority", byte, 3}]), + + + RaNameQ1 = binary_to_atom(<<"%2F", "_", Q1/binary>>, utf8), + RaNameQ2 = binary_to_atom(<<"%2F", "_", Q2/binary>>, utf8), + RaNameQ3 = binary_to_atom(<<"%2F", "_", Q3/binary>>, utf8), + %% assert each queue has a different consumer + QueryFun = fun rabbit_fifo:query_single_active_consumer/1, + + %% Q1 should have the consumer on Ch1 + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + + %% Q2 Ch2 + ?assertMatch({ok, {_, {value, 
{<<"ch2-ctag2">>, _}}}, _}, + rpc:call(Server1, ra, local_query, [RaNameQ2, QueryFun])), + + %% Q3 Ch3 + ?assertMatch({ok, {_, {value, {<<"ch3-ctag3">>, _}}}, _}, + rpc:call(Server2, ra, local_query, [RaNameQ3, QueryFun])), + + %% close Ch3 + _ = rabbit_ct_client_helpers:close_channel(Ch3), + flush(100), + + %% assert Q3 has Ch2 (priority 2) as consumer + ?assertMatch({ok, {_, {value, {<<"ch2-ctag3">>, _}}}, _}, + rpc:call(Server2, ra, local_query, [RaNameQ3, QueryFun])), + + %% close Ch2 + _ = rabbit_ct_client_helpers:close_channel(Ch2), + flush(100), + + %% assert all queues as has Ch1 as consumer + ?assertMatch({ok, {_, {value, {<<"ch1-ctag1">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ1, QueryFun])), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag2">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ2, QueryFun])), + ?assertMatch({ok, {_, {value, {<<"ch1-ctag3">>, _}}}, _}, + rpc:call(Server0, ra, local_query, [RaNameQ3, QueryFun])), + ok. + +priority_queue_fifo(Config) -> + %% testing: if hi priority messages are published before lo priority + %% messages they are always consumed first (fifo) + check_quorum_queues_v4_compat(Config), + [Server0 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Queue = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Queue, 0, 0}, + declare(Ch, Queue, + [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ExpectedHi = + [begin + MsgP5 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP5}), + MsgP5 + %% high priority is > 4 + end || P <- lists:seq(5, 10)], + + ExpectedLo = + [begin + MsgP1 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP1}), + MsgP1 + end || P <- lists:seq(0, 4)], + + validate_queue(Ch, Queue, 
ExpectedHi ++ ExpectedLo), + ok. + +priority_queue_2_1_ratio(Config) -> + %% testing: if lo priority messages are published before hi priority + %% messages are consumed in a 2:1 hi to lo ratio + check_quorum_queues_v4_compat(Config), + [Server0 | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + Queue = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', Queue, 0, 0}, + declare(Ch, Queue, + [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ExpectedLo = + [begin + MsgP1 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP1}), + MsgP1 + end || P <- lists:seq(0, 4)], + ExpectedHi = + [begin + MsgP5 = integer_to_binary(P), + ok = amqp_channel:cast(Ch, #'basic.publish'{routing_key = Queue}, + #amqp_msg{props = #'P_basic'{priority = P}, + payload = MsgP5}), + MsgP5 + %% high priority is > 4 + end || P <- lists:seq(5, 14)], + + Expected = lists_interleave(ExpectedLo, ExpectedHi), + + validate_queue(Ch, Queue, Expected), + ok. + reject_after_leader_transfer(Config) -> [Server0, Server1, Server2] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1047,7 +1273,7 @@ shrink_all(Config) -> rebalance(Config) -> case rabbit_ct_helpers:is_mixed_versions() of true -> - {skip, "rebalance tests isn't mixed version compatible"}; + {skip, "rebalance test isn't mixed version compatible"}; false -> rebalance0(Config) end. 
@@ -1157,7 +1383,7 @@ test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]), - DeliveryTag = consume(Ch, Source, false), + DeliveryTag = basic_get_tag(Ch, Source, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]), @@ -1169,7 +1395,7 @@ test_dead_lettering(PolicySet, Config, Ch, Servers, RaName, Source, Destination) case PolicySet of true -> wait_for_messages(Config, [[Destination, <<"1">>, <<"1">>, <<"0">>]]), - _ = consume(Ch, Destination, true); + _ = basic_get_tag(Ch, Destination, true); false -> wait_for_messages(Config, [[Destination, <<"0">>, <<"0">>, <<"0">>]]) end. @@ -1243,7 +1469,7 @@ dead_letter_to_quorum_queue(Config) -> wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages_ready(Servers, RaName2, 0), wait_for_messages_pending_ack(Servers, RaName2, 0), - DeliveryTag = consume(Ch, QQ, false), + DeliveryTag = basic_get_tag(Ch, QQ, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 1), wait_for_messages_ready(Servers, RaName2, 0), @@ -1255,7 +1481,12 @@ dead_letter_to_quorum_queue(Config) -> wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages_ready(Servers, RaName2, 1), wait_for_messages_pending_ack(Servers, RaName2, 0), - _ = consume(Ch, QQ2, false). + + {#'basic.get_ok'{delivery_tag = _Tag}, + #amqp_msg{} = Msg} = basic_get(Ch, QQ2, false, 1), + ct:pal("Msg ~p", [Msg]), + flush(1000), + ok. 
dead_letter_from_classic_to_quorum_queue(Config) -> [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -1274,7 +1505,7 @@ dead_letter_from_classic_to_quorum_queue(Config) -> wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"1">>, <<"1">>, <<"0">>]]), - DeliveryTag = consume(Ch, CQ, false), + DeliveryTag = basic_get_tag(Ch, CQ, false), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"1">>, <<"0">>, <<"1">>]]), @@ -1284,7 +1515,7 @@ dead_letter_from_classic_to_quorum_queue(Config) -> wait_for_messages_ready(Servers, RaName, 1), wait_for_messages_pending_ack(Servers, RaName, 0), wait_for_messages(Config, [[CQ, <<"0">>, <<"0">>, <<"0">>]]), - _ = consume(Ch, QQ, false), + _ = basic_get_tag(Ch, QQ, false), rabbit_ct_client_helpers:close_channel(Ch). cleanup_queue_state_on_channel_after_publish(Config) -> @@ -1510,7 +1741,7 @@ leadership_takeover(Config) -> metrics_cleanup_on_leadership_takeover(Config) -> case rabbit_ct_helpers:is_mixed_versions() of true -> - {skip, "metrics_cleanup_on_leadership_takeover tests isn't mixed version compatible"}; + {skip, "metrics_cleanup_on_leadership_takeover test isn't mixed version compatible"}; false -> metrics_cleanup_on_leadership_takeover0(Config) end. @@ -1683,8 +1914,8 @@ channel_handles_ra_event(Config) -> publish(Ch1, Q2), wait_for_messages(Config, [[Q1, <<"1">>, <<"1">>, <<"0">>]]), wait_for_messages(Config, [[Q2, <<"1">>, <<"1">>, <<"0">>]]), - ?assertEqual(1, consume(Ch1, Q1, false)), - ?assertEqual(2, consume(Ch1, Q2, false)). + ?assertEqual(1, basic_get_tag(Ch1, Q1, false)), + ?assertEqual(2, basic_get_tag(Ch1, Q2, false)). 
declare_during_node_down(Config) -> [Server, DownServer, _] = Servers = rabbit_ct_broker_helpers:get_node_configs( @@ -2266,8 +2497,8 @@ subscribe_redelivery_count(Config) -> #amqp_msg{props = #'P_basic'{headers = H0}}} -> ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) after 5000 -> exit(basic_deliver_timeout) end, @@ -2279,8 +2510,8 @@ subscribe_redelivery_count(Config) -> ct:pal("H1 ~p", [H1]), ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) after 5000 -> flush(1), exit(basic_deliver_timeout_2) @@ -2292,7 +2523,7 @@ subscribe_redelivery_count(Config) -> #amqp_msg{props = #'P_basic'{headers = H2}}} -> ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)), amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DeliveryTag2, - multiple = false}), + multiple = false}), ct:pal("wait_for_messages_ready", []), wait_for_messages_ready(Servers, RaName, 0), ct:pal("wait_for_messages_pending_ack", []), @@ -2322,8 +2553,8 @@ subscribe_redelivery_limit(Config) -> #amqp_msg{props = #'P_basic'{headers = H0}}} -> ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), @@ -2333,8 +2564,8 @@ subscribe_redelivery_limit(Config) -> #amqp_msg{props = #'P_basic'{headers = H1}}} -> ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), @@ 
-2345,6 +2576,51 @@ subscribe_redelivery_limit(Config) -> ok end. +subscribe_redelivery_limit_disable(Config) -> + [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, -1}])), + publish(Ch, QQ), + wait_for_messages(Config, [[QQ, <<"1">>, <<"1">>, <<"0">>]]), + subscribe(Ch, QQ, false), + + DCHeader = <<"x-delivery-count">>, + receive + {#'basic.deliver'{delivery_tag = DeliveryTag, + redelivered = false}, + #amqp_msg{props = #'P_basic'{headers = H0}}} -> + ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, + multiple = false, + requeue = true}) + end, + + wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), + %% set an operator policy, this should always win + ok = rabbit_ct_broker_helpers:set_operator_policy( + Config, 0, <<"delivery-limit">>, QQ, <<"queues">>, + [{<<"delivery-limit">>, 0}]), + + receive + {#'basic.deliver'{delivery_tag = DeliveryTag2, + redelivered = true}, + #amqp_msg{props = #'P_basic'{}}} -> + % ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), + amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag2, + multiple = false, + requeue = true}) + after 5000 -> + flush(1), + ct:fail("message did not arrive as expected") + end, + wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), + ok = rabbit_ct_broker_helpers:clear_operator_policy(Config, 0, <<"delivery-limit">>), + ok. + %% Test that consumer credit is increased correctly. 
subscribe_redelivery_limit_many(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -2408,8 +2684,8 @@ subscribe_redelivery_policy(Config) -> #amqp_msg{props = #'P_basic'{headers = H0}}} -> ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), @@ -2419,8 +2695,8 @@ subscribe_redelivery_policy(Config) -> #amqp_msg{props = #'P_basic'{headers = H1}}} -> ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), @@ -2458,8 +2734,8 @@ subscribe_redelivery_limit_with_dead_letter(Config) -> #amqp_msg{props = #'P_basic'{headers = H0}}} -> ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"1">>, <<"0">>, <<"1">>]]), @@ -2469,8 +2745,8 @@ subscribe_redelivery_limit_with_dead_letter(Config) -> #amqp_msg{props = #'P_basic'{headers = H1}}} -> ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1, - multiple = false, - requeue = true}) + multiple = false, + requeue = true}) end, wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), @@ -2497,8 +2773,8 @@ consume_redelivery_count(Config) -> no_ack = false}), ?assertMatch(undefined, rabbit_basic:header(DCHeader, H0)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = true}), + multiple = false, + requeue = true}), %% wait for requeuing 
{#'basic.get_ok'{delivery_tag = DeliveryTag1, redelivered = true}, @@ -2507,8 +2783,8 @@ consume_redelivery_count(Config) -> ?assertMatch({DCHeader, _, 1}, rabbit_basic:header(DCHeader, H1)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag1, - multiple = false, - requeue = true}), + multiple = false, + requeue = true}), {#'basic.get_ok'{delivery_tag = DeliveryTag2, redelivered = true}, @@ -2517,8 +2793,8 @@ consume_redelivery_count(Config) -> no_ack = false}), ?assertMatch({DCHeader, _, 2}, rabbit_basic:header(DCHeader, H2)), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag2, - multiple = false, - requeue = true}), + multiple = false, + requeue = true}), ok. message_bytes_metrics(Config) -> @@ -2555,8 +2831,8 @@ message_bytes_metrics(Config) -> {#'basic.deliver'{delivery_tag = DeliveryTag, redelivered = false}, _} -> amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DeliveryTag, - multiple = false, - requeue = false}), + multiple = false, + requeue = false}), wait_for_messages_ready(Servers, RaName, 0), wait_for_messages_pending_ack(Servers, RaName, 0), rabbit_ct_helpers:await_condition( @@ -2696,6 +2972,36 @@ queue_length_limit_reject_publish(Config) -> ok = publish_confirm(Ch, QQ), ok. 
+queue_length_limit_policy_cleared(Config) -> + [Server | _] = Servers = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ok = rabbit_ct_broker_helpers:set_policy( + Config, 0, <<"max-length">>, QQ, <<"queues">>, + [{<<"max-length">>, 2}, + {<<"overflow">>, <<"reject-publish">>}]), + timer:sleep(1000), + RaName = ra_name(QQ), + QueryFun = fun rabbit_fifo:overview/1, + ?awaitMatch({ok, {_, #{config := #{max_length := 2}}}, _}, + rpc:call(Server, ra, local_query, [RaName, QueryFun]), + ?DEFAULT_AWAIT), + #'confirm.select_ok'{} = amqp_channel:call(Ch, #'confirm.select'{}), + ok = publish_confirm(Ch, QQ), + ok = publish_confirm(Ch, QQ), + ok = publish_confirm(Ch, QQ), %% QQs allow one message above the limit + wait_for_messages_ready(Servers, RaName, 3), + fail = publish_confirm(Ch, QQ), + ok = rabbit_ct_broker_helpers:clear_policy(Config, 0, <<"max-length">>), + ?awaitMatch({ok, {_, #{config := #{max_length := undefined}}}, _}, + rpc:call(Server, ra, local_query, [RaName, QueryFun]), + ?DEFAULT_AWAIT), + ok = publish_confirm(Ch, QQ), + wait_for_messages_ready(Servers, RaName, 4). 
+ purge(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), @@ -3280,12 +3586,14 @@ cancel_consumer_gh_3729(Config) -> ct:fail("basic.cancel_ok timeout") end, - D = #'queue.declare'{queue = QQ, passive = true, arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, + D = #'queue.declare'{queue = QQ, passive = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}]}, F = fun() -> #'queue.declare_ok'{queue = QQ, message_count = MC, consumer_count = CC} = amqp_channel:call(Ch, D), + ct:pal("Mc ~b CC ~b", [MC, CC]), MC =:= 1 andalso CC =:= 0 end, rabbit_ct_helpers:await_condition(F, 30000), @@ -3559,6 +3867,88 @@ select_nodes_with_least_replicas_node_down(Config) -> amqp_channel:call(Ch, #'queue.delete'{queue = Q})) || Q <- Qs]. +requeue_multiple_true(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, 3}])), + Num = 100, + Payloads = [integer_to_binary(N) || N <- lists:seq(1, Num)], + [publish(Ch, QQ, P) || P <- Payloads], + + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ}, self()), + receive #'basic.consume_ok'{} -> ok + end, + + DTags = [receive {#'basic.deliver'{redelivered = false, + delivery_tag = D}, + #amqp_msg{payload = P0}} -> + ?assertEqual(P, P0), + D + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + %% Requeue all messages. + ok = amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = lists:last(DTags), + multiple = true, + requeue = true}), + + %% We expect to get all messages re-delivered in the order in which we requeued + %% (which is the same order as messages were sent to us previously). 
+ [receive {#'basic.deliver'{redelivered = true}, + #amqp_msg{payload = P1}} -> + ?assertEqual(P, P1) + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + ?assertEqual(#'queue.delete_ok'{message_count = 0}, + amqp_channel:call(Ch, #'queue.delete'{queue = QQ})). + +requeue_multiple_false(Config) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-delivery-limit">>, long, 3}])), + Num = 100, + Payloads = [integer_to_binary(N) || N <- lists:seq(1, Num)], + [publish(Ch, QQ, P) || P <- Payloads], + + amqp_channel:subscribe(Ch, #'basic.consume'{queue = QQ}, self()), + receive #'basic.consume_ok'{} -> ok + end, + + DTags = [receive {#'basic.deliver'{redelivered = false, + delivery_tag = D}, + #amqp_msg{payload = P0}} -> + ?assertEqual(P, P0), + D + after 5000 -> ct:fail({basic_deliver_timeout, P, ?LINE}) + end || P <- Payloads], + + %% The delivery tags we received via AMQP 0.9.1 are ordered from 1-100. + %% Sanity check: + ?assertEqual(lists:seq(1, Num), DTags), + + %% Requeue each message individually in random order. + Tuples = [{rand:uniform(), D} || D <- DTags], + DTagsShuffled = [D || {_, D} <- lists:sort(Tuples)], + [ok = amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = D, + multiple = false, + requeue = true}) + || D <- DTagsShuffled], + + %% We expect to get all messages re-delivered in the order in which we requeued. + [receive {#'basic.deliver'{redelivered = true}, + #amqp_msg{payload = P1}} -> + ?assertEqual(integer_to_binary(D), P1) + after 5000 -> ct:fail({basic_deliver_timeout, ?LINE}) + end || D <- DTagsShuffled], + + ?assertEqual(#'queue.delete_ok'{message_count = 0}, + amqp_channel:call(Ch, #'queue.delete'{queue = QQ})). 
+ %%---------------------------------------------------------------------------- same_elements(L1, L2) @@ -3609,7 +3999,7 @@ publish(Ch, Queue, Msg) -> #amqp_msg{props = #'P_basic'{delivery_mode = 2}, payload = Msg}). -consume(Ch, Queue, NoAck) -> +basic_get_tag(Ch, Queue, NoAck) -> {GetOk, _} = Reply = amqp_channel:call(Ch, #'basic.get'{queue = Queue, no_ack = NoAck}), ?assertMatch({#'basic.get_ok'{}, #amqp_msg{payload = <<"msg">>}}, Reply), @@ -3621,13 +4011,20 @@ consume_empty(Ch, Queue, NoAck) -> no_ack = NoAck})). subscribe(Ch, Queue, NoAck) -> + subscribe(Ch, Queue, NoAck, <<"ctag">>, []). + +subscribe(Ch, Queue, NoAck, Tag, Args) -> amqp_channel:subscribe(Ch, #'basic.consume'{queue = Queue, no_ack = NoAck, - consumer_tag = <<"ctag">>}, + arguments = Args, + consumer_tag = Tag}, self()), receive - #'basic.consume_ok'{consumer_tag = <<"ctag">>} -> + #'basic.consume_ok'{consumer_tag = Tag} -> ok + after 30000 -> + flush(100), + exit(subscribe_timeout) end. qos(Ch, Prefetch, Global) -> @@ -3740,3 +4137,19 @@ basic_get(Ch, Q, NoAck, Attempt) -> timer:sleep(100), basic_get(Ch, Q, NoAck, Attempt - 1) end. + +check_quorum_queues_v4_compat(Config) -> + case rabbit_ct_broker_helpers:is_feature_flag_enabled(Config, 'rabbitmq_4.0.0') of + true -> + ok; + false -> + throw({skip, "test needs feature flag rabbitmq_4.0.0"}) + end. + +lists_interleave([], _List) -> + []; +lists_interleave([Item | Items], List) + when is_list(List) -> + {Left, Right} = lists:split(2, List), + Left ++ [Item | lists_interleave(Items, Right)]. 
+ diff --git a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl index 00ccb34402fe..85e5120ca037 100644 --- a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl @@ -51,8 +51,7 @@ init_per_group(Group, Config) -> [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - rabbit_ct_helpers:run_steps(Config1b, + rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()). diff --git a/deps/rabbit/test/rabbit_ct_hook.erl b/deps/rabbit/test/rabbit_ct_hook.erl new file mode 100644 index 000000000000..07097a57e0fa --- /dev/null +++ b/deps/rabbit/test/rabbit_ct_hook.erl @@ -0,0 +1,7 @@ +-module(rabbit_ct_hook). + +-export([init/2]). + +init(_, _) -> + _ = rabbit_ct_helpers:redirect_logger_to_ct_logs([]), + {ok, undefined}. diff --git a/deps/rabbit/test/rabbit_db_binding_SUITE.erl b/deps/rabbit/test/rabbit_db_binding_SUITE.erl index 9055e4ff1ddb..07eb0aea09d0 100644 --- a/deps/rabbit/test/rabbit_db_binding_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_binding_SUITE.erl @@ -131,8 +131,8 @@ delete1(_Config) -> Ret = rabbit_db_binding:delete(Binding, fun(_, _) -> ok end), ?assertMatch({ok, _}, Ret), {ok, Deletions} = Ret, - ?assertMatch({#exchange{}, not_deleted, [#binding{}], none}, - dict:fetch(XName1, Deletions)), + ?assertMatch({#exchange{}, not_deleted, [#binding{}]}, + rabbit_binding:fetch_deletion(XName1, Deletions)), ?assertEqual(false, rabbit_db_binding:exists(Binding)), passed. 
@@ -152,8 +152,8 @@ auto_delete1(_Config) -> Ret = rabbit_db_binding:delete(Binding, fun(_, _) -> ok end), ?assertMatch({ok, _}, Ret), {ok, Deletions} = Ret, - ?assertMatch({#exchange{}, deleted, [#binding{}], none}, - dict:fetch(XName1, Deletions)), + ?assertMatch({#exchange{}, not_deleted, [#binding{}]}, + rabbit_binding:fetch_deletion(XName1, Deletions)), ?assertEqual(false, rabbit_db_binding:exists(Binding)), passed. diff --git a/deps/rabbit/test/rabbit_db_queue_SUITE.erl b/deps/rabbit/test/rabbit_db_queue_SUITE.erl index 525e6b6dc5ae..06ff1a4889d2 100644 --- a/deps/rabbit/test/rabbit_db_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_queue_SUITE.erl @@ -40,9 +40,9 @@ all_tests() -> count, count_by_vhost, set, - set_many, delete, update, + update_decorators, exists, get_all_durable, get_all_durable_by_type, @@ -282,23 +282,6 @@ set1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), passed. -set_many(Config) -> - passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, set_many1, [Config]). - -set_many1(_Config) -> - QName1 = rabbit_misc:r(?VHOST, queue, <<"test-queue1">>), - QName2 = rabbit_misc:r(?VHOST, queue, <<"test-queue2">>), - QName3 = rabbit_misc:r(?VHOST, queue, <<"test-queue3">>), - Q1 = new_queue(QName1, rabbit_classic_queue), - Q2 = new_queue(QName2, rabbit_classic_queue), - Q3 = new_queue(QName3, rabbit_classic_queue), - ?assertEqual(ok, rabbit_db_queue:set_many([])), - ?assertEqual(ok, rabbit_db_queue:set_many([Q1, Q2, Q3])), - ?assertEqual({ok, Q1}, rabbit_db_queue:get_durable(QName1)), - ?assertEqual({ok, Q2}, rabbit_db_queue:get_durable(QName2)), - ?assertEqual({ok, Q3}, rabbit_db_queue:get_durable(QName3)), - passed. - delete(Config) -> passed = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete1, [Config]). @@ -309,8 +292,8 @@ delete1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), %% TODO Can we handle the deletions outside of rabbit_db_queue? 
Probably not because %% they should be done in a single transaction, but what a horrid API to have! - Dict = rabbit_db_queue:delete(QName, normal), - ?assertEqual(0, dict:size(Dict)), + Deletions = rabbit_db_queue:delete(QName, normal), + ?assertEqual(rabbit_binding:new_deletions(), Deletions), ?assertEqual(ok, rabbit_db_queue:delete(QName, normal)), ?assertEqual({error, not_found}, rabbit_db_queue:get(QName)), passed. @@ -341,7 +324,7 @@ update_decorators1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), ?assertEqual(undefined, amqqueue:get_decorators(Q)), %% Not really testing we set a decorator, but at least the field is being updated - ?assertEqual(ok, rabbit_db_queue:update_decorators(QName)), + ?assertEqual(ok, rabbit_db_queue:update_decorators(QName, [])), {ok, Q1} = rabbit_db_queue:get(QName), ?assertEqual([], amqqueue:get_decorators(Q1)), passed. diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 80f6093129eb..8d45aecca10f 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -14,15 +14,16 @@ -include_lib("rabbit_common/include/rabbit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). +-include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +% -define(PROTOMOD, rabbit_framing_amqp_0_9_1). %%%=================================================================== %%% Common Test callbacks %%%=================================================================== all() -> [ - {group, machine_version_2}, - {group, machine_version_3}, + {group, tests}, {group, machine_version_conversion} ]. @@ -34,19 +35,28 @@ all_tests() -> groups() -> [ - {machine_version_2, [shuffle], all_tests()}, - {machine_version_3, [shuffle], all_tests()}, - {machine_version_conversion, [shuffle], [convert_v2_to_v3]} + {tests, [shuffle], all_tests()}, + {machine_version_conversion, [shuffle], + [convert_v2_to_v3, + convert_v3_to_v4]} ]. 
-init_per_group(machine_version_2, Config) -> - [{machine_version, 2} | Config]; -init_per_group(machine_version_3, Config) -> - [{machine_version, 3} | Config]; +init_per_group(tests, Config) -> + [{machine_version, 4} | Config]; init_per_group(machine_version_conversion, Config) -> Config. -end_per_group(_Group, _Config) -> +init_per_testcase(_Testcase, Config) -> + FF = ?config(machine_version, Config) == 4, + ok = meck:new(rabbit_feature_flags, [passthrough]), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> FF end), + Config. + +end_per_group(_, Config) -> + Config. + +end_per_testcase(_Group, _Config) -> + meck:unload(), ok. %%%=================================================================== @@ -59,7 +69,10 @@ end_per_group(_Group, _Config) -> -define(ASSERT_EFF(EfxPat, Guard, Effects), ?assert(lists:any(fun (EfxPat) when Guard -> true; (_) -> false - end, Effects))). + end, Effects), + lists:flatten(io_lib:format("Expected to find effect matching " + "pattern '~s' in effect list '~0p'", + [??EfxPat, Effects])))). -define(ASSERT_NO_EFF(EfxPat, Effects), ?assert(not lists:any(fun (EfxPat) -> true; @@ -76,37 +89,58 @@ end_per_group(_Group, _Config) -> (_) -> false end, Effects))). +-define(ASSERT(Guard, Fun), + {assert, fun (S) -> ?assertMatch(Guard, S), _ = Fun(S) end}). +-define(ASSERT(Guard), + ?ASSERT(Guard, fun (_) -> true end)). + test_init(Name) -> init(#{name => Name, - max_in_memory_length => 0, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(Name)), release_cursor_interval => 0}). -enq_enq_checkout_test(C) -> - Cid = {<<"enq_enq_checkout_test">>, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), +-define(FUNCTION_NAME_B, atom_to_binary(?FUNCTION_NAME)). +-define(LINE_B, integer_to_binary(?LINE)). + +enq_enq_checkout_compat_test(C) -> + enq_enq_checkout_test(C, {auto, 2, simple_prefetch}). 
+ +enq_enq_checkout_v4_test(C) -> + enq_enq_checkout_test(C, {auto, {simple_prefetch, 2}}). + +enq_enq_checkout_test(Config, Spec) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(?FUNCTION_NAME)), + {State2, _} = enq(Config, 2, 2, second, State1), ?assertEqual(2, rabbit_fifo:query_messages_total(State2)), - {_State3, _, Effects} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, #{}), - State2), - ct:pal("~tp", [Effects]), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects} = + checkout(Config, ?LINE, Cid, Spec, State2), ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({log, [1,2], _Fun, _Local}, Effects), + ?ASSERT_EFF({log, [1, 2], _Fun, _Local}, Effects), + + {State4, _} = settle(Config, CKey, ?LINE, + [NextMsgId, NextMsgId+1], State3), + ?assertMatch(#{num_messages := 0, + num_ready_messages := 0, + num_checked_out := 0, + num_consumers := 1}, + rabbit_fifo:overview(State4)), ok. -credit_enq_enq_checkout_settled_credit_v1_test(C) -> +credit_enq_enq_checkout_settled_credit_v1_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _, Effects} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {auto, 1, credited}, #{}), State2), - ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects3} = + checkout(Config, ?LINE, Cid, {auto, 0, credited}, State2), + ?ASSERT_EFF({monitor, _, _}, Effects3), + {State4, Effects4} = credit(Config, CKey, ?LINE, 1, 0, false, State3), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects4), %% settle the delivery this should _not_ result in further messages being %% delivered - {State4, SettledEffects} = settle(C, Cid, 4, 1, State3), + {State5, 
SettledEffects} = settle(Config, CKey, ?LINE, NextMsgId, State4), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> @@ -114,151 +148,198 @@ credit_enq_enq_checkout_settled_credit_v1_test(C) -> end, SettledEffects)), %% granting credit (3) should deliver the second msg if the receivers %% delivery count is (1) - {State5, CreditEffects} = credit(C, Cid, 5, 1, 1, false, State4), - % ?debugFmt("CreditEffects ~tp ~n~tp", [CreditEffects, State4]), + {State6, CreditEffects} = credit(Config, CKey, ?LINE, 1, 1, false, State5), ?ASSERT_EFF({log, [2], _, _}, CreditEffects), - {_State6, FinalEffects} = enq(C, 6, 3, third, State5), + {_State, FinalEffects} = enq(Config, 6, 3, third, State6), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, FinalEffects)), ok. -credit_enq_enq_checkout_settled_credit_v2_test(C) -> +credit_enq_enq_checkout_settled_credit_v2_test(Config) -> + InitDelCnt = 16#ff_ff_ff_ff, Ctag = ?FUNCTION_NAME, Cid = {Ctag, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _, Effects} = apply(meta(C, 3), - rabbit_fifo:make_checkout( - Cid, - {auto, 1, credited}, - %% denotes that credit API v2 is used - #{initial_delivery_count => 16#ff_ff_ff_ff}), - State2), - ?ASSERT_EFF({monitor, _, _}, Effects), - ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey, + next_msg_id := NextMsgId}, Effects3} = + checkout(Config, ?LINE, Cid, {auto, {credited, InitDelCnt}}, State2), + ?ASSERT_EFF({monitor, _, _}, Effects3), + {State4, Effects4} = credit(Config, CKey, ?LINE, 1, InitDelCnt, false, State3), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects4), %% Settling the delivery should not grant new credit. 
- {State4, SettledEffects} = settle(C, Cid, 4, 1, State3), + {State5, SettledEffects} = settle(Config, CKey, 4, NextMsgId, State4), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, SettledEffects)), - {State5, CreditEffects} = credit(C, Cid, 5, 1, 0, false, State4), + {State6, CreditEffects} = credit(Config, CKey, ?LINE, 1, 0, false, State5), ?ASSERT_EFF({log, [2], _, _}, CreditEffects), %% The credit_reply should be sent **after** the delivery. ?assertEqual({send_msg, self(), {credit_reply, Ctag, _DeliveryCount = 1, _Credit = 0, _Available = 0, _Drain = false}, ?DELIVERY_SEND_MSG_OPTS}, lists:last(CreditEffects)), - {_State6, FinalEffects} = enq(C, 6, 3, third, State5), + {_State, FinalEffects} = enq(Config, 6, 3, third, State6), ?assertEqual(false, lists:any(fun ({log, _, _, _}) -> true; (_) -> false end, FinalEffects)). -credit_with_drained_v1_test(C) -> - Ctag = ?FUNCTION_NAME, +credit_with_drained_v1_test(Config) -> + Ctag = ?FUNCTION_NAME_B, Cid = {Ctag, self()}, State0 = test_init(test), %% checkout with a single credit - {State1, _, _} = - apply(meta(C, 1), rabbit_fifo:make_checkout(Cid, {auto, 1, credited},#{}), - State0), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 1, - delivery_count = 0}}}, + {State1, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, {auto, 0, credited}, State0), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 0}}}, State1), + {State2, _Effects2} = credit(Config, CKey, ?LINE, 1, 0, false, State1), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 1, + delivery_count = 0}}}, + State2), {State, Result, _} = - apply(meta(C, 3), rabbit_fifo:make_credit(Cid, 5, 0, true), State1), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, - delivery_count = 5}}}, + apply(meta(Config, ?LINE), rabbit_fifo:make_credit(Cid, 5, 0, true), State2), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + 
delivery_count = 5}}}, State), ?assertEqual({multi, [{send_credit_reply, 0}, {send_drained, {Ctag, 5}}]}, - Result), + Result), ok. -credit_with_drained_v2_test(C) -> +credit_with_drained_v2_test(Config) -> Ctag = ?FUNCTION_NAME, Cid = {Ctag, self()}, State0 = test_init(test), %% checkout with a single credit - {State1, _, _} = apply(meta(C, 1), - rabbit_fifo:make_checkout( - Cid, - {auto, 1, credited}, - %% denotes that credit API v2 is used - #{initial_delivery_count => 0}), - State0), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 1, + {State1, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, {auto, {credited, 0}}, State0), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, delivery_count = 0}}}, State1), - {State, ok, Effects} = apply(meta(C, 3), rabbit_fifo:make_credit(Cid, 5, 0, true), State1), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, + {State2, _Effects2} = credit(Config, CKey, ?LINE, 1, 0, false, State1), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 1, + delivery_count = 0}}}, + State2), + {State, _, Effects} = + apply(meta(Config, ?LINE), rabbit_fifo:make_credit(CKey, 5, 0, true), State2), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, delivery_count = 5}}}, State), ?assertEqual([{send_msg, self(), - {credit_reply, Ctag, _DeliveryCount = 5, _Credit = 0, _Available = 0, _Drain = true}, + {credit_reply, Ctag, _DeliveryCount = 5, + _Credit = 0, _Available = 0, _Drain = true}, ?DELIVERY_SEND_MSG_OPTS}], Effects). 
-credit_and_drain_v1_test(C) -> +credit_and_drain_v1_test(Config) -> Ctag = ?FUNCTION_NAME, Cid = {Ctag, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), %% checkout without any initial credit (like AMQP 1.0 would) {State3, _, CheckEffs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {auto, 0, credited}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {auto, 0, credited}, #{}), State2), ?ASSERT_NO_EFF({log, _, _, _}, CheckEffs), {State4, {multi, [{send_credit_reply, 0}, {send_drained, {Ctag, 2}}]}, - Effects} = apply(meta(C, 4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3), + Effects} = apply(meta(Config, 4), rabbit_fifo:make_credit(Cid, 4, 0, true), State3), ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, delivery_count = 4}}}, State4), ?ASSERT_EFF({log, [1, 2], _, _}, Effects), - {_State5, EnqEffs} = enq(C, 5, 2, third, State4), + {_State5, EnqEffs} = enq(Config, 5, 2, third, State4), ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs), ok. 
-credit_and_drain_v2_test(C) -> - Ctag = ?FUNCTION_NAME, +credit_and_drain_v2_test(Config) -> + Ctag = ?FUNCTION_NAME_B, Cid = {Ctag, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _, CheckEffs} = apply(meta(C, 3), - rabbit_fifo:make_checkout( - Cid, - %% checkout without any initial credit (like AMQP 1.0 would) - {auto, 0, credited}, - %% denotes that credit API v2 is used - #{initial_delivery_count => 16#ff_ff_ff_ff - 1}), - State2), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), + {State3, #{key := CKey}, CheckEffs} = checkout(Config, ?LINE, Cid, + {auto, {credited, 16#ff_ff_ff_ff - 1}}, + State2), ?ASSERT_NO_EFF({log, _, _, _}, CheckEffs), - {State4, ok, Effects} = apply(meta(C, 4), - rabbit_fifo:make_credit(Cid, 4, 16#ff_ff_ff_ff - 1, true), - State3), - ?assertMatch(#rabbit_fifo{consumers = #{Cid := #consumer{credit = 0, - delivery_count = 2}}}, + {State4, Effects} = credit(Config, CKey, ?LINE, 4, 16#ff_ff_ff_ff - 1, + true, State3), + ?assertMatch(#rabbit_fifo{consumers = #{CKey := #consumer{credit = 0, + delivery_count = 2}}}, State4), ?ASSERT_EFF({log, [1, 2], _, _}, Effects), %% The credit_reply should be sent **after** the deliveries. ?assertEqual({send_msg, self(), - {credit_reply, Ctag, _DeliveryCount = 2, _Credit = 0, _Available = 0, _Drain = true}, + {credit_reply, Ctag, _DeliveryCount = 2, _Credit = 0, + _Available = 0, _Drain = true}, ?DELIVERY_SEND_MSG_OPTS}, lists:last(Effects)), - {_State5, EnqEffs} = enq(C, 5, 2, third, State4), - ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs). + {_State5, EnqEffs} = enq(Config, 5, 2, third, State4), + ?ASSERT_NO_EFF({log, _, _, _}, EnqEffs), + ok. 
+ +credit_and_drain_single_active_consumer_v2_test(Config) -> + State0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r( + "/", queue, atom_to_binary(?FUNCTION_NAME)), + release_cursor_interval => 0, + single_active_consumer_on => true}), + Self = self(), + + % Send 1 message. + {State1, _} = enq(Config, 1, 1, first, State0), + + % Add 2 consumers. + Ctag1 = <<"ctag1">>, + Ctag2 = <<"ctag2">>, + C1 = {Ctag1, Self}, + C2 = {Ctag2, Self}, + CK1 = ?LINE, + CK2 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {credited, 16#ff_ff_ff_ff}}, #{})}, + {CK2, make_checkout(C2, {auto, {credited, 16#ff_ff_ff_ff}}, #{})} + ], + {State2, _} = run_log(Config, State1, Entries), + + % The 1st registered consumer is the active one, the 2nd consumer is waiting. + ?assertMatch(#{single_active_consumer_id := C1, + single_active_num_waiting_consumers := 1}, + rabbit_fifo:overview(State2)), + + % Drain the inactive consumer. + {State3, Effects0} = credit(Config, CK2, ?LINE, 5000, 16#ff_ff_ff_ff, true, State2), + % The inactive consumer should not receive any message. + % Hence, no log effect should be returned. + % Since we sent drain=true, we expect the sending queue to consume all link credit + % advancing the delivery-count. + ?assertEqual({send_msg, Self, + {credit_reply, Ctag2, _DeliveryCount = 4999, _Credit = 0, + _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS}, + Effects0), + + % Drain the active consumer. + {_State4, Effects1} = credit(Config, CK1, ?LINE, 1000, 16#ff_ff_ff_ff, true, State3), + ?assertMatch([ + {log, [1], _Fun, _Local}, + {send_msg, Self, + {credit_reply, Ctag1, _DeliveryCount = 999, _Credit = 0, + _Available = 0, _Drain = true}, + ?DELIVERY_SEND_MSG_OPTS} + ], + Effects1). 
enq_enq_deq_test(C) -> - Cid = {?FUNCTION_NAME, self()}, + Cid = {?FUNCTION_NAME_B, self()}, {State1, _} = enq(C, 1, 1, first, test_init(test)), {State2, _} = enq(C, 2, 2, second, State1), % get returns a reply value @@ -267,52 +348,57 @@ enq_enq_deq_test(C) -> {_State3, _, [{log, [1], Fun}, {monitor, _, _}]} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(C, 3), make_checkout(Cid, {dequeue, unsettled}, #{}), State2), ct:pal("Out ~tp", [Fun([Msg1])]), ok. -enq_enq_deq_deq_settle_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), +enq_enq_deq_deq_settle_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, _} = enq(Config, 2, 2, second, State1), % get returns a reply value {State3, '$ra_no_reply', [{log, [1], _}, {monitor, _, _}]} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, unsettled}, #{}), State2), - {_State4, {dequeue, empty}} = - apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + {State4, {dequeue, empty}} = + apply(meta(Config, 4), make_checkout(Cid, {dequeue, unsettled}, #{}), State3), + + {State, _} = settle(Config, Cid, ?LINE, 0, State4), + + ?assertMatch(#{num_consumers := 0}, rabbit_fifo:overview(State)), ok. -enq_enq_checkout_get_settled_test(C) -> +enq_enq_checkout_get_settled_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), % get returns a reply value {State2, _, Effs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, settled}, #{}), State1), ?ASSERT_EFF({log, [1], _}, Effs), ?assertEqual(0, rabbit_fifo:query_messages_total(State2)), ok. 
-checkout_get_empty_test(C) -> +checkout_get_empty_test(Config) -> Cid = {?FUNCTION_NAME, self()}, - State = test_init(test), - {_State2, {dequeue, empty}, _} = - apply(meta(C, 1), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State), + State0 = test_init(test), + {State, {dequeue, empty}, _} = checkout(Config, ?LINE, Cid, + {dequeue, unsettled}, State0), + ?assertMatch(#{num_consumers := 0}, rabbit_fifo:overview(State)), ok. -untracked_enq_deq_test(C) -> +untracked_enq_deq_test(Config) -> Cid = {?FUNCTION_NAME, self()}, State0 = test_init(test), - {State1, _, _} = apply(meta(C, 1), + {State1, _, _} = apply(meta(Config, 1), rabbit_fifo:make_enqueue(undefined, undefined, first), State0), {_State2, _, Effs} = - apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State1), + apply(meta(Config, 3), make_checkout(Cid, {dequeue, settled}, #{}), State1), ?ASSERT_EFF({log, [1], _}, Effs), ok. @@ -321,104 +407,125 @@ enq_expire_deq_test(C) -> queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), msg_ttl => 0}, S0 = rabbit_fifo:init(Conf), - Msg = #basic_message{content = #content{properties = none, + Msg = #basic_message{content = #content{properties = #'P_basic'{}, payload_fragments_rev = []}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), + {S1, ok, _} = apply(meta(C, 1, 100, {notify, 1, self()}), + rabbit_fifo:make_enqueue(self(), 1, Msg), S0), Cid = {?FUNCTION_NAME, self()}, {_S2, {dequeue, empty}, Effs} = - apply(meta(C, 2, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S1), + apply(meta(C, 2, 101), make_checkout(Cid, {dequeue, unsettled}, #{}), S1), ?ASSERT_EFF({mod_call, rabbit_global_counters, messages_dead_lettered, [expired, rabbit_quorum_queue, disabled, 1]}, Effs), ok. -enq_expire_enq_deq_test(C) -> +enq_expire_enq_deq_test(Config) -> S0 = test_init(test), %% Msg1 and Msg2 get enqueued in the same millisecond, %% but only Msg1 expires immediately. 
- Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, - payload_fragments_rev = [<<"msg1">>]}}, + Msg1 = mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{ + expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}), Enq1 = rabbit_fifo:make_enqueue(self(), 1, Msg1), - {S1, ok, _} = apply(meta(C, 1, 100), Enq1, S0), - Msg2 = #basic_message{content = #content{properties = none, + Idx1 = ?LINE, + {S1, ok, _} = apply(meta(Config, Idx1, 100, {notify, 1, self()}), Enq1, S0), + Msg2 = #basic_message{content = #content{properties = #'P_basic'{}, + % class_id = 60, + % protocol = ?PROTOMOD, payload_fragments_rev = [<<"msg2">>]}}, Enq2 = rabbit_fifo:make_enqueue(self(), 2, Msg2), - {S2, ok, _} = apply(meta(C, 2, 100), Enq2, S1), + Idx2 = ?LINE, + {S2, ok, _} = apply(meta(Config, Idx2, 100, {notify, 2, self()}), Enq2, S1), Cid = {?FUNCTION_NAME, self()}, {_S3, _, Effs} = - apply(meta(C, 3, 101), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), S2), - {log, [2], Fun} = get_log_eff(Effs), + apply(meta(Config, ?LINE, 101), make_checkout(Cid, {dequeue, unsettled}, #{}), S2), + {log, [Idx2], Fun} = get_log_eff(Effs), [{reply, _From, {wrap_reply, {dequeue, {_MsgId, _HeaderMsg}, ReadyMsgCount}}}] = Fun([Enq2]), ?assertEqual(0, ReadyMsgCount). 
-enq_expire_deq_enq_enq_deq_deq_test(C) -> +enq_expire_deq_enq_enq_deq_deq_test(Config) -> S0 = test_init(test), - Msg1 = #basic_message{content = #content{properties = #'P_basic'{expiration = <<"0">>}, - payload_fragments_rev = [<<"msg1">>]}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg1), S0), - {S2, {dequeue, empty}, _} = apply(meta(C, 2, 101), - rabbit_fifo:make_checkout({c1, self()}, {dequeue, unsettled}, #{}), S1), - {S3, _} = enq(C, 3, 2, msg2, S2), - {S4, _} = enq(C, 4, 3, msg3, S3), + Msg1 = #basic_message{content = + #content{properties = #'P_basic'{expiration = <<"0">>}, + payload_fragments_rev = [<<"msg1">>]}}, + {S1, ok, _} = apply(meta(Config, 1, 100, {notify, 1, self()}), + rabbit_fifo:make_enqueue(self(), 1, Msg1), S0), + {S2, {dequeue, empty}, _} = apply(meta(Config, 2, 101), + make_checkout({c1, self()}, + {dequeue, unsettled}, #{}), S1), + {S3, _} = enq(Config, 3, 2, msg2, S2), + {S4, _} = enq(Config, 4, 3, msg3, S3), {S5, '$ra_no_reply', [{log, [3], _}, {monitor, _, _}]} = - apply(meta(C, 5), rabbit_fifo:make_checkout({c2, self()}, {dequeue, unsettled}, #{}), S4), + apply(meta(Config, 5), make_checkout({c2, self()}, {dequeue, unsettled}, #{}), S4), {_S6, '$ra_no_reply', [{log, [4], _}, {monitor, _, _}]} = - apply(meta(C, 6), rabbit_fifo:make_checkout({c3, self()}, {dequeue, unsettled}, #{}), S5). + apply(meta(Config, 6), make_checkout({c3, self()}, {dequeue, unsettled}, #{}), S5), + ok. 
-release_cursor_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, _} = enq(C, 2, 2, second, State1), - {State3, _} = check(C, Cid, 3, 10, State2), - % no release cursor effect at this point - {State4, _} = settle(C, Cid, 4, 1, State3), - {_Final, Effects1} = settle(C, Cid, 5, 0, State4), - % empty queue forwards release cursor all the way - ?ASSERT_EFF({release_cursor, 5, _}, Effects1), +checkout_enq_settle_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, #{key := CKey, + next_msg_id := NextMsgId}, + [{monitor, _, _} | _]} = checkout(Config, ?LINE, Cid, 1, test_init(test)), + {State2, Effects0} = enq(Config, 2, 1, first, State1), + ?ASSERT_EFF({send_msg, _, {delivery, _, [{0, {_, first}}]}, _}, Effects0), + {State3, _} = enq(Config, 3, 2, second, State2), + {_, _Effects} = settle(Config, CKey, 4, NextMsgId, State3), ok. -checkout_enq_settle_test(C) -> - Cid = {?FUNCTION_NAME, self()}, - {State1, [{monitor, _, _} | _]} = check(C, Cid, 1, test_init(test)), - {State2, Effects0} = enq(C, 2, 1, first, State1), - %% TODO: this should go back to a send_msg effect after optimisation - % ?ASSERT_EFF({log, [2], _, _}, Effects0), - ?ASSERT_EFF({send_msg, _, - {delivery, ?FUNCTION_NAME, - [{0, {_, first}}]}, _}, - Effects0), - {State3, _} = enq(C, 3, 2, second, State2), - {_, _Effects} = settle(C, Cid, 4, 0, State3), - % the release cursor is the smallest raft index that does not - % contribute to the state of the application - % ?ASSERT_EFF({release_cursor, 2, _}, Effects), - ok. 
- -duplicate_enqueue_test(C) -> - Cid = {<<"duplicate_enqueue_test">>, self()}, - {State1, [ {monitor, _, _} | _]} = check_n(C, Cid, 5, 5, test_init(test)), - {State2, Effects2} = enq(C, 2, 1, first, State1), - % ?ASSERT_EFF({log, [2], _, _}, Effects2), +duplicate_enqueue_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + MsgSeq = 1, + {State1, [ {monitor, _, _} | _]} = check_n(Config, Cid, 5, 5, test_init(test)), + {State2, Effects2} = enq(Config, 2, MsgSeq, first, State1), ?ASSERT_EFF({send_msg, _, {delivery, _, [{_, {_, first}}]}, _}, Effects2), - {_State3, Effects3} = enq(C, 3, 1, first, State2), + {_State3, Effects3} = enq(Config, 3, MsgSeq, first, State2), ?ASSERT_NO_EFF({log, [_], _, _}, Effects3), ok. -return_test(C) -> +return_test(Config) -> Cid = {<<"cid">>, self()}, Cid2 = {<<"cid2">>, self()}, - {State0, _} = enq(C, 1, 1, msg, test_init(test)), - {State1, _} = check_auto(C, Cid, 2, State0), - {State2, _} = check_auto(C, Cid2, 3, State1), - {State3, _, _} = apply(meta(C, 4), rabbit_fifo:make_return(Cid, [0]), State2), - ?assertMatch(#{Cid := #consumer{checked_out = C1}} when map_size(C1) == 0, - State3#rabbit_fifo.consumers), - ?assertMatch(#{Cid2 := #consumer{checked_out = C2}} when map_size(C2) == 1, - State3#rabbit_fifo.consumers), + {State0, _} = enq(Config, 1, 1, msg, test_init(test)), + {State1, #{key := C1Key, + next_msg_id := MsgId}, _} = checkout(Config, ?LINE, Cid, 1, State0), + {State2, #{key := C2Key}, _} = checkout(Config, ?LINE, Cid2, 1, State1), + {State3, _, _} = apply(meta(Config, 4), + rabbit_fifo:make_return(C1Key, [MsgId]), State2), + ?assertMatch(#{C1Key := #consumer{checked_out = C1}} + when map_size(C1) == 0, State3#rabbit_fifo.consumers), + ?assertMatch(#{C2Key := #consumer{checked_out = C2}} + when map_size(C2) == 1, State3#rabbit_fifo.consumers), + ok. 
+ +return_multiple_test(Config) -> + Cid = {<<"cid">>, self()}, + {State0, _} = enq(Config, 1, 1, first, test_init(?FUNCTION_NAME)), + {State1, _} = enq(Config, 2, 2, second, State0), + {State2, _} = enq(Config, 3, 3, third, State1), + + {State3, + #{key := CKey, + next_msg_id := NextMsgId}, + Effects0} = checkout(Config, ?LINE, Cid, 3, State2), + ?ASSERT_EFF({log, [1, 2, 3], _Fun, _Local}, Effects0), + + {_, _, Effects1} = apply(meta(Config, ?LINE), + rabbit_fifo:make_return( + CKey, + %% Return messages in following order: 3, 1, 2 + [NextMsgId + 2, NextMsgId, NextMsgId + 1]), + State3), + %% We expect messages to be re-delivered in the same order in which we previously returned. + ?ASSERT_EFF({log, [3, 1, 2], _Fun, _Local}, Effects1), ok. return_dequeue_delivery_limit_test(C) -> @@ -444,33 +551,27 @@ return_dequeue_delivery_limit_test(C) -> ?assertMatch(#{num_messages := 0}, rabbit_fifo:overview(State4)), ok. -return_non_existent_test(C) -> +return_non_existent_test(Config) -> Cid = {<<"cid">>, self()}, - {State0, _} = enq(C, 1, 1, second, test_init(test)), - % return non-existent - {_State2, _} = apply(meta(C, 3), rabbit_fifo:make_return(Cid, [99]), State0), + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + % return non-existent, check it doesn't crash + {_State2, _} = apply(meta(Config, 3), rabbit_fifo:make_return(Cid, [99]), State0), ok. 
-return_checked_out_test(C) -> +return_checked_out_test(Config) -> Cid = {<<"cid">>, self()}, - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {State1, [_Monitor, - {log, [1], Fun, _} - | _ ] - } = check_auto(C, Cid, 2, State0), - - Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - - [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun([Msg1]), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 1, State0), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects1), % returning immediately checks out the same message again - {_, ok, [ - {log, [1], _, _} - % {send_msg, _, {delivery, _, [{_, _}]}, _}, - ]} = - apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), + {_State, ok, Effects2} = + apply(meta(Config, 3), rabbit_fifo:make_return(CKey, [MsgId]), State1), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects2), ok. -return_checked_out_limit_test(C) -> +return_checked_out_limit_test(Config) -> Cid = {<<"cid">>, self()}, Init = init(#{name => test, queue_resource => rabbit_misc:r("/", queue, @@ -479,124 +580,173 @@ return_checked_out_limit_test(C) -> max_in_memory_length => 0, delivery_limit => 1}), Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, Init), - {State1, [_Monitor, - {log, [1], Fun1, _} - | _ ]} = check_auto(C, Cid, 2, State0), - [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), + {State0, _} = enq(Config, 1, 1, Msg1, Init), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 1, State0), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects1), % returning immediately checks out the same message again - {State2, ok, [ - {log, [1], Fun2, _} - ]} = - apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), - [{send_msg, _, {delivery, _, [{MsgId2, _}]}, _}] = Fun2([Msg1]), + {State2, ok, Effects2} = + apply(meta(Config, 3), rabbit_fifo:make_return(CKey, 
[MsgId]), State1), + ?ASSERT_EFF({log, [1], _Fun, _Local}, Effects2), + {#rabbit_fifo{} = State, ok, _} = - apply(meta(C, 4), rabbit_fifo:make_return(Cid, [MsgId2]), State2), + apply(meta(Config, 4), rabbit_fifo:make_return(Cid, [MsgId + 1]), State2), ?assertEqual(0, rabbit_fifo:query_messages_total(State)), ok. -return_auto_checked_out_test(C) -> +return_auto_checked_out_test(Config) -> Cid = {<<"cid">>, self()}, Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State00, _} = enq(C, 1, 1, first, test_init(test)), - {State0, _} = enq(C, 2, 2, second, State00), + {State00, _} = enq(Config, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 2, 2, second, State00), % it first active then inactive as the consumer took on but cannot take % any more - {State1, [_Monitor, - {log, [1], Fun1, _} - ]} = check_auto(C, Cid, 2, State0), + {State1, #{key := CKey, + next_msg_id := MsgId}, + [_Monitor, {log, [1], Fun1, _} ]} = checkout(Config, ?LINE, Cid, 1, State0), [{send_msg, _, {delivery, _, [{MsgId, _}]}, _}] = Fun1([Msg1]), % return should include another delivery - {_State2, _, Effects} = apply(meta(C, 3), rabbit_fifo:make_return(Cid, [MsgId]), State1), + {State2, _, Effects} = apply(meta(Config, 3), + rabbit_fifo:make_return(CKey, [MsgId]), State1), [{log, [1], Fun2, _} | _] = Effects, - - [{send_msg, _, {delivery, _, [{_MsgId2, {#{delivery_count := 1}, first}}]}, _}] + [{send_msg, _, {delivery, _, [{_MsgId2, {#{acquired_count := 1}, first}}]}, _}] = Fun2([Msg1]), + + %% a down does not increment the return_count + {State3, _, _} = apply(meta(Config, ?LINE), {down, self(), noproc}, State2), + + {_State4, #{key := _CKey2, + next_msg_id := _}, + [_, {log, [1], Fun3, _} ]} = checkout(Config, ?LINE, Cid, 1, State3), + + [{send_msg, _, {delivery, _, [{_, {#{delivery_count := 1, + acquired_count := 2}, first}}]}, _}] + = Fun3([Msg1]), ok. 
-cancelled_checkout_empty_queue_test(C) -> +requeue_test(Config) -> Cid = {<<"cid">>, self()}, - {State1, _} = check_auto(C, Cid, 2, test_init(test)), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + % it first active then inactive as the consumer took on but cannot take + % any more + {State1, #{key := CKey, + next_msg_id := MsgId}, + [_Monitor, {log, [1], Fun1, _} ]} = checkout(Config, ?LINE, Cid, 1, State0), + [{send_msg, _, {delivery, _, [{MsgId, {H1, _}}]}, _}] = Fun1([Msg1]), + % return should include another delivery + [{append, Requeue, _}] = rabbit_fifo:make_requeue(CKey, {notify, 1, self()}, + [{MsgId, 1, H1, Msg1}], []), + {_State2, _, Effects} = apply(meta(Config, 3), Requeue, State1), + [{log, [_], Fun2, _} | _] = Effects, + [{send_msg, _, + {delivery, _, [{_MsgId2, {#{acquired_count := 1}, first}}]}, _}] + = Fun2([Msg1]), + ok. + +cancelled_checkout_empty_queue_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State1, #{key := _CKey, + next_msg_id := _NextMsgId}, _} = + checkout(Config, ?LINE, Cid, 1, test_init(test)),%% prefetch of 1 % cancelled checkout should clear out service_queue also, else we'd get a % build up of these - {State2, _, Effects} = apply(meta(C, 3), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + {State2, _, _Effects} = apply(meta(Config, 3), + make_checkout(Cid, cancel, #{}), State1), ?assertEqual(0, map_size(State2#rabbit_fifo.consumers)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), - ?ASSERT_EFF({release_cursor, _, _}, Effects), ok. 
-cancelled_checkout_out_test(C) -> +cancelled_checkout_out_test(Config) -> Cid = {<<"cid">>, self()}, - {State00, _} = enq(C, 1, 1, first, test_init(test)), - {State0, _} = enq(C, 2, 2, second, State00), - {State1, _} = check_auto(C, Cid, 3, State0),%% prefetch of 1 + {State00, _} = enq(Config, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 2, 2, second, State00), + {State1, #{key := CKey, + next_msg_id := NextMsgId}, _} = + checkout(Config, ?LINE, Cid, 1, State0),%% prefetch of 1 % cancelled checkout should not return pending messages to queue - {State2, _, _} = apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), - ?assertEqual(1, lqueue:len(State2#rabbit_fifo.messages)), + {State2, _, _} = apply(meta(Config, 4), + rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + ?assertEqual(1, rabbit_fifo_q:len(State2#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State2#rabbit_fifo.returns)), ?assertEqual(0, priority_queue:len(State2#rabbit_fifo.service_queue)), {State3, {dequeue, empty}} = - apply(meta(C, 5), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State2), + apply(meta(Config, 5), make_checkout(Cid, {dequeue, settled}, #{}), State2), %% settle {State4, ok, _} = - apply(meta(C, 6), rabbit_fifo:make_settle(Cid, [0]), State3), + apply(meta(Config, 6), rabbit_fifo:make_settle(CKey, [NextMsgId]), State3), {_State, _, [{log, [2], _Fun} | _]} = - apply(meta(C, 7), rabbit_fifo:make_checkout(Cid, {dequeue, settled}, #{}), State4), + apply(meta(Config, 7), make_checkout(Cid, {dequeue, settled}, #{}), State4), + ok. 
+ +down_with_noproc_consumer_returns_unsettled_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + {State1, #{key := CKey}, + [{monitor, process, Pid} | _]} = checkout(Config, ?LINE, Cid, 1, State0), + {State2, _, _} = apply(meta(Config, 3), {down, Pid, noproc}, State1), + {_State, #{key := CKey2}, Effects} = checkout(Config, ?LINE, Cid, 1, State2), + ?assertNotEqual(CKey, CKey2), + ?ASSERT_EFF({monitor, process, _}, Effects), ok. -down_with_noproc_consumer_returns_unsettled_test(C) -> - Cid = {<<"down_consumer_returns_unsettled_test">>, self()}, - {State0, _} = enq(C, 1, 1, second, test_init(test)), - {State1, [{monitor, process, Pid} | _]} = check(C, Cid, 2, State0), - {State2, _, _} = apply(meta(C, 3), {down, Pid, noproc}, State1), - {_State, Effects} = check(C, Cid, 4, State2), +removed_consumer_returns_unsettled_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + {State1, #{key := CKey}, + [{monitor, process, _Pid} | _]} = checkout(Config, ?LINE, Cid, 1, State0), + Remove = rabbit_fifo:make_checkout(Cid, remove, #{}), + {State2, _, _} = apply(meta(Config, 3), Remove, State1), + {_State, #{key := CKey2}, Effects} = checkout(Config, ?LINE, Cid, 1, State2), + ?assertNotEqual(CKey, CKey2), ?ASSERT_EFF({monitor, process, _}, Effects), ok. 
-down_with_noconnection_marks_suspect_and_node_is_monitored_test(C) -> +down_with_noconnection_marks_suspect_and_node_is_monitored_test(Config) -> Pid = spawn(fun() -> ok end), - Cid = {<<"down_with_noconnect">>, Pid}, + Cid = {?FUNCTION_NAME_B, Pid}, Self = self(), Node = node(Pid), - {State0, Effects0} = enq(C, 1, 1, second, test_init(test)), + {State0, Effects0} = enq(Config, 1, 1, second, test_init(test)), ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects0), - {State1, Effects1} = check_auto(C, Cid, 2, State0), - #consumer{credit = 0} = maps:get(Cid, State1#rabbit_fifo.consumers), + {State1, #{key := CKey}, Effects1} = checkout(Config, ?LINE, Cid, 1, State0), + #consumer{credit = 0} = maps:get(CKey, State1#rabbit_fifo.consumers), ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects1), % monitor both enqueuer and consumer % because we received a noconnection we now need to monitor the node - {State2a, _, _} = apply(meta(C, 3), {down, Pid, noconnection}, State1), + {State2a, _, _} = apply(meta(Config, 3), {down, Pid, noconnection}, State1), #consumer{credit = 1, checked_out = Ch, - status = suspected_down} = maps:get(Cid, State2a#rabbit_fifo.consumers), + status = suspected_down} = maps:get(CKey, State2a#rabbit_fifo.consumers), ?assertEqual(#{}, Ch), %% validate consumer has credit - {State2, _, Effects2} = apply(meta(C, 3), {down, Self, noconnection}, State2a), + {State2, _, Effects2} = apply(meta(Config, 3), {down, Self, noconnection}, State2a), ?ASSERT_EFF({monitor, node, _}, Effects2), ?assertNoEffect({demonitor, process, _}, Effects2), % when the node comes up we need to retry the process monitors for the % disconnected processes - {State3, _, Effects3} = apply(meta(C, 3), {nodeup, Node}, State2), - #consumer{status = up} = maps:get(Cid, State3#rabbit_fifo.consumers), + {State3, _, Effects3} = apply(meta(Config, 3), {nodeup, Node}, State2), + #consumer{status = up} = maps:get(CKey, State3#rabbit_fifo.consumers), % try to re-monitor the suspect 
processes ?ASSERT_EFF({monitor, process, P}, P =:= Pid, Effects3), ?ASSERT_EFF({monitor, process, P}, P =:= Self, Effects3), ok. -down_with_noconnection_returns_unack_test(C) -> +down_with_noconnection_returns_unack_test(Config) -> Pid = spawn(fun() -> ok end), - Cid = {<<"down_with_noconnect">>, Pid}, + Cid = {?FUNCTION_NAME_B, Pid}, Msg = rabbit_fifo:make_enqueue(self(), 1, second), - {State0, _} = enq(C, 1, 1, second, test_init(test)), - ?assertEqual(1, lqueue:len(State0#rabbit_fifo.messages)), + {State0, _} = enq(Config, 1, 1, second, test_init(test)), + ?assertEqual(1, rabbit_fifo_q:len(State0#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State0#rabbit_fifo.returns)), - {State1, {_, _}} = deq(C, 2, Cid, unsettled, Msg, State0), - ?assertEqual(0, lqueue:len(State1#rabbit_fifo.messages)), + {State1, {_, _}} = deq(Config, 2, Cid, unsettled, Msg, State0), + ?assertEqual(0, rabbit_fifo_q:len(State1#rabbit_fifo.messages)), ?assertEqual(0, lqueue:len(State1#rabbit_fifo.returns)), - {State2a, _, _} = apply(meta(C, 3), {down, Pid, noconnection}, State1), - ?assertEqual(0, lqueue:len(State2a#rabbit_fifo.messages)), + {State2a, _, _} = apply(meta(Config, 3), {down, Pid, noconnection}, State1), + ?assertEqual(0, rabbit_fifo_q:len(State2a#rabbit_fifo.messages)), ?assertEqual(1, lqueue:len(State2a#rabbit_fifo.returns)), ?assertMatch(#consumer{checked_out = Ch, status = suspected_down} @@ -604,49 +754,72 @@ down_with_noconnection_returns_unack_test(C) -> maps:get(Cid, State2a#rabbit_fifo.consumers)), ok. 
-down_with_noproc_enqueuer_is_cleaned_up_test(C) -> +down_with_noproc_enqueuer_is_cleaned_up_test(Config) -> State00 = test_init(test), Pid = spawn(fun() -> ok end), - {State0, _, Effects0} = apply(meta(C, 1), rabbit_fifo:make_enqueue(Pid, 1, first), State00), + {State0, _, Effects0} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid}), + rabbit_fifo:make_enqueue(Pid, 1, first), State00), ?ASSERT_EFF({monitor, process, _}, Effects0), - {State1, _, _} = apply(meta(C, 3), {down, Pid, noproc}, State0), + {State1, _, _} = apply(meta(Config, 3), {down, Pid, noproc}, State0), % ensure there are no enqueuers ?assert(0 =:= maps:size(State1#rabbit_fifo.enqueuers)), ok. -discarded_message_without_dead_letter_handler_is_removed_test(C) -> - Cid = {<<"completed_consumer_yields_demonitor_effect_test">>, self()}, - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {State1, Effects1} = check_n(C, Cid, 2, 10, State0), +discarded_message_without_dead_letter_handler_is_removed_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 10, State0), ?ASSERT_EFF({log, [1], _Fun, _}, Effects1), - {_State2, _, Effects2} = apply(meta(C, 1), - rabbit_fifo:make_discard(Cid, [0]), State1), + {_State2, _, Effects2} = apply(meta(Config, 1), + rabbit_fifo:make_discard(CKey, [MsgId]), State1), ?ASSERT_NO_EFF({log, [1], _Fun, _}, Effects2), ok. 
-discarded_message_with_dead_letter_handler_emits_log_effect_test(C) -> - Cid = {<<"cid1">>, self()}, +discarded_message_with_dead_letter_handler_emits_log_effect_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), max_in_memory_length => 0, dead_letter_handler => {at_most_once, {somemod, somefun, [somearg]}}}), - Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, State00), - {State1, Effects1} = check_n(C, Cid, 2, 10, State0), + + Mc = mk_mc(<<"first">>), + Msg1 = rabbit_fifo:make_enqueue(self(), 1, Mc), + {State0, _} = enq(Config, 1, 1, Mc, State00), + {State1, #{key := CKey, + next_msg_id := MsgId}, Effects1} = + checkout(Config, ?LINE, Cid, 10, State0), ?ASSERT_EFF({log, [1], _, _}, Effects1), - {_State2, _, Effects2} = apply(meta(C, 1), rabbit_fifo:make_discard(Cid, [0]), State1), + {_State2, _, Effects2} = apply(meta(Config, 1), + rabbit_fifo:make_discard(CKey, [MsgId]), State1), % assert mod call effect with appended reason and message {value, {log, [1], Fun}} = lists:search(fun (E) -> element(1, E) == log end, Effects2), - ?assertMatch([{mod_call,somemod,somefun,[somearg,rejected,[first]]}], Fun([Msg1])), + [{mod_call, somemod, somefun, [somearg, rejected, [McOut]]}] = Fun([Msg1]), + + ?assertEqual(undefined, mc:get_annotation(acquired_count, McOut)), + ?assertEqual(1, mc:get_annotation(delivery_count, McOut)), + + ok. + +enqueued_msg_with_delivery_count_test(Config) -> + State00 = init(#{name => test, + queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), + max_in_memory_length => 0, + dead_letter_handler => + {at_most_once, {somemod, somefun, [somearg]}}}), + Mc = mc:set_annotation(delivery_count, 2, mk_mc(<<"first">>)), + {#rabbit_fifo{messages = Msgs}, _} = enq(Config, 1, 1, Mc, State00), + ?assertMatch(?MSG(_, #{delivery_count := 2}), rabbit_fifo_q:get(Msgs)), ok. 
get_log_eff(Effs) -> {value, Log} = lists:search(fun (E) -> element(1, E) == log end, Effs), Log. -mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> +mixed_send_msg_and_log_effects_are_correctly_ordered_test(Config) -> Cid = {cid(?FUNCTION_NAME), self()}, State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), @@ -656,12 +829,11 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> {somemod, somefun, [somearg]}}}), %% enqueue two messages Msg1 = rabbit_fifo:make_enqueue(self(), 1, first), - {State0, _} = enq(C, 1, 1, first, State00), + {State0, _} = enq(Config, 1, 1, first, State00), Msg2 = rabbit_fifo:make_enqueue(self(), 2, snd), - {State1, _} = enq(C, 2, 2, snd, State0), + {State1, _} = enq(Config, 2, 2, snd, State0), - {_State2, Effects1} = check_n(C, Cid, 3, 10, State1), - ct:pal("Effects ~w", [Effects1]), + {_State2, _, Effects1} = checkout(Config, ?LINE, Cid, 10, State1), {log, [1, 2], Fun, _} = get_log_eff(Effects1), [{send_msg, _, {delivery, _Cid, [{0,{0,first}},{1,{0,snd}}]}, [local,ra_event]}] = Fun([Msg1, Msg2]), @@ -673,17 +845,17 @@ mixed_send_msg_and_log_effects_are_correctly_ordered_test(C) -> ?ASSERT_NO_EFF({send_msg, _, _, _}, Effects1), ok. 
-tick_test(C) -> +tick_test(Config) -> Cid = {<<"c">>, self()}, Cid2 = {<<"c2">>, self()}, Msg1 = rabbit_fifo:make_enqueue(self(), 1, <<"fst">>), Msg2 = rabbit_fifo:make_enqueue(self(), 2, <<"snd">>), - {S0, _} = enq(C, 1, 1, <<"fst">>, test_init(?FUNCTION_NAME)), - {S1, _} = enq(C, 2, 2, <<"snd">>, S0), - {S2, {MsgId, _}} = deq(C, 3, Cid, unsettled, Msg1, S1), - {S3, {_, _}} = deq(C, 4, Cid2, unsettled, Msg2, S2), - {S4, _, _} = apply(meta(C, 5), rabbit_fifo:make_return(Cid, [MsgId]), S3), + {S0, _} = enq(Config, 1, 1, <<"fst">>, test_init(?FUNCTION_NAME)), + {S1, _} = enq(Config, 2, 2, <<"snd">>, S0), + {S2, {MsgId, _}} = deq(Config, 3, Cid, unsettled, Msg1, S1), + {S3, {_, _}} = deq(Config, 4, Cid2, unsettled, Msg2, S2), + {S4, _, _} = apply(meta(Config, 5), rabbit_fifo:make_return(Cid, [MsgId]), S3), [{aux, {handle_tick, [#resource{}, @@ -700,38 +872,38 @@ tick_test(C) -> ok. -delivery_query_returns_deliveries_test(C) -> +delivery_query_returns_deliveries_test(Config) -> Tag = atom_to_binary(?FUNCTION_NAME, utf8), Cid = {Tag, self()}, - Commands = [ - rabbit_fifo:make_checkout(Cid, {auto, 5, simple_prefetch}, #{}), - rabbit_fifo:make_enqueue(self(), 1, one), - rabbit_fifo:make_enqueue(self(), 2, two), - rabbit_fifo:make_enqueue(self(), 3, tre), - rabbit_fifo:make_enqueue(self(), 4, for) + CKey = ?LINE, + Entries = [ + {CKey, make_checkout(Cid, {auto, {simple_prefetch, 5}}, #{})}, + {?LINE, rabbit_fifo:make_enqueue(self(), 1, one)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 2, two)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 3, tre)}, + {?LINE, rabbit_fifo:make_enqueue(self(), 4, for)} ], - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - {State, _Effects} = run_log(C, test_init(help), Entries), + {State, _Effects} = run_log(Config, test_init(help), Entries), % 3 deliveries are returned - [{0, {_, _}}] = rabbit_fifo:get_checked_out(Cid, 0, 0, State), + [{0, {_, _}}] = rabbit_fifo:get_checked_out(CKey, 0, 0, State), [_, _, 
_] = rabbit_fifo:get_checked_out(Cid, 1, 3, State), ok. -duplicate_delivery_test(C) -> - {State0, _} = enq(C, 1, 1, first, test_init(test)), - {#rabbit_fifo{messages = Messages} = State, _} = enq(C, 2, 1, first, State0), +duplicate_delivery_test(Config) -> + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {#rabbit_fifo{messages = Messages} = State, _} = + enq(Config, 2, 1, first, State0), ?assertEqual(1, rabbit_fifo:query_messages_total(State)), - ?assertEqual(1, lqueue:len(Messages)), + ?assertEqual(1, rabbit_fifo_q:len(Messages)), ok. -state_enter_monitors_and_notifications_test(C) -> +state_enter_monitors_and_notifications_test(Config) -> Oth = spawn(fun () -> ok end), - {State0, _} = enq(C, 1, 1, first, test_init(test)), + {State0, _} = enq(Config, 1, 1, first, test_init(test)), Cid = {<<"adf">>, self()}, OthCid = {<<"oth">>, Oth}, - {State1, _} = check(C, Cid, 2, State0), - {State, _} = check(C, OthCid, 3, State1), + {State1, _, _} = checkout(Config, ?LINE, Cid, 1, State0), + {State, _, _} = checkout(Config, ?LINE, OthCid, 1, State1), Self = self(), Effects = rabbit_fifo:state_enter(leader, State), @@ -749,47 +921,48 @@ state_enter_monitors_and_notifications_test(C) -> ?ASSERT_EFF({monitor, process, _}, Effects), ok. -purge_test(C) -> +purge_test(Config) -> Cid = {<<"purge_test">>, self()}, - {State1, _} = enq(C, 1, 1, first, test_init(test)), - {State2, {purge, 1}, _} = apply(meta(C, 2), rabbit_fifo:make_purge(), State1), - {State3, _} = enq(C, 3, 2, second, State2), + {State1, _} = enq(Config, 1, 1, first, test_init(test)), + {State2, {purge, 1}, _} = apply(meta(Config, 2), rabbit_fifo:make_purge(), State1), + {State3, _} = enq(Config, 3, 2, second, State2), % get returns a reply value {_State4, _, Effs} = - apply(meta(C, 4), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), State3), + apply(meta(Config, 4), make_checkout(Cid, {dequeue, unsettled}, #{}), State3), ?ASSERT_EFF({log, [3], _}, Effs), ok. 
-purge_with_checkout_test(C) -> +purge_with_checkout_test(Config) -> Cid = {<<"purge_test">>, self()}, - {State0, _} = check_auto(C, Cid, 1, test_init(?FUNCTION_NAME)), - {State1, _} = enq(C, 2, 1, <<"first">>, State0), - {State2, _} = enq(C, 3, 2, <<"second">>, State1), + {State0, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1, + test_init(?FUNCTION_NAME)), + {State1, _} = enq(Config, 2, 1, <<"first">>, State0), + {State2, _} = enq(Config, 3, 2, <<"second">>, State1), %% assert message bytes are non zero ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0), ?assert(State2#rabbit_fifo.msg_bytes_enqueue > 0), - {State3, {purge, 1}, _} = apply(meta(C, 2), rabbit_fifo:make_purge(), State2), + {State3, {purge, 1}, _} = apply(meta(Config, 2), rabbit_fifo:make_purge(), State2), ?assert(State2#rabbit_fifo.msg_bytes_checkout > 0), ?assertEqual(0, State3#rabbit_fifo.msg_bytes_enqueue), ?assertEqual(1, rabbit_fifo:query_messages_total(State3)), - #consumer{checked_out = Checked} = maps:get(Cid, State3#rabbit_fifo.consumers), + #consumer{checked_out = Checked} = maps:get(CKey, State3#rabbit_fifo.consumers), ?assertEqual(1, maps:size(Checked)), ok. 
-down_noproc_returns_checked_out_in_order_test(C) -> +down_noproc_returns_checked_out_in_order_test(Config) -> S0 = test_init(?FUNCTION_NAME), %% enqueue 100 S1 = lists:foldl(fun (Num, FS0) -> - {FS, _} = enq(C, Num, Num, Num, FS0), + {FS, _} = enq(Config, Num, Num, Num, FS0), FS end, S0, lists:seq(1, 100)), - ?assertEqual(100, lqueue:len(S1#rabbit_fifo.messages)), + ?assertEqual(100, rabbit_fifo_q:len(S1#rabbit_fifo.messages)), Cid = {<<"cid">>, self()}, - {S2, _} = check(C, Cid, 101, 1000, S1), - #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers), + {S2, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1000, S1), + #consumer{checked_out = Checked} = maps:get(CKey, S2#rabbit_fifo.consumers), ?assertEqual(100, maps:size(Checked)), %% simulate down - {S, _, _} = apply(meta(C, 102), {down, self(), noproc}, S2), + {S, _, _} = apply(meta(Config, 102), {down, self(), noproc}, S2), Returns = lqueue:to_list(S#rabbit_fifo.returns), ?assertEqual(100, length(Returns)), ?assertEqual(0, maps:size(S#rabbit_fifo.consumers)), @@ -797,30 +970,30 @@ down_noproc_returns_checked_out_in_order_test(C) -> ?assertEqual(lists:sort(Returns), Returns), ok. 
-down_noconnection_returns_checked_out_test(C) -> +down_noconnection_returns_checked_out_test(Config) -> S0 = test_init(?FUNCTION_NAME), NumMsgs = 20, S1 = lists:foldl(fun (Num, FS0) -> - {FS, _} = enq(C, Num, Num, Num, FS0), + {FS, _} = enq(Config, Num, Num, Num, FS0), FS end, S0, lists:seq(1, NumMsgs)), - ?assertEqual(NumMsgs, lqueue:len(S1#rabbit_fifo.messages)), + ?assertEqual(NumMsgs, rabbit_fifo_q:len(S1#rabbit_fifo.messages)), Cid = {<<"cid">>, self()}, - {S2, _} = check(C, Cid, 101, 1000, S1), - #consumer{checked_out = Checked} = maps:get(Cid, S2#rabbit_fifo.consumers), + {S2, #{key := CKey}, _} = checkout(Config, ?LINE, Cid, 1000, S1), + #consumer{checked_out = Checked} = maps:get(CKey, S2#rabbit_fifo.consumers), ?assertEqual(NumMsgs, maps:size(Checked)), %% simulate down - {S, _, _} = apply(meta(C, 102), {down, self(), noconnection}, S2), + {S, _, _} = apply(meta(Config, 102), {down, self(), noconnection}, S2), Returns = lqueue:to_list(S#rabbit_fifo.returns), ?assertEqual(NumMsgs, length(Returns)), ?assertMatch(#consumer{checked_out = Ch} when map_size(Ch) == 0, - maps:get(Cid, S#rabbit_fifo.consumers)), + maps:get(CKey, S#rabbit_fifo.consumers)), %% validate returns are in order ?assertEqual(lists:sort(Returns), Returns), ok. 
-single_active_consumer_basic_get_test(C) -> +single_active_consumer_basic_get_test(Config) -> Cid = {?FUNCTION_NAME, self()}, State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, @@ -829,27 +1002,28 @@ single_active_consumer_basic_get_test(C) -> single_active_consumer_on => true}), ?assertEqual(single_active, State0#rabbit_fifo.cfg#cfg.consumer_strategy), ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)), - {State1, _} = enq(C, 1, 1, first, State0), + {State1, _} = enq(Config, 1, 1, first, State0), {_State, {error, {unsupported, single_active_consumer}}} = - apply(meta(C, 2), rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + apply(meta(Config, 2), make_checkout(Cid, {dequeue, unsettled}, #{}), State1), ok. -single_active_consumer_revive_test(C) -> +single_active_consumer_revive_test(Config) -> S0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => true}), Cid1 = {<<"one">>, self()}, Cid2 = {<<"two">>, self()}, - {S1, _} = check_auto(C, Cid1, 1, S0), - {S2, _} = check_auto(C, Cid2, 2, S1), - {S3, _} = enq(C, 3, 1, first, S2), + {S1, #{key := CKey1}, _} = checkout(Config, ?LINE, Cid1, 1, S0), + {S2, #{key := _CKey2}, _} = checkout(Config, ?LINE, Cid2, 1, S1), + {S3, _} = enq(Config, 3, 1, first, S2), %% cancel the active consumer whilst it has a message pending - {S4, _, _} = rabbit_fifo:apply(meta(C, 4), make_checkout(Cid1, cancel, #{}), S3), - {S5, _} = check_auto(C, Cid1, 5, S4), + {S4, _, _} = rabbit_fifo:apply(meta(Config, ?LINE), + make_checkout(Cid1, cancel, #{}), S3), + %% the revived consumer should have the original key + {S5, #{key := CKey1}, _} = checkout(Config, ?LINE, Cid1, 1, S4), - ct:pal("S5 ~tp", [S5]), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S5)), ?assertEqual(1, rabbit_fifo:query_messages_total(S5)), Consumers = 
S5#rabbit_fifo.consumers, @@ -860,12 +1034,12 @@ single_active_consumer_revive_test(C) -> ?assertEqual(1, map_size(Up)), %% settle message and ensure it is handled correctly - {S6, _} = settle(C, Cid1, 6, 0, S5), + {S6, _} = settle(Config, CKey1, 6, 0, S5), ?assertEqual(0, rabbit_fifo:query_messages_checked_out(S6)), ?assertEqual(0, rabbit_fifo:query_messages_total(S6)), %% requeue message and check that is handled - {S6b, _} = return(C, Cid1, 6, 0, S5), + {S6b, _} = return(Config, CKey1, 6, 0, S5), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S6b)), ?assertEqual(1, rabbit_fifo:query_messages_total(S6b)), %% @@ -878,22 +1052,21 @@ single_active_consumer_revive_test(C) -> single_active_consumer_revive_2_test(C) -> S0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => true}), Cid1 = {<<"one">>, self()}, - {S1, _} = check_auto(C, Cid1, 1, S0), + {S1, #{key := CKey}, _} = checkout(C, ?LINE, Cid1, 1, S0), {S2, _} = enq(C, 3, 1, first, S1), %% cancel the active consumer whilst it has a message pending {S3, _, _} = rabbit_fifo:apply(meta(C, 4), make_checkout(Cid1, cancel, #{}), S2), - {S4, _} = check_auto(C, Cid1, 5, S3), + {S4, #{key := CKey}, _} = checkout(C, ?LINE, Cid1, 5, S3), ?assertEqual(1, rabbit_fifo:query_consumer_count(S4)), ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(S4))), ?assertEqual(1, rabbit_fifo:query_messages_total(S4)), ?assertEqual(1, rabbit_fifo:query_messages_checked_out(S4)), - ok. 
-single_active_consumer_test(C) -> +single_active_consumer_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), @@ -903,62 +1076,62 @@ single_active_consumer_test(C) -> ?assertEqual(0, map_size(State0#rabbit_fifo.consumers)), % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, - #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), C1 = {<<"ctag1">>, self()}, C2 = {<<"ctag2">>, self()}, C3 = {<<"ctag3">>, self()}, C4 = {<<"ctag4">>, self()}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {once, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {once, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {once, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {once, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), % the first registered consumer is the active one, the others are waiting ?assertEqual(1, map_size(State1#rabbit_fifo.consumers)), - ?assertMatch(#{C1 := _}, State1#rabbit_fifo.consumers), + ?assertMatch(#{CK1 := _}, State1#rabbit_fifo.consumers), ?assertMatch(#{single_active_consumer_id := C1, single_active_num_waiting_consumers := 3}, rabbit_fifo:overview(State1)), ?assertEqual(3, length(rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C3, 1, rabbit_fifo:query_waiting_consumers(State1))), - ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK2, 1, rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK3, 1, 
rabbit_fifo:query_waiting_consumers(State1))), + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State1))), % cancelling a waiting consumer - {State2, _, Effects1} = apply(meta(C, 2), + {State2, _, Effects1} = apply(meta(Config, ?LINE), make_checkout(C3, cancel, #{}), State1), % the active consumer should still be in place ?assertEqual(1, map_size(State2#rabbit_fifo.consumers)), - ?assertMatch(#{C1 := _}, State2#rabbit_fifo.consumers), + ?assertMatch(#{CK1 := _}, State2#rabbit_fifo.consumers), % the cancelled consumer has been removed from waiting consumers ?assertMatch(#{single_active_consumer_id := C1, single_active_num_waiting_consumers := 2}, rabbit_fifo:overview(State2)), ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), - ?assertNotEqual(false, lists:keyfind(C2, 1, rabbit_fifo:query_waiting_consumers(State2))), - ?assertNotEqual(false, lists:keyfind(C4, 1, rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(CK2, 1, rabbit_fifo:query_waiting_consumers(State2))), + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State2))), % there are some effects to unregister the consumer ?ASSERT_EFF({mod_call, rabbit_quorum_queue, cancel_consumer_handler, [_, Con]}, Con == C3, Effects1), % cancelling the active consumer - {State3, _, Effects2} = apply(meta(C, 3), + {State3, _, Effects2} = apply(meta(Config, ?LINE), make_checkout(C1, cancel, #{}), State2), % the second registered consumer is now the active one ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)), - ?assertMatch(#{C2 := _}, State3#rabbit_fifo.consumers), + ?assertMatch(#{CK2 := _}, State3#rabbit_fifo.consumers), % the new active consumer is no longer in the waiting list ?assertEqual(1, length(rabbit_fifo:query_waiting_consumers(State3))), - ?assertNotEqual(false, lists:keyfind(C4, 1, + ?assertNotEqual(false, lists:keyfind(CK4, 1, rabbit_fifo:query_waiting_consumers(State3))), %% should 
have a cancel consumer handler mod_call effect and %% an active new consumer effect @@ -968,12 +1141,12 @@ single_active_consumer_test(C) -> update_consumer_handler, _}, Effects2), % cancelling the active consumer - {State4, _, Effects3} = apply(meta(C, 4), + {State4, _, Effects3} = apply(meta(Config, ?LINE), make_checkout(C2, cancel, #{}), State3), % the last waiting consumer became the active one ?assertEqual(1, map_size(State4#rabbit_fifo.consumers)), - ?assertMatch(#{C4 := _}, State4#rabbit_fifo.consumers), + ?assertMatch(#{CK4 := _}, State4#rabbit_fifo.consumers), % the waiting consumer list is now empty ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), % there are some effects to unregister the consumer and @@ -984,7 +1157,7 @@ single_active_consumer_test(C) -> update_consumer_handler, _}, Effects3), % cancelling the last consumer - {State5, _, Effects4} = apply(meta(C, 5), + {State5, _, Effects4} = apply(meta(Config, ?LINE), make_checkout(C4, cancel, #{}), State4), % no active consumer anymore @@ -997,33 +1170,34 @@ single_active_consumer_test(C) -> ok. 
-single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> +single_active_consumer_cancel_consumer_when_channel_is_down_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), - release_cursor_interval => 0, - single_active_consumer_on => true}), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + C1 = {<<"ctag1">>, Pid1}, + C2 = {<<"ctag2">>, Pid2}, + C3 = {<<"ctag3">>, Pid2}, + C4 = {<<"ctag4">>, Pid3}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}}), + % the channel of the active consumer goes down + {?LINE, {down, Pid1, noproc}} + ], + {State2, Effects} = run_log(Config, State0, Entries), - [C1, C2, C3, C4] = Consumers = - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}], - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, ChannelId}, {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, Consumers), - - % the channel of the active consumer goes down - {State2, _, Effects} = apply(meta(C, 2), {down, Pid1, noproc}, State1), + % {State2, _, Effects} = apply(meta(Config, 2), {down, Pid1, noproc}, State1), % fell back to another consumer ?assertEqual(1, 
map_size(State2#rabbit_fifo.consumers)), % there are still waiting consumers @@ -1035,8 +1209,11 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> ?ASSERT_EFF({mod_call, rabbit_quorum_queue, update_consumer_handler, _}, Effects), + ct:pal("STate2 ~p", [State2]), % the channel of the active consumer and a waiting consumer goes down - {State3, _, Effects2} = apply(meta(C, 3), {down, Pid2, noproc}, State2), + {State3, _, Effects2} = apply(meta(Config, ?LINE), {down, Pid2, noproc}, State2), + ct:pal("STate3 ~p", [State3]), + ct:pal("Effects2 ~p", [Effects2]), % fell back to another consumer ?assertEqual(1, map_size(State3#rabbit_fifo.consumers)), % no more waiting consumer @@ -1050,7 +1227,8 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> update_consumer_handler, _}, Effects2), % the last channel goes down - {State4, _, Effects3} = apply(meta(C, 4), {down, Pid3, doesnotmatter}, State3), + {State4, _, Effects3} = apply(meta(Config, ?LINE), + {down, Pid3, doesnotmatter}, State3), % no more consumers ?assertEqual(0, map_size(State4#rabbit_fifo.consumers)), ?assertEqual(0, length(rabbit_fifo:query_waiting_consumers(State4))), @@ -1060,33 +1238,22 @@ single_active_consumer_cancel_consumer_when_channel_is_down_test(C) -> ok. 
-single_active_returns_messages_on_noconnection_test(C) -> - R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +single_active_returns_messages_on_noconnection_test(Config) -> + R = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1], - ConsumerIds = [{_, DownPid}] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], % adding some consumers - State1 = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {auto, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), - {State2, _} = enq(C, 4, 1, msg1, State1), + {CK1, {_, DownPid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _} = enq(Config, 4, 1, msg1, State1), % simulate node goes down - {State3, _, _} = apply(meta(C, 5), {down, DownPid, noconnection}, State2), + {State3, _, _} = apply(meta(Config, ?LINE), {down, DownPid, noconnection}, State2), + ct:pal("state3 ~p", [State3]), %% assert the consumer is up ?assertMatch([_], lqueue:to_list(State3#rabbit_fifo.returns)), ?assertMatch([{_, #consumer{checked_out = Checked, @@ -1096,56 +1263,47 @@ single_active_returns_messages_on_noconnection_test(C) -> ok. 
-single_active_consumer_replaces_consumer_when_down_noconnection_test(C) -> +single_active_consumer_replaces_consumer_when_down_noconnection_test(Config) -> R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1, n2, node()], - ConsumerIds = [C1 = {_, DownPid}, C2, _C3] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], - % adding some consumers - State1a = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {once, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), + {CK1, {_, DownPid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + {CK2, C2} = {?LINE, {?LINE_B, test_util:fake_pid(n2)}}, + {CK3, C3} = {?LINE, {?LINE_B, test_util:fake_pid(n3)}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {?LINE, rabbit_fifo:make_enqueue(self(), 1, msg)} + ], + {State1, _} = run_log(Config, State0, Entries), %% assert the consumer is up - ?assertMatch(#{C1 := #consumer{status = up}}, - State1a#rabbit_fifo.consumers), - - {State1, _} = enq(C, 10, 1, msg, State1a), + ?assertMatch(#{CK1 := #consumer{status = up}}, + State1#rabbit_fifo.consumers), % simulate node goes down - {State2, _, _} = apply(meta(C, 5), {down, DownPid, noconnection}, State1), + {State2, _, _} = apply(meta(Config, ?LINE), + {down, DownPid, noconnection}, State1), %% assert a new consumer is in place and it is up - ?assertMatch([{C2, #consumer{status = up, - checked_out = Ch}}] + ?assertMatch([{CK2, #consumer{status = up, + checked_out = Ch}}] when map_size(Ch) == 1, maps:to_list(State2#rabbit_fifo.consumers)), %% the disconnected consumer has been returned to 
waiting - ?assert(lists:any(fun ({Con,_}) -> Con =:= C1 end, + ?assert(lists:any(fun ({Con, _}) -> Con =:= CK1 end, rabbit_fifo:query_waiting_consumers(State2))), ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State2))), % simulate node comes back up - {State3, _, _} = apply(meta(C, 2), {nodeup, node(DownPid)}, State2), + {State3, _, _} = apply(meta(Config, 2), {nodeup, node(DownPid)}, State2), %% the consumer is still active and the same as before - ?assertMatch([{C2, #consumer{status = up}}], + ?assertMatch([{CK2, #consumer{status = up}}], maps:to_list(State3#rabbit_fifo.consumers)), % the waiting consumers should be un-suspected ?assertEqual(2, length(rabbit_fifo:query_waiting_consumers(State3))), @@ -1154,190 +1312,167 @@ single_active_consumer_replaces_consumer_when_down_noconnection_test(C) -> end, rabbit_fifo:query_waiting_consumers(State3)), ok. -single_active_consumer_all_disconnected_test(C) -> +single_active_consumer_all_disconnected_test(Config) -> R = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => R, release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - Nodes = [n1, n2], - ConsumerIds = [C1 = {_, C1Pid}, C2 = {_, C2Pid}] = - [begin - B = atom_to_binary(N, utf8), - {<<"ctag_", B/binary>>, - test_util:fake_pid(N)} - end || N <- Nodes], - % adding some consumers - State1 = lists:foldl( - fun(CId, Acc0) -> - {Acc, _, _} = - apply(Meta, - make_checkout(CId, - {once, 1, simple_prefetch}, #{}), - Acc0), - Acc - end, State0, ConsumerIds), - - %% assert the consumer is up - ?assertMatch(#{C1 := #consumer{status = up}}, State1#rabbit_fifo.consumers), - % simulate node goes down - {State2, _, _} = apply(meta(C, 5), {down, C1Pid, noconnection}, State1), - %% assert the consumer fails over to the consumer on n2 - ?assertMatch(#{C2 := #consumer{status = up}}, State2#rabbit_fifo.consumers), - {State3, _, _} = apply(meta(C, 6), {down, C2Pid, 
noconnection}, State2), - %% assert these no active consumer after both nodes are maked as down - ?assertMatch([], maps:to_list(State3#rabbit_fifo.consumers)), - %% n2 comes back - {State4, _, _} = apply(meta(C, 7), {nodeup, node(C2Pid)}, State3), - %% ensure n2 is the active consumer as this node as been registered - %% as up again - ?assertMatch([{{<<"ctag_n2">>, _}, #consumer{status = up, - credit = 1}}], - maps:to_list(State4#rabbit_fifo.consumers)), - ok. - -single_active_consumer_state_enter_leader_include_waiting_consumers_test(C) -> + {CK1, {_, C1Pid} = C1} = {?LINE, {?LINE_B, test_util:fake_pid(n1)}}, + {CK2, {_, C2Pid} = C2} = {?LINE, {?LINE_B, test_util:fake_pid(n2)}}, + Entries = + [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}}), + {?LINE, {down, C1Pid, noconnection}}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}}), + {?LINE, {down, C2Pid, noconnection}}, + ?ASSERT(#rabbit_fifo{consumers = C} when map_size(C) == 0), + {?LINE, {nodeup, node(C2Pid)}}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up, + credit = 1}}}) + ], + {_State1, _} = run_log(Config, State0, Entries), + ok. 
+ +single_active_consumer_state_enter_leader_include_waiting_consumers_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => - rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), - - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [{<<"ctag1">>, Pid1}, - {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, - {<<"ctag4">>, Pid3}]), - + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + C1 = {<<"ctag1">>, Pid1}, + C2 = {<<"ctag2">>, Pid2}, + C3 = {<<"ctag3">>, Pid2}, + C4 = {<<"ctag4">>, Pid3}, + CK1 = ?LINE, + CK2 = ?LINE, + CK3 = ?LINE, + CK4 = ?LINE, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Effects = rabbit_fifo:state_enter(leader, State1), %% 2 effects for each consumer process (channel process), 1 effect for the node, ?assertEqual(2 * 3 + 1 + 1 + 1, length(Effects)). 
-single_active_consumer_state_enter_eol_include_waiting_consumers_test(C) -> - Resource = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +single_active_consumer_state_enter_eol_include_waiting_consumers_test(Config) -> + Resource = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), State0 = init(#{name => ?FUNCTION_NAME, queue_resource => Resource, release_cursor_interval => 0, single_active_consumer_on => true}), - DummyFunction = fun() -> ok end, - Pid1 = spawn(DummyFunction), - Pid2 = spawn(DummyFunction), - Pid3 = spawn(DummyFunction), - - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - + Pid1 = spawn(fun() -> ok end), + Pid2 = spawn(fun() -> ok end), + Pid3 = spawn(fun() -> ok end), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Effects = rabbit_fifo:state_enter(eol, State1), %% 1 effect for each consumer process (channel process), %% 1 effect for eol to handle rabbit_fifo_usage entries - ?assertEqual(4, length(Effects)). + ?assertEqual(4, length(Effects)), + ok. 
-query_consumers_test(C) -> +query_consumers_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), release_cursor_interval => 0, single_active_consumer_on => false}), - % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + {CK3, C3} = {?LINE, {?LINE_B, self()}}, + {CK4, C4} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), Consumers0 = State1#rabbit_fifo.consumers, - Consumer = maps:get({<<"ctag2">>, self()}, Consumers0), - Consumers1 = maps:put({<<"ctag2">>, self()}, - Consumer#consumer{status = suspected_down}, Consumers0), + Consumer = maps:get(CK2, Consumers0), + Consumers1 = maps:put(CK2, Consumer#consumer{status = suspected_down}, + Consumers0), State2 = State1#rabbit_fifo{consumers = Consumers1}, ?assertEqual(3, rabbit_fifo:query_consumer_count(State2)), Consumers2 = rabbit_fifo:query_consumers(State2), ?assertEqual(4, maps:size(Consumers2)), - maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> - ?assertEqual(self(), Pid), - case Tag of - <<"ctag2">> -> - ?assertNot(Active), - ?assertEqual(suspected_down, ActivityStatus); - _ -> - ?assert(Active), - ?assertEqual(up, ActivityStatus) - end - end, [], Consumers2). 
- -query_consumers_when_single_active_consumer_is_on_test(C) -> + maps:fold(fun(Key, {Pid, _Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> + ?assertEqual(self(), Pid), + case Key of + CK2 -> + ?assertNot(Active), + ?assertEqual(suspected_down, ActivityStatus); + _ -> + ?assert(Active), + ?assertEqual(up, ActivityStatus) + end + end, [], Consumers2), + ok. + +query_consumers_when_single_active_consumer_is_on_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - Meta = meta(C, 1), - % adding some consumers - AddConsumer = fun(CTag, State) -> - {NewState, _, _} = apply( - Meta, - make_checkout({CTag, self()}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, [<<"ctag1">>, <<"ctag2">>, <<"ctag3">>, <<"ctag4">>]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + {CK3, C3} = {?LINE, {?LINE_B, self()}}, + {CK4, C4} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), ?assertEqual(4, rabbit_fifo:query_consumer_count(State1)), Consumers = rabbit_fifo:query_consumers(State1), ?assertEqual(4, maps:size(Consumers)), - maps:fold(fun(_Key, {Pid, Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> + maps:fold(fun(Key, {Pid, _Tag, _, _, Active, ActivityStatus, _, _}, _Acc) -> ?assertEqual(self(), Pid), - case Tag of - <<"ctag1">> -> + case Key of + CK1 -> ?assert(Active), ?assertEqual(single_active, ActivityStatus); _ -> ?assertNot(Active), 
?assertEqual(waiting, ActivityStatus) end - end, [], Consumers). + end, [], Consumers), + ok. -active_flag_updated_when_consumer_suspected_unsuspected_test(C) -> +active_flag_updated_when_consumer_suspected_unsuspected_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), - release_cursor_interval => 0, - single_active_consumer_on => false}), + queue_resource => rabbit_misc:r("/", queue, + ?FUNCTION_NAME_B), + release_cursor_interval => 0, + single_active_consumer_on => false}), DummyFunction = fun() -> ok end, Pid1 = spawn(DummyFunction), @@ -1345,32 +1480,34 @@ active_flag_updated_when_consumer_suspected_unsuspected_test(C) -> Pid3 = spawn(DummyFunction), % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = - apply( - meta(C, 1), - rabbit_fifo:make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, - #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - - {State2, _, Effects2} = apply(meta(C, 3), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _, Effects2} = apply(meta(Config, 3), {down, Pid1, noconnection}, State1), - % 1 effect to update the metrics of each consumer (they belong to the same node), 1 more effect to monitor the node, 1 more decorators effect + % 1 effect to update the metrics of each consumer + % (they belong to the same node), + % 1 more effect to monitor the 
node, + % 1 more decorators effect ?assertEqual(4 + 1, length(Effects2)), - {_, _, Effects3} = apply(meta(C, 4), {nodeup, node(self())}, State2), - % for each consumer: 1 effect to update the metrics, 1 effect to monitor the consumer PID, 1 more decorators effect - ?assertEqual(4 + 4, length(Effects3)). + {_, _, Effects3} = apply(meta(Config, 4), {nodeup, node(self())}, State2), + % for each consumer: 1 effect to update the metrics, + % 1 effect to monitor the consumer PID, 1 more decorators effect + ?assertEqual(4 + 4, length(Effects3)), + ok. -active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(C) -> +active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_consumer_is_on_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), @@ -1380,200 +1517,574 @@ active_flag_not_updated_when_consumer_suspected_unsuspected_and_single_active_co Pid3 = spawn(DummyFunction), % adding some consumers - AddConsumer = fun({CTag, ChannelId}, State) -> - {NewState, _, _} = apply( - meta(C, 1), - make_checkout({CTag, ChannelId}, - {once, 1, simple_prefetch}, #{}), - State), - NewState - end, - State1 = lists:foldl(AddConsumer, State0, - [{<<"ctag1">>, Pid1}, {<<"ctag2">>, Pid2}, - {<<"ctag3">>, Pid2}, {<<"ctag4">>, Pid3}]), - - {State2, _, Effects2} = apply(meta(C, 2), {down, Pid1, noconnection}, State1), + {CK1, C1} = {?LINE, {?LINE_B, Pid1}}, + {CK2, C2} = {?LINE, {?LINE_B, Pid2}}, + {CK3, C3} = {?LINE, {?LINE_B, Pid2}}, + {CK4, C4} = {?LINE, {?LINE_B, Pid3}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{})}, + {CK4, make_checkout(C4, {auto, 
{simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), + {State2, _, Effects2} = apply(meta(Config, 2), {down, Pid1, noconnection}, State1), % one monitor and one consumer status update (deactivated) ?assertEqual(2, length(Effects2)), - {_, _, Effects3} = apply(meta(C, 3), {nodeup, node(self())}, State2), + {_, _, Effects3} = apply(meta(Config, 3), {nodeup, node(self())}, State2), % for each consumer: 1 effect to monitor the consumer PID - ?assertEqual(5, length(Effects3)). + ?assertEqual(5, length(Effects3)), + ok. -single_active_cancelled_with_unacked_test(C) -> +single_active_cancelled_with_unacked_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - C1 = {<<"ctag1">>, self()}, - C2 = {<<"ctag2">>, self()}, - % adding some consumers - AddConsumer = fun(Con, S0) -> - {S, _, _} = apply( - meta(C, 1), - make_checkout(Con, - {auto, 1, simple_prefetch}, - #{}), - S0), - S - end, - State1 = lists:foldl(AddConsumer, State0, [C1, C2]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), %% enqueue 2 messages - {State2, _Effects2} = enq(C, 3, 1, msg1, State1), - {State3, _Effects3} = enq(C, 4, 2, msg2, State2), + {State2, _Effects2} = enq(Config, 3, 1, msg1, State1), + {State3, _Effects3} = enq(Config, 4, 2, msg2, State2), %% one should be checked ou to C1 %% cancel C1 - {State4, _, _} = apply(meta(C, 5), + {State4, _, _} = apply(meta(Config, ?LINE), make_checkout(C1, cancel, #{}), State3), %% C2 should be the active consumer - ?assertMatch(#{C2 := #consumer{status = up, - 
checked_out = #{0 := _}}}, + ?assertMatch(#{CK2 := #consumer{status = up, + checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), %% C1 should be a cancelled consumer - ?assertMatch(#{C1 := #consumer{status = cancelled, - cfg = #consumer_cfg{lifetime = once}, - checked_out = #{0 := _}}}, + ?assertMatch(#{CK1 := #consumer{status = cancelled, + cfg = #consumer_cfg{lifetime = once}, + checked_out = #{0 := _}}}, State4#rabbit_fifo.consumers), ?assertMatch([], rabbit_fifo:query_waiting_consumers(State4)), %% Ack both messages - {State5, _Effects5} = settle(C, C1, 1, 0, State4), + {State5, _Effects5} = settle(Config, CK1, ?LINE, 0, State4), %% C1 should now be cancelled - {State6, _Effects6} = settle(C, C2, 2, 0, State5), + {State6, _Effects6} = settle(Config, CK2, ?LINE, 0, State5), %% C2 should remain - ?assertMatch(#{C2 := #consumer{status = up}}, + ?assertMatch(#{CK2 := #consumer{status = up}}, State6#rabbit_fifo.consumers), %% C1 should be gone - ?assertNotMatch(#{C1 := _}, + ?assertNotMatch(#{CK1 := _}, State6#rabbit_fifo.consumers), ?assertMatch([], rabbit_fifo:query_waiting_consumers(State6)), ok. 
-single_active_with_credited_v1_test(C) -> +single_active_with_credited_v1_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), - C1 = {<<"ctag1">>, self()}, - C2 = {<<"ctag2">>, self()}, - % adding some consumers - AddConsumer = fun(Con, S0) -> - {S, _, _} = apply( - meta(C, 1), - make_checkout(Con, - {auto, 0, credited}, - #{}), - S0), - S - end, - State1 = lists:foldl(AddConsumer, State0, [C1, C2]), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = [ + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})} + ], + {State1, _} = run_log(Config, State0, Entries), %% add some credit - C1Cred = rabbit_fifo:make_credit(C1, 5, 0, false), - {State2, _, _Effects2} = apply(meta(C, 3), C1Cred, State1), - C2Cred = rabbit_fifo:make_credit(C2, 4, 0, false), - {State3, _} = apply(meta(C, 4), C2Cred, State2), + C1Cred = rabbit_fifo:make_credit(CK1, 5, 0, false), + {State2, _, _Effects2} = apply(meta(Config, ?LINE), C1Cred, State1), + C2Cred = rabbit_fifo:make_credit(CK2, 4, 0, false), + {State3, _} = apply(meta(Config, ?LINE), C2Cred, State2), %% both consumers should have credit - ?assertMatch(#{C1 := #consumer{credit = 5}}, + ?assertMatch(#{CK1 := #consumer{credit = 5}}, State3#rabbit_fifo.consumers), - ?assertMatch([{C2, #consumer{credit = 4}}], + ?assertMatch([{CK2, #consumer{credit = 4}}], rabbit_fifo:query_waiting_consumers(State3)), ok. 
-single_active_with_credited_v2_test(C) -> +single_active_with_credited_v2_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME)), + queue_resource => rabbit_misc:r("/", queue, + ?FUNCTION_NAME_B), release_cursor_interval => 0, single_active_consumer_on => true}), C1 = {<<"ctag1">>, self()}, - {State1, _, _} = apply(meta(C, 1), - make_checkout(C1, - {auto, 0, credited}, - %% denotes that credit API v2 is used - #{initial_delivery_count => 0}), - State0), + {State1, {ok, #{key := CKey1}}, _} = + apply(meta(Config, 1), + make_checkout(C1, {auto, {credited, 0}}, #{}), State0), C2 = {<<"ctag2">>, self()}, - {State2, _, _} = apply(meta(C, 2), - make_checkout(C2, - {auto, 0, credited}, - %% denotes that credit API v2 is used - #{initial_delivery_count => 0}), - State1), + {State2, {ok, #{key := CKey2}}, _} = + apply(meta(Config, 2), + make_checkout(C2, {auto, {credited, 0}}, #{}), State1), %% add some credit - C1Cred = rabbit_fifo:make_credit(C1, 5, 0, false), - {State3, ok, Effects1} = apply(meta(C, 3), C1Cred, State2), + C1Cred = rabbit_fifo:make_credit(CKey1, 5, 0, false), + {State3, ok, Effects1} = apply(meta(Config, 3), C1Cred, State2), ?assertEqual([{send_msg, self(), - {credit_reply, <<"ctag1">>, _DeliveryCount = 0, _Credit = 5, _Available = 0, _Drain = false}, + {credit_reply, <<"ctag1">>, _DeliveryCount = 0, _Credit = 5, + _Available = 0, _Drain = false}, ?DELIVERY_SEND_MSG_OPTS}], Effects1), - C2Cred = rabbit_fifo:make_credit(C2, 4, 0, false), - {State, ok, Effects2} = apply(meta(C, 4), C2Cred, State3), + C2Cred = rabbit_fifo:make_credit(CKey2, 4, 0, false), + {State, ok, Effects2} = apply(meta(Config, 4), C2Cred, State3), ?assertEqual({send_msg, self(), - {credit_reply, <<"ctag2">>, _DeliveryCount = 0, _Credit = 4, _Available = 0, _Drain = false}, + {credit_reply, <<"ctag2">>, _DeliveryCount = 0, _Credit = 4, + _Available = 0, _Drain = false}, ?DELIVERY_SEND_MSG_OPTS}, Effects2), %% 
both consumers should have credit - ?assertMatch(#{C1 := #consumer{credit = 5}}, + ?assertMatch(#{CKey1 := #consumer{credit = 5}}, State#rabbit_fifo.consumers), - ?assertMatch([{C2, #consumer{credit = 4}}], - rabbit_fifo:query_waiting_consumers(State)). + ?assertMatch([{CKey2, #consumer{credit = 4}}], + rabbit_fifo:query_waiting_consumers(State)), + ok. + +single_active_settle_after_cancel_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + % % adding some consumers + E1Idx = ?LINE, + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 1, + status = up, + checked_out = Ch}}} + when map_size(Ch) == 1), + %% add another consumer + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [{CK2, _}]}), + + %% cancel C1 + {?LINE, make_checkout(C1, cancel, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = cancelled}, + CK2 := #consumer{status = up}}, + waiting_consumers = []}), + %% settle the message, C1 one should be completely removed + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}} = C, + waiting_consumers = []} + when map_size(C) == 1) + + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+
+single_active_consumer_priority_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
 
-register_enqueuer_test(C) ->
+    Pid1 = test_util:fake_pid(node()),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    E1Idx = ?LINE,
+    {CK3, C3} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% add a consumer with a higher priority, assert it becomes active
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+
+         %% enqueue a message
+         {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{next_msg_id = 1,
+                                                             status = up,
+                                                             checked_out = Ch}}}
+                   when map_size(Ch) == 1),
+
+         %% add an even higher consumer, but the current active has a message pending
+         %% so can't be immediately replaced
+         {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{priority => 3})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = quiescing}},
+                              waiting_consumers = [_, _]}),
+         %% settle the message, the higher priority should become the active,
+         %% completing the replacement
+         {?LINE, rabbit_fifo:make_settle(CK2, [0])},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK3 := #consumer{status = up,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [_, _]}
+                   when map_size(Ch) == 0)
+
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+
+    ok.
+ + +single_active_consumer_priority_cancel_active_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + {CK3, C3} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% add two consumers each with a lower priority + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + {CK3, make_checkout(C3, {auto, {simple_prefetch, 1}}, #{priority => 0})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [_, _]}), + + {?LINE, make_checkout(C1, cancel, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}, + waiting_consumers = [{CK3, _}]}) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+
+single_active_consumer_update_priority_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, self()}},
+    {CK2, C2} = {?LINE, {?LINE_B, self()}},
+    Entries =
+        [
+         %% add a consumer
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+         %% add another consumer with lower priority
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 1})},
+         %% update the current active consumer to lower priority
+         {?LINE, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 0})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+         %% back to original priority
+         {?LINE, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = [_]}),
+         {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+         %% update priority for C2
+         {?LINE, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 3})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+         %% settle should cause the existing active to be replaced
+         {?LINE, rabbit_fifo:make_settle(CK1, [0])},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK2 := #consumer{status = up}},
+                              waiting_consumers = [{CK1, _}]})
+        ],
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+    ok.
+ +single_active_consumer_quiescing_resumes_after_cancel_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + {CK2, C2} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% enqueue a message + {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + + %% add a consumer with a higher priority, current is quiescing + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, _}]}), + + %% C2 cancels + {?LINE, make_checkout(C2, cancel, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing, + checked_out = Ch}}, + waiting_consumers = []} + when map_size(Ch) == 1), + + %% settle + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up, + credit = 1}}, + waiting_consumers = []}) + ], + + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+ +single_active_consumer_higher_waiting_disconnected_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + C1Pid = test_util:fake_pid(n1@banana), + C2Pid = test_util:fake_pid(n2@banana), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, C1Pid}}, + {CK2, C2} = {?LINE, {?LINE_B, C2Pid}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% enqueue a message + {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + + %% add a consumer with a higher priority, current is quiescing + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, _}]}), + %% C2 is disconnected, + {?LINE, {down, C2Pid, noconnection}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, #consumer{status = suspected_down}}]}), + %% settle + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + %% C1 should be reactivated + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up, + credit = 1}}, + waiting_consumers = [_]}), + %% C2 comes back up and takes over + {?LINE, {nodeup, n2@banana}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}, + waiting_consumers = [{CK1, #consumer{status = up}}]}) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+ +single_active_consumer_quiescing_disconnected_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + Pid1 = test_util:fake_pid(node()), + C1Pid = test_util:fake_pid(n1@banana), + C2Pid = test_util:fake_pid(n2@banana), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, C1Pid}}, + {CK2, C2} = {?LINE, {?LINE_B, C2Pid}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{priority => 1})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = []}), + + %% enqueue a message + {?LINE , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + + %% add a consumer with a higher priority, current is quiescing + {CK2, make_checkout(C2, {auto, {simple_prefetch, 1}}, #{priority => 2})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing}}, + waiting_consumers = [{CK2, _}]}), + %% C1 is disconnected, + {?LINE, {down, C1Pid, noconnection}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK2 := #consumer{status = up, + checked_out = Ch2}}, + waiting_consumers = + [{CK1, #consumer{status = suspected_down, + checked_out = Ch1}}]} + when map_size(Ch2) == 1 andalso + map_size(Ch1) == 0), + %% C1 settles which will be ignored + {?LINE, rabbit_fifo:make_settle(CK1, [0])}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK2 := #consumer{status = up, + checked_out = Ch2}}, + waiting_consumers = + [{CK1, #consumer{status = suspected_down, + checked_out = Ch1}}]} + when map_size(Ch2) == 1 andalso + map_size(Ch1) == 0), + % %% C1 comes back up + {?LINE, {nodeup, n1@banana}}, + ?ASSERT( + #rabbit_fifo{consumers = #{CK2 := #consumer{status = up}}, + waiting_consumers = [{CK1, #consumer{status = up}}]}) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. 
+
+single_active_consumer_quiescing_receives_no_further_messages_test(Config) ->
+    S0 = init(#{name => ?FUNCTION_NAME,
+                queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B),
+                single_active_consumer_on => true}),
+
+    Pid1 = test_util:fake_pid(node()),
+    C1Pid = test_util:fake_pid(n1@banana),
+    C2Pid = test_util:fake_pid(n2@banana),
+    % % adding some consumers
+    {CK1, C1} = {?LINE, {?LINE_B, C1Pid}},
+    {CK2, C2} = {?LINE, {?LINE_B, C2Pid}},
+    Entries =
+        [
+         %% add a consumer, with plenty of prefetch
+         {CK1, make_checkout(C1, {auto, {simple_prefetch, 10}}, #{priority => 1})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}},
+                              waiting_consumers = []}),
+
+         %% enqueue a message
+         {?LINE, rabbit_fifo:make_enqueue(Pid1, 1, msg1)},
+
+         %% add a consumer with a higher priority, current is quiescing
+         {CK2, make_checkout(C2, {auto, {simple_prefetch, 10}}, #{priority => 2})},
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1),
+
+         %% enqueue another message
+         {?LINE, rabbit_fifo:make_enqueue(Pid1, 2, msg2)},
+         %% message should not be assigned to quiescing consumer
+         ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = quiescing,
+                                                             checked_out = Ch}},
+                              waiting_consumers = [{CK2, _}]}
+                   when map_size(Ch) == 1)
+
+        ],
+
+    {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1),
+    ok.
+ +single_active_consumer_credited_favour_with_credit_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => true}), + + C1Pid = test_util:fake_pid(n1@banana), + C2Pid = test_util:fake_pid(n2@banana), + C3Pid = test_util:fake_pid(n3@banana), + % % adding some consumers + {CK1, C1} = {?LINE, {?LINE_B, C1Pid}}, + {CK2, C2} = {?LINE, {?LINE_B, C2Pid}}, + {CK3, C3} = {?LINE, {?LINE_B, C3Pid}}, + Entries = + [ + %% add a consumer + {CK1, make_checkout(C1, {auto, {credited, 0}}, #{priority => 3})}, + {CK2, make_checkout(C2, {auto, {credited, 0}}, #{priority => 1})}, + {CK3, make_checkout(C3, {auto, {credited, 0}}, #{priority => 1})}, + %% waiting are sorted by arrival order + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [{CK2, _}, {CK3, _}]}), + + %% give credit to C3 + {?LINE , rabbit_fifo:make_credit(CK3, 1, 0, false)}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{status = up}}, + waiting_consumers = [{CK3, _}, {CK2, _}]}), + %% cancel the current active consumer + {CK1, make_checkout(C1, cancel, #{})}, + %% C3 should become active due having credits + ?ASSERT(#rabbit_fifo{consumers = #{CK3 := #consumer{status = up, + credit = 1}}, + waiting_consumers = [{CK2, _}]}) + ], + + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + ok. 
+ + + +register_enqueuer_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), max_length => 2, max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), + {State1, ok, [_]} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid1}), + make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), %% register another enqueuer shoudl be ok Pid2 = test_util:fake_pid(node()), - {State3, ok, [_]} = apply(meta(C, 3), make_register_enqueuer(Pid2), State2), + {State3, ok, [_]} = apply(meta(Config, 3, ?LINE, {notify, 3, Pid2}), + make_register_enqueuer(Pid2), State2), - {State4, ok, _} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 2, two), State3), - {State5, ok, Efx} = apply(meta(C, 5), rabbit_fifo:make_enqueue(Pid1, 3, three), State4), - % ct:pal("Efx ~tp", [Efx]), + {State4, ok, _} = apply(meta(Config, 4, ?LINE, {notify, 4, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State3), + {State5, ok, Efx} = apply(meta(Config, 5, ?LINE, {notify, 4, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State4), %% validate all registered enqueuers are notified of overflow state - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid2, Efx), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid2, Efx), %% this time, registry should return 
reject_publish - {State6, reject_publish, [_]} = apply(meta(C, 6), make_register_enqueuer( - test_util:fake_pid(node())), State5), + {State6, reject_publish, [_]} = + apply(meta(Config, 6), make_register_enqueuer( + test_util:fake_pid(node())), State5), ?assertMatch(#{num_enqueuers := 3}, rabbit_fifo:overview(State6)), - Pid3 = test_util:fake_pid(node()), %% remove two messages this should make the queue fall below the 0.8 limit {State7, _, Efx7} = - apply(meta(C, 7), + apply(meta(Config, 7), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State6), ?ASSERT_EFF({log, [_], _}, Efx7), - % ct:pal("Efx7 ~tp", [_Efx7]), {State8, _, Efx8} = - apply(meta(C, 8), + apply(meta(Config, 8), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State7), ?ASSERT_EFF({log, [_], _}, Efx8), - % ct:pal("Efx8 ~tp", [Efx8]), %% validate all registered enqueuers are notified of overflow state ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx8), ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid2, Efx8), {_State9, _, Efx9} = - apply(meta(C, 9), + apply(meta(Config, 9), rabbit_fifo:make_checkout({<<"a">>, Pid3}, {dequeue, settled}, #{}), State8), ?ASSERT_EFF({log, [_], _}, Efx9), @@ -1581,27 +2092,29 @@ register_enqueuer_test(C) -> ?ASSERT_NO_EFF({send_msg, P, go, [ra_event]}, P == Pid2, Efx9), ok. 
-reject_publish_purge_test(C) -> +reject_publish_purge_test(Config) -> State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), max_length => 2, max_in_memory_length => 0, overflow_strategy => reject_publish}), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), - {State3, ok, _} = apply(meta(C, 3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2), - {State4, ok, Efx} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3), + {State1, ok, [_]} = apply(meta(Config, 1), make_register_enqueuer(Pid1), State0), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State3, ok, _} = apply(meta(Config, 3, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State2), + {State4, ok, Efx} = apply(meta(Config, 4, ?LINE, {notify, 2, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State3), % ct:pal("Efx ~tp", [Efx]), ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), - {_State5, {purge, 3}, Efx1} = apply(meta(C, 5), rabbit_fifo:make_purge(), State4), + {_State5, {purge, 3}, Efx1} = apply(meta(Config, 5), rabbit_fifo:make_purge(), State4), ?ASSERT_EFF({send_msg, P, {queue_status, go}, [ra_event]}, P == Pid1, Efx1), ok. 
-reject_publish_applied_after_limit_test(C) -> - QName = rabbit_misc:r("/", queue, atom_to_binary(?FUNCTION_NAME, utf8)), +reject_publish_applied_after_limit_test(Config) -> + QName = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), InitConf = #{name => ?FUNCTION_NAME, max_in_memory_length => 0, queue_resource => QName @@ -1609,92 +2122,123 @@ reject_publish_applied_after_limit_test(C) -> State0 = init(InitConf), %% simply registering should be ok when we're below limit Pid1 = test_util:fake_pid(node()), - {State1, ok, [_]} = apply(meta(C, 1), make_register_enqueuer(Pid1), State0), - {State2, ok, _} = apply(meta(C, 2), rabbit_fifo:make_enqueue(Pid1, 1, one), State1), - {State3, ok, _} = apply(meta(C, 3), rabbit_fifo:make_enqueue(Pid1, 2, two), State2), - {State4, ok, Efx} = apply(meta(C, 4), rabbit_fifo:make_enqueue(Pid1, 3, three), State3), - % ct:pal("Efx ~tp", [Efx]), - ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx), + {State1, ok, [_]} = apply(meta(Config, 1, ?LINE, {notify, 1, Pid1}), + make_register_enqueuer(Pid1), State0), + {State2, ok, _} = apply(meta(Config, 2, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 1, one), State1), + {State3, ok, _} = apply(meta(Config, 3, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 2, two), State2), + {State4, ok, Efx} = apply(meta(Config, 4, ?LINE, {notify, 1, Pid1}), + rabbit_fifo:make_enqueue(Pid1, 3, three), State3), + ?ASSERT_NO_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx), %% apply new config Conf = #{name => ?FUNCTION_NAME, queue_resource => QName, max_length => 2, overflow_strategy => reject_publish, - max_in_memory_length => 0, dead_letter_handler => undefined }, - {State5, ok, Efx1} = apply(meta(C, 5), rabbit_fifo:make_update_config(Conf), State4), - ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, P == Pid1, Efx1), + {State5, ok, Efx1} = apply(meta(Config, 5), 
rabbit_fifo:make_update_config(Conf), State4), + ?ASSERT_EFF({send_msg, P, {queue_status, reject_publish}, [ra_event]}, + P == Pid1, Efx1), Pid2 = test_util:fake_pid(node()), - {_State6, reject_publish, _} = apply(meta(C, 1), make_register_enqueuer(Pid2), State5), + {_State6, reject_publish, _} = + apply(meta(Config, 1), make_register_enqueuer(Pid2), State5), ok. -purge_nodes_test(C) -> +update_config_delivery_limit_test(Config) -> + QName = rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + InitConf = #{name => ?FUNCTION_NAME, + queue_resource => QName, + delivery_limit => 20 + }, + State0 = init(InitConf), + ?assertMatch(#{config := #{delivery_limit := 20}}, + rabbit_fifo:overview(State0)), + + %% A delivery limit of -1 (or any negative value) turns the delivery_limit + %% off + Conf = #{name => ?FUNCTION_NAME, + queue_resource => QName, + delivery_limit => -1, + dead_letter_handler => undefined + }, + {State1, ok, _} = apply(meta(Config, ?LINE), + rabbit_fifo:make_update_config(Conf), State0), + + ?assertMatch(#{config := #{delivery_limit := undefined}}, + rabbit_fifo:overview(State1)), + + ok. 
+ +purge_nodes_test(Config) -> Node = purged@node, ThisNode = node(), EnqPid = test_util:fake_pid(Node), EnqPid2 = test_util:fake_pid(node()), ConPid = test_util:fake_pid(Node), Cid = {<<"tag">>, ConPid}, - % WaitingPid = test_util:fake_pid(Node), State0 = init(#{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r("/", queue, - atom_to_binary(?FUNCTION_NAME, utf8)), + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), single_active_consumer_on => false}), - {State1, _, _} = apply(meta(C, 1), + {State1, _, _} = apply(meta(Config, 1, ?LINE, {notify, 1, EnqPid}), rabbit_fifo:make_enqueue(EnqPid, 1, msg1), State0), - {State2, _, _} = apply(meta(C, 2), + {State2, _, _} = apply(meta(Config, 2, ?LINE, {notify, 2, EnqPid2}), rabbit_fifo:make_enqueue(EnqPid2, 1, msg2), State1), - {State3, _} = check(C, Cid, 3, 1000, State2), - {State4, _, _} = apply(meta(C, 4), + {State3, _} = check(Config, Cid, 3, 1000, State2), + {State4, _, _} = apply(meta(Config, ?LINE), {down, EnqPid, noconnection}, State3), - ?assertMatch( - [{aux, {handle_tick, - [#resource{}, _Metrics, - [ThisNode, Node] - ]}}] , rabbit_fifo:tick(1, State4)), + ?assertMatch([{aux, {handle_tick, + [#resource{}, _Metrics, + [ThisNode, Node]]}}], + rabbit_fifo:tick(1, State4)), %% assert there are both enqueuers and consumers - {State, _, _} = apply(meta(C, 5), + {State, _, _} = apply(meta(Config, ?LINE), rabbit_fifo:make_purge_nodes([Node]), State4), %% assert there are no enqueuers nor consumers - ?assertMatch(#rabbit_fifo{enqueuers = Enqs} when map_size(Enqs) == 1, - State), - - ?assertMatch(#rabbit_fifo{consumers = Cons} when map_size(Cons) == 0, - State), - ?assertMatch( - [{aux, {handle_tick, - [#resource{}, _Metrics, - [ThisNode] - ]}}] , rabbit_fifo:tick(1, State)), + ?assertMatch(#rabbit_fifo{enqueuers = Enqs} + when map_size(Enqs) == 1, State), + ?assertMatch(#rabbit_fifo{consumers = Cons} + when map_size(Cons) == 0, State), + ?assertMatch([{aux, {handle_tick, [#resource{}, _Metrics, 
[ThisNode]]}}], + rabbit_fifo:tick(1, State)), ok. meta(Config, Idx) -> meta(Config, Idx, 0). meta(Config, Idx, Timestamp) -> + meta(Config, Idx, Timestamp, no_reply). + +meta(Config, Idx, Timestamp, ReplyMode) -> #{machine_version => ?config(machine_version, Config), index => Idx, term => 1, system_time => Timestamp, + reply_mode => ReplyMode, from => {make_ref(), self()}}. enq(Config, Idx, MsgSeq, Msg, State) -> strip_reply( - rabbit_fifo:apply(meta(Config, Idx), rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), State)). + apply(meta(Config, Idx, 0, {notify, MsgSeq, self()}), + rabbit_fifo:make_enqueue(self(), MsgSeq, Msg), + State)). deq(Config, Idx, Cid, Settlement, Msg, State0) -> {State, _, Effs} = apply(meta(Config, Idx), rabbit_fifo:make_checkout(Cid, {dequeue, Settlement}, #{}), State0), - {value, {log, [_Idx], Fun}} = lists:search(fun(E) -> element(1, E) == log end, Effs), + {value, {log, [_Idx], Fun}} = lists:search(fun(E) -> + element(1, E) == log + end, Effs), [{reply, _From, {wrap_reply, {dequeue, {MsgId, _}, _}}}] = Fun([Msg]), @@ -1724,8 +2268,20 @@ check(Config, Cid, Idx, Num, State) -> rabbit_fifo:make_checkout(Cid, {auto, Num, simple_prefetch}, #{}), State)). -settle(Config, Cid, Idx, MsgId, State) -> - strip_reply(apply(meta(Config, Idx), rabbit_fifo:make_settle(Cid, [MsgId]), State)). +checkout(Config, Idx, Cid, Credit, State) + when is_integer(Credit) -> + checkout(Config, Idx, Cid, {auto, {simple_prefetch, Credit}}, State); +checkout(Config, Idx, Cid, Spec, State) -> + checkout_reply( + apply(meta(Config, Idx), + rabbit_fifo:make_checkout(Cid, Spec, #{}), + State)). + +settle(Config, Cid, Idx, MsgId, State) when is_integer(MsgId) -> + settle(Config, Cid, Idx, [MsgId], State); +settle(Config, Cid, Idx, MsgIds, State) when is_list(MsgIds) -> + strip_reply(apply(meta(Config, Idx), + rabbit_fifo:make_settle(Cid, MsgIds), State)). 
return(Config, Cid, Idx, MsgId, State) -> strip_reply(apply(meta(Config, Idx), rabbit_fifo:make_return(Cid, [MsgId]), State)). @@ -1737,17 +2293,36 @@ credit(Config, Cid, Idx, Credit, DelCnt, Drain, State) -> strip_reply({State, _, Effects}) -> {State, Effects}. +checkout_reply({State, {ok, CInfo}, Effects}) when is_map(CInfo) -> + {State, CInfo, Effects}; +checkout_reply(Oth) -> + Oth. + run_log(Config, InitState, Entries) -> - lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) -> - case apply(meta(Config, Idx), E, Acc0) of - {Acc, _, Efx} when is_list(Efx) -> - {Acc, Efx0 ++ Efx}; - {Acc, _, Efx} -> - {Acc, Efx0 ++ [Efx]}; - {Acc, _} -> - {Acc, Efx0} - end - end, {InitState, []}, Entries). + run_log(rabbit_fifo, Config, InitState, Entries, fun (_) -> true end). + +run_log(Config, InitState, Entries, Invariant) -> + run_log(rabbit_fifo, Config, InitState, Entries, Invariant). + +run_log(Module, Config, InitState, Entries, Invariant) -> + lists:foldl( + fun ({assert, Fun}, {Acc0, Efx0}) -> + _ = Fun(Acc0), + {Acc0, Efx0}; + ({Idx, E}, {Acc0, Efx0}) -> + case Module:apply(meta(Config, Idx, Idx, {notify, Idx, self()}), + E, Acc0) of + {Acc, _, Efx} when is_list(Efx) -> + ?assert(Invariant(Acc)), + {Acc, Efx0 ++ Efx}; + {Acc, _, Efx} -> + ?assert(Invariant(Acc)), + {Acc, Efx0 ++ [Efx]}; + {Acc, _} -> + ?assert(Invariant(Acc)), + {Acc, Efx0} + end + end, {InitState, []}, Entries). 
%% AUX Tests @@ -1755,21 +2330,48 @@ run_log(Config, InitState, Entries) -> aux_test(_) -> _ = ra_machine_ets:start_link(), Aux0 = init_aux(aux_test), - MacState = init(#{name => aux_test, - queue_resource => - rabbit_misc:r(<<"/">>, queue, <<"test">>)}), + LastApplied = 0, + State0 = #{machine_state => + init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => false}), + log => mock_log, + last_applied => LastApplied}, ok = meck:new(ra_log, []), - Log = mock_log, meck:expect(ra_log, last_index_term, fun (_) -> {0, 0} end), - {no_reply, Aux, mock_log} = handle_aux(leader, cast, active, Aux0, - Log, MacState), - {no_reply, _Aux, mock_log} = handle_aux(leader, cast, tick, Aux, - Log, MacState), + {no_reply, Aux, State} = handle_aux(leader, cast, active, Aux0, State0), + {no_reply, _Aux, _, + [{release_cursor, LastApplied}]} = handle_aux(leader, cast, tick, Aux, State), [X] = ets:lookup(rabbit_fifo_usage, aux_test), meck:unload(), ?assert(X > 0.0), ok. 
+handle_aux_tick_test(Config) -> + _ = ra_machine_ets:start_link(), + Aux0 = init_aux(aux_test), + LastApplied = 1, + MacState0 = init(#{name => ?FUNCTION_NAME, + queue_resource => rabbit_misc:r("/", queue, ?FUNCTION_NAME_B), + single_active_consumer_on => false}), + State0 = #{machine_state => MacState0, + log => mock_log, + last_applied => LastApplied}, + {MacState1, _} = enq(Config, 1, 1, first, MacState0), + State1 = State0#{machine_state => MacState1}, + meck:expect(ra_log, last_index_term, fun (_) -> {1, 0} end), + ?assertEqual(1, rabbit_fifo:smallest_raft_index(MacState1)), + %% the release cursor should be 1 lower than the smallest raft index + {no_reply, _, _, + [{release_cursor, 0}]} = handle_aux(leader, cast, tick, Aux0, State1), + timer:sleep(10), + + persistent_term:put(quorum_queue_checkpoint_config, {1, 0, 1}), + {no_reply, _, _, + [{checkpoint, 1, _}, + {release_cursor, 0}]} = handle_aux(follower, cast, force_checkpoint, Aux0, State1), + ok. + %% machine version conversion test @@ -1832,9 +2434,9 @@ convert_v2_to_v3(Config) -> Cid1 = {ctag1, self()}, Cid2 = {ctag2, self()}, MaxCredits = 20, - Entries = [{1, rabbit_fifo:make_checkout(Cid1, {auto, 10, credited}, #{})}, - {2, rabbit_fifo:make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, - #{prefetch => MaxCredits})}], + Entries = [{1, make_checkout(Cid1, {auto, 10, credited}, #{})}, + {2, make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, + #{prefetch => MaxCredits})}], %% run log in v2 {State, _} = run_log(ConfigV2, test_init(?FUNCTION_NAME), Entries), @@ -1848,6 +2450,55 @@ convert_v2_to_v3(Config) -> maps:get(Cid2, Consumers)), ok. 
+convert_v3_to_v4(Config) -> + ConfigV3 = [{machine_version, 3} | Config], + ConfigV4 = [{machine_version, 4} | Config], + + EPid = test_util:fake_pid(node()), + Pid1 = test_util:fake_pid(node()), + Cid1 = {ctag1, Pid1}, + Cid2 = {ctag2, self()}, + MaxCredits = 2, + Entries = [ + {1, rabbit_fifo_v3:make_enqueue(EPid, 1, banana)}, + {2, rabbit_fifo_v3:make_enqueue(EPid, 2, apple)}, + {3, rabbit_fifo_v3:make_enqueue(EPid, 3, orange)}, + {4, make_checkout(Cid1, {auto, 10, credited}, #{})}, + {5, make_checkout(Cid2, {auto, MaxCredits, simple_prefetch}, + #{prefetch => MaxCredits})}, + {6, {down, Pid1, error}}], + + %% run log in v3 + Name = ?FUNCTION_NAME, + Init = rabbit_fifo_v3:init( + #{name => Name, + queue_resource => rabbit_misc:r("/", queue, atom_to_binary(Name)), + release_cursor_interval => 0}), + {State, _} = run_log(rabbit_fifo_v3, ConfigV3, Init, Entries, + fun (_) -> true end), + + %% convert from v3 to v4 + {#rabbit_fifo{consumers = Consumers, + returns = Returns}, ok, _} = + apply(meta(ConfigV4, ?LINE), {machine_version, 3, 4}, State), + + ?assertEqual(1, maps:size(Consumers)), + ?assertMatch(#consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredits}}}, + maps:get(Cid2, Consumers)), + ?assertNot(is_map_key(Cid1, Consumers)), + %% assert delivery_count is copied to acquired_count + #consumer{checked_out = Ch2} = maps:get(Cid2, Consumers), + ?assertMatch(#{0 := ?MSG(_, #{delivery_count := 1, + acquired_count := 1}), + 1 := ?MSG(_, #{delivery_count := 1, + acquired_count := 1})}, Ch2), + + ?assertMatch(?MSG(_, #{delivery_count := 1, + acquired_count := 1}), lqueue:get(Returns)), + + ok. 
+ queue_ttl_test(C) -> QName = rabbit_misc:r(<<"/">>, queue, <<"test">>), Conf = #{name => ?FUNCTION_NAME, @@ -1867,7 +2518,7 @@ queue_ttl_test(C) -> [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1), %% cancelling the consumer should then {S2, _, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, cancel, #{}), S1), + make_checkout(Cid, cancel, #{}), S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S2), @@ -1888,7 +2539,7 @@ queue_ttl_test(C) -> %% dequeue should set last applied {S1Deq, {dequeue, empty}, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), + make_checkout(Cid, {dequeue, unsettled}, #{}), S0), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1Deq), @@ -1897,11 +2548,11 @@ queue_ttl_test(C) -> = rabbit_fifo:tick(Now + 2500, S1Deq), %% Enqueue message, Msg = rabbit_fifo:make_enqueue(self(), 1, msg1), - {E1, _, _} = apply(meta(C, 2, Now), Msg, S0), + {E1, _, _} = apply(meta(C, 2, Now, {notify, 2, self()}), Msg, S0), Deq = {<<"deq1">>, self()}, {E2, _, Effs2} = apply(meta(C, 3, Now), - rabbit_fifo:make_checkout(Deq, {dequeue, unsettled}, #{}), + make_checkout(Deq, {dequeue, unsettled}, #{}), E1), {log, [2], Fun2} = get_log_eff(Effs2), @@ -1915,7 +2566,7 @@ queue_ttl_test(C) -> = rabbit_fifo:tick(Now + 3000, E3), ok. 
-queue_ttl_with_single_active_consumer_test(C) -> +queue_ttl_with_single_active_consumer_test(Config) -> QName = rabbit_misc:r(<<"/">>, queue, <<"test">>), Conf = #{name => ?FUNCTION_NAME, queue_resource => QName, @@ -1930,12 +2581,12 @@ queue_ttl_with_single_active_consumer_test(C) -> = rabbit_fifo:tick(Now + 1000, S0), %% adding a consumer should not ever trigger deletion Cid = {<<"cid1">>, self()}, - {S1, _} = check_auto(C, Cid, 1, S0), + {S1, _, _} = checkout(Config, ?LINE, Cid, 1, S0), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now, S1), [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S1), %% cancelling the consumer should then - {S2, _, _} = apply(meta(C, 2, Now), - rabbit_fifo:make_checkout(Cid, cancel, #{}), S1), + {S2, _, _} = apply(meta(Config, ?LINE, Now), + make_checkout(Cid, cancel, #{}), S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 [{aux, {handle_tick, [_, _, _]}}] = rabbit_fifo:tick(Now + 1000, S2), @@ -1943,7 +2594,7 @@ queue_ttl_with_single_active_consumer_test(C) -> [{mod_call, rabbit_quorum_queue, spawn_deleter, [QName]}] = rabbit_fifo:tick(Now + 2500, S2), %% Same for downs - {S2D, _, _} = apply(meta(C, 2, Now), + {S2D, _, _} = apply(meta(Config, ?LINE, Now), {down, self(), noconnection}, S1), %% last_active should have been reset when consumer was cancelled %% last_active = 2500 @@ -1953,11 +2604,11 @@ queue_ttl_with_single_active_consumer_test(C) -> = rabbit_fifo:tick(Now + 2500, S2D), ok. 
-query_peek_test(C) -> +query_peek_test(Config) -> State0 = test_init(test), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(1, State0)), - {State1, _} = enq(C, 1, 1, first, State0), - {State2, _} = enq(C, 2, 2, second, State1), + {State1, _} = enq(Config, 1, 1, first, State0), + {State2, _} = enq(Config, 2, 2, second, State1), ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State1)), ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(2, State1)), ?assertMatch({ok, [1 | _]}, rabbit_fifo:query_peek(1, State2)), @@ -1965,56 +2616,29 @@ query_peek_test(C) -> ?assertEqual({error, no_message_at_pos}, rabbit_fifo:query_peek(3, State2)), ok. -checkout_priority_test(C) -> +checkout_priority_test(Config) -> Cid = {<<"checkout_priority_test">>, self()}, Pid = spawn(fun () -> ok end), Cid2 = {<<"checkout_priority_test2">>, Pid}, Args = [{<<"x-priority">>, long, 1}], {S1, _, _} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid, {once, 2, simple_prefetch}, - #{args => Args}), + apply(meta(Config, ?LINE), + make_checkout(Cid, {auto, {simple_prefetch, 2}}, + #{args => Args}), test_init(test)), {S2, _, _} = - apply(meta(C, 3), - rabbit_fifo:make_checkout(Cid2, {once, 2, simple_prefetch}, - #{args => []}), + apply(meta(Config, ?LINE), + make_checkout(Cid2, {auto, {simple_prefetch, 2}}, + #{args => []}), S1), - {S3, E3} = enq(C, 1, 1, first, S2), - ct:pal("E3 ~tp ~tp", [E3, self()]), + {S3, E3} = enq(Config, ?LINE, 1, first, S2), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E3), - {S4, E4} = enq(C, 2, 2, second, S3), + {S4, E4} = enq(Config, ?LINE, 2, second, S3), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == self(), E4), - {_S5, E5} = enq(C, 3, 3, third, S4), + {_S5, E5} = enq(Config, ?LINE, 3, third, S4), ?ASSERT_EFF({send_msg, P, {delivery, _, _}, _}, P == Pid, E5), ok. 
-empty_dequeue_should_emit_release_cursor_test(C) -> - State0 = test_init(?FUNCTION_NAME), - Cid = {<<"basic.get1">>, self()}, - {_State, {dequeue, empty}, Effects} = - apply(meta(C, 2, 1234), - rabbit_fifo:make_checkout(Cid, {dequeue, unsettled}, #{}), - State0), - - ?ASSERT_EFF({release_cursor, _, _}, Effects), - ok. - -expire_message_should_emit_release_cursor_test(C) -> - Conf = #{name => ?FUNCTION_NAME, - queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), - release_cursor_interval => 0, - msg_ttl => 1}, - S0 = rabbit_fifo:init(Conf), - Msg = #basic_message{content = #content{properties = none, - payload_fragments_rev = []}}, - {S1, ok, _} = apply(meta(C, 1, 100), rabbit_fifo:make_enqueue(self(), 1, Msg), S0), - {_S, ok, Effs} = apply(meta(C, 2, 101), - rabbit_fifo:make_enqueue(self(), 2, Msg), - S1), - ?ASSERT_EFF({release_cursor, 1, _}, Effs), - ok. - header_test(_) -> H0 = Size = 5, ?assertEqual(Size, rabbit_fifo:get_header(size, H0)), @@ -2086,28 +2710,141 @@ checkout_metadata_test(Config) -> {State0, _} = enq(Config, 2, 2, second, State00), %% NB: the consumer meta data is taken _before_ it runs a checkout %% so in this case num_checked_out will be 0 - {State1, {ok, #{next_msg_id := 0, - num_checked_out := 0}}, _} = - apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}), - State0), + {State1, #{next_msg_id := 0, + num_checked_out := 0}, _} = + checkout(Config, ?LINE, Cid, 1, State0), {State2, _, _} = apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), - {_State3, {ok, #{next_msg_id := 1, - num_checked_out := 1}}, _} = - apply(meta(Config, ?LINE), - rabbit_fifo:make_checkout(Cid, {auto, 1, simple_prefetch}, #{}), - State2), + make_checkout(Cid, cancel, #{}), State1), + {_State3, #{next_msg_id := 1, + num_checked_out := 1}, _} = + checkout(Config, ?LINE, Cid, 1, State2), + ok. 
+ +modify_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + dead_letter_handler => at_least_once, + queue_resource => + rabbit_misc:r("/", queue, ?FUNCTION_NAME_B)}), + + Pid1 = test_util:fake_pid(node()), + % % adding some consumers + E1Idx = ?LINE, + {CK1, C1} = {?LINE, {?LINE_B, self()}}, + Entries = + [ + {E1Idx , rabbit_fifo:make_enqueue(Pid1, 1, msg1)}, + %% add a consumer + {CK1, make_checkout(C1, {auto, {simple_prefetch, 1}}, #{})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 1, + checked_out = Ch}}} + when map_size(Ch) == 1), + %% delivery_failed = false, undeliverable_here = false|true + %% this is the same as a requeue, + %% this should not increment the delivery count + {?LINE, rabbit_fifo:make_modify(CK1, [0], false, false, + #{<<"x-opt-blah">> => <<"blah1">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 2, + checked_out = Ch}}} + when map_size(Ch) == 1, + fun (#rabbit_fifo{consumers = + #{CK1 := #consumer{checked_out = Ch}}}) -> + ?assertMatch( + ?MSG(_, #{acquired_count := 1, + anns := #{<<"x-opt-blah">> := <<"blah1">>}} = H) + when not is_map_key(delivery_count, H), + maps:get(1, Ch)) + end), + %% delivery_failed = true does increment delivery_count + {?LINE, rabbit_fifo:make_modify(CK1, [1], true, false, + #{<<"x-opt-blah">> => <<"blah2">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 3, + checked_out = Ch}}} + when map_size(Ch) == 1, + fun (#rabbit_fifo{consumers = + #{CK1 := #consumer{checked_out = Ch}}}) -> + ?assertMatch( + ?MSG(_, #{delivery_count := 1, + acquired_count := 2, + anns := #{<<"x-opt-blah">> := <<"blah2">>}}), + maps:get(2, Ch)) + end), + %% delivery_failed = true and undeliverable_here = true is the same as discard + {?LINE, rabbit_fifo:make_modify(CK1, [2], true, true, + #{<<"x-opt-blah">> => <<"blah3">>})}, + ?ASSERT(#rabbit_fifo{consumers = #{CK1 := #consumer{next_msg_id = 3, + checked_out = Ch}}} + when map_size(Ch) == 0, + fun (#rabbit_fifo{dlx 
= #rabbit_fifo_dlx{discards = Discards}}) -> + ?assertMatch([[_| + ?MSG(_, #{delivery_count := 2, + acquired_count := 3, + anns := #{<<"x-opt-blah">> := <<"blah3">>}})]], + lqueue:to_list(Discards)) + end) + ], + {_S1, _} = run_log(Config, S0, Entries, fun single_active_invariant/1), + + ok. + +ttb_test(Config) -> + S0 = init(#{name => ?FUNCTION_NAME, + queue_resource => + rabbit_misc:r("/", queue, ?FUNCTION_NAME_B)}), + + + S1 = do_n(5_000_000, + fun (N, Acc) -> + I = (5_000_000 - N), + element(1, enq(Config, I, I, ?FUNCTION_NAME_B, Acc)) + end, S0), + + + + {T1, _Res} = timer:tc(fun () -> + do_n(100, fun (_, S) -> + term_to_binary(S), + S1 end, S1) + end), + ct:pal("T1 took ~bus", [T1]), + + + {T2, _} = timer:tc(fun () -> + do_n(100, fun (_, S) -> term_to_iovec(S), S1 end, S1) + end), + ct:pal("T2 took ~bus", [T2]), + ok. %% Utility +%% + +do_n(0, _, A) -> + A; +do_n(N, Fun, A0) -> + A = Fun(N, A0), + do_n(N-1, Fun, A). + init(Conf) -> rabbit_fifo:init(Conf). make_register_enqueuer(Pid) -> rabbit_fifo:make_register_enqueuer(Pid). apply(Meta, Entry, State) -> rabbit_fifo:apply(Meta, Entry, State). init_aux(Conf) -> rabbit_fifo:init_aux(Conf). -handle_aux(S, T, C, A, L, M) -> rabbit_fifo:handle_aux(S, T, C, A, L, M). +handle_aux(S, T, C, A, A2) -> rabbit_fifo:handle_aux(S, T, C, A, A2). make_checkout(C, S, M) -> rabbit_fifo:make_checkout(C, S, M). cid(A) when is_atom(A) -> atom_to_binary(A, utf8). + +single_active_invariant( #rabbit_fifo{consumers = Cons}) -> + 1 >= map_size(maps:filter(fun (_, #consumer{status = S}) -> + S == up + end, Cons)). + +mk_mc(Body) -> + mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{}, + payload_fragments_rev = [Body]}}). 
diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index baf6f72387ac..5d4c39958e1c 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -29,7 +29,7 @@ -import(rabbit_ct_broker_helpers, [rpc/5, rpc/6]). -import(quorum_queue_SUITE, [publish/2, - consume/3]). + basic_get_tag/3]). -define(DEFAULT_WAIT, 1000). -define(DEFAULT_INTERVAL, 200). @@ -95,8 +95,7 @@ init_per_group(Group, Config, NodesCount) -> Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, NodesCount}, {rmq_nodename_suffix, Group}, - {tcp_ports_base}, - {net_ticktime, 10}]), + {tcp_ports_base}]), Config2 = rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), @@ -207,7 +206,7 @@ rejected(Config) -> {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, []), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), - DelTag = consume(Ch, SourceQ, false), + DelTag = basic_get_tag(Ch, SourceQ, false), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, multiple = false, requeue = false}), @@ -224,7 +223,7 @@ delivery_limit(Config) -> {Server, Ch, SourceQ, TargetQ} = declare_topology(Config, [{<<"x-delivery-limit">>, long, 0}]), publish(Ch, SourceQ), wait_for_messages_ready([Server], ra_name(SourceQ), 1), - DelTag = consume(Ch, SourceQ, false), + DelTag = basic_get_tag(Ch, SourceQ, false), amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DelTag, multiple = false, requeue = true}), diff --git a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl index 787b60a30d00..fae1251d4738 100644 --- a/deps/rabbit/test/rabbit_fifo_int_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_int_SUITE.erl @@ -8,6 +8,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). 
+-include_lib("rabbit_common/include/rabbit_framing.hrl"). -define(RA_EVENT_TIMEOUT, 5000). -define(RA_SYSTEM, quorum_queues). @@ -23,6 +24,7 @@ all_tests() -> return, rabbit_fifo_returns_correlation, resends_lost_command, + returns, returns_after_down, resends_after_lost_applied, handles_reject_notification, @@ -31,6 +33,9 @@ all_tests() -> dequeue, discard, cancel_checkout, + cancel_checkout_with_remove, + cancel_checkout_with_pending_using_cancel_reason, + cancel_checkout_with_pending_using_remove_reason, lost_delivery, credit_api_v1, credit_api_v2, @@ -64,6 +69,8 @@ init_per_testcase(TestCase, Config) -> meck:new(rabbit_quorum_queue, [passthrough]), meck:expect(rabbit_quorum_queue, handle_tick, fun (_, _, _) -> ok end), meck:expect(rabbit_quorum_queue, cancel_consumer_handler, fun (_, _) -> ok end), + meck:new(rabbit_feature_flags, []), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), ra_server_sup_sup:remove_all(?RA_SYSTEM), ServerName2 = list_to_atom(atom_to_list(TestCase) ++ "2"), ServerName3 = list_to_atom(atom_to_list(TestCase) ++ "3"), @@ -89,19 +96,18 @@ basics(Config) -> ConsumerTag = UId, ok = start_cluster(ClusterName, [ServerId]), FState0 = rabbit_fifo_client:init([ServerId]), - {ok, FState1} = rabbit_fifo_client:checkout(ConsumerTag, 1, simple_prefetch, - #{}, FState0), + {ok, _, FState1} = rabbit_fifo_client:checkout(ConsumerTag, {simple_prefetch, 1}, + #{}, FState0), rabbit_quorum_queue:wal_force_roll_over(node()), % create segment the segment will trigger a snapshot - timer:sleep(1000), + ra_log_segment_writer:await(ra_log_segment_writer), {ok, FState2, []} = rabbit_fifo_client:enqueue(ClusterName, one, FState1), DeliverFun = fun DeliverFun(S0, F) -> receive {ra_event, From, Evt} -> - ct:pal("ra_event ~p", [Evt]), case rabbit_fifo_client:handle_ra_event(ClusterName, From, Evt, S0) of {ok, S1, [{deliver, C, true, @@ -180,7 +186,7 @@ duplicate_delivery(Config) -> ServerId = ?config(node_id, Config), ok = 
start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, corr1, msg1, F1), Fun = fun Loop(S0) -> receive @@ -215,7 +221,7 @@ usage(Config) -> ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, corr1, msg1, F1), {ok, F3, []} = rabbit_fifo_client:enqueue(ClusterName, corr2, msg2, F2), {_, _, _} = process_ra_events(receive_ra_events(2, 2), ClusterName, F3), @@ -268,7 +274,7 @@ detects_lost_delivery(Config) -> F000 = rabbit_fifo_client:init([ServerId]), {ok, F00, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F000), {_, _, F0} = process_ra_events(receive_ra_events(1, 0), ClusterName, F00), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, msg2, F1), {ok, F3, []} = rabbit_fifo_client:enqueue(ClusterName, msg3, F2), % lose first delivery @@ -284,28 +290,111 @@ detects_lost_delivery(Config) -> rabbit_quorum_queue:stop_server(ServerId), ok. 
+returns(Config) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + + F0 = rabbit_fifo_client:init([ServerId]), + Msg1 = mk_msg(<<"msg1">>), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, Msg1, F0), + {_, _, _F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F1), + + FC = rabbit_fifo_client:init([ServerId]), + {ok, _, FC1} = rabbit_fifo_client:checkout(<<"tag">>, + {simple_prefetch, 10}, + #{}, FC), + + {FC3, _} = + receive + {ra_event, Qname, {machine, {delivery, _, [{MsgId, {_, _}}]}} = Evt1} -> + {ok, FC2, Actions1} = + rabbit_fifo_client:handle_ra_event(Qname, Qname, Evt1, FC1), + [{deliver, _, true, + [{_, _, _, _, Msg1Out0}]}] = Actions1, + ?assert(mc:is(Msg1Out0)), + ?assertEqual(undefined, mc:get_annotation(<<"x-delivery-count">>, Msg1Out0)), + ?assertEqual(undefined, mc:get_annotation(delivery_count, Msg1Out0)), + rabbit_fifo_client:return(<<"tag">>, [MsgId], FC2) + after 5000 -> + flush(), + exit(await_delivery_timeout) + end, + {FC5, _} = + receive + {ra_event, Qname2, + {machine, {delivery, _, [{MsgId1, {_, _Msg1Out}}]}} = Evt2} -> + {ok, FC4, Actions2} = + rabbit_fifo_client:handle_ra_event(Qname2, Qname2, Evt2, FC3), + [{deliver, _tag, true, + [{_, _, _, _, Msg1Out}]}] = Actions2, + ?assert(mc:is(Msg1Out)), + ?assertEqual(1, mc:get_annotation(<<"x-delivery-count">>, Msg1Out)), + %% delivery_count should _not_ be incremented for a return + ?assertEqual(undefined, mc:get_annotation(delivery_count, Msg1Out)), + rabbit_fifo_client:modify(<<"tag">>, [MsgId1], true, false, #{}, FC4) + after 5000 -> + flush(), + exit(await_delivery_timeout_2) + end, + receive + {ra_event, Qname3, + {machine, {delivery, _, [{MsgId2, {_, _Msg2Out}}]}} = Evt3} -> + {ok, FC6, Actions3} = + rabbit_fifo_client:handle_ra_event(Qname3, Qname3, Evt3, FC5), + [{deliver, _, true, + [{_, _, _, _, Msg2Out}]}] = Actions3, + ?assert(mc:is(Msg2Out)), + ?assertEqual(2, 
mc:get_annotation(<<"x-delivery-count">>, Msg2Out)), + %% delivery_count should be incremented for a modify with delivery_failed = true + ?assertEqual(1, mc:get_annotation(delivery_count, Msg2Out)), + rabbit_fifo_client:settle(<<"tag">>, [MsgId2], FC6) + after 5000 -> + flush(), + exit(await_delivery_timeout_3) + end, + rabbit_quorum_queue:stop_server(ServerId), + ok. + returns_after_down(Config) -> ClusterName = ?config(cluster_name, Config), ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F0), + Msg1 = mk_msg(<<"msg1">>), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, Msg1, F0), {_, _, F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F1), % start a consumer in a separate processes % that exits after checkout - Self = self(), - _Pid = spawn(fun () -> - F = rabbit_fifo_client:init([ServerId]), - {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 10, - simple_prefetch, - #{}, F), - Self ! 
checkout_done - end), - receive checkout_done -> ok after 1000 -> exit(checkout_done_timeout) end, - timer:sleep(1000), + {_, MonRef} = spawn_monitor( + fun () -> + F = rabbit_fifo_client:init([ServerId]), + {ok, _, _} = rabbit_fifo_client:checkout(<<"tag">>, + {simple_prefetch, 10}, + #{}, F) + end), + receive + {'DOWN', MonRef, _, _, _} -> + ok + after 5000 -> + ct:fail("waiting for process exit timed out") + end, + rabbit_ct_helpers:await_condition( + fun () -> + case ra:member_overview(ServerId) of + {ok, #{machine := #{num_consumers := 0}}, _} -> + true; + X -> + ct:pal("X ~p", [X]), + false + end + end), % message should be available for dequeue - {ok, _, {_, _, _, _, msg1}, _} = rabbit_fifo_client:dequeue(ClusterName, <<"tag">>, settled, F2), + {ok, _, {_, _, _, _, Msg1Out}, _} = + rabbit_fifo_client:dequeue(ClusterName, <<"tag">>, settled, F2), + ?assertEqual(1, mc:get_annotation(<<"x-delivery-count">>, Msg1Out)), + ?assertEqual(1, mc:get_annotation(delivery_count, Msg1Out)), rabbit_quorum_queue:stop_server(ServerId), ok. 
@@ -378,8 +467,8 @@ discard(Config) -> _ = ra:members(ServerId), F0 = rabbit_fifo_client:init([ServerId]), - {ok, F1} = rabbit_fifo_client:checkout(<<"tag">>, 10, - simple_prefetch, #{}, F0), + {ok, _, F1} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F0), {ok, F2, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F1), F3 = discard_next_delivery(ClusterName, F2, 5000), {empty, _F4} = rabbit_fifo_client:dequeue(ClusterName, <<"tag1">>, settled, F3), @@ -401,11 +490,70 @@ cancel_checkout(Config) -> ok = start_cluster(ClusterName, [ServerId]), F0 = rabbit_fifo_client:init([ServerId], 4), {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), - {ok, F2} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F1), - {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, [], [], fun (_, S) -> S end), - {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, F3), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, cancel, F3), {F5, _} = rabbit_fifo_client:return(<<"tag">>, [0], F4), - {ok, _, {_, _, _, _, m1}, F5} = rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + {ok, _, {_, _, _, _, m1}, F5} = + rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + ok. 
+ +cancel_checkout_with_remove(Config) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + F0 = rabbit_fifo_client:init([ServerId], 4), + {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {_, _, F3} = process_ra_events(receive_ra_events(1, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + {ok, F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, remove, F3), + %% settle here to prove that message is returned by "remove" cancellation + %% and not settled by late settlement + {F5, _} = rabbit_fifo_client:settle(<<"tag">>, [0], F4), + {ok, _, {_, _, _, _, m1}, F5} = + rabbit_fifo_client:dequeue(ClusterName, <<"d1">>, settled, F5), + ok. + +cancel_checkout_with_pending_using_cancel_reason(Config) -> + cancel_checkout_with_pending(Config, cancel). + +cancel_checkout_with_pending_using_remove_reason(Config) -> + cancel_checkout_with_pending(Config, remove). 
+ +cancel_checkout_with_pending(Config, Reason) -> + ClusterName = ?config(cluster_name, Config), + ServerId = ?config(node_id, Config), + ok = start_cluster(ClusterName, [ServerId]), + F0 = rabbit_fifo_client:init([ServerId], 4), + F1 = lists:foldl( + fun (Num, Acc0) -> + {ok, Acc, _} = rabbit_fifo_client:enqueue(ClusterName, Num, Acc0), + Acc + end, F0, lists:seq(1, 10)), + receive_ra_events(10, 0), + {ok, _, F2} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, + #{}, F1), + {Msgs, _, F3} = process_ra_events(receive_ra_events(0, 1), ClusterName, F2, + [], [], fun (_, S) -> S end), + %% settling each individually should cause the client to enter the "slow" + %% state where settled msg ids are buffered internally waiting for + %% applied events + F4 = lists:foldl( + fun({_Q, _, MsgId, _, _}, Acc0) -> + {Acc, _} = rabbit_fifo_client:settle(<<"tag">>, [MsgId], Acc0), + Acc + end, F3, Msgs), + + {ok, _F4} = rabbit_fifo_client:cancel_checkout(<<"tag">>, Reason, F4), + timer:sleep(100), + {ok, Overview, _} = ra:member_overview(ServerId), + ?assertMatch(#{machine := #{num_messages := 0, + num_consumers := 0}}, Overview), + flush(), ok. lost_delivery(Config) -> @@ -415,8 +563,9 @@ lost_delivery(Config) -> F0 = rabbit_fifo_client:init([ServerId], 4), {ok, F1, []} = rabbit_fifo_client:enqueue(ClusterName, m1, F0), {_, _, F2} = process_ra_events( - receive_ra_events(1, 0), ClusterName, F1, [], [], fun (_, S) -> S end), - {ok, F3} = rabbit_fifo_client:checkout(<<"tag">>, 10, simple_prefetch, #{}, F2), + receive_ra_events(1, 0), ClusterName, F1, [], [], + fun (_, S) -> S end), + {ok, _, F3} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 10}, #{}, F2), %% drop a delivery, simulating e.g. a full distribution buffer receive {ra_event, _, Evt} -> @@ -441,6 +590,7 @@ lost_delivery(Config) -> ok. 
credit_api_v1(Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), ClusterName = ?config(cluster_name, Config), ServerId = ?config(node_id, Config), ok = start_cluster(ClusterName, [ServerId]), @@ -450,7 +600,7 @@ credit_api_v1(Config) -> {_, _, F3} = process_ra_events(receive_ra_events(2, 0), ClusterName, F2), %% checkout with 0 prefetch CTag = <<"my-tag">>, - {ok, F4} = rabbit_fifo_client:checkout(CTag, 0, credited, #{}, F3), + {ok, _, F4} = rabbit_fifo_client:checkout(CTag, {credited, 0}, #{}, F3), %% assert no deliveries {_, _, F5} = process_ra_events(receive_ra_events(), ClusterName, F4, [], [], fun @@ -497,9 +647,9 @@ credit_api_v2(Config) -> CTag = <<"my-tag">>, DC0 = 16#ff_ff_ff_ff, DC1 = 0, %% = DC0 + 1 using 32 bit serial number arithmetic - {ok, F4} = rabbit_fifo_client:checkout( + {ok, _, F4} = rabbit_fifo_client:checkout( %% initial_delivery_count in consumer meta means credit API v2. - CTag, 0, credited, #{initial_delivery_count => DC0}, F3), + CTag, {credited, DC0}, #{}, F3), %% assert no deliveries {_, _, F5} = process_ra_events(receive_ra_events(), ClusterName, F4, [], [], fun @@ -598,7 +748,7 @@ test_queries(Config) -> exit(ready_timeout) end, F0 = rabbit_fifo_client:init([ServerId], 4), - {ok, _} = rabbit_fifo_client:checkout(<<"tag">>, 1, simple_prefetch, #{}, F0), + {ok, _, _} = rabbit_fifo_client:checkout(<<"tag">>, {simple_prefetch, 1}, #{}, F0), {ok, {_, Ready}, _} = ra:local_query(ServerId, fun rabbit_fifo:query_messages_ready/1), ?assertEqual(1, Ready), @@ -626,8 +776,8 @@ dequeue(Config) -> {ok, F2_, []} = rabbit_fifo_client:enqueue(ClusterName, msg1, F1b), {_, _, F2} = process_ra_events(receive_ra_events(1, 0), ClusterName, F2_), - % {ok, {{0, {_, msg1}}, _}, F3} = rabbit_fifo_client:dequeue(ClusterName, Tag, settled, F2), - {ok, _, {_, _, 0, _, msg1}, F3} = rabbit_fifo_client:dequeue(ClusterName, Tag, settled, F2), + {ok, _, {_, _, 0, _, msg1}, F3} = + rabbit_fifo_client:dequeue(ClusterName, Tag, settled, 
F2), {ok, F4_, []} = rabbit_fifo_client:enqueue(ClusterName, msg2, F3), {_, _, F4} = process_ra_events(receive_ra_events(1, 0), ClusterName, F4_), {ok, _, {_, _, MsgId, _, msg2}, F5} = rabbit_fifo_client:dequeue(ClusterName, Tag, unsettled, F4), @@ -687,7 +837,7 @@ receive_ra_events(Acc) -> end. process_ra_events(Events, ClusterName, State) -> - DeliveryFun = fun ({deliver, _, Tag, Msgs}, S) -> + DeliveryFun = fun ({deliver, Tag, _, Msgs}, S) -> MsgIds = [element(1, M) || M <- Msgs], {S0, _} = rabbit_fifo_client:settle(Tag, MsgIds, S), S0 @@ -745,3 +895,12 @@ flush() -> after 10 -> ok end. + +mk_msg(Body) when is_binary(Body) -> + mc_amqpl:from_basic_message( + #basic_message{routing_keys = [<<"">>], + exchange_name = #resource{name = <<"x">>, + kind = exchange, + virtual_host = <<"v">>}, + content = #content{properties = #'P_basic'{}, + payload_fragments_rev = [Body]}}). diff --git a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl index c151c1cd0214..273597982f31 100644 --- a/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_prop_SUITE.erl @@ -11,9 +11,10 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit/src/rabbit_fifo.hrl"). -include_lib("rabbit/src/rabbit_fifo_dlx.hrl"). +-include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). --define(record_info(T,R),lists:zip(record_info(fields,T),tl(tuple_to_list(R)))). +-define(MACHINE_VERSION, 4). %%%=================================================================== %%% Common Test callbacks @@ -62,10 +63,6 @@ all_tests() -> scenario31, scenario32, upgrade, - upgrade_snapshots, - upgrade_snapshots_scenario1, - upgrade_snapshots_scenario2, - upgrade_snapshots_v2_to_v3, messages_total, simple_prefetch, simple_prefetch_without_checkout_cancel, @@ -88,8 +85,8 @@ all_tests() -> dlx_06, dlx_07, dlx_08, - dlx_09 - % single_active_ordering_02 + dlx_09, + single_active_ordering_02 ]. 
groups() -> @@ -110,18 +107,18 @@ end_per_group(_Group, _Config) -> ok. init_per_testcase(_TestCase, Config) -> + ok = meck:new(rabbit_feature_flags, [passthrough]), + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), Config. end_per_testcase(_TestCase, _Config) -> + meck:unload(), ok. %%%=================================================================== %%% Test cases %%%=================================================================== -% -type log_op() :: -% {enqueue, pid(), maybe(msg_seqno()), Msg :: raw_msg()}. - scenario2(_Config) -> C1 = {<<>>, c:pid(0,346,1)}, C2 = {<<>>,c:pid(0,379,1)}, @@ -693,45 +690,6 @@ scenario23(_Config) -> Commands), ok. -upgrade_snapshots_scenario1(_Config) -> - E = c:pid(0,327,1), - Commands = [make_enqueue(E,1,msg(<<"msg1">>)), - make_enqueue(E,2,msg(<<"msg2">>)), - make_enqueue(E,3,msg(<<"msg3">>))], - run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - delivery_limit => 100, - max_length => 1, - max_bytes => 100, - max_in_memory_length => undefined, - max_in_memory_bytes => undefined, - overflow_strategy => drop_head, - single_active_consumer_on => false, - dead_letter_handler => {?MODULE, banana, []} - }, - Commands), - ok. - -upgrade_snapshots_scenario2(_Config) -> - E = c:pid(0,240,0), - CPid = c:pid(0,242,0), - C = {<<>>, CPid}, - Commands = [make_checkout(C, {auto,1,simple_prefetch}), - make_enqueue(E,1,msg(<<"msg1">>)), - make_enqueue(E,2,msg(<<"msg2">>)), - rabbit_fifo:make_settle(C, [0])], - run_upgrade_snapshot_test(#{name => ?FUNCTION_NAME, - delivery_limit => undefined, - max_length => undefined, - max_bytes => undefined, - max_in_memory_length => undefined, - max_in_memory_bytes => undefined, - overflow_strategy => drop_head, - single_active_consumer_on => false, - dead_letter_handler => {?MODULE, banana, []} - }, - Commands), - ok. 
- single_active_01(_Config) -> C1Pid = test_util:fake_pid(rabbit@fake_node1), C1 = {<<0>>, C1Pid}, @@ -765,15 +723,14 @@ single_active_02(_Config) -> make_checkout(C2, cancel), {down,E,noconnection} ], - Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, undefined, undefined), + Conf = config(?FUNCTION_NAME, undefined, undefined, true, 1, + undefined, undefined), ?assert(single_active_prop(Conf, Commands, false)), ok. single_active_03(_Config) -> C1Pid = test_util:fake_pid(node()), C1 = {<<0>>, C1Pid}, - % C2Pid = test_util:fake_pid(rabbit@fake_node2), - % C2 = {<<>>, C2Pid}, Pid = test_util:fake_pid(node()), E = test_util:fake_pid(rabbit@fake_node2), Commands = [ @@ -788,67 +745,53 @@ single_active_03(_Config) -> ok. single_active_04(_Config) -> - % C1Pid = test_util:fake_pid(node()), - % C1 = {<<0>>, C1Pid}, - % C2Pid = test_util:fake_pid(rabbit@fake_node2), - % C2 = {<<>>, C2Pid}, - % Pid = test_util:fake_pid(node()), E = test_util:fake_pid(rabbit@fake_node2), Commands = [ - - % make_checkout(C1, {auto,2,simple_prefetch}), make_enqueue(E, 1, msg(<<>>)), make_enqueue(E, 2, msg(<<>>)), make_enqueue(E, 3, msg(<<>>)), make_enqueue(E, 4, msg(<<>>)) - % {down, Pid, noconnection}, - % {nodeup, node()} ], - Conf = config(?FUNCTION_NAME, 3, 587, true, 3, 7, undefined), + Conf = config(?FUNCTION_NAME, 3, 587, true, 3), ?assert(single_active_prop(Conf, Commands, true)), ok. 
test_run_log(_Config) -> - Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, + meck:expect(rabbit_feature_flags, is_enabled, + fun (_) -> true end), run_proper( fun () -> - ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit, InMemoryLength, - InMemoryBytes}, - frequency([{10, {0, 0, false, 0, 0, 0}}, + ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit}, + frequency([{10, {0, 0, false, 0}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]) + oneof([range(1, 3), undefined]) }}]), - ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, Fun)), + ?FORALL(O, ?LET(Ops, log_gen(100), expand(Ops, #{})), collect({log_size, length(O)}, dump_generated( config(?FUNCTION_NAME, Length, Bytes, SingleActiveConsumer, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), O)))) + DeliveryLimit), O)))) end, [], 10). snapshots(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, SingleActiveConsumer, - DeliveryLimit, InMemoryLength, InMemoryBytes, - Overflow, DeadLetterHandler}, - frequency([{10, {0, 0, false, 0, 0, 0, drop_head, undefined}}, + DeliveryLimit, Overflow, DeadLetterHandler}, + frequency([{10, {0, 0, false, 0, drop_head, undefined}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), oneof([drop_head, reject_publish]), - oneof([undefined, {at_most_once, {?MODULE, banana, []}}]) + oneof([undefined, + {at_most_once, {?MODULE, banana, []}}]) }}]), begin Config = config(?FUNCTION_NAME, @@ -856,8 +799,6 @@ snapshots(_Config) -> Bytes, SingleActiveConsumer, DeliveryLimit, - InMemoryLength, - InMemoryBytes, Overflow, DeadLetterHandler), ?FORALL(O, ?LET(Ops, log_gen(256), expand(Ops, Config)), @@ -867,17 
+808,15 @@ snapshots(_Config) -> end, [], 256). snapshots_dlx(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), Size = 256, run_proper( fun () -> - ?FORALL({Length, Bytes, SingleActiveConsumer, - DeliveryLimit, InMemoryLength, InMemoryBytes}, - frequency([{10, {0, 0, false, 0, 0, 0}}, + ?FORALL({Length, Bytes, SingleActiveConsumer, DeliveryLimit}, + frequency([{10, {0, 0, false, 0}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), boolean(), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]) }}]), begin @@ -886,8 +825,6 @@ snapshots_dlx(_Config) -> Bytes, SingleActiveConsumer, DeliveryLimit, - InMemoryLength, - InMemoryBytes, reject_publish, at_least_once), ?FORALL(O, ?LET(Ops, log_gen_dlx(Size), expand(Ops, Config)), @@ -897,25 +834,24 @@ snapshots_dlx(_Config) -> end, [], Size). single_active(_Config) -> - Size = 300, + %% validates that there can only ever be a single active consumer at a time + %% as well as that message deliveries are done in order + Size = 1000, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, InMemoryBytes}, - frequency([{10, {0, 0, 0, 0, 0}}, + ?FORALL({Length, Bytes, DeliveryLimit}, + frequency([{10, {undefined, undefined, undefined}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]) + oneof([range(1, 3), undefined]) }}]), begin Config = config(?FUNCTION_NAME, Length, Bytes, true, - DeliveryLimit, - InMemoryLength, - InMemoryBytes), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, single_active_prop(Config, O, false))) @@ -924,14 +860,15 @@ single_active(_Config) -> upgrade(_Config) -> Size = 256, + %% upgrade is always done using _old_ 
command formats + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), run_proper( fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, + ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, + frequency([{5, {undefined, undefined, undefined, false}}, {5, {oneof([range(1, 10), undefined]), oneof([range(1, 1000), undefined]), oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), oneof([true, false]) }}]), begin @@ -940,10 +877,8 @@ upgrade(_Config) -> Bytes, SingleActive, DeliveryLimit, - InMemoryLength, - undefined, drop_head, - {?MODULE, banana, []} + undefined ), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, @@ -951,36 +886,8 @@ upgrade(_Config) -> end) end, [], Size). -upgrade_snapshots(_Config) -> - Size = 256, - run_proper( - fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, - {5, {oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), - oneof([true, false]) - }}]), - begin - Config = config(?FUNCTION_NAME, - Length, - Bytes, - SingleActive, - DeliveryLimit, - InMemoryLength, - undefined, - drop_head, - {?MODULE, banana, []} - ), - ?FORALL(O, ?LET(Ops, log_gen_upgrade_snapshots(Size), expand(Ops, Config)), - collect({log_size, length(O)}, - upgrade_snapshots_prop(Config, O))) - end) - end, [], Size). 
- -upgrade_snapshots_v2_to_v3(_Config) -> +messages_total(_Config) -> + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> false end), Size = 256, run_proper( fun () -> @@ -996,36 +903,7 @@ upgrade_snapshots_v2_to_v3(_Config) -> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined - ), - ?FORALL(O, ?LET(Ops, log_gen_upgrade_snapshots_v2_to_v3(Size), expand(Ops, Config)), - collect({log_size, length(O)}, - upgrade_snapshots_prop_v2_to_v3(Config, O))) - end) - end, [], Size). - -messages_total(_Config) -> - Size = 1000, - run_proper( - fun () -> - ?FORALL({Length, Bytes, DeliveryLimit, InMemoryLength, SingleActive}, - frequency([{5, {undefined, undefined, undefined, undefined, false}}, - {5, {oneof([range(1, 10), undefined]), - oneof([range(1, 1000), undefined]), - oneof([range(1, 3), undefined]), - oneof([range(1, 10), 0, undefined]), - oneof([true, false]) - }}]), - begin - Config = config(?FUNCTION_NAME, - Length, - Bytes, - SingleActive, - DeliveryLimit, - InMemoryLength, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, messages_total_prop(Config, O))) @@ -1034,6 +912,7 @@ messages_total(_Config) -> simple_prefetch(_Config) -> Size = 500, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, @@ -1048,9 +927,7 @@ simple_prefetch(_Config) -> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen(Size), expand(Ops, Config)), collect({log_size, length(O)}, simple_prefetch_prop(Config, O, true))) @@ -1059,6 +936,7 @@ simple_prefetch(_Config) -> simple_prefetch_without_checkout_cancel(_Config) -> Size = 256, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), run_proper( fun () -> ?FORALL({Length, Bytes, DeliveryLimit, SingleActive}, @@ -1073,10 +951,9 @@ simple_prefetch_without_checkout_cancel(_Config) 
-> Length, Bytes, SingleActive, - DeliveryLimit, - undefined, - undefined), - ?FORALL(O, ?LET(Ops, log_gen_without_checkout_cancel(Size), expand(Ops, Config)), + DeliveryLimit), + ?FORALL(O, ?LET(Ops, log_gen_without_checkout_cancel(Size), + expand(Ops, Config)), collect({log_size, length(O)}, simple_prefetch_prop(Config, O, false))) end) @@ -1105,19 +982,19 @@ simple_prefetch_01(_Config) -> single_active_ordering(_Config) -> Size = 500, + meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> true end), Fun = {-1, fun ({Prev, _}) -> {Prev + 1, Prev + 1} end}, run_proper( fun () -> ?FORALL(O, ?LET(Ops, log_gen_ordered(Size), expand(Ops, Fun)), collect({log_size, length(O)}, - single_active_prop(config(?FUNCTION_NAME, - undefined, - undefined, - true, - undefined, - undefined, - undefined), O, - true))) + single_active_prop( + config(?FUNCTION_NAME, + undefined, + undefined, + true, + undefined), O, + true))) end, [], Size). single_active_ordering_01(_Config) -> @@ -1132,7 +1009,7 @@ single_active_ordering_01(_Config) -> make_enqueue(E2, 1, msg(<<"2">>)), make_settle(C1, [0]) ], - Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf = config(?FUNCTION_NAME, 0, 0, true, 0), ?assert(single_active_prop(Conf, Commands, true)), ok. @@ -1153,7 +1030,7 @@ single_active_ordering_02(_Config) -> {down,E,noproc}, make_settle(C1, [0]) ], - Conf = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf = config(?FUNCTION_NAME, 0, 0, true, 0), ?assert(single_active_prop(Conf, Commands, true)), ok. 
@@ -1173,7 +1050,7 @@ single_active_ordering_03(_Config) -> make_checkout(C1, cancel), {down, C1Pid, noconnection} ], - Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0, 0, 0), + Conf0 = config(?FUNCTION_NAME, 0, 0, true, 0), Conf = Conf0#{release_cursor_interval => 100}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), @@ -1198,21 +1075,17 @@ max_length(_Config) -> Size = 1000, run_proper( fun () -> - ?FORALL({Length, SingleActiveConsumer, DeliveryLimit, - InMemoryLength}, + ?FORALL({Length, SingleActiveConsumer, DeliveryLimit}, {oneof([range(1, 100), undefined]), boolean(), - range(1, 3), - range(1, 10) + range(1, 3) }, begin Config = config(?FUNCTION_NAME, Length, undefined, SingleActiveConsumer, - DeliveryLimit, - InMemoryLength, - undefined), + DeliveryLimit), ?FORALL(O, ?LET(Ops, log_gen_config(Size), expand(Ops, Config)), collect({log_size, length(O)}, @@ -1235,7 +1108,8 @@ dlx_01(_Config) -> rabbit_fifo:make_discard(C1, [1]), rabbit_fifo_dlx:make_settle([1]) ], - Config = config(?FUNCTION_NAME, 8, undefined, false, 2, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 8, undefined, false, 2, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1257,7 +1131,8 @@ dlx_02(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% Release cursor A got emitted. ], - Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1278,7 +1153,8 @@ dlx_03(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% Release cursor A got emitted. ], - Config = config(?FUNCTION_NAME, 10, undefined, false, 5, 5, 100, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 10, undefined, false, 5, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. 
@@ -1298,7 +1174,8 @@ dlx_04(_Config) -> rabbit_fifo:make_discard(C1, [0,1,2,3,4,5]), rabbit_fifo_dlx:make_settle([0,1,2]) ], - Config = config(?FUNCTION_NAME, undefined, undefined, true, 1, 5, 136, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, undefined, undefined, true, 1, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1324,7 +1201,8 @@ dlx_05(_Config) -> rabbit_fifo_dlx:make_settle([0]) %% 2 in checkout ], - Config = config(?FUNCTION_NAME, 0, 0, false, 0, 0, 0, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, 0, 0, false, 0, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1352,7 +1230,8 @@ dlx_06(_Config) -> rabbit_fifo_dlx:make_settle([0,1]) %% 3 in dlx_checkout ], - Config = config(?FUNCTION_NAME, undefined, 749, false, 1, 1, 131, reject_publish, at_least_once), + Config = config(?FUNCTION_NAME, undefined, 749, false, 1, + reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1385,7 +1264,7 @@ dlx_07(_Config) -> rabbit_fifo_dlx:make_settle([0,1]) %% 3 in checkout ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. @@ -1431,7 +1310,7 @@ dlx_08(_Config) -> rabbit_fifo_dlx:make_settle([1]), rabbit_fifo_dlx:make_settle([2]) ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. 
@@ -1453,25 +1332,25 @@ dlx_09(_Config) -> rabbit_fifo:make_discard(C1, [2]) %% 1,2 in discards ], - Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, undefined, undefined, + Config = config(?FUNCTION_NAME, undefined, undefined, false, undefined, reject_publish, at_least_once), ?assert(snapshots_prop(Config, Commands)), ok. -config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes) -> -config(Name, Length, Bytes, SingleActive, DeliveryLimit, InMemoryLength, InMemoryBytes, +config(Name, Length, MaxBytes, SingleActive, DeliveryLimit) -> + config(Name, Length, MaxBytes, SingleActive, DeliveryLimit, drop_head, {at_most_once, {?MODULE, banana, []}}). -config(Name, Length, Bytes, SingleActive, DeliveryLimit, - InMemoryLength, InMemoryBytes, Overflow, DeadLetterHandler) -> +config(Name, Length, MaxBytes, SingleActive, DeliveryLimit, + Overflow, DeadLetterHandler) -> #{name => Name, max_length => map_max(Length), - max_bytes => map_max(Bytes), + max_bytes => map_max(MaxBytes), dead_letter_handler => DeadLetterHandler, single_active_consumer_on => SingleActive, delivery_limit => map_max(DeliveryLimit), - max_in_memory_length => map_max(InMemoryLength), - max_in_memory_bytes => map_max(InMemoryBytes), + % max_in_memory_length => map_max(InMemoryLength), + % max_in_memory_bytes => map_max(InMemoryBytes), overflow_strategy => Overflow}. 
map_max(0) -> undefined; @@ -1485,7 +1364,7 @@ max_length_prop(Conf0, Commands) -> #{num_ready_messages := MsgReady} = rabbit_fifo:overview(S), MsgReady =< MaxLen end, - try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of + try run_log(test_init(Conf), Entries, Invariant) of {_State, _Effects} -> true; _ -> @@ -1531,7 +1410,7 @@ single_active_prop(Conf0, Commands, ValidateOrder) -> map_size(Up) =< 1 end, - try run_log(test_init(Conf), Entries, Invariant, rabbit_fifo) of + try run_log(test_init(Conf), Entries, Invariant) of {_State, Effects} when ValidateOrder -> %% validate message ordering lists:foldl(fun ({send_msg, Pid, {delivery, Tag, Msgs}, ra_event}, @@ -1555,7 +1434,7 @@ messages_total_prop(Conf0, Commands) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init(Conf), - run_log(InitState, Entries, messages_total_invariant(), rabbit_fifo), + run_log(InitState, Entries, messages_total_invariant()), true. messages_total_invariant() -> @@ -1564,7 +1443,7 @@ messages_total_invariant() -> returns = R, dlx = #rabbit_fifo_dlx{discards = D, consumer = DlxCon}} = S) -> - Base = lqueue:len(M) + lqueue:len(R), + Base = rabbit_fifo_q:len(M) + lqueue:len(R), Tot0 = maps:fold(fun (_, #consumer{checked_out = Ch}, Acc) -> Acc + map_size(Ch) end, Base, C), @@ -1590,7 +1469,8 @@ simple_prefetch_prop(Conf0, Commands, WithCheckoutCancel) -> Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), InitState = test_init(Conf), - run_log(InitState, Entries, simple_prefetch_invariant(WithCheckoutCancel), rabbit_fifo), + run_log(InitState, Entries, + simple_prefetch_invariant(WithCheckoutCancel)), true. 
simple_prefetch_invariant(WithCheckoutCancel) -> @@ -1598,10 +1478,13 @@ simple_prefetch_invariant(WithCheckoutCancel) -> maps:fold( fun(_, _, false) -> false; - (Id, #consumer{cfg = #consumer_cfg{credit_mode = {simple_prefetch, MaxCredit}}, + (Id, #consumer{cfg = #consumer_cfg{credit_mode = + {simple_prefetch, MaxCredit}}, checked_out = CheckedOut, credit = Credit}, true) -> - valid_simple_prefetch(MaxCredit, Credit, maps:size(CheckedOut), WithCheckoutCancel, Id) + valid_simple_prefetch(MaxCredit, Credit, + maps:size(CheckedOut), + WithCheckoutCancel, Id) end, true, Consumers) end. @@ -1628,24 +1511,26 @@ valid_simple_prefetch(_, _, _, _, _) -> true. upgrade_prop(Conf0, Commands) -> + FromVersion = 3, + ToVersion = 4, + FromMod = rabbit_fifo:which_module(FromVersion), + ToMod = rabbit_fifo:which_module(ToVersion), Conf = Conf0#{release_cursor_interval => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - InitState = test_init_v1(Conf), + InitState = test_init_v(Conf, FromVersion), [begin {PreEntries, PostEntries} = lists:split(SplitPos, Entries), %% run log v1 - {V1, _V1Effs} = run_log(InitState, PreEntries, fun (_) -> true end, - rabbit_fifo_v1), + {V3, _V1Effs} = run_log(InitState, PreEntries, + fun (_) -> true end, FromVersion), %% perform conversion - #rabbit_fifo{} = V2 = element(1, rabbit_fifo:apply(meta(length(PreEntries) + 1), - {machine_version, 1, 2}, V1)), + #rabbit_fifo{} = V4 = element(1, rabbit_fifo:apply( + meta(length(PreEntries) + 1), + {machine_version, FromVersion, ToVersion}, + V3)), %% assert invariants - %% - %% Note that we cannot test for num_messages because rabbit_fifo_v1:messages_total/1 - %% relies on ra_indexes not to be empty. However ra_indexes are empty in snapshots - %% in which case the number of messages checked out to consumers will not be included. 
Fields = [num_ready_messages, smallest_raft_index, num_enqueuers, @@ -1653,42 +1538,18 @@ upgrade_prop(Conf0, Commands) -> enqueue_message_bytes, checkout_message_bytes ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(V1)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), - case V1Overview == V2Overview of + V3Overview = maps:with(Fields, FromMod:overview(V3)), + V4Overview = maps:with(Fields, ToMod:overview(V4)), + case V3Overview == V4Overview of true -> ok; false -> ct:pal("upgrade_prop failed expected~n~tp~nGot:~n~tp", - [V1Overview, V2Overview]), - ?assertEqual(V1Overview, V2Overview) + [V3Overview, V4Overview]), + ?assertEqual(V3Overview, V4Overview) end, %% check we can run the post entries from the converted state - run_log(V2, PostEntries) + run_log(V4, PostEntries, fun (_) -> true end, ToVersion) end || SplitPos <- lists:seq(1, length(Entries))], - - {_, V1Effs} = run_log(InitState, Entries, fun (_) -> true end, - rabbit_fifo_v1), - [begin - Res = rabbit_fifo:apply(meta(Idx + 1), {machine_version, 1, 2}, RCS) , - #rabbit_fifo{} = V2 = element(1, Res), - %% assert invariants - Fields = [num_ready_messages, - smallest_raft_index, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(RCS)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(V2)), - case V1Overview == V2Overview of - true -> ok; - false -> - ct:pal("upgrade_prop failed expected~n~tp~nGot:~n~tp", - [V1Overview, V2Overview]), - ?assertEqual(V1Overview, V2Overview) - end - end || {release_cursor, Idx, RCS} <- V1Effs], true. %% single active consumer ordering invariant: @@ -1720,27 +1581,7 @@ dump_generated(Conf, Commands) -> true. snapshots_prop(Conf, Commands) -> - try run_snapshot_test(Conf, Commands, messages_total_invariant()) of - _ -> true - catch - Err -> - ct:pal("Commands: ~tp~nConf~tp~n", [Commands, Conf]), - ct:pal("Err: ~tp~n", [Err]), - false - end. 
- -upgrade_snapshots_prop(Conf, Commands) -> - try run_upgrade_snapshot_test(Conf, Commands) of - _ -> true - catch - Err -> - ct:pal("Commands: ~tp~nConf~tp~n", [Commands, Conf]), - ct:pal("Err: ~tp~n", [Err]), - false - end. - -upgrade_snapshots_prop_v2_to_v3(Conf, Commands) -> - try run_upgrade_snapshot_test_v2_to_v3(Conf, Commands) of + try run_snapshot_test(Conf, Commands) of _ -> true catch Err -> @@ -1772,28 +1613,6 @@ log_gen(Size) -> {1, purge} ]))))). -%% Does not use "return", "down", or "checkout cancel" Ra commands -%% since these 3 commands change behaviour across v2 and v3 fixing -%% a bug where to many credits are granted to the consumer. -log_gen_upgrade_snapshots_v2_to_v3(Size) -> - Nodes = [node(), - fakenode@fake, - fakenode@fake2 - ], - ?LET(EPids, vector(2, pid_gen(Nodes)), - ?LET(CPids, vector(2, pid_gen(Nodes)), - resize(Size, - list( - frequency( - [{20, enqueue_gen(oneof(EPids))}, - {40, {input_event, - frequency([{10, settle}, - {2, discard}, - {2, requeue}])}}, - {1, checkout_gen(oneof(CPids))}, - {1, purge} - ]))))). - log_gen_upgrade_snapshots(Size) -> Nodes = [node(), fakenode@fake, @@ -1812,14 +1631,8 @@ log_gen_upgrade_snapshots(Size) -> {2, requeue} ])}}, {2, checkout_gen(oneof(CPids))}, - %% v2 fixes a bug that exists in v1 where a cancelled consumer is revived. - %% Therefore, there is an expected behavioural difference between v1 and v2 - %% and below line must be commented out. - % {1, checkout_cancel_gen(oneof(CPids))}, - %% Likewise there is a behavioural difference between v1 and v2 - %% when 'up' is followed by 'down' where v2 behaves correctly. - %% Therefore, below line must be commented out. - % {1, down_gen(oneof(EPids ++ CPids))}, + {1, checkout_cancel_gen(oneof(CPids))}, + {1, down_gen(oneof(EPids ++ CPids))}, {1, nodeup_gen(Nodes)}, {1, purge} ]))))). @@ -1946,16 +1759,21 @@ enqueue_gen(Pid) -> enqueue_gen(Pid, _Enq, _Del) -> ?LET(E, {enqueue, Pid, enqueue, msg_gen()}, E). 
-%% It's fair to assume that every message enqueued is a #basic_message. -%% That's what the channel expects and what rabbit_quorum_queue invokes rabbit_fifo_client with. msg_gen() -> ?LET(Bin, binary(), - #basic_message{content = #content{payload_fragments_rev = [Bin], - properties = none}}). + mc:prepare( + store, mc_amqpl:from_basic_message( + #basic_message{exchange_name = #resource{name = <<"e">>, + kind = exchange, + virtual_host = <<"/">>}, + routing_keys = [<<>>], + content = + #content{payload_fragments_rev = [Bin], + properties = #'P_basic'{}}}))). msg(Bin) when is_binary(Bin) -> #basic_message{content = #content{payload_fragments_rev = [Bin], - properties = none}}. + properties = #'P_basic'{}}}. checkout_cancel_gen(Pid) -> {checkout, Pid, cancel}. @@ -1974,7 +1792,8 @@ checkout_gen(Pid) -> config :: map(), log = [] :: list(), down = #{} :: #{pid() => noproc | noconnection}, - enq_cmds = #{} :: #{ra:index() => rabbit_fifo:enqueue()} + enq_cmds = #{} :: #{ra:index() => rabbit_fifo:enqueue()}, + is_v4 = false :: boolean() }). 
expand(Ops, Config) -> @@ -2000,9 +1819,11 @@ expand(Ops, Config, EnqFun) -> _ -> InitConfig0 end, + IsV4 = rabbit_feature_flags:is_enabled('rabbitmq_4.0.0'), T = #t{state = rabbit_fifo:init(InitConfig), enq_body_fun = EnqFun, - config = Config}, + config = Config, + is_v4 = IsV4}, #t{effects = Effs} = T1 = lists:foldl(fun handle_op/2, T, Ops), %% process the remaining effect #t{log = Log} = lists:foldl(fun do_apply/2, @@ -2024,7 +1845,7 @@ handle_op({enqueue, Pid, When, Data}, Enqs = maps:update_with(Pid, fun (Seq) -> Seq + 1 end, 1, Enqs0), MsgSeq = maps:get(Pid, Enqs), {EnqSt, Msg} = Fun({EnqSt0, Data}), - Cmd = rabbit_fifo:make_enqueue(Pid, MsgSeq, Msg), + Cmd = make_enqueue(Pid, MsgSeq, Msg), case When of enqueue -> do_apply(Cmd, T#t{enqueuers = Enqs, @@ -2054,9 +1875,15 @@ handle_op({checkout, CId, Prefetch}, #t{consumers = Cons0} = T) -> %% ignore if it already exists T; _ -> - Cons = maps:put(CId, ok, Cons0), - Cmd = rabbit_fifo:make_checkout(CId, - {auto, Prefetch, simple_prefetch}, + Spec = case T#t.is_v4 of + true -> + {auto, {simple_prefetch, Prefetch}}; + false -> + {auto, Prefetch, simple_prefetch} + end, + + Cons = maps:put(CId, T#t.index, Cons0), + Cmd = rabbit_fifo:make_checkout(CId, Spec, #{ack => true, prefetch => Prefetch, username => <<"user">>, @@ -2084,13 +1911,24 @@ handle_op({input_event, requeue}, #t{effects = Effs} = T) -> T end; handle_op({input_event, Settlement}, #t{effects = Effs, - down = Down} = T) -> + consumers = Cons, + down = Down, + is_v4 = IsV4} = T) -> case queue:out(Effs) of {{value, {settle, CId, MsgIds}}, Q} -> + CKey = case maps:get(CId, Cons, undefined) of + K when is_integer(K) andalso IsV4 -> + K; + _ -> + CId + end, Cmd = case Settlement of - settle -> rabbit_fifo:make_settle(CId, MsgIds); - return -> rabbit_fifo:make_return(CId, MsgIds); - discard -> rabbit_fifo:make_discard(CId, MsgIds) + settle -> + rabbit_fifo:make_settle(CKey, MsgIds); + return -> + rabbit_fifo:make_return(CKey, MsgIds); + discard -> + 
rabbit_fifo:make_discard(CKey, MsgIds) end, do_apply(Cmd, T#t{effects = Q}); {{value, {enqueue, Pid, _, _} = Cmd}, Q} -> @@ -2113,7 +1951,8 @@ handle_op(purge, T) -> handle_op({update_config, Changes}, #t{config = Conf} = T) -> Config = maps:merge(Conf, Changes), do_apply(rabbit_fifo:make_update_config(Config), T); -handle_op({checkout_dlx, Prefetch}, #t{config = #{dead_letter_handler := at_least_once}} = T) -> +handle_op({checkout_dlx, Prefetch}, + #t{config = #{dead_letter_handler := at_least_once}} = T) -> Cmd = rabbit_fifo_dlx:make_checkout(ignore_pid, Prefetch), do_apply(Cmd, T). @@ -2181,145 +2020,17 @@ run_proper(Fun, Args, NumTests) -> end}])). run_snapshot_test(Conf, Commands) -> - run_snapshot_test(Conf, Commands, fun (_) -> true end). - -run_snapshot_test(Conf, Commands, Invariant) -> - %% create every incremental permutation of the commands lists - %% and run the snapshot tests against that - ct:pal("running snapshot test with ~b commands using config ~tp", - [length(Commands), Conf]), - [begin - % ct:pal("~w running commands to ~w~n", [?FUNCTION_NAME, lists:last(C)]), - run_snapshot_test0(Conf, C, Invariant) - end || C <- prefixes(Commands, 1, [])]. - -run_snapshot_test0(Conf, Commands) -> - run_snapshot_test0(Conf, Commands, fun (_) -> true end). 
- -run_snapshot_test0(Conf0, Commands, Invariant) -> - Conf = Conf0#{max_in_memory_length => 0}, Indexes = lists:seq(1, length(Commands)), Entries = lists:zip(Indexes, Commands), - {State0, Effects} = run_log(test_init(Conf), Entries, Invariant, rabbit_fifo), - State = rabbit_fifo:normalize(State0), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - - [begin - %% drop all entries below and including the snapshot - Filtered = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - % ct:pal("release_cursor: ~b from ~w~n", [SnapIdx, element(1, hd_or(Filtered))]), - {S0, _} = run_log(SnapState, Filtered, Invariant, rabbit_fifo), - S = rabbit_fifo:normalize(S0), - % assert log can be restored from any release cursor index - case S of - State -> ok; - _ -> - ct:pal("Snapshot tests failed run log:~n" - "~tp~n from snapshot index ~b " - "with snapshot state~n~tp~n Entries~n~tp~n" - "Config: ~tp~n", - [Filtered, SnapIdx, SnapState, Entries, Conf]), - ct:pal("Expected~n~tp~nGot:~n~tp~n", [?record_info(rabbit_fifo, State), - ?record_info(rabbit_fifo, S)]), - ?assertEqual(State, S) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. - -run_upgrade_snapshot_test(Conf, Commands) -> - ct:pal("running test with ~b commands using config ~tp", + ct:pal("running snapshot test 2 with ~b commands using config ~tp", [length(Commands), Conf]), - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - Invariant = fun(_) -> true end, - %% Run the whole command log in v1 to emit release cursors. - {_, Effects} = run_log(test_init_v1(Conf), Entries, Invariant, rabbit_fifo_v1), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - [begin - %% Drop all entries below and including the snapshot. - FilteredV1 = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - %% For V2 we will apply the same commands to the snapshot state as for V1. 
- %% However, we need to increment all Raft indexes by 1 because V2 - %% requires one additional Raft index for the conversion command from V1 to V2. - FilteredV2 = lists:keymap(fun(Idx) -> Idx + 1 end, 1, FilteredV1), - %% Recover in V1. - {StateV1, _} = run_log(SnapState, FilteredV1, Invariant, rabbit_fifo_v1), - %% Perform conversion and recover in V2. - Res = rabbit_fifo:apply(meta(SnapIdx + 1), {machine_version, 1, 2}, SnapState), - #rabbit_fifo{} = V2 = element(1, Res), - {StateV2, _} = run_log(V2, FilteredV2, Invariant, rabbit_fifo, 2), - %% Invariant: Recovering a V1 snapshot in V1 or V2 should end up in the same - %% number of messages. - Fields = [num_messages, - num_ready_messages, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V1Overview = maps:with(Fields, rabbit_fifo_v1:overview(StateV1)), - V2Overview = maps:with(Fields, rabbit_fifo:overview(StateV2)), - case V1Overview == V2Overview of - true -> ok; - false -> - ct:pal("property failed, expected:~n~tp~ngot:~n~tp~nstate v1:~n~tp~nstate v2:~n~tp~n" - "snapshot index: ~tp", - [V1Overview, V2Overview, StateV1, ?record_info(rabbit_fifo, StateV2), SnapIdx]), - ?assertEqual(V1Overview, V2Overview) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. + Fun = fun (_E, S, _Effs) -> + MsgTotFun = messages_total_invariant(), + MsgTotFun(S) + end, + _ = run_log(test_init(Conf), Entries, Fun), + true. -run_upgrade_snapshot_test_v2_to_v3(Conf, Commands) -> - ct:pal("running test with ~b commands using config ~tp", - [length(Commands), Conf]), - Indexes = lists:seq(1, length(Commands)), - Entries = lists:zip(Indexes, Commands), - Invariant = fun(_) -> true end, - %% Run the whole command log in v2 to emit release cursors. - {_, Effects} = run_log(test_init(Conf), Entries, Invariant, rabbit_fifo, 2), - Cursors = [ C || {release_cursor, _, _} = C <- Effects], - [begin - %% Drop all entries below and including the snapshot. 
- FilteredV2 = lists:dropwhile(fun({X, _}) when X =< SnapIdx -> true; - (_) -> false - end, Entries), - %% For V3 we will apply the same commands to the snapshot state as for V2. - %% However, we need to increment all Raft indexes by 1 because V3 - %% requires one additional Raft index for the conversion command from V2 to V3. - FilteredV3 = lists:keymap(fun(Idx) -> Idx + 1 end, 1, FilteredV2), - %% Recover in V2. - {StateV2, _} = run_log(SnapState, FilteredV2, Invariant, rabbit_fifo, 2), - %% Perform conversion and recover in V3. - Res = rabbit_fifo:apply(meta(SnapIdx + 1), {machine_version, 2, 3}, SnapState), - #rabbit_fifo{} = V3 = element(1, Res), - {StateV3, _} = run_log(V3, FilteredV3, Invariant, rabbit_fifo, 3), - %% Invariant: Recovering a V2 snapshot in V2 or V3 should end up in the same - %% number of messages given that no "return", "down", or "cancel consumer" - %% Ra commands are used. - Fields = [num_messages, - num_ready_messages, - num_enqueuers, - num_consumers, - enqueue_message_bytes, - checkout_message_bytes - ], - V2Overview = maps:with(Fields, rabbit_fifo:overview(StateV2)), - V3Overview = maps:with(Fields, rabbit_fifo:overview(StateV3)), - case V2Overview == V3Overview of - true -> ok; - false -> - ct:pal("property failed, expected:~n~tp~ngot:~n~tp~nstate v2:~n~tp~nstate v3:~n~tp~n" - "snapshot index: ~tp", - [V2Overview, V3Overview, StateV2, ?record_info(rabbit_fifo, StateV3), SnapIdx]), - ?assertEqual(V2Overview, V3Overview) - end - end || {release_cursor, SnapIdx, SnapState} <- Cursors], - ok. hd_or([H | _]) -> H; hd_or(_) -> {undefined}. @@ -2332,45 +2043,64 @@ prefixes(Source, N, Acc) -> prefixes(Source, N+1, [X | Acc]). run_log(InitState, Entries) -> - run_log(InitState, Entries, fun(_) -> true end, rabbit_fifo). - -run_log(InitState, Entries, InvariantFun, FifoMod) -> - run_log(InitState, Entries, InvariantFun, FifoMod, 3). 
- -run_log(InitState, Entries, InvariantFun, FifoMod, MachineVersion) -> - Invariant = fun(E, S) -> - case InvariantFun(S) of + run_log(InitState, Entries, fun(_) -> true end). + +run_log(InitState, Entries, InvariantFun) -> + run_log(InitState, Entries, InvariantFun, ?MACHINE_VERSION). + +run_log(InitState, Entries, InvariantFun0, MachineVersion) + when is_function(InvariantFun0, 1) -> + InvariantFun = fun (_E, S, _Effs) -> + InvariantFun0(S) + end, + run_log(InitState, Entries, InvariantFun, MachineVersion); +run_log(InitState, Entries, InvariantFun, MachineVersion) + when is_integer(MachineVersion) -> + Invariant = fun(E, S, Effs) -> + case InvariantFun(E, S, Effs) of true -> ok; false -> throw({invariant, E, S}) end end, - - lists:foldl(fun ({Idx, E}, {Acc0, Efx0}) -> - case FifoMod:apply(meta(Idx, MachineVersion), E, Acc0) of + FifoMod = rabbit_fifo:which_module(MachineVersion), + + lists:foldl(fun ({Idx, E0}, {Acc0, Efx0}) -> + {Meta, E} = case E0 of + {M1, E1} when is_map(M1) -> + M0 = meta(Idx, MachineVersion), + {maps:merge(M0, M1), E1}; + _ -> + {meta(Idx, MachineVersion), E0} + end, + + case FifoMod:apply(Meta, E, Acc0) of {Acc, _, Efx} when is_list(Efx) -> - Invariant(E, Acc), + Invariant(E, Acc, lists:flatten(Efx)), {Acc, Efx0 ++ Efx}; {Acc, _, Efx} -> - Invariant(E, Acc), + Invariant(E, Acc, lists:flatten(Efx)), {Acc, Efx0 ++ [Efx]}; {Acc, _} -> - Invariant(E, Acc), + Invariant(E, Acc, []), {Acc, Efx0} end end, {InitState, []}, Entries). test_init(Conf) -> + test_init(rabbit_fifo, Conf). + +test_init(Mod, Conf) -> Default = #{queue_resource => blah, release_cursor_interval => 0, metrics_handler => {?MODULE, metrics_handler, []}}, - rabbit_fifo:init(maps:merge(Default, Conf)). + Mod:init(maps:merge(Default, Conf)). test_init_v1(Conf) -> - Default = #{queue_resource => blah, - release_cursor_interval => 0, - metrics_handler => {?MODULE, metrics_handler, []}}, - rabbit_fifo_v1:init(maps:merge(Default, Conf)). + test_init(rabbit_fifo_v1, Conf). 
+ +test_init_v(Conf, Version) -> + test_init(rabbit_fifo:which_module(Version), Conf). meta(Idx) -> meta(Idx, 3). diff --git a/deps/rabbit/test/rabbit_fifo_q_SUITE.erl b/deps/rabbit/test/rabbit_fifo_q_SUITE.erl new file mode 100644 index 000000000000..919aa40f0e44 --- /dev/null +++ b/deps/rabbit/test/rabbit_fifo_q_SUITE.erl @@ -0,0 +1,208 @@ +-module(rabbit_fifo_q_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("proper/include/proper.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("rabbit/src/rabbit_fifo.hrl"). + +all() -> + [ + {group, tests} + ]. + + +all_tests() -> + [ + hi, + basics, + hi_is_prioritised, + get_lowest_index, + single_priority_behaves_like_queue + ]. + + +groups() -> + [ + {tests, [parallel], all_tests()} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(_TestCase, _Config) -> + ok. + +%%%=================================================================== +%%% Test cases +%%%=================================================================== + +-define(MSG(L), ?MSG(L, L)). + +hi(_Config) -> + Q0 = rabbit_fifo_q:new(), + Q1 = lists:foldl( + fun ({P, I}, Q) -> + rabbit_fifo_q:in(P, I, Q) + end, Q0, [ + {hi, ?MSG(1)} + ]), + {?MSG(1), Q2} = rabbit_fifo_q:out(Q1), + empty = rabbit_fifo_q:out(Q2), + ok. + +basics(_Config) -> + Q0 = rabbit_fifo_q:new(), + Q1 = lists:foldl( + fun ({P, I}, Q) -> + rabbit_fifo_q:in(P, I, Q) + end, Q0, [ + {hi, ?MSG(1)}, + {no, ?MSG(2)}, + {hi, ?MSG(3)}, + {no, ?MSG(4)}, + {hi, ?MSG(5)} + ]), + {?MSG(1), Q2} = rabbit_fifo_q:out(Q1), + {?MSG(3), Q3} = rabbit_fifo_q:out(Q2), + {?MSG(2), Q4} = rabbit_fifo_q:out(Q3), + {?MSG(5), Q5} = rabbit_fifo_q:out(Q4), + {?MSG(4), Q6} = rabbit_fifo_q:out(Q5), + empty = rabbit_fifo_q:out(Q6), + ok. 
+
+hi_is_prioritised(_Config) ->
+    Q0 = rabbit_fifo_q:new(),
+    %% when `hi' has a lower index than the next 'no' then it is still
+    %% prioritised (as this is safe to do).
+    Q1 = lists:foldl(
+           fun ({P, I}, Q) ->
+                   rabbit_fifo_q:in(P, I, Q)
+           end, Q0, [
+                     {hi, ?MSG(1)},
+                     {hi, ?MSG(2)},
+                     {hi, ?MSG(3)},
+                     {hi, ?MSG(4)},
+                     {no, ?MSG(5)}
+                    ]),
+    {?MSG(1), Q2} = rabbit_fifo_q:out(Q1),
+    {?MSG(2), Q3} = rabbit_fifo_q:out(Q2),
+    {?MSG(3), Q4} = rabbit_fifo_q:out(Q3),
+    {?MSG(4), Q5} = rabbit_fifo_q:out(Q4),
+    {?MSG(5), Q6} = rabbit_fifo_q:out(Q5),
+    empty = rabbit_fifo_q:out(Q6),
+    ok.
+
+get_lowest_index(_Config) ->
+    Q0 = rabbit_fifo_q:new(),
+    Q1 = rabbit_fifo_q:in(hi, ?MSG(1, ?LINE), Q0),
+    Q2 = rabbit_fifo_q:in(no, ?MSG(2, ?LINE), Q1),
+    Q3 = rabbit_fifo_q:in(no, ?MSG(3, ?LINE), Q2),
+    {_, Q4} = rabbit_fifo_q:out(Q3),
+    {_, Q5} = rabbit_fifo_q:out(Q4),
+    {_, Q6} = rabbit_fifo_q:out(Q5),
+
+    ?assertEqual(undefined, rabbit_fifo_q:get_lowest_index(Q0)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q1)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q2)),
+    ?assertEqual(1, rabbit_fifo_q:get_lowest_index(Q3)),
+    ?assertEqual(2, rabbit_fifo_q:get_lowest_index(Q4)),
+    ?assertEqual(3, rabbit_fifo_q:get_lowest_index(Q5)),
+    ?assertEqual(undefined, rabbit_fifo_q:get_lowest_index(Q6)).
+
+-type op() :: {in, integer()} | out.
+
+single_priority_behaves_like_queue(_Config) ->
+    run_proper(
+      fun () ->
+              ?FORALL({P, Ops}, {oneof([hi, no]), op_gen(256)},
+                      queue_prop(P, Ops))
+      end, [], 25),
+    ok.
+ +queue_prop(P, Ops) -> + % ct:pal("Running queue_prop for ~s", [Ops]), + Que = queue:new(), + Sut = rabbit_fifo_q:new(), + {Queue, FifoQ} = lists:foldl( + fun ({in, V}, {Q0, S0}) -> + Q = queue:in(V, Q0), + S = rabbit_fifo_q:in(P, V, S0), + case queue:len(Q) == rabbit_fifo_q:len(S) of + true -> + {Q, S}; + false -> + throw(false) + end; + (out, {Q0, S0}) -> + {V1, Q} = case queue:out(Q0) of + {{value, V0}, Q1} -> + {V0, Q1}; + Res0 -> + Res0 + end, + {V2, S} = case rabbit_fifo_q:out(S0) of + empty -> + {empty, S0}; + Res -> + Res + end, + case V1 == V2 of + true -> + {Q, S}; + false -> + ct:pal("V1 ~p, V2 ~p", [V1, V2]), + throw(false) + end + end, {Que, Sut}, Ops), + + queue:len(Queue) == rabbit_fifo_q:len(FifoQ). + + + + +%%% helpers + +op_gen(Size) -> + ?LET(Ops, + resize(Size, + list( + frequency( + [ + {20, {in, non_neg_integer()}}, + {20, out} + ] + ))), + begin + {_, Ops1} = lists:foldl( + fun ({in, I}, {Idx, Os}) -> + {Idx + 1, [{in, ?MSG(Idx, I)} | Os]}; + (out, {Idx, Os}) -> + {Idx + 1, [out | Os] } + end, {1, []}, Ops), + lists:reverse(Ops1) + end + ). + +run_proper(Fun, Args, NumTests) -> + ?assert( + proper:counterexample( + erlang:apply(Fun, Args), + [{numtests, NumTests}, + {on_output, fun(".", _) -> ok; % don't print the '.'s on new lines + (F, A) -> ct:pal(?LOW_IMPORTANCE, F, A) + end}])). 
diff --git a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl index 3d09d901caf9..c8b2f8aabce9 100644 --- a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl @@ -34,6 +34,7 @@ all() -> {group, cluster_size_3}, {group, cluster_size_3_1}, {group, cluster_size_3_2}, + {group, cluster_size_3_3}, {group, cluster_size_3_parallel_1}, {group, cluster_size_3_parallel_2}, {group, cluster_size_3_parallel_3}, @@ -79,6 +80,7 @@ groups() -> {cluster_size_3_2, [], [recover, declare_with_node_down_1, declare_with_node_down_2]}, + {cluster_size_3_3, [], [consume_while_deleting_replica]}, {cluster_size_3_parallel_1, [parallel], [ delete_replica, delete_last_replica, @@ -207,6 +209,7 @@ init_per_group1(Group, Config) -> cluster_size_3_parallel_5 -> 3; cluster_size_3_1 -> 3; cluster_size_3_2 -> 3; + cluster_size_3_3 -> 3; unclustered_size_3_1 -> 3; unclustered_size_3_2 -> 3; unclustered_size_3_3 -> 3; @@ -224,15 +227,14 @@ init_per_group1(Group, Config) -> {rmq_nodename_suffix, Group}, {tcp_ports_base}, {rmq_nodes_clustered, Clustered}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - Config1c = case Group of + Config1b = case Group of unclustered_size_3_4 -> rabbit_ct_helpers:merge_app_env( - Config1b, {rabbit, [{stream_tick_interval, 5000}]}); + Config1, {rabbit, [{stream_tick_interval, 5000}]}); _ -> - Config1b + Config1 end, - Ret = rabbit_ct_helpers:run_steps(Config1c, + Ret = rabbit_ct_helpers:run_steps(Config1b, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), case Ret of @@ -1649,6 +1651,45 @@ consume_from_replica(Config) -> receive_batch(Ch2, 0, 99), rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). 
+consume_while_deleting_replica(Config) -> + [Server1, _, Server3] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch1 = rabbit_ct_client_helpers:open_channel(Config, Server1), + Q = ?config(queue_name, Config), + + ?assertEqual({'queue.declare_ok', Q, 0, 0}, + declare(Config, Server1, Q, [{<<"x-queue-type">>, longstr, <<"stream">>}])), + + rabbit_ct_helpers:await_condition( + fun () -> + Info = find_queue_info(Config, 1, [online]), + length(proplists:get_value(online, Info)) == 3 + end), + + Ch2 = rabbit_ct_client_helpers:open_channel(Config, Server3), + qos(Ch2, 10, false), + + CTag = atom_to_binary(?FUNCTION_NAME), + subscribe(Ch2, Q, false, 0, CTag), + + %% Delete replica in node 3 + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_stream_queue, + delete_replica, [<<"/">>, Q, Server3]), + + publish_confirm(Ch1, Q, [<<"msg1">> || _ <- lists:seq(1, 100)]), + + %% no messages should be received + receive + #'basic.cancel'{consumer_tag = CTag} -> + ok; + {_, #amqp_msg{}} -> + exit(unexpected_message) + after 30000 -> + exit(missing_consumer_cancel) + end, + + rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, delete_testcase_queue, [Q]). + consume_credit(Config) -> [Server | _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), diff --git a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl index f0e05e580e0d..d5f5f147782a 100644 --- a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl +++ b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl @@ -75,9 +75,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_setup_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_setup_steps(Config, []). end_per_suite(Config) -> Config. 
diff --git a/deps/rabbit/test/single_active_consumer_SUITE.erl b/deps/rabbit/test/single_active_consumer_SUITE.erl index 6945d213b85a..ac682ad95712 100644 --- a/deps/rabbit/test/single_active_consumer_SUITE.erl +++ b/deps/rabbit/test/single_active_consumer_SUITE.erl @@ -11,13 +11,15 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). +-compile(nowarn_export_all). -compile(export_all). -define(TIMEOUT, 30000). all() -> [ - {group, classic_queue}, {group, quorum_queue} + {group, classic_queue}, + {group, quorum_queue} ]. groups() -> diff --git a/deps/rabbit/test/unit_access_control_SUITE.erl b/deps/rabbit/test/unit_access_control_SUITE.erl index 3bab2d7bb416..4f8e2b44235b 100644 --- a/deps/rabbit/test/unit_access_control_SUITE.erl +++ b/deps/rabbit/test/unit_access_control_SUITE.erl @@ -282,31 +282,36 @@ version_negotiation(Config) -> ok = rabbit_ct_broker_helpers:rpc(Config, ?MODULE, version_negotiation1, [Config]). version_negotiation1(Config) -> - H = ?config(rmq_hostname, Config), - P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), - [?assertEqual(<<"AMQP",0,1,0,0>>, version_negotiation2(H, P, Vsn)) || + [?assertEqual(<<"AMQP",3,1,0,0>>, + version_negotiation2(Hostname, Port, Vsn)) || Vsn <- [<<"AMQP",0,1,0,0>>, <<"AMQP",0,1,0,1>>, <<"AMQP",0,1,1,0>>, <<"AMQP",0,9,1,0>>, <<"AMQP",0,0,8,0>>, - <<"XXXX",0,1,0,0>>, - <<"XXXX",0,0,9,1>>]], - - [?assertEqual(<<"AMQP",3,1,0,0>>, version_negotiation2(H, P, Vsn)) || - Vsn <- [<<"AMQP",1,1,0,0>>, + <<"AMQP",1,1,0,0>>, + <<"AMQP",2,1,0,0>>, + <<"AMQP",3,1,0,0>>, + <<"AMQP",3,1,0,1>>, + <<"AMQP",3,1,0,1>>, <<"AMQP",4,1,0,0>>, - <<"AMQP",9,1,0,0>>]], + <<"AMQP",9,1,0,0>>, + <<"XXXX",0,1,0,0>>, + <<"XXXX",0,0,9,1>> + ]], - [?assertEqual(<<"AMQP",0,0,9,1>>, version_negotiation2(H, P, Vsn)) || + [?assertEqual(<<"AMQP",0,0,9,1>>, + 
version_negotiation2(Hostname, Port, Vsn)) || Vsn <- [<<"AMQP",0,0,9,2>>, <<"AMQP",0,0,10,0>>, <<"AMQP",0,0,10,1>>]], ok. -version_negotiation2(H, P, Header) -> - {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]), +version_negotiation2(Hostname, Port, Header) -> + {ok, C} = gen_tcp:connect(Hostname, Port, [binary, {active, false}]), ok = gen_tcp:send(C, Header), {ok, ServerVersion} = gen_tcp:recv(C, 8, 100), ok = gen_tcp:close(C), diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl index 72968c0b37ac..297da7493cbf 100644 --- a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl +++ b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl @@ -68,7 +68,10 @@ decrypt_config(_Config) -> ok. do_decrypt_config(Algo = {C, H, I, P}) -> - ok = application:load(rabbit), + case application:load(rabbit) of + ok -> ok; + {error, {already_loaded, rabbit}} -> ok + end, RabbitConfig = application:get_all_env(rabbit), %% Encrypt a few values in configuration. %% Common cases. diff --git a/deps/rabbit/test/unit_msg_size_metrics_SUITE.erl b/deps/rabbit/test/unit_msg_size_metrics_SUITE.erl new file mode 100644 index 000000000000..cd496932cd92 --- /dev/null +++ b/deps/rabbit/test/unit_msg_size_metrics_SUITE.erl @@ -0,0 +1,64 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(unit_msg_size_metrics_SUITE). + +-include_lib("stdlib/include/assert.hrl"). + +-compile([nowarn_export_all, export_all]). + +all() -> + [ + {group, tests} + ]. + +groups() -> + [ + {tests, [], + [ + prometheus_format + ]} + ]. 
+ +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. +%% ------------------------------------------------------------------- + +init_per_suite(Config) -> + ok = rabbit_msg_size_metrics:init(fake_protocol), + Config. + +end_per_suite(Config) -> + ok = rabbit_msg_size_metrics:cleanup(fake_protocol), + Config. + +%% ------------------------------------------------------------------- +%% Testcases. +%% ------------------------------------------------------------------- + +prometheus_format(_Config) -> + MsgSizes = [1, 100, 1_000_000_000, 99_000_000, 15_000, 15_000], + [ok = rabbit_msg_size_metrics:observe(fake_protocol, MsgSize) || MsgSize <- MsgSizes], + + ?assertEqual( + #{message_size_bytes => + #{type => histogram, + help => "Size of messages received from publishers", + values => [{ + [{protocol, fake_protocol}], + [{100, 2}, + {1_000, 2}, + {10_000, 2}, + {100_000, 4}, + {1_000_000, 4}, + {10_000_000, 4}, + {50_000_000, 4}, + {100_000_000, 5}, + {infinity, 6}], + length(MsgSizes), + lists:sum(MsgSizes)}]}}, + rabbit_msg_size_metrics:prometheus_format()). diff --git a/deps/rabbit/test/unit_policy_validators_SUITE.erl b/deps/rabbit/test/unit_policy_validators_SUITE.erl index 89207caae97e..6b05404e2297 100644 --- a/deps/rabbit/test/unit_policy_validators_SUITE.erl +++ b/deps/rabbit/test/unit_policy_validators_SUITE.erl @@ -9,6 +9,7 @@ -include_lib("eunit/include/eunit.hrl"). +-compile(nowarn_export_all). -compile(export_all). all() -> @@ -93,7 +94,7 @@ max_in_memory_length(_Config) -> requires_non_negative_integer_value(<<"max-in-memory-bytes">>). delivery_limit(_Config) -> - requires_non_negative_integer_value(<<"delivery-limit">>). + requires_integer_value(<<"delivery-limit">>). classic_queue_lazy_mode(_Config) -> test_valid_and_invalid_values(<<"queue-mode">>, @@ -142,3 +143,8 @@ requires_non_negative_integer_value(Key) -> test_valid_and_invalid_values(Key, [0, 1, 1000], [-1000, -1, <<"a.binary">>]). 
+ +requires_integer_value(Key) -> + test_valid_and_invalid_values(Key, + [-1, 0, 1, 1000, -10000], + [<<"a.binary">>, 0.1]). diff --git a/deps/rabbit/test/unit_quorum_queue_SUITE.erl b/deps/rabbit/test/unit_quorum_queue_SUITE.erl index be96bd612359..2f4a7e7133b6 100644 --- a/deps/rabbit/test/unit_quorum_queue_SUITE.erl +++ b/deps/rabbit/test/unit_quorum_queue_SUITE.erl @@ -3,13 +3,63 @@ -compile(nowarn_export_all). -compile(export_all). +-include_lib("eunit/include/eunit.hrl"). + all() -> [ all_replica_states_includes_nonvoters, filter_nonvoters, - filter_quorum_critical_accounts_nonvoters + filter_quorum_critical_accounts_nonvoters, + ra_machine_conf_delivery_limit ]. +ra_machine_conf_delivery_limit(_Config) -> + Q0 = amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q2">>), + {q2, test@leader}, + false, false, none, [], undefined, #{}), + %% ensure default is set + ?assertMatch(#{delivery_limit := 20}, + rabbit_quorum_queue:ra_machine_config(Q0)), + + Q = amqqueue:set_policy(Q0, [{name, <<"p1">>}, + {definition, [{<<"delivery-limit">>,-1}]}]), + %% a policy of -1 + ?assertMatch(#{delivery_limit := -1}, + rabbit_quorum_queue:ra_machine_config(Q)), + + %% if therre is a queue arg with a non neg value this takes precedence + Q1 = amqqueue:set_arguments(Q, [{<<"x-delivery-limit">>, long, 5}]), + ?assertMatch(#{delivery_limit := 5}, + rabbit_quorum_queue:ra_machine_config(Q1)), + + Q2 = amqqueue:set_policy(Q1, [{name, <<"o1">>}, + {definition, [{<<"delivery-limit">>, 5}]}]), + Q3 = amqqueue:set_arguments(Q2, [{<<"x-delivery-limit">>, long, -1}]), + ?assertMatch(#{delivery_limit := 5}, + rabbit_quorum_queue:ra_machine_config(Q3)), + + %% non neg takes precedence + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, -1, -1, 5)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, -1, 5, -1)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, 5, -1, -1)), + + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, -1, 10, 
5)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, -1, 5, 10)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, 5, 15, 10)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, 15, 5, 10)), + ?assertMatch(#{delivery_limit := 5}, + make_ra_machine_conf(Q0, 15, 10, 5)), + + ok. + + filter_quorum_critical_accounts_nonvoters(_Config) -> Nodes = [test@leader, test@follower1, test@follower2], Qs0 = [amqqueue:new(rabbit_misc:r(<<"/">>, queue, <<"q1">>), @@ -69,3 +119,12 @@ all_replica_states_includes_nonvoters(_Config) -> true = ets:delete(ra_state), ok. + +make_ra_machine_conf(Q0, Arg, Pol, OpPol) -> + Q1 = amqqueue:set_arguments(Q0, [{<<"x-delivery-limit">>, long, Arg}]), + Q2 = amqqueue:set_policy(Q1, [{name, <<"p1">>}, + {definition, [{<<"delivery-limit">>,Pol}]}]), + Q = amqqueue:set_operator_policy(Q2, [{name, <<"p1">>}, + {definition, [{<<"delivery-limit">>,OpPol}]}]), + rabbit_quorum_queue:ra_machine_config(Q). + diff --git a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl index 09e782018f53..83fb5c27ef70 100644 --- a/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl +++ b/deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl @@ -118,4 +118,4 @@ set_and_verify_vm_memory_high_watermark_absolute(MemLimit0) -> _ -> ct:fail("Expected memory high watermark to be ~tp but it was ~tp", [Interpreted, MemLimit]) end, - vm_memory_monitor:set_vm_memory_high_watermark(0.4). \ No newline at end of file + vm_memory_monitor:set_vm_memory_high_watermark(0.6). diff --git a/deps/rabbit/test/upgrade_preparation_SUITE.erl b/deps/rabbit/test/upgrade_preparation_SUITE.erl index 29787ae8d524..54bb13483fa9 100644 --- a/deps/rabbit/test/upgrade_preparation_SUITE.erl +++ b/deps/rabbit/test/upgrade_preparation_SUITE.erl @@ -14,20 +14,16 @@ all() -> [ - {group, quorum_queue}, - {group, stream} + {group, clustered} ]. 
groups() -> [ - {quorum_queue, [], [ - await_quorum_plus_one_qq - ]}, - {stream, [], [ - await_quorum_plus_one_stream - ]}, - {stream_coordinator, [], [ - await_quorum_plus_one_stream_coordinator + {clustered, [], [ + await_quorum_plus_one_qq, + await_quorum_plus_one_stream, + await_quorum_plus_one_stream_coordinator, + await_quorum_plus_one_rabbitmq_metadata ]} ]. @@ -44,21 +40,14 @@ end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). init_per_group(Group, Config) -> - case rabbit_ct_helpers:is_mixed_versions() of - true -> - %% in a 3.8/3.9 mixed cluster, ra will not cluster across versions, - %% so quorum plus one will not be achieved - {skip, "not mixed versions compatible"}; - _ -> - Config1 = rabbit_ct_helpers:set_config(Config, - [ - {rmq_nodes_count, 3}, - {rmq_nodename_suffix, Group} - ]), - rabbit_ct_helpers:run_steps(Config1, - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()) - end. + Config1 = rabbit_ct_helpers:set_config(Config, + [ + {rmq_nodes_count, 3}, + {rmq_nodename_suffix, Group} + ]), + rabbit_ct_helpers:run_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, @@ -66,9 +55,15 @@ end_per_group(_Group, Config) -> rabbit_ct_broker_helpers:teardown_steps()). -init_per_testcase(TestCase, Config) -> - rabbit_ct_helpers:testcase_started(Config, TestCase), - Config. +init_per_testcase(Testcase, Config) when Testcase == await_quorum_plus_one_rabbitmq_metadata -> + case rabbit_ct_helpers:is_mixed_versions() of + true -> + {skip, "not mixed versions compatible"}; + _ -> + rabbit_ct_helpers:testcase_started(Config, Testcase) + end; +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(TestCase, Config) -> rabbit_ct_helpers:testcase_finished(Config, TestCase). 
@@ -120,12 +115,24 @@ await_quorum_plus_one_stream_coordinator(Config) -> %% no queues/streams beyond this point ok = rabbit_ct_broker_helpers:stop_node(Config, B), - %% this should fail because the corrdinator has only 2 running nodes + %% this should fail because the coordinator has only 2 running nodes ?assertNot(await_quorum_plus_one(Config, 0)), ok = rabbit_ct_broker_helpers:start_node(Config, B), ?assert(await_quorum_plus_one(Config, 0)). +await_quorum_plus_one_rabbitmq_metadata(Config) -> + Nodes = [A, B, _C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + ok = rabbit_ct_broker_helpers:enable_feature_flag(Config, Nodes, khepri_db), + ?assert(await_quorum_plus_one(Config, A)), + + ok = rabbit_ct_broker_helpers:stop_node(Config, B), + %% this should fail because rabbitmq_metadata has only 2 running nodes + ?assertNot(await_quorum_plus_one(Config, A)), + + ok = rabbit_ct_broker_helpers:start_node(Config, B), + ?assert(await_quorum_plus_one(Config, A)). + %% %% Implementation %% diff --git a/deps/rabbit_common/Makefile b/deps/rabbit_common/Makefile index f4a56200f693..857cee1ade5d 100644 --- a/deps/rabbit_common/Makefile +++ b/deps/rabbit_common/Makefile @@ -38,12 +38,10 @@ DEPS = thoas ranch recon credentials_obfuscation -include development.pre.mk -DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-test.mk +DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
DEP_PLUGINS = $(PROJECT)/mk/rabbitmq-build.mk \ - $(PROJECT)/mk/rabbitmq-hexpm.mk \ - $(PROJECT)/mk/rabbitmq-dist.mk \ - $(PROJECT)/mk/rabbitmq-test.mk \ - $(PROJECT)/mk/rabbitmq-tools.mk + $(PROJECT)/mk/rabbitmq-hexpm.mk PLT_APPS += mnesia crypto ssl @@ -54,9 +52,7 @@ HEX_TARBALL_FILES += rabbitmq-components.mk \ git-revisions.txt \ mk/rabbitmq-build.mk \ mk/rabbitmq-dist.mk \ - mk/rabbitmq-early-test.mk \ - mk/rabbitmq-hexpm.mk \ - mk/rabbitmq-test.mk \ - mk/rabbitmq-tools.mk + mk/rabbitmq-early-plugin.mk \ + mk/rabbitmq-hexpm.mk -include development.post.mk diff --git a/deps/rabbit_common/mk/rabbitmq-build.mk b/deps/rabbit_common/mk/rabbitmq-build.mk index 010045f5c37a..93d9613c17ce 100644 --- a/deps/rabbit_common/mk/rabbitmq-build.mk +++ b/deps/rabbit_common/mk/rabbitmq-build.mk @@ -8,9 +8,10 @@ TEST_ERLC_OPTS += +nowarn_export_all -ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),) +ifneq ($(filter rabbitmq_cli,$(BUILD_DEPS) $(DEPS)),) # Add the CLI ebin directory to the code path for the compiler: plugin # CLI extensions may access behaviour modules defined in this directory. + RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin endif diff --git a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk index faf75872024e..e9a1ac0db080 100644 --- a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk +++ b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk @@ -6,25 +6,30 @@ ifeq ($(.DEFAULT_GOAL),) endif # PROJECT_VERSION defaults to: -# 1. the version exported by rabbitmq-server-release; +# 1. the version exported by environment; # 2. the version stored in `git-revisions.txt`, if it exists; # 3. a version based on git-describe(1), if it is a Git clone; # 4. 0.0.0 +# +# Note that in the case where git-describe(1) is used +# (e.g. during development), running "git gc" may help +# improve the performance. 
PROJECT_VERSION := $(RABBITMQ_VERSION) ifeq ($(PROJECT_VERSION),) -PROJECT_VERSION := $(shell \ -if test -f git-revisions.txt; then \ +ifneq ($(wildcard git-revisions.txt),) +PROJECT_VERSION = $(shell \ head -n1 git-revisions.txt | \ - awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \ -else \ + awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}') +else +PROJECT_VERSION = $(shell \ (git describe --dirty --abbrev=7 --tags --always --first-parent \ - 2>/dev/null || echo rabbitmq_v0_0_0) | \ - sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \ - -e 's/-/./g'; \ -fi) + 2>/dev/null || echo 0.0.0) | \ + sed -e 's/^v//' -e 's/_/./g' -e 's/-/+/' -e 's/-/./g') endif +endif + # -------------------------------------------------------------------- # RabbitMQ components. diff --git a/deps/rabbit_common/mk/rabbitmq-dist.mk b/deps/rabbit_common/mk/rabbitmq-dist.mk index 1d0254452fec..f55fe1ef08ea 100644 --- a/deps/rabbit_common/mk/rabbitmq-dist.mk +++ b/deps/rabbit_common/mk/rabbitmq-dist.mk @@ -212,7 +212,10 @@ CLI_ESCRIPTS_LOCK = $(CLI_ESCRIPTS_DIR).lock ifeq ($(MAKELEVEL),0) ifneq ($(filter-out rabbit_common amqp10_common rabbitmq_stream_common,$(PROJECT)),) +# These do not depend on 'rabbit' as DEPS but may as TEST_DEPS. 
+ifneq ($(filter-out amqp_client amqp10_client rabbitmq_amqp_client rabbitmq_ct_helpers,$(PROJECT)),) app:: install-cli +endif test-build:: install-cli endif endif diff --git a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk index 7b5f14b8f912..1b8aaa3f422a 100644 --- a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk +++ b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk @@ -1,3 +1,65 @@ -ifeq ($(filter rabbitmq-early-test.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-early-test.mk +# -------------------------------------------------------------------- +# dialyzer +# -------------------------------------------------------------------- + +DIALYZER_OPTS ?= -Werror_handling -Wunmatched_returns -Wunknown + +dialyze: ERL_LIBS = $(APPS_DIR):$(DEPS_DIR):$(DEPS_DIR)/rabbitmq_cli/_build/dev/lib:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) + +# -------------------------------------------------------------------- +# Common Test flags. +# -------------------------------------------------------------------- + +ifneq ($(PROJECT),rabbitmq_server_release) +CT_LOGS_DIR = $(abspath $(CURDIR)/../../logs) +endif + +# We start the common_test node as a hidden Erlang node. The benefit +# is that other Erlang nodes won't try to connect to each other after +# discovering the common_test node if they are not meant to. +# +# This helps when several unrelated RabbitMQ clusters are started in +# parallel. + +CT_OPTS += -hidden + +# We set a low tick time to deal with distribution failures quicker. + +CT_OPTS += -kernel net_ticktime 5 + +# Enable the following common_test hooks on GH and Concourse: +# +# cth_fail_fast +# This hook will make sure the first failure puts an end to the +# testsuites; ie. all remaining tests are skipped. +# +# cth_styledout +# This hook will change the output of common_test to something more +# concise and colored. 
+ +CT_HOOKS ?= cth_styledout +TEST_DEPS += cth_styledout + +ifdef CONCOURSE +FAIL_FAST = 1 +SKIP_AS_ERROR = 1 +endif + +RMQ_CI_CT_HOOKS = cth_fail_fast +ifeq ($(FAIL_FAST),1) +CT_HOOKS += $(RMQ_CI_CT_HOOKS) +TEST_DEPS += $(RMQ_CI_CT_HOOKS) +endif + +dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master +dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master + +CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS)) +CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE)) + +# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped +# testsuite/testgroup/testcase is considered an error. + +ifeq ($(SKIP_AS_ERROR),1) +export RABBITMQ_CT_SKIP_AS_ERROR = true endif diff --git a/deps/rabbit_common/mk/rabbitmq-early-test.mk b/deps/rabbit_common/mk/rabbitmq-early-test.mk deleted file mode 100644 index 3779bd4a2fe7..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-early-test.mk +++ /dev/null @@ -1,72 +0,0 @@ -# -------------------------------------------------------------------- -# dialyzer -# -------------------------------------------------------------------- - -DIALYZER_OPTS ?= -Werror_handling -Wunmatched_returns -Wunknown - -dialyze: ERL_LIBS = $(APPS_DIR):$(DEPS_DIR):$(DEPS_DIR)/rabbitmq_cli/_build/dev/lib:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) - -# -------------------------------------------------------------------- -# %-on-concourse dependencies. -# -------------------------------------------------------------------- - -ifneq ($(words $(filter %-on-concourse,$(MAKECMDGOALS))),0) -TEST_DEPS += ci $(RMQ_CI_CT_HOOKS) -NO_AUTOPATCH += ci $(RMQ_CI_CT_HOOKS) -dep_ci = git git@github.com:rabbitmq/rabbitmq-ci.git main -endif - -# -------------------------------------------------------------------- -# Common Test flags. -# -------------------------------------------------------------------- - -# We start the common_test node as a hidden Erlang node. 
The benefit -# is that other Erlang nodes won't try to connect to each other after -# discovering the common_test node if they are not meant to. -# -# This helps when several unrelated RabbitMQ clusters are started in -# parallel. - -CT_OPTS += -hidden - -# Enable the following common_test hooks on GH and Concourse: -# -# cth_fail_fast -# This hook will make sure the first failure puts an end to the -# testsuites; ie. all remaining tests are skipped. -# -# cth_styledout -# This hook will change the output of common_test to something more -# concise and colored. -# -# On Jenkins, in addition to those common_test hooks, enable JUnit-like -# report. Jenkins parses those reports so the results can be browsed -# from its UI. Furthermore, it displays a graph showing evolution of the -# results over time. - -CT_HOOKS ?= cth_styledout -TEST_DEPS += cth_styledout - -ifdef CONCOURSE -FAIL_FAST = 1 -SKIP_AS_ERROR = 1 -endif - -RMQ_CI_CT_HOOKS = cth_fail_fast -ifeq ($(FAIL_FAST),1) -CT_HOOKS += $(RMQ_CI_CT_HOOKS) -TEST_DEPS += $(RMQ_CI_CT_HOOKS) -endif - -dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master -dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master - -CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS)) -CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE)) - -# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped -# testsuite/testgroup/testcase is considered an error. - -ifeq ($(SKIP_AS_ERROR),1) -export RABBITMQ_CT_SKIP_AS_ERROR = true -endif diff --git a/deps/rabbit_common/mk/rabbitmq-hexpm.mk b/deps/rabbit_common/mk/rabbitmq-hexpm.mk index 4f314249bdf5..c4c62fdfa865 100644 --- a/deps/rabbit_common/mk/rabbitmq-hexpm.mk +++ b/deps/rabbit_common/mk/rabbitmq-hexpm.mk @@ -1,5 +1,8 @@ # -------------------------------------------------------------------- # Hex.pm. +# +# This Erlang.mk plugin should only be included by +# applications that produce an Hex.pm release. 
# -------------------------------------------------------------------- .PHONY: hex-publish hex-publish-docs diff --git a/deps/rabbit_common/mk/rabbitmq-plugin.mk b/deps/rabbit_common/mk/rabbitmq-plugin.mk index 29064a9a4f94..fd47b8beec21 100644 --- a/deps/rabbit_common/mk/rabbitmq-plugin.mk +++ b/deps/rabbit_common/mk/rabbitmq-plugin.mk @@ -2,10 +2,6 @@ ifeq ($(filter rabbitmq-build.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-build.mk endif -ifeq ($(filter rabbitmq-hexpm.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-hexpm.mk -endif - ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk endif @@ -13,11 +9,3 @@ endif ifeq ($(filter rabbitmq-run.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-run.mk endif - -ifeq ($(filter rabbitmq-test.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-test.mk -endif - -ifeq ($(filter rabbitmq-tools.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-tools.mk -endif diff --git a/deps/rabbit_common/mk/rabbitmq-run.mk b/deps/rabbit_common/mk/rabbitmq-run.mk index c7c322110897..b3f7a3e998f9 100644 --- a/deps/rabbit_common/mk/rabbitmq-run.mk +++ b/deps/rabbit_common/mk/rabbitmq-run.mk @@ -115,7 +115,7 @@ RABBITMQ_STREAM_DIR="$(call node_stream_dir,$(2))" \ RABBITMQ_FEATURE_FLAGS_FILE="$(call node_feature_flags_file,$(2))" \ RABBITMQ_PLUGINS_DIR="$(call node_plugins_dir)" \ RABBITMQ_PLUGINS_EXPAND_DIR="$(call node_plugins_expand_dir,$(2))" \ -RABBITMQ_SERVER_START_ARGS="-ra wal_sync_method sync $(RABBITMQ_SERVER_START_ARGS)" \ +RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \ RABBITMQ_ENABLED_PLUGINS="$(RABBITMQ_ENABLED_PLUGINS)" endef @@ -189,8 +189,7 @@ $(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((5552 $(if $(RABBITMQ_NODE_PORT), {tcp_config$(comma) 
[{port$(comma) $(shell echo "$$((15692 + $(RABBITMQ_NODE_PORT) - 5672))")}]},) ]}, {ra, [ - {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, - {wal_sync_method, sync} + {data_dir, "$(RABBITMQ_QUORUM_DIR)"} ]}, {osiris, [ {data_dir, "$(RABBITMQ_STREAM_DIR)"} @@ -227,8 +226,7 @@ define test_rabbitmq_config_with_tls ]} ]}, {ra, [ - {data_dir, "$(RABBITMQ_QUORUM_DIR)"}, - {wal_sync_method, sync} + {data_dir, "$(RABBITMQ_QUORUM_DIR)"} ]}, {osiris, [ {data_dir, "$(RABBITMQ_STREAM_DIR)"} diff --git a/deps/rabbit_common/mk/rabbitmq-test.mk b/deps/rabbit_common/mk/rabbitmq-test.mk deleted file mode 100644 index 16cf2dc8f6bc..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-test.mk +++ /dev/null @@ -1,66 +0,0 @@ -.PHONY: ct-slow ct-fast - -ct-slow ct-fast: - $(MAKE) ct CT_SUITES='$(CT_SUITES)' - -# -------------------------------------------------------------------- -# Helpers to run Make targets on Concourse. -# -------------------------------------------------------------------- - -FLY ?= fly -FLY_TARGET ?= $(shell $(FLY) targets | awk '/ci\.rabbitmq\.com/ { print $$1; }') - -CONCOURSE_TASK = $(ERLANG_MK_TMP)/concourse-task.yaml - -CI_DIR ?= $(DEPS_DIR)/ci -PIPELINE_DIR = $(CI_DIR)/server-release -BRANCH_RELEASE = $(shell "$(PIPELINE_DIR)/scripts/map-branch-to-release.sh" "$(base_rmq_ref)") -PIPELINE_DATA = $(PIPELINE_DIR)/release-data-$(BRANCH_RELEASE).yaml -REPOSITORY_NAME = $(shell "$(PIPELINE_DIR)/scripts/map-erlang-app-and-repository-name.sh" "$(PIPELINE_DATA)" "$(PROJECT)") - -CONCOURSE_PLATFORM ?= linux -ERLANG_VERSION ?= $(shell "$(PIPELINE_DIR)/scripts/list-erlang-versions.sh" "$(PIPELINE_DATA)" | head -n 1) -TASK_INPUTS = $(shell "$(PIPELINE_DIR)/scripts/list-task-inputs.sh" "$(CONCOURSE_TASK)") - -.PHONY: $(CONCOURSE_TASK) -$(CONCOURSE_TASK): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) - $(gen_verbose) echo 'platform: $(CONCOURSE_PLATFORM)' > "$@" - $(verbose) echo 'inputs:' >> "$@" - $(verbose) echo ' - name: $(PROJECT)' >> "$@" - $(verbose) cat 
$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) | while read -r file; do \ - echo " - name: $$(basename "$$file")" >> "$@"; \ - done - $(verbose) echo 'outputs:' >> "$@" - $(verbose) echo ' - name: test-output' >> "$@" -ifeq ($(CONCOURSE_PLATFORM),linux) - $(verbose) echo 'image_resource:' >> "$@" - $(verbose) echo ' type: docker-image' >> "$@" - $(verbose) echo ' source:' >> "$@" - $(verbose) echo ' repository: pivotalrabbitmq/rabbitmq-server-buildenv' >> "$@" - $(verbose) echo ' tag: linux-erlang-$(ERLANG_VERSION)' >> "$@" -endif - $(verbose) echo 'run:' >> "$@" - $(verbose) echo ' path: ci/server-release/scripts/test-erlang-app.sh' >> "$@" - $(verbose) echo ' args:' >> "$@" - $(verbose) echo " - $(PROJECT)" >> "$@" -# This section must be the last because the `%-on-concourse` target -# appends other variables. - $(verbose) echo 'params:' >> "$@" -ifdef V - $(verbose) echo ' V: "$(V)"' >> "$@" -endif -ifdef t - $(verbose) echo ' t: "$(t)"' >> "$@" -endif - -%-on-concourse: $(CONCOURSE_TASK) - $(verbose) test -d "$(PIPELINE_DIR)" - $(verbose) echo ' MAKE_TARGET: "$*"' >> "$(CONCOURSE_TASK)" - $(FLY) -t $(FLY_TARGET) execute \ - --config="$(CONCOURSE_TASK)" \ - $(foreach input,$(TASK_INPUTS), \ - $(if $(filter $(PROJECT),$(input)), \ - --input="$(input)=.", \ - --input="$(input)=$(DEPS_DIR)/$(input)")) \ - --output="test-output=$(CT_LOGS_DIR)/on-concourse" - $(verbose) rm -f "$(CT_LOGS_DIR)/on-concourse/filename" diff --git a/deps/rabbit_common/mk/rabbitmq-tools.mk b/deps/rabbit_common/mk/rabbitmq-tools.mk deleted file mode 100644 index 0e5ca370a8e4..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-tools.mk +++ /dev/null @@ -1,300 +0,0 @@ -ifeq ($(PLATFORM),msys2) -HOSTNAME = $(COMPUTERNAME) -else -ifeq ($(PLATFORM),solaris) -HOSTNAME = $(shell hostname | sed 's@\..*@@') -else -HOSTNAME = $(shell hostname -s) -endif -endif - -READY_DEPS = $(foreach DEP,\ - $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)), \ - $(if $(wildcard $(DEPS_DIR)/$(DEP)),$(DEP),)) 
- -RELEASED_RMQ_DEPS = $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS)) - -.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \ - show-current-git-fetch-url show-current-git-push-url - -show-upstream-git-fetch-url: - @echo $(RABBITMQ_UPSTREAM_FETCH_URL) - -show-upstream-git-push-url: - @echo $(RABBITMQ_UPSTREAM_PUSH_URL) - -show-current-git-fetch-url: - @echo $(RABBITMQ_CURRENT_FETCH_URL) - -show-current-git-push-url: - @echo $(RABBITMQ_CURRENT_PUSH_URL) - -update-contributor-code-of-conduct: - $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \ - cp $(DEPS_DIR)/rabbit_common/CODE_OF_CONDUCT.md $$repo/CODE_OF_CONDUCT.md; \ - cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \ - done - -ifneq ($(wildcard .git),) - -.PHONY: sync-gitremote sync-gituser - -sync-gitremote: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitremote) - @: - -%+sync-gitremote: - $(exec_verbose) cd $* && \ - git remote set-url origin \ - '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(notdir $*))' - $(verbose) cd $* && \ - git remote set-url --push origin \ - '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(notdir $*))' - -ifeq ($(origin, RMQ_GIT_GLOBAL_USER_NAME),undefined) -RMQ_GIT_GLOBAL_USER_NAME := $(shell git config --global user.name) -export RMQ_GIT_GLOBAL_USER_NAME -endif -ifeq ($(origin RMQ_GIT_GLOBAL_USER_EMAIL),undefined) -RMQ_GIT_GLOBAL_USER_EMAIL := $(shell git config --global user.email) -export RMQ_GIT_GLOBAL_USER_EMAIL -endif -ifeq ($(origin RMQ_GIT_USER_NAME),undefined) -RMQ_GIT_USER_NAME := $(shell git config user.name) -export RMQ_GIT_USER_NAME -endif -ifeq ($(origin RMQ_GIT_USER_EMAIL),undefined) -RMQ_GIT_USER_EMAIL := $(shell git config user.email) -export RMQ_GIT_USER_EMAIL -endif - -sync-gituser: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gituser) - @: - -%+sync-gituser: -ifeq ($(RMQ_GIT_USER_NAME),$(RMQ_GIT_GLOBAL_USER_NAME)) - $(exec_verbose) cd $* && git config --unset user.name || : -else - $(exec_verbose) cd $* && git config user.name 
"$(RMQ_GIT_USER_NAME)" -endif -ifeq ($(RMQ_GIT_USER_EMAIL),$(RMQ_GIT_GLOBAL_USER_EMAIL)) - $(verbose) cd $* && git config --unset user.email || : -else - $(verbose) cd $* && git config user.email "$(RMQ_GIT_USER_EMAIL)" -endif - -.PHONY: sync-gitignore-from-main -sync-gitignore-from-main: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitignore-from-main) - -%+sync-gitignore-from-main: - $(gen_verbose) cd $* && \ - if test -d .git; then \ - branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \ - ! test "$$branch" = 'main' || exit 0; \ - git show origin/main:.gitignore > .gitignore; \ - fi -ifeq ($(DO_COMMIT),yes) - $(verbose) cd $* && \ - if test -d .git; then \ - git diff --quiet .gitignore \ - || git commit -m 'Git: Sync .gitignore from main' .gitignore; \ - fi -endif - -.PHONY: show-branch - -show-branch: $(READY_DEPS:%=$(DEPS_DIR)/%+show-branch) - $(verbose) printf '%-34s %s\n' $(PROJECT): "$$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match)" - -%+show-branch: - $(verbose) printf '%-34s %s\n' $(notdir $*): "$$(cd $* && (git symbolic-ref -q --short HEAD || git describe --tags --exact-match))" - -SINCE_TAG ?= last-release -COMMITS_LOG_OPTS ?= --oneline --decorate --no-merges -MARKDOWN ?= no - -define show_commits_since_tag -set -e; \ -if test "$1"; then \ - erlang_app=$(notdir $1); \ - repository=$(call rmq_cmp_repo_name,$(notdir $1)); \ - git_dir=-C\ "$1"; \ -else \ - erlang_app=$(PROJECT); \ - repository=$(call rmq_cmp_repo_name,$(PROJECT)); \ -fi; \ -case "$(SINCE_TAG)" in \ -last-release) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \ - ;; \ -*) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \ - ;; \ -esac; \ -if test "$$tags_count" -gt 0; then \ - case "$(SINCE_TAG)" in \ - last-release) \ - ref=$$(git $$git_dir describe 
--abbrev=0 --tags \ - --exclude "*-beta*" \ - --exclude "*_milestone*" \ - --exclude "*[-_]rc*"); \ - ;; \ - last-prerelease) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags); \ - ;; \ - *) \ - git $$git_dir rev-parse "$(SINCE_TAG)" -- >/dev/null; \ - ref=$(SINCE_TAG); \ - ;; \ - esac; \ - commits_count=$$(git $$git_dir log --oneline "$$ref.." | wc -l); \ - if test "$$commits_count" -gt 0; then \ - if test "$(MARKDOWN)" = yes; then \ - printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\nCommits since \`$$ref\`:\n\n"; \ - git $$git_dir --no-pager log $(COMMITS_LOG_OPTS) \ - --format="format:* %s ([\`%h\`](https://github.com/rabbitmq/$$repository/commit/%H))" \ - "$$ref.."; \ - echo; \ - else \ - echo; \ - echo "# $$repository - Commits since $$ref"; \ - git $$git_dir log $(COMMITS_LOG_OPTS) "$$ref.."; \ - fi; \ - fi; \ -else \ - if test "$(MARKDOWN)" = yes; then \ - printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\n**New** since the last release!\n"; \ - else \ - echo; \ - echo "# $$repository - New since the last release!"; \ - fi; \ -fi -endef - -.PHONY: commits-since-release - -commits-since-release: commits-since-release-title \ - $(RELEASED_RMQ_DEPS:%=$(DEPS_DIR)/%+commits-since-release) - $(verbose) $(call show_commits_since_tag) - -commits-since-release-title: - $(verbose) set -e; \ - case "$(SINCE_TAG)" in \ - last-release) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \ - ;; \ - *) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \ - ;; \ - esac; \ - if test "$$tags_count" -gt 0; then \ - case "$(SINCE_TAG)" in \ - last-release) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags \ - --exclude "*-beta*" \ - --exclude "*_milestone*" \ - --exclude "*[-_]rc*"); \ - ;; \ - last-prerelease) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags); \ - ;; \ - *) \ - ref=$(SINCE_TAG); \ - ;; \ - esac; \ - version=$$(echo "$$ref" | sed -E \ - -e 
's/rabbitmq_v([0-9]+)_([0-9]+)_([0-9]+)/v\1.\2.\3/' \ - -e 's/_milestone/-beta./' \ - -e 's/_rc/-rc./' \ - -e 's/^v//'); \ - echo "# Changes since RabbitMQ $$version"; \ - else \ - echo "# Changes since the beginning of time"; \ - fi - -%+commits-since-release: - $(verbose) $(call show_commits_since_tag,$*) - -endif # ($(wildcard .git),) - -# -------------------------------------------------------------------- -# erlang.mk query-deps* formatting. -# -------------------------------------------------------------------- - -# We need to provide a repo mapping for deps resolved via git_rmq fetch method -query_repo_git_rmq = https://github.com/rabbitmq/$(call rmq_cmp_repo_name,$(1)) - -# -------------------------------------------------------------------- -# Common test logs compression. -# -------------------------------------------------------------------- - -.PHONY: ct-logs-archive clean-ct-logs-archive - -ifneq ($(wildcard logs/*),) -TAR := tar -ifeq ($(PLATFORM),freebsd) -TAR := gtar -endif -ifeq ($(PLATFORM),darwin) -TAR := gtar -endif - -CT_LOGS_ARCHIVE ?= $(PROJECT)-ct-logs-$(subst _,-,$(subst -,,$(subst .,,$(patsubst ct_run.ct_$(PROJECT)@$(HOSTNAME).%,%,$(notdir $(lastword $(wildcard logs/ct_run.*))))))).tar.xz - -ifeq ($(patsubst %.tar.xz,%,$(CT_LOGS_ARCHIVE)),$(CT_LOGS_ARCHIVE)) -$(error CT_LOGS_ARCHIVE file must use '.tar.xz' as its filename extension) -endif - -ct-logs-archive: $(CT_LOGS_ARCHIVE) - @: - -$(CT_LOGS_ARCHIVE): - $(gen_verbose) \ - for file in logs/*; do \ - ! 
test -L "$$file" || rm "$$file"; \ - done - $(verbose) \ - $(TAR) -c \ - --exclude "*/mnesia" \ - --transform "s/^logs/$(patsubst %.tar.xz,%,$(notdir $(CT_LOGS_ARCHIVE)))/" \ - -f - logs | \ - xz > "$@" -else -ct-logs-archive: - @: -endif - -clean-ct-logs-archive:: - $(gen_verbose) rm -f $(PROJECT)-ct-logs-*.tar.xz - -clean:: clean-ct-logs-archive - -# -------------------------------------------------------------------- -# Generate a file listing RabbitMQ component dependencies and their -# Git commit hash. -# -------------------------------------------------------------------- - -.PHONY: rabbitmq-deps.mk clean-rabbitmq-deps.mk - -rabbitmq-deps.mk: $(PROJECT)-rabbitmq-deps.mk - @: - -closing_paren := ) - -define rmq_deps_mk_line -dep_$(1) := git $(dir $(RABBITMQ_UPSTREAM_FETCH_URL))$(call rmq_cmp_repo_name,$(1)).git $$(git -C "$(2)" rev-parse HEAD) -endef - -$(PROJECT)-rabbitmq-deps.mk: $(ERLANG_MK_RECURSIVE_DEPS_LIST) - $(gen_verbose) echo "# In $(PROJECT) - commit $$(git rev-parse HEAD)" > $@ - $(verbose) cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | \ - while read -r dir; do \ - component=$$(basename "$$dir"); \ - case "$$component" in \ - $(foreach component,$(RABBITMQ_COMPONENTS),$(component)$(closing_paren) echo "$(call rmq_deps_mk_line,$(component),$$dir)" ;;) \ - esac; \ - done >> $@ - -clean:: clean-rabbitmq-deps.mk - -clean-rabbitmq-deps.mk: - $(gen_verbose) rm -f $(PROJECT)-rabbitmq-deps.mk diff --git a/deps/rabbit_common/src/rabbit_core_metrics.erl b/deps/rabbit_common/src/rabbit_core_metrics.erl index c06b73bc457d..8b5430076f53 100644 --- a/deps/rabbit_common/src/rabbit_core_metrics.erl +++ b/deps/rabbit_common/src/rabbit_core_metrics.erl @@ -124,8 +124,8 @@ terminate() -> connection_created(Pid, Infos) -> ets:insert(connection_created, {Pid, Infos}), - ets:update_counter(connection_churn_metrics, node(), {2, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {2, 1}, + ?CONNECTION_CHURN_METRICS), ok. 
connection_closed(Pid) -> @@ -133,8 +133,8 @@ connection_closed(Pid) -> ets:delete(connection_metrics, Pid), %% Delete marker ets:update_element(connection_coarse_metrics, Pid, {5, 1}), - ets:update_counter(connection_churn_metrics, node(), {3, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {3, 1}, + ?CONNECTION_CHURN_METRICS), ok. connection_stats(Pid, Infos) -> @@ -148,16 +148,16 @@ connection_stats(Pid, Recv_oct, Send_oct, Reductions) -> channel_created(Pid, Infos) -> ets:insert(channel_created, {Pid, Infos}), - ets:update_counter(connection_churn_metrics, node(), {4, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {4, 1}, + ?CONNECTION_CHURN_METRICS), ok. channel_closed(Pid) -> ets:delete(channel_created, Pid), ets:delete(channel_metrics, Pid), ets:delete(channel_process_metrics, Pid), - ets:update_counter(connection_churn_metrics, node(), {5, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {5, 1}, + ?CONNECTION_CHURN_METRICS), ok. channel_stats(Pid, Infos) -> @@ -276,20 +276,20 @@ queue_stats(Name, MessagesReady, MessagesUnacknowledge, Messages, Reductions) -> queue_declared(_Name) -> %% Name is not needed, but might be useful in the future. - ets:update_counter(connection_churn_metrics, node(), {6, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {6, 1}, + ?CONNECTION_CHURN_METRICS), ok. queue_created(_Name) -> %% Name is not needed, but might be useful in the future. - ets:update_counter(connection_churn_metrics, node(), {7, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {7, 1}, + ?CONNECTION_CHURN_METRICS), ok. 
queue_deleted(Name) -> ets:delete(queue_coarse_metrics, Name), - ets:update_counter(connection_churn_metrics, node(), {8, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {8, 1}, + ?CONNECTION_CHURN_METRICS), %% Delete markers ets:update_element(queue_metrics, Name, {3, 1}), CQX = ets:select(channel_queue_exchange_metrics, match_spec_cqx(Name)), @@ -302,8 +302,8 @@ queue_deleted(Name) -> end, CQ). queues_deleted(Queues) -> - ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)}, + ?CONNECTION_CHURN_METRICS), [ delete_queue_metrics(Queue) || Queue <- Queues ], [ begin diff --git a/deps/rabbit_common/src/rabbit_misc.erl b/deps/rabbit_common/src/rabbit_misc.erl index af6fc536b046..1821abb75eca 100644 --- a/deps/rabbit_common/src/rabbit_misc.erl +++ b/deps/rabbit_common/src/rabbit_misc.erl @@ -26,9 +26,6 @@ -export([table_lookup/2, set_table_value/4, amqp_table/1, to_amqp_table/1]). -export([r/3, r/2, r_arg/4, rs/1, queue_resource/2, exchange_resource/2]). --export([enable_cover/0, report_cover/0]). --export([enable_cover/1, report_cover/1]). --export([start_cover/1]). -export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1, filter_exit_map/2]). -export([ensure_ok/2]). @@ -89,7 +86,7 @@ maps_put_falsy/3 ]). -export([remote_sup_child/2]). --export([for_each_while_ok/2]). +-export([for_each_while_ok/2, fold_while_ok/3]). %% Horrible macro to use in guards -define(IS_BENIGN_EXIT(R), @@ -165,11 +162,6 @@ {invalid_type, rabbit_framing:amqp_field_type()}) | rabbit_types:r(K) when is_subtype(K, atom()). -spec rs(rabbit_types:r(atom())) -> string(). --spec enable_cover() -> ok_or_error(). --spec start_cover([{string(), string()} | string()]) -> 'ok'. --spec report_cover() -> 'ok'. --spec enable_cover([file:filename() | atom()]) -> ok_or_error(). 
--spec report_cover([file:filename() | atom()]) -> 'ok'. -spec throw_on_error (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A. -spec with_exit_handler(thunk(A), thunk(A)) -> A. @@ -449,59 +441,6 @@ queue_resource(VHostPath, Name) -> exchange_resource(VHostPath, Name) -> r(VHostPath, exchange, Name). -enable_cover() -> enable_cover(["."]). - -enable_cover(Dirs) -> - lists:foldl(fun (Dir, ok) -> - case cover:compile_beam_directory( - filename:join(lists:concat([Dir]),"ebin")) of - {error, _} = Err -> Err; - _ -> ok - end; - (_Dir, Err) -> - Err - end, ok, Dirs). - -start_cover(NodesS) -> - {ok, _} = cover:start([rabbit_nodes_common:make(N) || N <- NodesS]), - ok. - -report_cover() -> report_cover(["."]). - -report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok. - -report_cover1(Root) -> - Dir = filename:join(Root, "cover"), - ok = filelib:ensure_dir(filename:join(Dir, "junk")), - lists:foreach(fun (F) -> file:delete(F) end, - filelib:wildcard(filename:join(Dir, "*.html"))), - {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]), - {CT, NCT} = - lists:foldl( - fun (M,{CovTot, NotCovTot}) -> - {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module), - ok = report_coverage_percentage(SummaryFile, - Cov, NotCov, M), - {ok,_} = cover:analyze_to_file( - M, - filename:join(Dir, atom_to_list(M) ++ ".html"), - [html]), - {CovTot+Cov, NotCovTot+NotCov} - end, - {0, 0}, - lists:sort(cover:modules())), - ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'), - ok = file:close(SummaryFile), - ok. - -report_coverage_percentage(File, Cov, NotCov, Mod) -> - io:fwrite(File, "~6.2f ~tp~n", - [if - Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov); - true -> 100.0 - end, - Mod]). - %% @doc Halts the emulator returning the given status code to the os. %% On Windows this function will block indefinitely so as to give the io %% subsystem time to flush stdout completely. 
@@ -1655,3 +1594,24 @@ for_each_while_ok(Fun, [Elem | Rest]) -> end; for_each_while_ok(_, []) -> ok. + +-spec fold_while_ok(FoldFun, Acc, List) -> Ret when + FoldFun :: fun((Element, Acc) -> {ok, Acc} | {error, ErrReason}), + Element :: any(), + List :: [Element], + Ret :: {ok, Acc} | {error, ErrReason}. +%% @doc Calls the given `FoldFun' on each element of the given `List' and the +%% accumulator value, short-circuiting if the function returns `{error,_}'. +%% +%% @returns the first `{error,_}' returned by `FoldFun' or `{ok,Acc}' if +%% `FoldFun' never returns an error tuple. + +fold_while_ok(Fun, Acc0, [Elem | Rest]) -> + case Fun(Elem, Acc0) of + {ok, Acc} -> + fold_while_ok(Fun, Acc, Rest); + {error, _} = Error -> + Error + end; +fold_while_ok(_Fun, Acc, []) -> + {ok, Acc}. diff --git a/deps/rabbitmq_amqp1_0/Makefile b/deps/rabbitmq_amqp1_0/Makefile index 30dc3ed18824..f59aac6d7fa7 100644 --- a/deps/rabbitmq_amqp1_0/Makefile +++ b/deps/rabbitmq_amqp1_0/Makefile @@ -3,8 +3,7 @@ PROJECT_DESCRIPTION = Deprecated no-op AMQP 1.0 plugin LOCAL_DEPS = rabbit -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk .DEFAULT_GOAL = all diff --git a/deps/rabbitmq_amqp_client/Makefile b/deps/rabbitmq_amqp_client/Makefile index 0a50069065e3..d9cabad59ba1 100644 --- a/deps/rabbitmq_amqp_client/Makefile +++ b/deps/rabbitmq_amqp_client/Makefile @@ -8,12 +8,9 @@ BUILD_DEPS = rabbit_common DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk TEST_DEPS = rabbit rabbitmq_ct_helpers -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk .DEFAULT_GOAL = all diff --git a/deps/rabbitmq_amqp_client/test/management_SUITE.erl b/deps/rabbitmq_amqp_client/test/management_SUITE.erl index 0e49a0d786e8..4926f13c8c92 100644 --- a/deps/rabbitmq_amqp_client/test/management_SUITE.erl +++ b/deps/rabbitmq_amqp_client/test/management_SUITE.erl @@ -117,7 +117,7 @@ init_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(Testcase, Config) -> - %% Assert that every testcase cleaned up. + %% Ensure that all queues were cleaned up eventually(?_assertEqual([], rpc(Config, rabbit_amqqueue, list, []))), rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -268,12 +268,12 @@ all_management_operations(Config) -> queue_defaults(Config) -> Init = {_, LinkPair} = init(Config), QName = atom_to_binary(?FUNCTION_NAME), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), - [Q] = rpc(Config, rabbit_amqqueue, list, []), + {ok, Q} = rpc(Config, rabbit_amqqueue, lookup, [QName, <<"/">>]), ?assert(rpc(Config, amqqueue, is_durable, [Q])), ?assertNot(rpc(Config, amqqueue, is_exclusive, [Q])), ?assertNot(rpc(Config, amqqueue, is_auto_delete, [Q])), - ?assertEqual([], rpc(Config, amqqueue, get_arguments, [Q])), {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), ok = cleanup(Init). 
@@ -448,10 +448,11 @@ declare_queue_default_queue_type(Config) -> {ok, Session} = amqp10_client:begin_session_sync(Connection), {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), ?assertMatch({ok, #{type := <<"quorum">>}}, rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{})), - {ok, #{}} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, QName), ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), ok = amqp10_client:end_session(Session), ok = amqp10_client:close_connection(Connection), diff --git a/deps/rabbitmq_auth_backend_http/README.md b/deps/rabbitmq_auth_backend_http/README.md index 050e3837d2ec..fefb2889d862 100644 --- a/deps/rabbitmq_auth_backend_http/README.md +++ b/deps/rabbitmq_auth_backend_http/README.md @@ -159,6 +159,27 @@ If the certificate of your Web Server should be matched against a wildcard certi {customize_hostname_check, [{match_fun,public_key:pkix_verify_hostname_match_fun(https)}]} ``` +## Tuning HTTP client timeouts + +You can configure the request timeout and connection timeout (see `timeout` and `connect_timeout` respectively in Erlang/OTP [httpc documentation](https://www.erlang.org/doc/apps/inets/httpc.html#request/5)). The default value is 15 seconds for both. + +In `rabbitmq.conf`: + +``` +auth_http.request_timeout=20000 +auth_http.connection_timeout=10000 +``` + +In the [`advanced.config` format](https://www.rabbitmq.com/configure.html#advanced-config-file): + +``` +{rabbitmq_auth_backend_http, + [{request_timeout, 20_000}, + {connection_timeout, 10_000}, + ... +]} +``` + ## Debugging [Enable debug logging](https://rabbitmq.com/logging.html#debug-logging) to see what the backend service receives. 
diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml index b17460d8adef..fd64bfacc31b 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml @@ -29,13 +29,13 @@ org.springframework.boot spring-boot-starter-parent - 3.3.2 + 3.3.4 17 17 - 5.10.3 + 5.11.1 com.rabbitmq.examples diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml index f002a7f09f4b..d12ea560a97a 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot_kotlin/pom.xml @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.3.2 + 3.3.4 @@ -23,7 +23,7 @@ UTF-8 17 17 - 2.0.0 + 2.0.20 5.10.0 diff --git a/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema index 150770ce2c18..b50013fb1651 100644 --- a/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema +++ b/deps/rabbitmq_auth_backend_http/priv/schema/rabbitmq_auth_backend_http.schema @@ -116,7 +116,7 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "auth_http.ssl_options.password", "rabbitmq_auth_backend_http.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "auth_http.ssl_options.psk_identity", "rabbitmq_auth_backend_http.ssl_options.psk_identity", [{datatype, string}]}. 
diff --git a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets index 9cd2ade9cb24..7d94d78bbc16 100644 --- a/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets +++ b/deps/rabbitmq_auth_backend_http/test/config_schema_SUITE_data/rabbitmq_auth_backend_http.snippets @@ -78,7 +78,7 @@ [{cacertfile,"test/config_schema_SUITE_data/certs/invalid_cacert.pem"}, {certfile,"test/config_schema_SUITE_data/certs/invalid_cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/invalid_key.pem"}, - {password,"t0p$3kRe7"}]}]}], + {password,<<"t0p$3kRe7">>}]}]}], []}, {ssl_options_tls_versions, "auth_http.ssl_options.cacertfile = test/config_schema_SUITE_data/certs/invalid_cacert.pem diff --git a/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema index 669e27912552..daf58bb49440 100644 --- a/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema +++ b/deps/rabbitmq_auth_backend_ldap/priv/schema/rabbitmq_auth_backend_ldap.schema @@ -120,7 +120,7 @@ end}. [{datatype, [string]}]}. {mapping, "auth_ldap.dn_lookup_bind.password", "rabbitmq_auth_backend_ldap.dn_lookup_bind", - [{datatype, [string]}]}. + [{datatype, [tagged_binary, binary]}]}. %% - as_user (to bind as the authenticated user - requires a password) %% - anon (to bind anonymously) @@ -161,7 +161,7 @@ end}. [{datatype, string}]}. {mapping, "auth_ldap.other_bind.password", "rabbitmq_auth_backend_ldap.other_bind", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. 
{translation, "rabbitmq_auth_backend_ldap.other_bind", fun(Conf) -> diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl index 37d62f0dd218..bba6767a3ce4 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl @@ -85,6 +85,7 @@ user_login_authentication(Username, _AuthProps) -> %% Credentials (i.e. password) maybe directly in the password attribute in AuthProps %% or as a Function with the attribute rabbit_auth_backend_ldap if the user was already authenticated with http backend %% or as a Function with the attribute rabbit_auth_backend_cache if the user was already authenticated via cache backend +-spec extractPassword(list()) -> rabbit_types:option(binary()). extractPassword(AuthProps) -> case proplists:get_value(password, AuthProps, none) of none -> @@ -377,14 +378,8 @@ search_groups(LDAP, Desc, GroupsBase, Scope, DN) -> []; {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, [], _Referrals}} -> - []; {ok, {eldap_search_result, [], _Referrals, _Controls}}-> []; - {ok, {eldap_search_result, Entries, _Referrals}} -> - [ON || #eldap_entry{object_name = ON} <- Entries]; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> [ON || #eldap_entry{object_name = ON} <- Entries] end. 
@@ -469,10 +464,6 @@ object_exists(DN, Filter, LDAP) -> {scope, eldap:baseObject()}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, Entries, _Referrals}} -> - length(Entries) > 0; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> length(Entries) > 0; {error, _} = E -> @@ -486,14 +477,8 @@ attribute(DN, AttributeName, LDAP) -> {attributes, [AttributeName]}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, E = [#eldap_entry{}|_], _Referrals}} -> - get_attributes(AttributeName, E); {ok, {eldap_search_result, E = [#eldap_entry{}|_], _Referrals, _Controls}} -> get_attributes(AttributeName, E); - {ok, {eldap_search_result, _Entries, _Referrals}} -> - {error, not_found}; {ok, {eldap_search_result, _Entries, _Referrals, _Controls}} -> {error, not_found}; {error, _} = E -> @@ -889,18 +874,9 @@ dn_lookup(Username, LDAP) -> {attributes, ["distinguishedName"]}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, [#eldap_entry{object_name = DN}], _Referrals}}-> - ?L1("DN lookup: ~ts -> ~ts", [Username, DN]), - DN; {ok, {eldap_search_result, [#eldap_entry{object_name = DN}], _Referrals, _Controls}}-> ?L1("DN lookup: ~ts -> ~ts", [Username, DN]), DN; - {ok, {eldap_search_result, Entries, _Referrals}} -> - rabbit_log_ldap:warning("Searching for DN for ~ts, got back ~tp", - [Filled, Entries]), - Filled; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> rabbit_log_ldap:warning("Searching for DN for ~ts, got back ~tp", [Filled, Entries]), diff --git 
a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets index c07e8aa37844..daa7e955cc0a 100644 --- a/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets +++ b/deps/rabbitmq_auth_backend_ldap/test/config_schema_SUITE_data/rabbitmq_auth_backend_ldap.snippets @@ -119,7 +119,7 @@ {db_lookup_bind, "auth_ldap.dn_lookup_bind.user_dn = username auth_ldap.dn_lookup_bind.password = password", - [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,{"username","password"}}]}], + [{rabbitmq_auth_backend_ldap,[{dn_lookup_bind,{"username",<<"password">>}}]}], [rabbitmq_auth_backend_ldap]}, {db_lookup_bind_anon, @@ -147,7 +147,7 @@ {other_bind_pass, "auth_ldap.other_bind.user_dn = username auth_ldap.other_bind.password = password", - [{rabbitmq_auth_backend_ldap,[{other_bind,{"username","password"}}]}], + [{rabbitmq_auth_backend_ldap,[{other_bind,{"username",<<"password">>}}]}], [rabbitmq_auth_backend_ldap]}, {ssl_options, diff --git a/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel b/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel index 6529f4a3622b..71c3d2e46289 100644 --- a/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel +++ b/deps/rabbitmq_auth_backend_oauth2/BUILD.bazel @@ -50,6 +50,7 @@ rabbitmq_app( "//deps/rabbit:erlang_app", "@base64url//:erlang_app", "@cowlib//:erlang_app", + "@cuttlefish//:erlang_app", "@jose//:erlang_app", ], ) @@ -93,7 +94,7 @@ eunit( broker_for_integration_suites( extra_plugins = [ - "//deps/rabbitmq_mqtt:erlang_app", + "//deps/rabbitmq_web_mqtt:erlang_app", ], ) @@ -143,6 +144,14 @@ rabbitmq_suite( ], ) +rabbitmq_suite( + name = "rabbit_oauth2_schema_SUITE", + size = "medium", + deps = [ + "//deps/rabbit_common:erlang_app", + ], +) + rabbitmq_integration_suite( name = "system_SUITE", size = "medium", @@ -151,6 +160,7 @@ rabbitmq_integration_suite( ], runtime_deps = [ 
"//deps/oauth2_client:erlang_app", + "//deps/rabbitmq_amqp_client:erlang_app", "@emqtt//:erlang_app", ], ) diff --git a/deps/rabbitmq_auth_backend_oauth2/Makefile b/deps/rabbitmq_auth_backend_oauth2/Makefile index 96f8cf6a2970..1066e7be8271 100644 --- a/deps/rabbitmq_auth_backend_oauth2/Makefile +++ b/deps/rabbitmq_auth_backend_oauth2/Makefile @@ -8,7 +8,7 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = inets public_key BUILD_DEPS = rabbit_common DEPS = rabbit cowlib jose base64url oauth2_client -TEST_DEPS = cowboy rabbitmq_web_dispatch rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client rabbitmq_mqtt emqtt +TEST_DEPS = cowboy rabbitmq_web_dispatch rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client rabbitmq_web_mqtt emqtt rabbitmq_amqp_client PLT_APPS += rabbitmqctl diff --git a/deps/rabbitmq_auth_backend_oauth2/app.bzl b/deps/rabbitmq_auth_backend_oauth2/app.bzl index ccf72932cfaa..003818ac74be 100644 --- a/deps/rabbitmq_auth_backend_oauth2/app.bzl +++ b/deps/rabbitmq_auth_backend_oauth2/app.bzl @@ -14,6 +14,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", "src/rabbit_oauth2_config.erl", + "src/rabbit_oauth2_schema.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -48,6 +49,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", "src/rabbit_oauth2_config.erl", + "src/rabbit_oauth2_schema.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -93,6 +95,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_auth_backend_oauth2.erl", "src/rabbit_auth_backend_oauth2_app.erl", "src/rabbit_oauth2_config.erl", + "src/rabbit_oauth2_schema.erl", "src/rabbit_oauth2_scope.erl", "src/uaa_jwks.erl", "src/uaa_jwt.erl", @@ -155,6 +158,15 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = 
["//deps/rabbit_common:erlang_app"], ) + erlang_bytecode( + name = "rabbit_oauth2_schema_SUITE_beam_files", + testonly = True, + srcs = ["test/rabbit_oauth2_schema_SUITE.erl"], + outs = ["test/rabbit_oauth2_schema_SUITE.beam"], + app_name = "rabbitmq_auth_backend_oauth2", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/rabbit_common:erlang_app"], + ) erlang_bytecode( name = "system_SUITE_beam_files", testonly = True, diff --git a/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema b/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema index c53c5d162b80..399708ae2562 100644 --- a/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema +++ b/deps/rabbitmq_auth_backend_oauth2/priv/schema/rabbitmq_auth_backend_oauth2.schema @@ -130,22 +130,7 @@ {translation, "rabbitmq_auth_backend_oauth2.key_config.signing_keys", fun(Conf) -> - Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.signing_keys", Conf), - TryReadingFileFun = - fun(Path) -> - case file:read_file(Path) of - {ok, Bin} -> - string:trim(Bin, trailing, "\n"); - _ -> - %% this throws and makes Cuttlefish treak the key as invalid - cuttlefish:invalid("file does not exist or cannot be read by the node") - end - end, - SigningKeys = - lists:map(fun({Id, Path}) -> - {list_to_binary(lists:last(Id)), {pem, TryReadingFileFun(Path)}} - end, Settings), - maps:from_list(SigningKeys) + rabbit_oauth2_schema:translate_signing_keys(Conf) end}. {mapping, @@ -285,36 +270,29 @@ "rabbitmq_auth_backend_oauth2.oauth_providers", [{datatype, {enum, [true, false, peer, best_effort]}}]}. +{mapping, + "auth_oauth2.oauth_providers.$name.default_key", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}]}. 
+ +%% A map of signing keys +%% +%% {signing_keys, #{<<"id1">> => {pem, <<"value1">>}, <<"id2">> => {pem, <<"value2">>}}} +%% validator doesn't work + +{mapping, + "auth_oauth2.oauth_providers.$name.signing_keys.$id", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, file}, {validators, ["file_accessible"]}]}. + +{mapping, + "auth_oauth2.oauth_providers.$name.algorithms.$algorithm", + "rabbitmq_auth_backend_oauth2.oauth_providers", + [{datatype, string}]}. + {translation, "rabbitmq_auth_backend_oauth2.oauth_providers", fun(Conf) -> - Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.oauth_providers", Conf), - AuthBackends = [{Name, {list_to_atom(Key), list_to_binary(V)}} || {["auth_oauth2","oauth_providers", Name, Key], V} <- Settings ], - Https = [{Name, {https, {list_to_atom(Key), V}}} || {["auth_oauth2","oauth_providers", Name, "https", Key], V} <- Settings ], - - %% Aggregate all options for one provider - KeyFun = fun({Name, _}) -> list_to_binary(Name) end, - ValueFun = fun({_, V}) -> V end, - ProviderNameToListOfSettings = maps:groups_from_list(KeyFun, ValueFun, AuthBackends), - ProviderNameToListOfHttpsSettings = maps:groups_from_list(KeyFun, fun({_, {https, V}}) -> V end, Https), - ProviderNameToListWithHttps = maps:map(fun(K1,L1) -> [{https, L1}] end, ProviderNameToListOfHttpsSettings), - NewGroup = maps:merge_with(fun(K, V1, V2) -> V1 ++ V2 end, ProviderNameToListOfSettings, ProviderNameToListWithHttps), - - ListOrSingleFun = fun(K, List) -> - case K of - ssl_options -> proplists:get_all_values(K, List); - _ -> - case proplists:lookup_all(K, List) of - [One] -> proplists:get_value(K, List); - [One|_] = V -> V - end - end - end, - GroupKeyConfigFun = fun(K, List) -> - ListKeys = proplists:get_keys(List), - [{K, ListOrSingleFun(K, List)} || K <- ListKeys] - end, - maps:map(GroupKeyConfigFun, NewGroup) - + rabbit_oauth2_schema:translate_oauth_providers(Conf) end}. {mapping, @@ -347,34 +325,13 @@ [{datatype, string}] }. 
+{mapping, + "auth_oauth2.resource_servers.$name.preferred_username_claims.$preferred_username_claims", + "rabbitmq_auth_backend_oauth2.resource_servers", + [{datatype, string}]}. + + {translation, "rabbitmq_auth_backend_oauth2.resource_servers", fun(Conf) -> - Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.resource_servers", Conf), - AuthBackends = [{Name, {list_to_atom(Key), list_to_binary(V)}} || {["auth_oauth2","resource_servers", Name, Key], V} <- Settings], - KeyFun = fun({Name,_}) -> list_to_binary(Name) end, - ValueFun = fun({_,V}) -> V end, - NewGroup = maps:groups_from_list(KeyFun, ValueFun, AuthBackends), - ListOrSingleFun = fun(K, List) -> - case K of - key_config -> proplists:get_all_values(K, List); - _ -> - case proplists:lookup_all(K, List) of - [One] -> proplists:get_value(K, List); - [One|_] = V -> V - end - end - end, - GroupKeyConfigFun = fun(K, List) -> - ListKeys = proplists:get_keys(List), - [ {K,ListOrSingleFun(K,List)} || K <- ListKeys ] - end, - NewGroupTwo = maps:map(GroupKeyConfigFun, NewGroup), - IndexByIdOrElseNameFun = fun(K, V, NewMap) -> - case proplists:get_value(id, V) of - undefined -> maps:put(K, V, NewMap); - ID -> maps:put(ID, V, NewMap) - end - end, - maps:fold(IndexByIdOrElseNameFun,#{}, NewGroupTwo) - + rabbit_oauth2_schema:translate_resource_servers(Conf) end}. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl index 1a02dccde057..f6219c06ad0f 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_config.erl @@ -15,13 +15,16 @@ -define(TOP_RESOURCE_SERVER_ID, application:get_env(?APP, resource_server_id)). 
%% scope aliases map "role names" to a set of scopes - -export([ add_signing_key/2, add_signing_key/3, replace_signing_keys/1, replace_signing_keys/2, - get_signing_keys/0, get_signing_keys/1, get_signing_key/2, - get_key_config/0, get_key_config/1, get_default_resource_server_id/0, - get_oauth_provider_for_resource_server_id/2, + get_signing_keys/0, get_signing_keys/1, get_signing_key/1, get_signing_key/2, + get_default_key/0, + get_default_resource_server_id/0, + get_resource_server_id_for_audience/1, + get_algorithms/0, get_algorithms/1, get_default_key/1, + get_oauth_provider_id_for_resource_server_id/1, + get_oauth_provider/2, get_allowed_resource_server_ids/0, find_audience_in_resource_server_ids/1, is_verify_aud/0, is_verify_aud/1, get_additional_scopes_key/0, get_additional_scopes_key/1, @@ -42,165 +45,239 @@ get_preferred_username_claims() -> append_or_return_default(Value, ?DEFAULT_PREFERRED_USERNAME_CLAIMS); _ -> ?DEFAULT_PREFERRED_USERNAME_CLAIMS end. --spec get_preferred_username_claims(binary()) -> list(). +-spec get_preferred_username_claims(binary() | list()) -> list(). get_preferred_username_claims(ResourceServerId) -> - get_preferred_username_claims(get_default_resource_server_id(), - ResourceServerId). 
-get_preferred_username_claims(TopResourceServerId, ResourceServerId) - when ResourceServerId =:= TopResourceServerId -> - get_preferred_username_claims(); -get_preferred_username_claims(TopResourceServerId, ResourceServerId) - when ResourceServerId =/= TopResourceServerId -> - ResourceServer = maps:get(ResourceServerId, application:get_env(?APP, - resource_servers, #{})), - case proplists:get_value(preferred_username_claims, ResourceServer) of - undefined -> get_preferred_username_claims(); - Value -> append_or_return_default(Value, ?DEFAULT_PREFERRED_USERNAME_CLAIMS) + ResourceServers = application:get_env(?APP, resource_servers, #{}), + ResourceServer = maps:get(ResourceServerId, ResourceServers, []), + case proplists:get_value(preferred_username_claims, ResourceServer, undefined) of + undefined -> + get_preferred_username_claims(); + Value -> + append_or_return_default(Value, ?DEFAULT_PREFERRED_USERNAME_CLAIMS) end. +-spec get_default_key() -> {ok, binary()} | {error, no_default_key_configured}. +get_default_key() -> + get_default_key(root). + +-spec get_default_key(oauth_provider_id()) -> {ok, binary()} | {error, no_default_key_configured}. +get_default_key(root) -> + case application:get_env(?APP, key_config, undefined) of + undefined -> + {error, no_default_key_configured}; + KeyConfig -> + case proplists:get_value(default_key, KeyConfig, undefined) of + undefined -> {error, no_default_key_configured}; + V -> {ok, V} + end + end; +get_default_key(OauthProviderId) -> + OauthProviders = application:get_env(?APP, oauth_providers, #{}), + case maps:get(OauthProviderId, OauthProviders, []) of + [] -> + {error, no_default_key_configured}; + OauthProvider -> + case proplists:get_value(default_key, OauthProvider, undefined) of + undefined -> {error, no_default_key_configured}; + V -> {ok, V} + end + end. 
+ +%% +%% Signing Key storage: +%% +%% * Static signing keys configured via config file are stored under signing_keys attribute +%% in their respective location (under key_config for the root oauth provider and +%% directly under each oauth provider) +%% * Dynamic signing keys loaded via rabbitmqctl or via JWKS endpoint are stored under +%% jwks attribute in their respective location. However, this attribute stores the +%% combination of static signing keys and dynamic signing keys. If the same kid is +%% found in both sets, the dynamic kid overrides the static kid. +%% + -type key_type() :: json | pem | map. --spec add_signing_key(binary(), {key_type(), binary()} ) -> {ok, map()} | {error, term()}. +-spec add_signing_key(binary(), {key_type(), binary()} ) -> map() | {error, term()}. add_signing_key(KeyId, Key) -> LockId = lock(), - try do_add_signing_key(KeyId, Key) of + try do_add_signing_key(KeyId, Key, root) of V -> V after unlock(LockId) end. --spec add_signing_key(binary(), binary(), {key_type(), binary()}) -> {ok, map()} | {error, term()}. -add_signing_key(ResourceServerId, KeyId, Key) -> - LockId = lock(), - try do_add_signing_key(ResourceServerId, KeyId, Key) of - V -> V - after - unlock(LockId) +-spec add_signing_key(binary(), {key_type(), binary()}, oauth_provider_id()) -> + map() | {error, term()}. +add_signing_key(KeyId, Key, OAuthProviderId) -> + case lock() of + {error, _} = Error -> + Error; + LockId -> + try do_add_signing_key(KeyId, Key, OAuthProviderId) of + V -> V + after + unlock(LockId) + end end. -do_add_signing_key(KeyId, Key) -> - do_replace_signing_keys(maps:put(KeyId, Key, get_signing_keys())). +do_add_signing_key(KeyId, Key, OAuthProviderId) -> + do_replace_signing_keys(maps:put(KeyId, Key, + get_signing_keys_from_jwks(OAuthProviderId)), OAuthProviderId). -do_add_signing_key(ResourceServerId, KeyId, Key) -> - do_replace_signing_keys(ResourceServerId, - maps:put(KeyId, Key, get_signing_keys(ResourceServerId))). 
+get_signing_keys_from_jwks(root) -> + KeyConfig = application:get_env(?APP, key_config, []), + proplists:get_value(jwks, KeyConfig, #{}); +get_signing_keys_from_jwks(OAuthProviderId) -> + OAuthProviders0 = application:get_env(?APP, oauth_providers, #{}), + OAuthProvider0 = maps:get(OAuthProviderId, OAuthProviders0, []), + proplists:get_value(jwks, OAuthProvider0, #{}). +-spec replace_signing_keys(map()) -> map() | {error, term()}. replace_signing_keys(SigningKeys) -> - LockId = lock(), - try do_replace_signing_keys(SigningKeys) of - V -> V - after - unlock(LockId) + replace_signing_keys(SigningKeys, root). + +-spec replace_signing_keys(map(), oauth_provider_id()) -> map() | {error, term()}. +replace_signing_keys(SigningKeys, OAuthProviderId) -> + case lock() of + {error,_} = Error -> + Error; + LockId -> + try do_replace_signing_keys(SigningKeys, OAuthProviderId) of + V -> V + after + unlock(LockId) + end end. -replace_signing_keys(ResourceServerId, SigningKeys) -> - LockId = lock(), - try do_replace_signing_keys(ResourceServerId, SigningKeys) of - V -> V - after - unlock(LockId) - end. 
- -do_replace_signing_keys(SigningKeys) -> +do_replace_signing_keys(SigningKeys, root) -> KeyConfig = application:get_env(?APP, key_config, []), - KeyConfig1 = proplists:delete(signing_keys, KeyConfig), - KeyConfig2 = [{signing_keys, SigningKeys} | KeyConfig1], + KeyConfig1 = proplists:delete(jwks, KeyConfig), + KeyConfig2 = [{jwks, maps:merge( + proplists:get_value(signing_keys, KeyConfig1, #{}), + SigningKeys)} | KeyConfig1], application:set_env(?APP, key_config, KeyConfig2), rabbit_log:debug("Replacing signing keys ~p", [ KeyConfig2]), + SigningKeys; + +do_replace_signing_keys(SigningKeys, OauthProviderId) -> + OauthProviders0 = application:get_env(?APP, oauth_providers, #{}), + OauthProvider0 = maps:get(OauthProviderId, OauthProviders0, []), + OauthProvider1 = proplists:delete(jwks, OauthProvider0), + OauthProvider = [{jwks, maps:merge( + proplists:get_value(signing_keys, OauthProvider1, #{}), + SigningKeys)} | OauthProvider1], + + OauthProviders = maps:put(OauthProviderId, OauthProvider, OauthProviders0), + application:set_env(?APP, oauth_providers, OauthProviders), + rabbit_log:debug("Replacing signing keys for ~p -> ~p", [OauthProviderId, OauthProvider]), SigningKeys. -do_replace_signing_keys(ResourceServerId, SigningKeys) -> - do_replace_signing_keys(get_default_resource_server_id(), - ResourceServerId, SigningKeys). 
-do_replace_signing_keys(TopResourceServerId, ResourceServerId, SigningKeys) - when ResourceServerId =:= TopResourceServerId -> - do_replace_signing_keys(SigningKeys); -do_replace_signing_keys(TopResourceServerId, ResourceServerId, SigningKeys) - when ResourceServerId =/= TopResourceServerId -> - ResourceServers = application:get_env(?APP, resource_servers, #{}), - ResourceServer = maps:get(ResourceServerId, ResourceServers, []), - KeyConfig0 = proplists:get_value(key_config, ResourceServer, []), - KeyConfig1 = proplists:delete(signing_keys, KeyConfig0), - KeyConfig2 = [{signing_keys, SigningKeys} | KeyConfig1], - - ResourceServer1 = proplists:delete(key_config, ResourceServer), - ResourceServer2 = [{key_config, KeyConfig2} | ResourceServer1], - - ResourceServers1 = maps:put(ResourceServerId, ResourceServer2, ResourceServers), - application:set_env(?APP, resource_servers, ResourceServers1), - rabbit_log:debug("Replacing signing keys for ~p -> ~p", [ResourceServerId, ResourceServers1]), - SigningKeys. -spec get_signing_keys() -> map(). -get_signing_keys() -> proplists:get_value(signing_keys, get_key_config(), #{}). +get_signing_keys() -> + get_signing_keys(root). --spec get_signing_keys(binary()) -> map(). -get_signing_keys(ResourceServerId) -> - get_signing_keys(get_default_resource_server_id(), ResourceServerId). +-spec get_signing_keys(oauth_provider_id()) -> map(). +get_signing_keys(root) -> + case application:get_env(?APP, key_config, undefined) of + undefined -> + #{}; + KeyConfig -> + case proplists:get_value(jwks, KeyConfig, undefined) of + undefined -> proplists:get_value(signing_keys, KeyConfig, #{}); + Jwks -> Jwks + end + end; +get_signing_keys(OauthProviderId) -> + OauthProviders = application:get_env(?APP, oauth_providers, #{}), + OauthProvider = maps:get(OauthProviderId, OauthProviders, []), + case proplists:get_value(jwks, OauthProvider, undefined) of + undefined -> + proplists:get_value(signing_keys, OauthProvider, #{}); + Jwks -> + Jwks + end. 
-get_signing_keys(TopResourceServerId, ResourceServerId) - when ResourceServerId =:= TopResourceServerId -> - get_signing_keys(); -get_signing_keys(TopResourceServerId, ResourceServerId) - when ResourceServerId =/= TopResourceServerId -> - proplists:get_value(signing_keys, get_key_config(ResourceServerId), #{}). +-spec get_resource_server_id_for_audience(binary() | list() | none) -> binary() | {error, term()}. +get_resource_server_id_for_audience(none) -> + case is_verify_aud() of + true -> + {error, no_matching_aud_found}; + false -> + case get_default_resource_server_id() of + {error, missing_resource_server_id_in_config} -> + {error, mising_audience_in_token_and_resource_server_in_config}; + V -> V + end + end; +get_resource_server_id_for_audience(Audience) -> + case find_audience_in_resource_server_ids(Audience) of + {ok, ResourceServerId} -> + ResourceServerId; + {error, only_one_resource_server_as_audience_found_many} = Error -> + Error; + {error, no_matching_aud_found} -> + case is_verify_aud() of + true -> + {error, no_matching_aud_found}; + false -> + case get_default_resource_server_id() of + {error, missing_resource_server_id_in_config} -> + {error, mising_audience_in_token_and_resource_server_in_config}; + V -> V + end + end + end. --spec get_oauth_provider_for_resource_server_id(binary(), list()) -> - {ok, oauth_provider()} | {error, any()}. +-spec get_oauth_provider_id_for_resource_server_id(binary()) -> oauth_provider_id(). -get_oauth_provider_for_resource_server_id(ResourceServerId, RequiredAttributeList) -> - get_oauth_provider_for_resource_server_id(get_default_resource_server_id(), - ResourceServerId, RequiredAttributeList). -get_oauth_provider_for_resource_server_id(TopResourceServerId, - ResourceServerId, RequiredAttributeList) when ResourceServerId =:= TopResourceServerId -> +get_oauth_provider_id_for_resource_server_id(ResourceServerId) -> + get_oauth_provider_id_for_resource_server_id(get_default_resource_server_id(), + ResourceServerId). 
+get_oauth_provider_id_for_resource_server_id(TopResourceServerId, + ResourceServerId) when ResourceServerId =:= TopResourceServerId -> case application:get_env(?APP, default_oauth_provider) of - undefined -> - oauth2_client:get_oauth_provider(RequiredAttributeList); - {ok, DefaultOauthProviderId} -> - oauth2_client:get_oauth_provider(DefaultOauthProviderId, RequiredAttributeList) + undefined -> root; + {ok, DefaultOauthProviderId} -> DefaultOauthProviderId end; - -get_oauth_provider_for_resource_server_id(TopResourceServerId, ResourceServerId, - RequiredAttributeList) when ResourceServerId =/= TopResourceServerId -> +get_oauth_provider_id_for_resource_server_id(TopResourceServerId, + ResourceServerId) when ResourceServerId =/= TopResourceServerId -> case proplists:get_value(oauth_provider_id, get_resource_server_props(ResourceServerId)) of undefined -> case application:get_env(?APP, default_oauth_provider) of - undefined -> - oauth2_client:get_oauth_provider(RequiredAttributeList); - {ok, DefaultOauthProviderId} -> - oauth2_client:get_oauth_provider(DefaultOauthProviderId, - RequiredAttributeList) + undefined -> root; + {ok, DefaultOauthProviderId} -> DefaultOauthProviderId end; - OauthProviderId -> - oauth2_client:get_oauth_provider(OauthProviderId, RequiredAttributeList) + OauthProviderId -> OauthProviderId end. --spec get_key_config() -> list(). -get_key_config() -> application:get_env(?APP, key_config, []). - --spec get_key_config(binary()) -> list(). -get_key_config(ResourceServerId) -> - get_key_config(get_default_resource_server_id(), ResourceServerId). -get_key_config(TopResourceServerId, ResourceServerId) - when ResourceServerId =:= TopResourceServerId -> - get_key_config(); -get_key_config(TopResourceServerId, ResourceServerId) - when ResourceServerId =/= TopResourceServerId -> - proplists:get_value(key_config, get_resource_server_props(ResourceServerId), - get_key_config()). 
+-spec get_oauth_provider(oauth_provider_id(), list()) -> + {ok, oauth_provider()} | {error, any()}. +get_oauth_provider(OAuthProviderId, RequiredAttributeList) -> + oauth2_client:get_oauth_provider(OAuthProviderId, RequiredAttributeList). + +-spec get_algorithms() -> list() | undefined. +get_algorithms() -> + get_algorithms(root). + +-spec get_algorithms(oauth_provider_id()) -> list() | undefined. +get_algorithms(root) -> + proplists:get_value(algorithms, application:get_env(?APP, key_config, []), + undefined); +get_algorithms(OAuthProviderId) -> + OAuthProviders = application:get_env(?APP, oauth_providers, #{}), + case maps:get(OAuthProviderId, OAuthProviders, undefined) of + undefined -> undefined; + V -> proplists:get_value(algorithms, V, undefined) + end. get_resource_server_props(ResourceServerId) -> ResourceServers = application:get_env(?APP, resource_servers, #{}), maps:get(ResourceServerId, ResourceServers, []). -get_signing_key(KeyId, ResourceServerId) -> - get_signing_key(get_default_resource_server_id(), KeyId, ResourceServerId). - -get_signing_key(TopResourceServerId, KeyId, ResourceServerId) - when ResourceServerId =:= TopResourceServerId -> - maps:get(KeyId, get_signing_keys(), undefined); -get_signing_key(TopResourceServerId, KeyId, ResourceServerId) - when ResourceServerId =/= TopResourceServerId -> - maps:get(KeyId, get_signing_keys(ResourceServerId), undefined). +get_signing_key(KeyId) -> + maps:get(KeyId, get_signing_keys(root), undefined). +get_signing_key(KeyId, OAuthProviderId) -> + maps:get(KeyId, get_signing_keys(OAuthProviderId), undefined). append_or_return_default(ListOrBinary, Default) -> @@ -213,7 +290,7 @@ append_or_return_default(ListOrBinary, Default) -> -spec get_default_resource_server_id() -> binary() | {error, term()}. 
get_default_resource_server_id() -> case ?TOP_RESOURCE_SERVER_ID of - undefined -> {error, missing_token_audience_and_or_config_resource_server_id }; + undefined -> {error, missing_resource_server_id_in_config }; {ok, ResourceServerId} -> ResourceServerId end. @@ -241,13 +318,17 @@ find_audience_in_resource_server_ids(AudList) when is_list(AudList) -> [] -> {error, no_matching_aud_found} end. - -spec is_verify_aud() -> boolean(). is_verify_aud() -> application:get_env(?APP, verify_aud, true). -spec is_verify_aud(binary()) -> boolean(). is_verify_aud(ResourceServerId) -> - is_verify_aud(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + is_verify_aud(undefined, ResourceServerId); + V -> + is_verify_aud(V, ResourceServerId) + end. is_verify_aud(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> is_verify_aud(); is_verify_aud(TopResourceServerId, ResourceServerId) @@ -261,10 +342,14 @@ get_additional_scopes_key() -> undefined -> {error, not_found}; ScopeKey -> {ok, ScopeKey} end. - -spec get_additional_scopes_key(binary()) -> {ok, binary()} | {error, not_found}. get_additional_scopes_key(ResourceServerId) -> - get_additional_scopes_key(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + get_additional_scopes_key(undefined, ResourceServerId); + V -> + get_additional_scopes_key(V, ResourceServerId) + end. get_additional_scopes_key(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> get_additional_scopes_key(); get_additional_scopes_key(TopResourceServerId, ResourceServerId) @@ -279,13 +364,20 @@ get_additional_scopes_key(TopResourceServerId, ResourceServerId) -spec get_scope_prefix() -> binary(). 
get_scope_prefix() -> - DefaultScopePrefix = erlang:iolist_to_binary([ - get_default_resource_server_id(), <<".">>]), + DefaultScopePrefix = case get_default_resource_server_id() of + {error, _} -> <<"">>; + V -> erlang:iolist_to_binary([V, <<".">>]) + end, application:get_env(?APP, scope_prefix, DefaultScopePrefix). -spec get_scope_prefix(binary()) -> binary(). get_scope_prefix(ResourceServerId) -> - get_scope_prefix(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + get_scope_prefix(undefined, ResourceServerId); + V -> + get_scope_prefix(V, ResourceServerId) + end. get_scope_prefix(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> get_scope_prefix(); get_scope_prefix(TopResourceServerId, ResourceServerId) @@ -306,7 +398,12 @@ get_resource_server_type() -> application:get_env(?APP, resource_server_type, << -spec get_resource_server_type(binary()) -> binary(). get_resource_server_type(ResourceServerId) -> - get_resource_server_type(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + get_resource_server_type(undefined, ResourceServerId); + V -> + get_resource_server_type(V, ResourceServerId) + end. get_resource_server_type(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> get_resource_server_type(); get_resource_server_type(TopResourceServerId, ResourceServerId) @@ -318,7 +415,12 @@ get_resource_server_type(TopResourceServerId, ResourceServerId) -spec has_scope_aliases(binary()) -> boolean(). has_scope_aliases(ResourceServerId) -> - has_scope_aliases(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + has_scope_aliases(undefined, ResourceServerId); + V -> + has_scope_aliases(V, ResourceServerId) + end. 
has_scope_aliases(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> case application:get_env(?APP, scope_aliases) of @@ -336,7 +438,12 @@ has_scope_aliases(TopResourceServerId, ResourceServerId) -spec get_scope_aliases(binary()) -> map(). get_scope_aliases(ResourceServerId) -> - get_scope_aliases(get_default_resource_server_id(), ResourceServerId). + case get_default_resource_server_id() of + {error, _} -> + get_scope_aliases(undefined, ResourceServerId); + V -> + get_scope_aliases(V, ResourceServerId) + end. get_scope_aliases(TopResourceServerId, ResourceServerId) when ResourceServerId =:= TopResourceServerId -> application:get_env(?APP, scope_aliases, #{}); @@ -357,15 +464,11 @@ lock() -> LockId = case global:set_lock({oauth2_config_lock, rabbitmq_auth_backend_oauth2}, Nodes, Retries) of true -> rabbitmq_auth_backend_oauth2; - false -> undefined + false -> {error, unable_to_claim_lock} end, LockId. unlock(LockId) -> Nodes = rabbit_nodes:list_running(), - case LockId of - undefined -> ok; - Value -> - global:del_lock({oauth2_config_lock, Value}, Nodes) - end, + global:del_lock({oauth2_config_lock, LockId}, Nodes), ok. diff --git a/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_schema.erl b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_schema.erl new file mode 100644 index 000000000000..d79972509ba0 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/src/rabbit_oauth2_schema.erl @@ -0,0 +1,157 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_oauth2_schema). + + +-export([ + translate_oauth_providers/1, + translate_resource_servers/1, + translate_signing_keys/1 +]). 
+
+extract_key_as_binary({Name,_}) -> list_to_binary(Name).
+extract_value({_Name,V}) -> V.
+
+-spec translate_resource_servers([{list(), binary()}]) -> map().
+translate_resource_servers(Conf) ->
+    Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.resource_servers", Conf),
+    Map = merge_list_of_maps([
+        extract_resource_server_properties(Settings),
+        extract_resource_server_preferred_username_claims(Settings)
+    ]),
+    Map0 = maps:map(fun(K,V) ->
+        case proplists:get_value(id, V) of
+            undefined -> V ++ [{id, K}];
+            _ -> V
+        end end, Map),
+    ResourceServers = maps:values(Map0),
+    lists:foldl(fun(Elem,AccMap)-> maps:put(proplists:get_value(id, Elem), Elem, AccMap) end, #{},
+        ResourceServers).
+
+-spec translate_oauth_providers([{list(), binary()}]) -> map().
+translate_oauth_providers(Conf) ->
+    Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.oauth_providers", Conf),
+
+    merge_list_of_maps([
+        extract_oauth_providers_properties(Settings),
+        extract_oauth_providers_algorithm(Settings),
+        extract_oauth_providers_https(Settings),
+        extract_oauth_providers_signing_keys(Settings)]).
+
+-spec translate_signing_keys([{list(), binary()}]) -> map().
+translate_signing_keys(Conf) ->
+    Settings = cuttlefish_variable:filter_by_prefix("auth_oauth2.signing_keys", Conf),
+    ListOfKidPath = lists:map(fun({Id, Path}) -> {list_to_binary(lists:last(Id)), Path} end, Settings),
+    translate_list_of_signing_keys(ListOfKidPath).
+
+-spec translate_list_of_signing_keys([{list(), list()}]) -> map().
+translate_list_of_signing_keys(ListOfKidPath) ->
+    TryReadingFileFun =
+        fun(Path) ->
+            case file:read_file(Path) of
+                {ok, Bin} ->
+                    string:trim(Bin, trailing, "\n");
+                _Error ->
+                    %% this throws and makes Cuttlefish treat the key as invalid
+                    cuttlefish:invalid("file does not exist or cannot be read by the node")
+            end
+        end,
+    maps:map(fun(_K, Path) -> {pem, TryReadingFileFun(Path)} end, maps:from_list(ListOfKidPath)).
+
+validator_file_exists(Attr, Filename) ->
+    case file:read_file(Filename) of
+        {ok, _} ->
+            Filename;
+        _Error ->
+            %% this throws and makes Cuttlefish treat the key as invalid
+            cuttlefish:invalid(io_lib:format(
+                "Invalid attribute (~p) value: file ~p does not exist or cannot be read by the node", [Attr, Filename]))
+    end.
+validator_https_uri(Attr, Uri) when is_binary(Uri) ->
+    list_to_binary(validator_https_uri(Attr, binary_to_list(Uri)));
+
+validator_https_uri(Attr, Uri) ->
+    case string:nth_lexeme(Uri, 1, "://") == "https" of
+        true -> Uri;
+        false ->
+            cuttlefish:invalid(io_lib:format(
+                "Invalid attribute (~p) value: uri ~p must be a valid https uri", [Attr, Uri]))
+    end.
+
+merge_list_of_maps(ListOfMaps) ->
+    lists:foldl(fun(Elem, AccIn) -> maps:merge_with(fun(_K,V1,V2) -> V1 ++ V2 end,
+        Elem, AccIn) end, #{}, ListOfMaps).
+
+extract_oauth_providers_properties(Settings) ->
+    KeyFun = fun extract_key_as_binary/1,
+    ValueFun = fun extract_value/1,
+
+    OAuthProviders = [{Name, mapOauthProviderProperty({list_to_atom(Key), list_to_binary(V)})}
+        || {["auth_oauth2","oauth_providers", Name, Key], V} <- Settings ],
+    maps:groups_from_list(KeyFun, ValueFun, OAuthProviders).
+
+extract_resource_server_properties(Settings) ->
+    KeyFun = fun extract_key_as_binary/1,
+    ValueFun = fun extract_value/1,
+
+    OAuthProviders = [{Name, {list_to_atom(Key), list_to_binary(V)}}
+        || {["auth_oauth2","resource_servers", Name, Key], V} <- Settings ],
+    maps:groups_from_list(KeyFun, ValueFun, OAuthProviders).
+
+mapOauthProviderProperty({Key, Value}) ->
+    {Key, case Key of
+        issuer -> validator_https_uri(Key, Value);
+        token_endpoint -> validator_https_uri(Key, Value);
+        jwks_uri -> validator_https_uri(Key, Value);
+        end_session_endpoint -> validator_https_uri(Key, Value);
+        authorization_endpoint -> validator_https_uri(Key, Value);
+        _ -> Value
+    end}.
+ +extract_oauth_providers_https(Settings) -> + ExtractProviderNameFun = fun extract_key_as_binary/1, + + AttributesPerProvider = [{Name, mapHttpProperty({list_to_atom(Key), V})} || + {["auth_oauth2","oauth_providers", Name, "https", Key], V} <- Settings ], + + maps:map(fun(_K,V)-> [{https, V}] end, + maps:groups_from_list(ExtractProviderNameFun, fun({_, V}) -> V end, AttributesPerProvider)). + +mapHttpProperty({Key, Value}) -> + {Key, case Key of + cacertfile -> validator_file_exists(Key, Value); + _ -> Value + end}. + +extract_oauth_providers_algorithm(Settings) -> + KeyFun = fun extract_key_as_binary/1, + + IndexedAlgorithms = [{Name, {Index, list_to_binary(V)}} || + {["auth_oauth2","oauth_providers", Name, "algorithms", Index], V} <- Settings ], + SortedAlgorithms = lists:sort(fun({_,{AI,_}},{_,{BI,_}}) -> AI < BI end, IndexedAlgorithms), + Algorithms = [{Name, V} || {Name, {_I, V}} <- SortedAlgorithms], + maps:map(fun(_K,V)-> [{algorithms, V}] end, + maps:groups_from_list(KeyFun, fun({_, V}) -> V end, Algorithms)). + +extract_resource_server_preferred_username_claims(Settings) -> + KeyFun = fun extract_key_as_binary/1, + + IndexedClaims = [{Name, {Index, list_to_binary(V)}} || + {["auth_oauth2","resource_servers", Name, "preferred_username_claims", Index], V} <- Settings ], + SortedClaims = lists:sort(fun({_,{AI,_}},{_,{BI,_}}) -> AI < BI end, IndexedClaims), + Claims = [{Name, V} || {Name, {_I, V}} <- SortedClaims], + maps:map(fun(_K,V)-> [{preferred_username_claims, V}] end, + maps:groups_from_list(KeyFun, fun({_, V}) -> V end, Claims)). + +extract_oauth_providers_signing_keys(Settings) -> + KeyFun = fun extract_key_as_binary/1, + + IndexedSigningKeys = [{Name, {list_to_binary(Kid), list_to_binary(V)}} || + {["auth_oauth2","oauth_providers", Name, "signing_keys", Kid], V} <- Settings ], + maps:map(fun(_K,V)-> [{signing_keys, translate_list_of_signing_keys(V)}] end, + maps:groups_from_list(KeyFun, fun({_, V}) -> V end, IndexedSigningKeys)). 
diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl index d78b7b4c9c1c..eafaa2122c74 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt.erl @@ -29,42 +29,46 @@ add_signing_key(KeyId, Type, Value) -> Err end. --spec update_jwks_signing_keys(term()) -> ok | {error, term()}. -update_jwks_signing_keys(ResourceServerId) -> - case rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(ResourceServerId, [jwks_uri]) of - {error, _} = Error -> - rabbit_log:error("Failed to obtain a JWKS URL for resource_server_id '~tp'", [ResourceServerId]), - Error; - {ok, #oauth_provider{jwks_uri = JwksUrl, ssl_options = SslOptions}} -> - rabbit_log:debug("OAuth 2 JWT: downloading keys from ~tp (TLS options: ~p)", [JwksUrl, SslOptions]), - case uaa_jwks:get(JwksUrl, SslOptions) of - {ok, {_, _, JwksBody}} -> - KeyList = maps:get(<<"keys">>, jose:decode(erlang:iolist_to_binary(JwksBody)), []), - Keys = maps:from_list(lists:map(fun(Key) -> {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), - rabbit_log:debug("OAuth 2 JWT: downloaded keys ~tp", [Keys]), - case rabbit_oauth2_config:replace_signing_keys(ResourceServerId, Keys) of - {error, _} = Err -> Err; - _ -> ok - end; - {error, _} = Err -> - rabbit_log:error("OAuth 2 JWT: failed to download keys: ~tp", [Err]), - Err - end +-spec update_jwks_signing_keys(oauth_provider()) -> ok | {error, term()}. 
+update_jwks_signing_keys(#oauth_provider{id = Id, jwks_uri = JwksUrl, + ssl_options = SslOptions}) -> + rabbit_log:debug("OAuth 2 JWT: downloading keys from ~tp (TLS options: ~p)", + [JwksUrl, SslOptions]), + case uaa_jwks:get(JwksUrl, SslOptions) of + {ok, {_, _, JwksBody}} -> + KeyList = maps:get(<<"keys">>, + jose:decode(erlang:iolist_to_binary(JwksBody)), []), + Keys = maps:from_list(lists:map(fun(Key) -> + {maps:get(<<"kid">>, Key, undefined), {json, Key}} end, KeyList)), + rabbit_log:debug("OAuth 2 JWT: downloaded keys ~tp", [Keys]), + case rabbit_oauth2_config:replace_signing_keys(Keys, Id) of + {error, _} = Err -> Err; + _ -> ok + end; + {error, _} = Err -> + rabbit_log:error("OAuth 2 JWT: failed to download keys: ~tp", [Err]), + Err end. -spec decode_and_verify(binary()) -> {boolean(), binary(), map()} | {error, term()}. decode_and_verify(Token) -> - case uaa_jwt_jwt:resolve_resource_server_id(Token) of + case resolve_resource_server_id(Token) of {error, _} = Err -> Err; ResourceServerId -> - rabbit_log:debug("OAuth 2 JWT: resolved resource_server_id: '~tp'", [ResourceServerId]), - case uaa_jwt_jwt:get_key_id(ResourceServerId, Token) of + OAuthProviderId = + rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(ResourceServerId), + rabbit_log:debug("OAuth 2 JWT: resolved resource_server_id: ~p oauth_provider_id: ~p", + [ResourceServerId, OAuthProviderId]), + case uaa_jwt_jwt:get_key_id(rabbit_oauth2_config:get_default_key(OAuthProviderId), Token) of {ok, KeyId} -> rabbit_log:debug("OAuth 2 JWT: signing_key_id : '~tp'", [KeyId]), - case get_jwk(KeyId, ResourceServerId) of + case get_jwk(KeyId, OAuthProviderId) of {ok, JWK} -> - case uaa_jwt_jwt:decode_and_verify(ResourceServerId, JWK, Token) of + case uaa_jwt_jwt:decode_and_verify( + OAuthProviderId, + JWK, + Token) of {true, Payload} -> {true, ResourceServerId, Payload}; {false, Payload} -> {false, ResourceServerId, Payload} end; @@ -75,23 +79,37 @@ decode_and_verify(Token) -> end end. 
--spec get_jwk(binary(), binary()) -> {ok, map()} | {error, term()}. -get_jwk(KeyId, ResourceServerId) -> - get_jwk(KeyId, ResourceServerId, true). +resolve_resource_server_id(Token) -> + case uaa_jwt_jwt:get_aud(Token) of + {error, _} = Error -> + Error; + {ok, Audience} -> + rabbit_oauth2_config:get_resource_server_id_for_audience(Audience) + end. + +-spec get_jwk(binary(), oauth_provider_id()) -> {ok, map()} | {error, term()}. +get_jwk(KeyId, OAuthProviderId) -> + get_jwk(KeyId, OAuthProviderId, true). -get_jwk(KeyId, ResourceServerId, AllowUpdateJwks) -> - case rabbit_oauth2_config:get_signing_key(KeyId, ResourceServerId) of +get_jwk(KeyId, OAuthProviderId, AllowUpdateJwks) -> + case rabbit_oauth2_config:get_signing_key(KeyId, OAuthProviderId) of undefined -> if AllowUpdateJwks -> rabbit_log:debug("OAuth 2 JWT: signing key '~tp' not found. Downloading it... ", [KeyId]), - case update_jwks_signing_keys(ResourceServerId) of - ok -> - get_jwk(KeyId, ResourceServerId, false); - {error, no_jwks_url} -> - {error, key_not_found}; - {error, _} = Err -> - Err + case rabbit_oauth2_config:get_oauth_provider(OAuthProviderId, [jwks_uri]) of + {ok, OAuthProvider} -> + case update_jwks_signing_keys(OAuthProvider) of + ok -> + get_jwk(KeyId, OAuthProviderId, false); + {error, no_jwks_url} -> + {error, key_not_found}; + {error, _} = Err -> + Err + end; + {error, _} = Error -> + rabbit_log:debug("OAuth 2 JWT: unable to download keys due to ~p", [Error]), + Error end; true -> rabbit_log:debug("OAuth 2 JWT: signing key '~tp' not found. Downloading is not allowed", [KeyId]), diff --git a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl index 962a3b55daba..7d8c37457028 100644 --- a/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl +++ b/deps/rabbitmq_auth_backend_oauth2/src/uaa_jwt_jwt.erl @@ -6,23 +6,15 @@ %% -module(uaa_jwt_jwt). 
--export([decode/1, decode_and_verify/3, get_key_id/2, get_aud/1, resolve_resource_server_id/1]). +-export([decode_and_verify/3, get_key_id/2, get_aud/1]). -include_lib("jose/include/jose_jwt.hrl"). -include_lib("jose/include/jose_jws.hrl"). -decode(Token) -> - try - #jose_jwt{fields = Fields} = jose_jwt:peek_payload(Token), - Fields - catch Type:Err:Stacktrace -> - {error, {invalid_token, Type, Err, Stacktrace}} - end. -decode_and_verify(ResourceServerId, Jwk, Token) -> - KeyConfig = rabbit_oauth2_config:get_key_config(ResourceServerId), +decode_and_verify(OauthProviderId, Jwk, Token) -> Verify = - case proplists:get_value(algorithms, KeyConfig) of + case rabbit_oauth2_config:get_algorithms(OauthProviderId) of undefined -> jose_jwt:verify(Jwk, Token); Algs -> jose_jwt:verify_strict(Jwk, Algs, Token) end, @@ -32,31 +24,11 @@ decode_and_verify(ResourceServerId, Jwk, Token) -> end. -resolve_resource_server_id(Token) -> - case get_aud(Token) of - {error, _} = Error -> Error; - undefined -> - case rabbit_oauth2_config:is_verify_aud() of - true -> {error, no_matching_aud_found}; - false -> rabbit_oauth2_config:get_default_resource_server_id() - end; - {ok, Audience} -> - case rabbit_oauth2_config:find_audience_in_resource_server_ids(Audience) of - {ok, ResourceServerId} -> ResourceServerId; - {error, only_one_resource_server_as_audience_found_many} = Error -> Error; - {error, no_matching_aud_found} -> - case rabbit_oauth2_config:is_verify_aud() of - true -> {error, no_matching_aud_found}; - false -> rabbit_oauth2_config:get_default_resource_server_id() - end - end - end. 
- -get_key_id(ResourceServerId, Token) -> +get_key_id(DefaultKey, Token) -> try case jose_jwt:peek_protected(Token) of #jose_jws{fields = #{<<"kid">> := Kid}} -> {ok, Kid}; - #jose_jws{} -> get_default_key(ResourceServerId) + #jose_jws{} -> DefaultKey end catch Type:Err:Stacktrace -> {error, {invalid_token, Type, Err, Stacktrace}} @@ -66,16 +38,8 @@ get_aud(Token) -> try case jose_jwt:peek_payload(Token) of #jose_jwt{fields = #{<<"aud">> := Aud}} -> {ok, Aud}; - #jose_jwt{} -> undefined + #jose_jwt{} -> {ok, none} end catch Type:Err:Stacktrace -> {error, {invalid_token, Type, Err, Stacktrace}} end. - - -get_default_key(ResourceServerId) -> - KeyConfig = rabbit_oauth2_config:get_key_config(ResourceServerId), - case proplists:get_value(default_key, KeyConfig, undefined) of - undefined -> {error, no_key}; - Val -> {ok, Val} - end. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets index 3d93e06d4d42..a76c0cdf1a23 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets +++ b/deps/rabbitmq_auth_backend_oauth2/test/config_schema_SUITE_data/rabbitmq_auth_backend_oauth2.snippets @@ -1,5 +1,5 @@ [ - {oauth2_pem_config2, + {root_resource_server, "auth_oauth2.resource_server_id = new_resource_server_id auth_oauth2.scope_prefix = new_resource_server_id. auth_oauth2.resource_server_type = new_resource_server_type @@ -51,7 +51,7 @@ ]} ],[] }, - {oauth2_pem_config3, + {multiple_resource_servers, "auth_oauth2.resource_server_id = new_resource_server_id auth_oauth2.scope_prefix = new_resource_server_id. 
auth_oauth2.resource_server_type = new_resource_server_type @@ -87,12 +87,12 @@ {resource_servers, #{ <<"rabbitmq-operations">> => [ - {id, <<"rabbitmq-operations">>}, - {scope_prefix, <<"api://">>} + {scope_prefix, <<"api://">>}, + {id, <<"rabbitmq-operations">>} ], <<"rabbitmq-customers">> => [ - {id, <<"rabbitmq-customers">>}, - {additional_scopes_key, <<"roles">>} + {additional_scopes_key, <<"roles">>}, + {id, <<"rabbitmq-customers">>} ] } }, @@ -117,7 +117,7 @@ ]} ],[] }, - {oauth2_pem_config4, + {multiple_oauth_providers, "auth_oauth2.resource_server_id = new_resource_server_id auth_oauth2.scope_prefix = new_resource_server_id. auth_oauth2.resource_server_type = new_resource_server_type @@ -131,8 +131,13 @@ auth_oauth2.oauth_providers.keycloak.jwks_uri = https://keycloak/keys auth_oauth2.oauth_providers.keycloak.authorization_endpoint = https://keycloak/authorize auth_oauth2.oauth_providers.keycloak.end_session_endpoint = https://keycloak/logout - auth_oauth2.oauth_providers.keycloak.https.cacertfile = /mnt/certs/ca_certificate.pem - auth_oauth2.oauth_providers.keycloak.https.verify = verify_none", + auth_oauth2.oauth_providers.keycloak.https.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + auth_oauth2.oauth_providers.keycloak.https.verify = verify_none + auth_oauth2.oauth_providers.keycloak.https.depth = 2 + auth_oauth2.oauth_providers.keycloak.default_key = token-key + auth_oauth2.oauth_providers.keycloak.signing_keys.id1 = test/config_schema_SUITE_data/certs/key.pem + auth_oauth2.oauth_providers.keycloak.algorithms.1 = HS256 + auth_oauth2.oauth_providers.keycloak.algorithms.2 = RS256", [ {rabbitmq_auth_backend_oauth2, [ {resource_server_id,<<"new_resource_server_id">>}, @@ -143,24 +148,41 @@ {verify_aud, true}, {oauth_providers, #{ - <<"uaa">> => [ - {issuer, <<"https://uaa">>} - ], <<"keycloak">> => [ + {signing_keys, + #{ + <<"id1">> => {pem, <<"I'm not a certificate">>} + } + }, {https, [ + {depth, 2}, {verify, verify_none}, - {cacertfile, 
"/mnt/certs/ca_certificate.pem"} + {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"} ]}, + {algorithms, [<<"HS256">>, <<"RS256">>]}, + {default_key, <<"token-key">>}, {end_session_endpoint, <<"https://keycloak/logout">>}, {authorization_endpoint, <<"https://keycloak/authorize">>}, - {token_endpoint, <<"https://keycloak/token">>}, - {jwks_uri, <<"https://keycloak/keys">>} - ] + {jwks_uri, <<"https://keycloak/keys">>}, + {token_endpoint, <<"https://keycloak/token">>} + ], + <<"uaa">> => [ + {issuer, <<"https://uaa">>} + ] } } ]} ],[] + }, + {empty_scope_prefix, + "auth_oauth2.resource_server_id = new_resource_server_id + auth_oauth2.scope_prefix = '' ", + [ + {rabbitmq_auth_backend_oauth2, [ + {resource_server_id,<<"new_resource_server_id">>}, + {scope_prefix,<<>>} + ]} + ],[] } - ]. diff --git a/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl index ec72a0f46abf..db4de4d8a677 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/jwks_SUITE.erl @@ -23,39 +23,114 @@ all() -> {group, happy_path}, {group, unhappy_path}, {group, no_peer_verification}, - {group, multi_resource} + {group, verify_signing_keys} ]. 
groups() -> - [ - {happy_path, [], [ - test_successful_connection_with_a_full_permission_token_and_all_defaults, - test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost, - test_successful_connection_with_simple_strings_for_aud_and_scope, - test_successful_connection_with_complex_claim_as_a_map, - test_successful_connection_with_complex_claim_as_a_list, - test_successful_connection_with_complex_claim_as_a_binary, - test_successful_connection_with_keycloak_token, - test_successful_connection_with_algorithm_restriction, - test_successful_token_refresh - ]}, + [{happy_path, [], [ + test_successful_connection_with_a_full_permission_token_and_all_defaults, + test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost, + test_successful_connection_with_simple_strings_for_aud_and_scope, + test_successful_connection_with_complex_claim_as_a_map, + test_successful_connection_with_complex_claim_as_a_list, + test_successful_connection_with_complex_claim_as_a_binary, + test_successful_connection_with_keycloak_token, + test_successful_connection_with_algorithm_restriction, + test_successful_token_refresh + ]}, {unhappy_path, [], [ - test_failed_connection_with_expired_token, - test_failed_connection_with_a_non_token, - test_failed_connection_with_a_token_with_insufficient_vhost_permission, - test_failed_connection_with_a_token_with_insufficient_resource_permission, - test_failed_connection_with_algorithm_restriction, - test_failed_token_refresh_case1, - test_failed_token_refresh_case2 - ]}, - {no_peer_verification, [], [ + test_failed_connection_with_expired_token, + test_failed_connection_with_a_non_token, + test_failed_connection_with_a_token_with_insufficient_vhost_permission, + test_failed_connection_with_a_token_with_insufficient_resource_permission, + test_failed_connection_with_algorithm_restriction, + test_failed_token_refresh_case1, + test_failed_token_refresh_case2 + ]}, + {no_peer_verification, [], [ {group, 
happy_path}, {group, unhappy_path} - ]}, - {multi_resource, [], [ - test_m_successful_connection, - test_m_failed_connection_due_to_missing_key - ]} + ]}, + {verify_signing_keys, [], [ + {with_oauth_providers_A_B_and_C, [], [ + {with_default_oauth_provider_B, [], [ + {with_oauth_provider_A_with_jwks_with_one_signing_key, [], [ + {with_resource_servers_rabbitmq1_with_oauth_provider_A, [], [ + test_successful_connection_for_rabbitmq1_audience_signed_by_provider_A, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq1_signed_by_provider_A, + {with_oauth_providers_A_with_default_key, [], [ + test_successful_connection_for_rabbitmq1_audience_signed_by_provider_A + ]} + ]} + ]} + ]}, + {with_oauth_provider_B_with_one_static_key_and_jwks_with_two_signing_keys, [], [ + {with_resource_servers_rabbitmq2, [], [ + test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_static_key, + test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_jwks_key_1, + test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_jwks_key_2, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq2_signed_by_provider_B_with_static_key, + {with_oauth_providers_B_with_default_key_static_key, [], [ + test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_static_key + ]} + ]} + ]}, + {with_oauth_provider_C_with_two_static_keys, [], [ + {with_resource_servers_rabbitmq3_with_oauth_provider_C, [], [ + test_successful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_1, + test_successful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_2, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_1, + {with_oauth_providers_C_with_default_key_static_key_1, [], [ + test_successful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_1 + ]} + ]} + ]} + ]} + ]} + ]} + + ]}, + 
{with_root_oauth_provider_with_two_static_keys_and_one_jwks_key, [], [ + {with_resource_server_rabbitmq, [], [ + test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1, + test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_2, + test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_jwks_key, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1, + {with_root_oauth_provider_with_default_key_1, [], [ + test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1 + ]} + ]}, + {with_resource_servers_rabbitmq2, [], [ + test_successful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key, + {with_root_oauth_provider_with_default_jwks_key, [], [ + test_successful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key + ]} + ]}, + {with_oauth_providers_A_B_and_C, [], [ + {with_oauth_provider_A_with_jwks_with_one_signing_key, [], [ + {with_resource_servers_rabbitmq1_with_oauth_provider_A, [], [ + test_successful_connection_for_rabbitmq1_audience_signed_by_provider_A, + test_successful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key, + test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1, + {without_kid, [], [ + test_unsuccessful_connection_for_rabbitmq1_signed_by_provider_A, + {with_oauth_providers_A_with_default_key, [], [ + test_successful_connection_for_rabbitmq1_audience_signed_by_provider_A + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} + ]} ]. %% @@ -74,6 +149,7 @@ init_per_suite(Config) -> fun preconfigure_node/1, fun start_jwks_server/1, fun preconfigure_token/1 + %We fun add_vhosts/1 ]). 
end_per_suite(Config) -> @@ -83,60 +159,213 @@ end_per_suite(Config) -> ] ++ rabbit_ct_broker_helpers:teardown_steps()). init_per_group(no_peer_verification, Config) -> - add_vhosts(Config), KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), [{jwks_url, ?config(non_strict_jwks_url, Config)}, {peer_verification, verify_none}]), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), rabbit_ct_helpers:set_config(Config, {key_config, KeyConfig}); -init_per_group(multi_resource, Config) -> - add_vhosts(Config), - ResourceServersConfig = - #{ - <<"rabbitmq1">> => [ - {id, <<"rabbitmq1">>}, - {oauth_provider_id, <<"one">>} +init_per_group(without_kid, Config) -> + rabbit_ct_helpers:set_config(Config, [{include_kid, false}]); + +init_per_group(with_resource_servers_rabbitmq1_with_oauth_provider_A, Config) -> + ResourceServersConfig0 = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, resource_servers, #{}]), + Resource0 = maps:get(<<"rabbitmq1">>, ResourceServersConfig0, [{id, <<"rabbitmq1">>}]), + ResourceServersConfig1 = maps:put(<<"rabbitmq1">>, [{oauth_provider_id, <<"A">>} | Resource0], ResourceServersConfig0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, resource_servers, ResourceServersConfig1]); + +init_per_group(with_oauth_providers_A_B_and_C, Config) -> + OAuthProviders = #{ + <<"A">> => [ + {id, <<"A">>}, + {https, [{verify, verify_none}]} ], - <<"rabbitmq2">> => [ - {id, <<"rabbitmq2">>}, - {oauth_provider_id, <<"two">>} - ] - }, - OAuthProviders = - #{ - <<"one">> => [ - {issuer, strict_jwks_url(Config, "/")}, - {jwks_uri, strict_jwks_url(Config, "/jwks1")}, - {https, [{verify, verify_none}]} + <<"B">> => [ + {id, <<"B">>}, + {https, [{verify, verify_none}]} ], - <<"two">> => [ - {issuer, strict_jwks_url(Config, "/")}, - {jwks_uri, strict_jwks_url(Config, "/jwks2")}, - 
{https, [{verify, verify_none}]} + <<"C">> => [ + {id, <<"C">>}, + {https, [{verify, verify_none}]} ] - }, - ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, resource_servers, ResourceServersConfig]), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders]), + }, + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders]), + Config; + +init_per_group(with_default_oauth_provider_B, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"B">>]); +init_per_group(with_oauth_providers_A_with_default_key, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"A">>, OAuthProviders0, []), + OAuthProviders1 = maps:put(<<"A">>, [ + {default_key, ?UTIL_MOD:token_key(?config(fixture_jwksA, Config))} | OAuthProvider], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), + Config; + +init_per_group(with_oauth_provider_A_with_jwks_with_one_signing_key, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"A">>, OAuthProviders0, []), + OAuthProviders1 = maps:put(<<"A">>, [{jwks_uri, strict_jwks_url(Config, "/jwksA")} | OAuthProvider], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), + Config; +init_per_group(with_resource_servers_rabbitmq2, Config) -> + ResourceServersConfig0 = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, 
+ [rabbitmq_auth_backend_oauth2, resource_servers, #{}]), + Resource0 = maps:get(<<"rabbitmq2">>, ResourceServersConfig0, [{id, <<"rabbitmq2">>}]), + ResourceServersConfig1 = maps:put(<<"rabbitmq2">>, Resource0, ResourceServersConfig0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, resource_servers, ResourceServersConfig1]); +init_per_group(with_oauth_providers_B_with_default_key_static_key, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"B">>, OAuthProviders0, []), + OAuthProviders1 = maps:put(<<"B">>, [ + {default_key, ?UTIL_MOD:token_key(?config(fixture_staticB, Config))} | + proplists:delete(default_key, OAuthProvider)], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), + Config; +init_per_group(with_oauth_provider_C_with_two_static_keys, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"C">>, OAuthProviders0, []), + Jwks1 = ?config(fixture_staticC_1, Config), + Jwks2 = ?config(fixture_staticC_2, Config), + SigningKeys = #{ + ?UTIL_MOD:token_key(Jwks1) => {json, Jwks1}, + ?UTIL_MOD:token_key(Jwks2) => {json, Jwks2} + }, + OAuthProviders1 = maps:put(<<"C">>, [{signing_keys, SigningKeys} | OAuthProvider], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), + Config; + +init_per_group(with_root_oauth_provider_with_two_static_keys_and_one_jwks_key, Config) -> + KeyConfig = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, key_config, []]), + Jwks1 = ?config(fixture_static_1, Config), + Jwks2 
= ?config(fixture_static_2, Config), + SigningKeys = #{ + ?UTIL_MOD:token_key(Jwks1) => {json, Jwks1}, + ?UTIL_MOD:token_key(Jwks2) => {json, Jwks2} + }, + KeyConfig1 = [{signing_keys, SigningKeys}, + {jwks_url, strict_jwks_url(Config, "/jwks")}| KeyConfig], + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, key_config, KeyConfig1]), + + Config; +init_per_group(with_root_oauth_provider_with_default_key_1, Config) -> + KeyConfig = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, key_config, []]), + KeyConfig1 = [{default_key, ?UTIL_MOD:token_key(?config(fixture_static_1, Config))} | KeyConfig], + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, key_config, KeyConfig1]), + Config; +init_per_group(with_root_oauth_provider_with_default_jwks_key, Config) -> + KeyConfig = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, key_config, []]), + KeyConfig1 = [{default_key, ?UTIL_MOD:token_key(?config(fixture_jwk, Config))} | KeyConfig], + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, key_config, KeyConfig1]), + Config; + +init_per_group(with_oauth_provider_B_with_one_static_key_and_jwks_with_two_signing_keys, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"B">>, OAuthProviders0, []), + Jwks = ?config(fixture_staticB, Config), + SigningKeys = #{ + ?UTIL_MOD:token_key(Jwks) => {json, Jwks} + }, + OAuthProviders1 = maps:put(<<"B">>, [ + {signing_keys, SigningKeys}, + {jwks_uri, strict_jwks_url(Config, "/jwksB")} | OAuthProvider], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), + Config; + 
+init_per_group(with_resource_servers_rabbitmq3_with_oauth_provider_C, Config) -> + ResourceServersConfig0 = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, resource_servers, #{}]), + Resource0 = maps:get(<<"rabbitmq3">>, ResourceServersConfig0, [ + {id, <<"rabbitmq3">>},{oauth_provider_id, <<"C">>}]), + ResourceServersConfig1 = maps:put(<<"rabbitmq3">>, Resource0, ResourceServersConfig0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, resource_servers, ResourceServersConfig1]); + +init_per_group(with_oauth_providers_C_with_default_key_static_key_1, Config) -> + {ok, OAuthProviders0} = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, oauth_providers]), + OAuthProvider = maps:get(<<"C">>, OAuthProviders0, []), + Jwks = ?config(fixture_staticC_1, Config), + OAuthProviders1 = maps:put(<<"C">>, [ + {default_key, ?UTIL_MOD:token_key(Jwks)} | OAuthProvider], + OAuthProviders0), + + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, oauth_providers, OAuthProviders1]), Config; init_per_group(_Group, Config) -> - add_vhosts(Config), - ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]), Config. 
+end_per_group(without_kid, Config) -> + rabbit_ct_helpers:delete_config(Config, include_kid); + end_per_group(no_peer_verification, Config) -> - delete_vhosts(Config), KeyConfig = rabbit_ct_helpers:set_config(?config(key_config, Config), [{jwks_url, ?config(strict_jwks_url, Config)}, {peer_verification, verify_peer}]), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, key_config, KeyConfig]), rabbit_ct_helpers:set_config(Config, {key_config, KeyConfig}); +end_per_group(with_default_oauth_provider_B, Config) -> + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, unset_env, + [rabbitmq_auth_backend_oauth2, default_oauth_provider]); + +end_per_group(with_root_oauth_provider_with_default_key_1, Config) -> + KeyConfig = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, key_config, []]), + KeyConfig1 = proplists:delete(default_key, KeyConfig), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, key_config, KeyConfig1]), + Config; +end_per_group(with_root_oauth_provider_with_default_jwks_key, Config) -> + KeyConfig = rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, key_config, []]), + KeyConfig1 = proplists:delete(default_key, KeyConfig), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, key_config, KeyConfig1]), + Config; + end_per_group(_Group, Config) -> - delete_vhosts(Config), Config. add_vhosts(Config) -> %% The broker is managed by {init,end}_per_testcase(). lists:foreach(fun(Value) -> rabbit_ct_broker_helpers:add_vhost(Config, Value) end, [<<"vhost1">>, <<"vhost2">>, <<"vhost3">>, <<"vhost4">>]). + %rabbit_ct_helpers:set_config(Config, []). delete_vhosts(Config) -> %% The broker is managed by {init,end}_per_testcase(). 
@@ -211,6 +440,7 @@ preconfigure_node(Config) -> [rabbit, auth_backends, [rabbit_auth_backend_oauth2]]), ok = rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, [rabbitmq_auth_backend_oauth2, resource_server_id, ?RESOURCE_SERVER_ID]), + add_vhosts(Config), Config. start_jwks_server(Config0) -> @@ -218,6 +448,11 @@ start_jwks_server(Config0) -> Jwk1 = ?UTIL_MOD:fixture_jwk(<<"token-key-1">>), Jwk2 = ?UTIL_MOD:fixture_jwk(<<"token-key-2">>), Jwk3 = ?UTIL_MOD:fixture_jwk(<<"token-key-3">>), + Jwk4 = ?UTIL_MOD:fixture_jwk(<<"token-key-4">>), + Jwk5 = ?UTIL_MOD:fixture_jwk(<<"token-key-5">>), + Jwk6 = ?UTIL_MOD:fixture_jwk(<<"token-key-6">>), + Jwk7 = ?UTIL_MOD:fixture_jwk(<<"token-key-7">>), + Jwk8 = ?UTIL_MOD:fixture_jwk(<<"token-key-8">>), %% Assume we don't have more than 100 ports allocated for tests PortBase = rabbit_ct_broker_helpers:get_node_config(Config0, 0, tcp_ports_base), JwksServerPort = PortBase + 100, @@ -232,7 +467,10 @@ start_jwks_server(Config0) -> {ok, _} = application:ensure_all_started(cowboy), CertsDir = ?config(rmq_certsdir, Config), ok = jwks_http_app:start(JwksServerPort, CertsDir, - [ {"/jwks", [Jwk]}, + [ {"/jwksA", [Jwk]}, + {"/jwksB", [Jwk1, Jwk3]}, + {"/jwksRoot", [Jwk2]}, + {"/jwks", [Jwk]}, {"/jwks1", [Jwk1, Jwk3]}, {"/jwks2", [Jwk2]} ]), @@ -246,6 +484,14 @@ start_jwks_server(Config0) -> {non_strict_jwks_url, NonStrictJwksUrl}, {strict_jwks_url, StrictJwksUrl}, {key_config, KeyConfig}, + {fixture_static_1, Jwk7}, + {fixture_static_2, Jwk8}, + {fixture_staticB, Jwk4}, + {fixture_staticC_1, Jwk5}, + {fixture_staticC_2, Jwk6}, + {fixture_jwksB_1, Jwk1}, + {fixture_jwksB_2, Jwk3}, + {fixture_jwksA, Jwk}, {fixture_jwk, Jwk}, {fixture_jwks_1, [Jwk1, Jwk3]}, {fixture_jwks_2, [Jwk2]} @@ -277,12 +523,13 @@ generate_valid_token(Config, Scopes, Audience) -> end, generate_valid_token(Config, Jwk, Scopes, Audience). 
-generate_valid_token(_Config, Jwk, Scopes, Audience) -> +generate_valid_token(Config, Jwk, Scopes, Audience) -> Token = case Audience of undefined -> ?UTIL_MOD:fixture_token_with_scopes(Scopes); DefinedAudience -> maps:put(<<"aud">>, DefinedAudience, ?UTIL_MOD:fixture_token_with_scopes(Scopes)) end, - ?UTIL_MOD:sign_token_hs(Token, Jwk). + IncludeKid = rabbit_ct_helpers:get_config(Config, include_kid, true), + ?UTIL_MOD:sign_token_hs(Token, Jwk, IncludeKid). generate_valid_token_with_extra_fields(Config, ExtraFields) -> Jwk = case rabbit_ct_helpers:get_config(Config, fixture_jwk) of @@ -290,7 +537,7 @@ generate_valid_token_with_extra_fields(Config, ExtraFields) -> Value -> Value end, Token = maps:merge(?UTIL_MOD:fixture_token_with_scopes([]), ExtraFields), - ?UTIL_MOD:sign_token_hs(Token, Jwk). + ?UTIL_MOD:sign_token_hs(Token, Jwk, rabbit_ct_helpers:get_config(Config, include_kid, true)). generate_expired_token(Config) -> generate_expired_token(Config, ?UTIL_MOD:full_permission_scopes()). @@ -300,7 +547,8 @@ generate_expired_token(Config, Scopes) -> undefined -> ?UTIL_MOD:fixture_jwk(); Value -> Value end, - ?UTIL_MOD:sign_token_hs(?UTIL_MOD:expired_token_with_scopes(Scopes), Jwk). + ?UTIL_MOD:sign_token_hs(?UTIL_MOD:expired_token_with_scopes(Scopes), Jwk, + rabbit_ct_helpers:get_config(Config, include_kid, true)). generate_expirable_token(Config, Seconds) -> generate_expirable_token(Config, ?UTIL_MOD:full_permission_scopes(), Seconds). @@ -311,7 +559,8 @@ generate_expirable_token(Config, Scopes, Seconds) -> Value -> Value end, Expiration = os:system_time(seconds) + Seconds, - ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_scopes_and_expiration(Scopes, Expiration), Jwk). + ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_scopes_and_expiration(Scopes, Expiration), + Jwk, rabbit_ct_helpers:get_config(Config, include_kid, true)). 
preconfigure_token(Config) -> Token = generate_valid_token(Config), @@ -321,7 +570,117 @@ preconfigure_token(Config) -> %% %% Test Cases %% - +test_successful_connection_for_rabbitmq1_audience_signed_by_provider_A(Config) -> + Jwks = ?config(fixture_jwksA, Config), + Scopes = <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + Audience = <<"rabbitmq1">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_unsuccessful_connection_for_rabbitmq1_signed_by_provider_A(Config) -> + Jwks = ?config(fixture_jwksA, Config), + Scopes = <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + Audience = <<"rabbitmq1">>, + {_Alg, Token} = generate_valid_token( + Config, + Jwks, + Scopes, + [Audience] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)). + +test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_static_key(Config) -> + Jwks = ?config(fixture_staticB, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_jwks_key_1(Config) -> + Jwks = ?config(fixture_jwksB_1, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq2_audience_signed_by_provider_B_with_jwks_key_2(Config) -> + Jwks = ?config(fixture_jwksB_2, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + test_queue_declare(Config, Jwks, Scopes, Audience). 
+test_successful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_1(Config) -> + Jwks = ?config(fixture_staticC_1, Config), + Scopes = <<"rabbitmq3.configure:*/* rabbitmq3.write:*/* rabbitmq3.read:*/*">>, + Audience = <<"rabbitmq3">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_2(Config) -> + Jwks = ?config(fixture_staticC_2, Config), + Scopes = <<"rabbitmq3.configure:*/* rabbitmq3.write:*/* rabbitmq3.read:*/*">>, + Audience = <<"rabbitmq3">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1(Config) -> + Jwks = ?config(fixture_static_1, Config), + Scopes = <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, + Audience = <<"rabbitmq">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_2(Config) -> + Jwks = ?config(fixture_static_2, Config), + Scopes = <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, + Audience = <<"rabbitmq">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_jwks_key(Config) -> + Jwks = ?config(fixture_jwk, Config), + Scopes = <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, + Audience = <<"rabbitmq">>, + test_queue_declare(Config, Jwks, Scopes, Audience). +test_successful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key(Config) -> + Jwks = ?config(fixture_jwk, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + test_queue_declare(Config, Jwks, Scopes, Audience). 
+test_unsuccessful_connection_for_rabbitmq2_audience_signed_by_root_oauth_provider_with_jwks_key(Config) -> + Jwks = ?config(fixture_jwk, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + {_Alg, Token} = generate_valid_token( + Config, + Jwks, + Scopes, + [Audience] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)). +test_unsuccessful_connection_for_rabbitmq2_signed_by_provider_B_with_static_key(Config) -> + Jwks = ?config(fixture_staticB, Config), + Scopes = <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + Audience = <<"rabbitmq2">>, + {_Alg, Token} = generate_valid_token( + Config, + Jwks, + Scopes, + [Audience] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)). +test_unsuccessful_connection_for_rabbitmq3_audience_signed_by_provider_C_with_static_key_1(Config) -> + Jwks = ?config(fixture_staticC_1, Config), + Scopes = <<"rabbitmq3.configure:*/* rabbitmq3.write:*/* rabbitmq3.read:*/*">>, + Audience = <<"rabbitmq3">>, + {_Alg, Token} = generate_valid_token( + Config, + Jwks, + Scopes, + [Audience] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)). +test_unsuccessful_connection_for_rabbitmq_audience_signed_by_root_oauth_provider_with_static_key_1(Config) -> + Jwks = ?config(fixture_static_1, Config), + Scopes = <<"rabbitmq.configure:*/* rabbitmq.write:*/* rabbitmq.read:*/*">>, + Audience = <<"rabbitmq">>, + {_Alg, Token} = generate_valid_token( + Config, + Jwks, + Scopes, + [Audience] + ), + ?assertMatch({error, {auth_failure, _}}, + open_unmanaged_connection(Config, 0, <<"vhost1">>, <<"username">>, Token)). 
test_successful_connection_with_a_full_permission_token_and_all_defaults(Config) -> {_Algo, Token} = rabbit_ct_helpers:get_config(Config, fixture_jwt), verify_queue_declare_with_token(Config, Token). @@ -333,41 +692,45 @@ verify_queue_declare_with_token(Config, Token) -> amqp_channel:call(Ch, #'queue.declare'{exclusive = true}), close_connection_and_channel(Conn, Ch). -test_m_successful_connection(Config) -> +test_queue_declare(Config, Jwks, Scopes, Audience) -> {_Alg, Token1} = generate_valid_token( Config, - lists:nth(1, ?config(fixture_jwks_1, Config)), - <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, - [<<"rabbitmq1">>] + Jwks, + Scopes, + [Audience] ), - verify_queue_declare_with_token(Config, Token1), + verify_queue_declare_with_token(Config, Token1). - {_Alg2, Token2} = generate_valid_token( - Config, - lists:nth(2, ?config(fixture_jwks_1, Config)), +c(Config) -> + TestCases = [ + {?config(fixture_jwk, Config), <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, - [<<"rabbitmq1">>] - ), - verify_queue_declare_with_token(Config, Token2), - - {_Alg3, Token3} = generate_valid_token( - Config, - lists:nth(1, ?config(fixture_jwks_2, Config)), + <<"rabbitmq1">>}, + {?config(fixture_jwk, Config), <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, - [<<"rabbitmq2">>] - ), - verify_queue_declare_with_token(Config, Token3). + <<"rabbitmq2">>}, + {?config(fixture_jwk, Config), + <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + <<"rabbitmq1">>} + ], + [test_queue_declare(Config, Jwks, Scopes, Audience) || + {Jwks, Scopes, Audience} <- TestCases]. 
-test_m_failed_connection_due_to_missing_key(Config) -> - {_Alg, Token} = generate_valid_token( - Config, - lists:nth(1, ?config(fixture_jwks_2, Config)), %% used signing key for rabbitmq2 instead of rabbitmq1 one +test_successful_queue_declaration_using_multiple_keys_and_audiences(Config) -> + TestCases = [ + {lists:nth(1, ?config(fixture_jwks_1, Config)), <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, - [<<"rabbitmq1">>] - ), - ?assertMatch({error, {auth_failure, _}}, - open_unmanaged_connection(Config, 0, <<"username">>, Token)). + <<"rabbitmq1">>}, + {lists:nth(2, ?config(fixture_jwks_1, Config)), + <<"rabbitmq1.configure:*/* rabbitmq1.write:*/* rabbitmq1.read:*/*">>, + <<"rabbitmq1">>}, + {lists:nth(1, ?config(fixture_jwks_2, Config)), + <<"rabbitmq2.configure:*/* rabbitmq2.write:*/* rabbitmq2.read:*/*">>, + <<"rabbitmq2">>} + ], + [test_queue_declare(Config, Jwks, Scopes, Audience) || + {Jwks, Scopes, Audience} <- TestCases]. test_successful_connection_with_a_full_permission_token_and_explicitly_configured_vhost(Config) -> diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl index cea238a1e857..07fefd9c2c09 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_auth_backend_oauth2_test_util.erl @@ -14,15 +14,27 @@ %% API %% -sign_token_hs(Token, #{<<"kid">> := TokenKey} = Jwk) -> - sign_token_hs(Token, Jwk, TokenKey). +sign_token_hs(Token, Jwk) -> + sign_token_hs(Token, Jwk, true). -sign_token_hs(Token, Jwk, TokenKey) -> - Jws = #{ +sign_token_hs(Token, #{<<"kid">> := TokenKey} = Jwk, IncludeKid) -> + sign_token_hs(Token, Jwk, TokenKey, IncludeKid). + +%%sign_token_hs(Token, Jwk, TokenKey) -> +%% sign_token_hs(Token, Jwk, TokenKey, true). 
+ +sign_token_hs(Token, Jwk, TokenKey, IncludeKid) -> + Jws0 = #{ <<"alg">> => <<"HS256">>, <<"kid">> => TokenKey }, - sign_token(Token, Jwk, Jws). + case IncludeKid of + true -> + Jws = maps:put(<<"kid">>, TokenKey, Jws0), + sign_token(Token, Jwk, Jws); + false -> + sign_token_no_kid(Token, Jwk) + end. sign_token_rsa(Token, Jwk, TokenKey) -> Jws = #{ @@ -39,12 +51,15 @@ sign_token(Token, Jwk, Jws) -> Signed = jose_jwt:sign(Jwk, Jws, Token), jose_jws:compact(Signed). +token_key(#{<<"kid">> := TokenKey} = _Token) -> + TokenKey. + fixture_jwk() -> - fixture_jwk(<<"token-key">>). + fixture_jwk(<<"token-key">>). fixture_jwk(TokenKey) -> fixture_jwk(TokenKey, <<"dG9rZW5rZXk">>). - + fixture_jwk(TokenKey, K) -> #{<<"alg">> => <<"HS256">>, <<"k">> => K, diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl index b94f743baba0..1d3736bd414a 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_config_SUITE.erl @@ -18,610 +18,919 @@ -define(AUTH_PORT, 8000). -all() -> - [ - {group, with_resource_server_id}, - {group, without_resource_server_id}, - {group, with_resource_servers}, - {group, with_resource_servers_and_resource_server_id}, - {group, inheritance_group} - - ]. 
-groups() -> - [ - {with_rabbitmq_node, [], [ - add_signing_keys_for_top_specific_resource_server, - add_signing_keys_for_top_level_resource_server, - - replace_signing_keys_for_top_level_resource_server, - replace_signing_keys_for_specific_resource_server - ] - }, - - {with_resource_server_id, [], [ - get_default_resource_server_id, - get_allowed_resource_server_ids_returns_resource_server_id, - find_audience_in_resource_server_ids_found_resource_server_id, - get_oauth_provider_should_fail, - {with_jwks_url, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, - {with_oauth_providers_A_with_jwks_uri, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, - {with_default_oauth_provider_A, [], [ - get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri - ] - } - ] - }, - {with_oauth_providers_A_B_with_jwks_uri, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, - {with_default_oauth_provider_B, [], [ - get_oauth_provider_should_return_oauth_provider_B_with_jwks_uri - ] - } - ] - } - ] - }, - {with_oauth_providers_A_with_jwks_uri, [], [ - get_oauth_provider_should_fail, - {with_default_oauth_provider_A, [], [ - get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri - ] - } - ] - }, - {with_issuer, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, - {with_oauth_providers_A_with_issuer, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, - {with_default_oauth_provider_A, [], [ - get_oauth_provider_should_return_oauth_provider_A_with_all_discovered_endpoints - ] - } - ] - }, - {with_oauth_providers_A_B_with_issuer, [], [ - get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, - {with_default_oauth_provider_B, [], [ - get_oauth_provider_should_return_oauth_provider_B_with_all_discovered_endpoints - ] - } - ] - } - ] - } - ] - }, - {without_resource_server_id, [], [ - 
get_default_resource_server_id_returns_error, - get_allowed_resource_server_ids_returns_empty_list - ] - }, - {with_resource_servers, [], [ - get_allowed_resource_server_ids_returns_resource_servers_ids, - find_audience_in_resource_server_ids_found_one_resource_servers, - index_resource_servers_by_id_else_by_key, - {with_jwks_url, [], [ - get_oauth_provider_for_both_resources_should_return_root_oauth_provider, - {with_oauth_providers_A_with_jwks_uri, [], [ - {with_default_oauth_provider_A, [], [ - get_oauth_provider_for_both_resources_should_return_oauth_provider_A - ] - } - ] - }, - {with_different_oauth_provider_for_each_resource, [], [ - {with_oauth_providers_A_B_with_jwks_uri, [], [ +all() -> [ + {group, with_rabbitmq_node}, + {group, with_resource_server_id}, + {group, without_resource_server_id}, + {group, with_resource_servers}, + {group, with_resource_servers_and_resource_server_id}, + {group, inheritance_group} +]. +groups() -> [ + {with_rabbitmq_node, [], [ + add_signing_keys_for_specific_oauth_provider, + add_signing_keys_for_root_oauth_provider, + + replace_signing_keys_for_root_oauth_provider, + replace_signing_keys_for_specific_oauth_provider, + {with_root_static_signing_keys, [], [ + replace_merge_root_static_keys_with_newly_added_keys, + replace_override_root_static_keys_with_newly_added_keys + ]}, + {with_static_signing_keys_for_specific_oauth_provider, [], [ + replace_merge_static_keys_with_newly_added_keys, + replace_override_static_keys_with_newly_added_keys + ]} + ]}, + {with_resource_server_id, [], [ + get_default_resource_server_id, + get_allowed_resource_server_ids_returns_resource_server_id, + get_resource_server_id_for_rabbit_audience_returns_rabbit, + get_resource_server_id_for_none_audience_should_fail, + get_resource_server_id_for_unknown_audience_should_fail, + {with_verify_aud_false, [], [ + get_resource_server_id_for_rabbit_audience_returns_rabbit, + get_resource_server_id_for_none_audience_returns_rabbit, + 
get_resource_server_id_for_unknown_audience_returns_rabbit + ]}, + find_audience_in_resource_server_ids_found_resource_server_id, + get_oauth_provider_root_with_jwks_uri_should_fail, + get_default_key_should_fail, + {with_default_key, [], [ + get_default_key + ]}, + {with_static_signing_keys, [], [ + get_signing_keys + ]}, + {with_static_signing_keys_for_oauth_provider_A, [], [ + get_signing_keys_for_oauth_provider_A + ]}, + get_algorithms_should_return_undefined, + {with_algorithms, [], [ + get_algorithms + ]}, + {with_jwks_url, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_oauth_providers_A_with_jwks_uri, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_default_oauth_provider_A, [], [ + get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri + ]} + ]}, + {with_oauth_providers_A_B_with_jwks_uri, [], [ + get_default_key_for_provider_A_should_fail, + {with_default_key, [], [ + get_default_key_for_provider_A_should_fail + ]}, + {with_default_key_for_provider_A, [], [ + get_default_key_for_provider_A + ]}, + get_algorithms_for_provider_A_should_return_undefined, + {with_algorithms_for_provider_A, [], [ + get_algorithms_for_provider_A + ]}, + get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri, + {with_default_oauth_provider_B, [], [ + get_oauth_provider_should_return_oauth_provider_B_with_jwks_uri + ]} + ]} + ]}, + {with_oauth_providers_A_with_jwks_uri, [], [ + get_oauth_provider_root_with_jwks_uri_should_fail, + {with_default_oauth_provider_A, [], [ + get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri + ]} + ]}, + {with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_oauth_providers_A_with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_default_oauth_provider_A, [], [ + 
get_oauth_provider_should_return_oauth_provider_A_with_all_discovered_endpoints + ]} + ]}, + {with_oauth_providers_A_B_with_issuer, [], [ + get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints, + {with_default_oauth_provider_B, [], [ + get_oauth_provider_should_return_oauth_provider_B_with_all_discovered_endpoints + ]} + ]} + ]} + ]}, + {without_resource_server_id, [], [ + get_default_resource_server_id_returns_error, + get_allowed_resource_server_ids_returns_empty_list + ]}, + {with_resource_servers, [], [ + get_allowed_resource_server_ids_returns_resource_servers_ids, + find_audience_in_resource_server_ids_found_one_resource_servers, + index_resource_servers_by_id_else_by_key, + is_verify_aud_for_resource_two_returns_true, + {with_verify_aud_false_for_resource_two, [], [ + is_verify_aud_for_resource_one_returns_true, + is_verify_aud_for_resource_two_returns_false + ]}, + get_scope_prefix_for_resource_one_returns_default_scope_prefix, + {with_root_scope_prefix, [], [ + get_scope_prefix_for_resource_one_returns_root_scope_prefix, + {with_empty_scope_prefix_for_resource_one, [], [ + get_scope_prefix_for_resource_one_returns_empty_scope_prefix, + get_scope_prefix_for_resource_two_returns_root_scope_prefix + ]} + ]}, + {with_jwks_url, [], [ + get_oauth_provider_for_both_resources_should_return_root_oauth_provider, + {with_oauth_providers_A_with_jwks_uri, [], [ + {with_default_oauth_provider_A, [], [ + get_oauth_provider_for_both_resources_should_return_oauth_provider_A + ]} + ]}, + {with_different_oauth_provider_for_each_resource, [], [ + {with_oauth_providers_A_B_with_jwks_uri, [], [ get_oauth_provider_for_resource_one_should_return_oauth_provider_A, get_oauth_provider_for_resource_two_should_return_oauth_provider_B - ]} - ] - } - ] - } - ] - }, - {with_resource_servers_and_resource_server_id, [], [ - get_allowed_resource_server_ids_returns_all_resource_servers_ids, - find_audience_in_resource_server_ids_found_resource_server_id, - 
find_audience_in_resource_server_ids_found_one_resource_servers, - find_audience_in_resource_server_ids_using_binary_audience - - ] - }, - - {inheritance_group, [], [ - get_key_config, - get_additional_scopes_key, - get_additional_scopes_key_when_not_defined, - is_verify_aud, - is_verify_aud_when_is_false, - get_default_preferred_username_claims, - get_preferred_username_claims, - get_scope_prefix, - get_scope_prefix_when_not_defined, - get_resource_server_type, - get_resource_server_type_when_not_defined, - has_scope_aliases, - has_scope_aliases_when_not_defined, - get_scope_aliases - ] - } - - ]. + ]} + ]} + ]} + ]}, + {with_resource_servers_and_resource_server_id, [], [ + get_allowed_resource_server_ids_returns_all_resource_servers_ids, + find_audience_in_resource_server_ids_found_resource_server_id, + find_audience_in_resource_server_ids_found_one_resource_servers, + find_audience_in_resource_server_ids_using_binary_audience + ]}, + + {inheritance_group, [], [ + get_additional_scopes_key, + get_additional_scopes_key_when_not_defined, + is_verify_aud, + is_verify_aud_when_is_false, + get_default_preferred_username_claims, + get_preferred_username_claims, + get_scope_prefix, + get_empty_scope_prefix, + get_scope_prefix_when_not_defined, + get_resource_server_type, + get_resource_server_type_when_not_defined, + has_scope_aliases, + has_scope_aliases_when_not_defined, + get_scope_aliases + ]} +]. init_per_suite(Config) -> - rabbit_ct_helpers:log_environment(), - rabbit_ct_helpers:run_setup_steps(Config). + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:run_setup_steps(Config). end_per_suite(Config) -> - rabbit_ct_helpers:run_teardown_steps(Config). + rabbit_ct_helpers:run_teardown_steps(Config). 
init_per_group(with_rabbitmq_node, Config) -> - Config1 = rabbit_ct_helpers:set_config(Config, [ - {rmq_nodename_suffix, with_rabbitmq_node}, - {rmq_nodes_count, 1} - ]), - rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()); - + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, with_rabbitmq_node}, + {rmq_nodes_count, 1} + ]), + rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps()); +init_per_group(with_default_key, Config) -> + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, + proplists:delete(default_key, KeyConfig) ++ [{default_key,<<"default-key">>}]), + Config; +init_per_group(with_root_static_signing_keys, Config) -> + KeyConfig = call_get_env(Config, key_config, []), + SigningKeys = #{ + <<"mykey-root-1">> => <<"some key root-1">>, + <<"mykey-root-2">> => <<"some key root-2">> + }, + call_set_env(Config, key_config, + proplists:delete(default_key, KeyConfig) ++ [{signing_keys,SigningKeys}]), + Config; +init_per_group(with_static_signing_keys_for_specific_oauth_provider, Config) -> + OAuthProviders = call_get_env(Config, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders, []), + SigningKeys = #{ + <<"mykey-root-1">> => <<"some key root-1">>, + <<"mykey-root-2">> => <<"some key root-2">> + }, + OAuthProvider1 = proplists:delete(signing_keys, OAuthProvider) ++ [{signing_keys, SigningKeys}], + + call_set_env(Config, oauth_providers, maps:put(<<"A">>, OAuthProvider1, OAuthProviders)), + Config; + +init_per_group(with_default_key_for_provider_A, Config) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders, []), + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, maps:put(<<"A">>, + proplists:delete(default_key, OAuthProvider) ++ [{default_key,<<"A-default-key">>}], + 
OAuthProviders)), + Config; +init_per_group(with_static_signing_keys, Config) -> + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + SigningKeys = #{<<"mykey-1-1">> => <<"some key 1-1">>, + <<"mykey-1-2">> => <<"some key 1-2">>}, + application:set_env(rabbitmq_auth_backend_oauth2, key_config, + proplists:delete(signing_keys, KeyConfig) ++ [{signing_keys, SigningKeys}]), + Config; +init_per_group(with_static_signing_keys_for_oauth_provider_A, Config) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders, []), + SigningKeys = #{<<"A-mykey-1-1">> => <<"A-some key 1-1">>, + <<"A-mykey-1-2">> => <<"A-some key 1-2">>}, + + OAuthProvider0 = proplists:delete(signing_keys, OAuthProvider) ++ + [{signing_keys, SigningKeys}], + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + maps:put(<<"A">>, OAuthProvider0, OAuthProviders)), + Config; init_per_group(with_jwks_url, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{jwks_url,build_url_to_oauth_provider(<<"/keys">>)}]), - Config; + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig + ++ [{jwks_url,build_url_to_oauth_provider(<<"/keys">>)}]), + [{key_config_before_group_with_jwks_url, KeyConfig} | Config]; init_per_group(with_issuer, Config) -> - {ok, _} = application:ensure_all_started(inets), - {ok, _} = application:ensure_all_started(ssl), - application:ensure_all_started(cowboy), - CertsDir = ?config(rmq_certsdir, Config), - CaCertFile = filename:join([CertsDir, "testca", "cacert.pem"]), - SslOptions = ssl_options(verify_peer, false, CaCertFile), + {ok, _} = application:ensure_all_started(inets), + {ok, _} = application:ensure_all_started(ssl), + application:ensure_all_started(cowboy), + CertsDir = ?config(rmq_certsdir, Config), + CaCertFile = 
filename:join([CertsDir, "testca", "cacert.pem"]), + SslOptions = ssl_options(verify_peer, false, CaCertFile), - HttpOauthServerExpectations = get_openid_configuration_expectations(), - ListOfExpectations = maps:values(proplists:to_map(HttpOauthServerExpectations)), + HttpOauthServerExpectations = get_openid_configuration_expectations(), + ListOfExpectations = maps:values(proplists:to_map(HttpOauthServerExpectations)), - start_https_oauth_server(?AUTH_PORT, CertsDir, ListOfExpectations), - application:set_env(rabbitmq_auth_backend_oauth2, use_global_locks, false), - application:set_env(rabbitmq_auth_backend_oauth2, issuer, build_url_to_oauth_provider(<<"/">>)), - application:set_env(rabbitmq_auth_backend_oauth2, key_config, SslOptions), + start_https_oauth_server(?AUTH_PORT, CertsDir, ListOfExpectations), + application:set_env(rabbitmq_auth_backend_oauth2, use_global_locks, false), + application:set_env(rabbitmq_auth_backend_oauth2, issuer, build_url_to_oauth_provider(<<"/">>)), + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig ++ SslOptions), - [{ssl_options, SslOptions} | Config]; + [{key_config_before_group_with_issuer, KeyConfig}, {ssl_options, SslOptions} | Config]; init_per_group(with_oauth_providers_A_with_jwks_uri, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{<<"A">> => [ - {issuer,build_url_to_oauth_provider(<<"/A">>) }, - {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>) } - ] } ), - Config; + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>) } + ] } ), + Config; init_per_group(with_oauth_providers_A_with_issuer, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{<<"A">> => [ - 
{issuer,build_url_to_oauth_provider(<<"/A">>) }, - {https, ?config(ssl_options, Config)} - ] } ), - Config; + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {https, ?config(ssl_options, Config)} + ] } ), + Config; init_per_group(with_oauth_providers_A_B_with_jwks_uri, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{ <<"A">> => [ - {issuer,build_url_to_oauth_provider(<<"/A">>) }, - {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>)} - ], - <<"B">> => [ - {issuer,build_url_to_oauth_provider(<<"/B">>) }, - {jwks_uri,build_url_to_oauth_provider(<<"/B/keys">>)} - ] }), - Config; + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/A/keys">>)} + ], + <<"B">> => [ + {issuer,build_url_to_oauth_provider(<<"/B">>) }, + {jwks_uri,build_url_to_oauth_provider(<<"/B/keys">>)} + ] }), + Config; init_per_group(with_oauth_providers_A_B_with_issuer, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{ <<"A">> => [ - {issuer,build_url_to_oauth_provider(<<"/A">>) }, - {https, ?config(ssl_options, Config)} - ], - <<"B">> => [ - {issuer,build_url_to_oauth_provider(<<"/B">>) }, - {https, ?config(ssl_options, Config)} - ] }), - Config; + {issuer,build_url_to_oauth_provider(<<"/A">>) }, + {https, ?config(ssl_options, Config)} + ], + <<"B">> => [ + {issuer,build_url_to_oauth_provider(<<"/B">>) }, + {https, ?config(ssl_options, Config)} + ] }), + Config; init_per_group(with_default_oauth_provider_A, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"A">>), - Config; + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"A">>), + Config; init_per_group(with_default_oauth_provider_B, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"B">>), - 
Config; + application:set_env(rabbitmq_auth_backend_oauth2, default_oauth_provider, <<"B">>), + Config; init_per_group(with_resource_server_id, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), - Config; + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), + Config; + +init_per_group(with_root_scope_prefix, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<"some-prefix:">>), + Config; +init_per_group(with_empty_scope_prefix_for_resource_one, Config) -> + ResourceServers = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers, #{}), + Proplist = maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers, []), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + maps:put(?RABBITMQ_RESOURCE_ONE, [{scope_prefix, <<"">>} | proplists:delete(scope_prefix, Proplist)], ResourceServers)), + Config; + +init_per_group(with_verify_aud_false, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, verify_aud, false), + Config; +init_per_group(with_verify_aud_false_for_resource_two, Config) -> + ResourceServers = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers, #{}), + Proplist = maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers, []), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + maps:put(?RABBITMQ_RESOURCE_TWO, [{verify_aud, false} | proplists:delete(verify_aud, Proplist)], ResourceServers)), + Config; +init_per_group(with_algorithms, Config) -> + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig ++ + [{algorithms, [<<"HS256">>, <<"RS256">>]}]), + [{algorithms, [<<"HS256">>, <<"RS256">>]} | Config]; +init_per_group(with_algorithms_for_provider_A, Config) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders, []), 
+ application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + maps:put(<<"A">>, [{algorithms, [<<"HS256">>, <<"RS256">>]} | OAuthProvider], OAuthProviders)), + [{algorithms, [<<"HS256">>, <<"RS256">>]} | Config]; init_per_group(with_resource_servers_and_resource_server_id, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), - application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{jwks_url,<<"https://oauth-for-rabbitmq">> }]), - application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, - #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ - {jwks_url,<<"https://oauth-for-rabbitmq1">> } - ] - } - - ], - ?RABBITMQ_RESOURCE_TWO => [ { key_config, [ - {jwks_url,<<"https://oauth-for-rabbitmq2">> } - ] - } - ] - }), - Config; + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, [{jwks_url,<<"https://oauth-for-rabbitmq">> }]), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ + { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq1">> } + ]} + + ], + ?RABBITMQ_RESOURCE_TWO => [ + { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq2">> } + ]} + ] + }), + Config; init_per_group(with_different_oauth_provider_for_each_resource, Config) -> - {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), - Rabbit1 = maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers) ++ [ {oauth_provider_id, <<"A">>} ], - Rabbit2 = maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers) ++ [ {oauth_provider_id, <<"B">>} ], - ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), - application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), - Config; + {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), + Rabbit1 = 
maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers) ++ [ {oauth_provider_id, <<"A">>} ], + Rabbit2 = maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers) ++ [ {oauth_provider_id, <<"B">>} ], + ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), + Config; init_per_group(with_resource_servers, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, - #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ - {jwks_url,<<"https://oauth-for-rabbitmq1">> } - ] - } - ], - ?RABBITMQ_RESOURCE_TWO => [ { key_config, [ - {jwks_url,<<"https://oauth-for-rabbitmq2">> } - ] - } - ], - <<"0">> => [ {id, <<"rabbitmq-0">> } ], - <<"1">> => [ {id, <<"rabbitmq-1">> } ] + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ + { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq1">> } + ]} + ], + ?RABBITMQ_RESOURCE_TWO => [ + { key_config, [ + {jwks_url,<<"https://oauth-for-rabbitmq2">> } + ]} + ], + <<"0">> => [ {id, <<"rabbitmq-0">> } ], + <<"1">> => [ {id, <<"rabbitmq-1">> } ] }), Config; init_per_group(inheritance_group, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), - application:set_env(rabbitmq_auth_backend_oauth2, resource_server_type, <<"rabbitmq-type">>), - application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<"some-prefix-">>), - application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"roles">>), - application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{}), - - application:set_env(rabbitmq_auth_backend_oauth2, key_config, [ {jwks_url,<<"https://oauth-for-rabbitmq">> } ]), - - application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, - #{?RABBITMQ_RESOURCE_ONE => [ { key_config, [ {jwks_url,<<"https://oauth-for-rabbitmq1">> } ] }, - { extra_scopes_source, 
<<"extra-scope-1">>}, - { verify_aud, false}, - { preferred_username_claims, [<<"email-address">>] }, - { scope_prefix, <<"my-prefix:">> }, - { resource_server_type, <<"my-type">> }, - { scope_aliases, #{} } - ], - ?RABBITMQ_RESOURCE_TWO => [ {id, ?RABBITMQ_RESOURCE_TWO } ] - } + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, ?RABBITMQ), + application:set_env(rabbitmq_auth_backend_oauth2, resource_server_type, <<"rabbitmq-type">>), + application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<"some-prefix-">>), + application:set_env(rabbitmq_auth_backend_oauth2, extra_scopes_source, <<"roles">>), + application:set_env(rabbitmq_auth_backend_oauth2, scope_aliases, #{}), + + application:set_env(rabbitmq_auth_backend_oauth2, key_config, [ {jwks_url,<<"https://oauth-for-rabbitmq">> } ]), + + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + #{?RABBITMQ_RESOURCE_ONE => [ + { extra_scopes_source, <<"extra-scope-1">>}, + { verify_aud, false}, + { preferred_username_claims, [<<"email-address">>] }, + { scope_prefix, <<"my-prefix:">> }, + { resource_server_type, <<"my-type">> }, + { scope_aliases, #{} } + ], + ?RABBITMQ_RESOURCE_TWO => [ {id, ?RABBITMQ_RESOURCE_TWO } ] + } ), - Config; + Config; init_per_group(_any, Config) -> - Config. + Config. 
end_per_group(with_rabbitmq_node, Config) -> - rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); - + rabbit_ct_helpers:run_steps(Config, rabbit_ct_broker_helpers:teardown_steps()); +end_per_group(with_root_static_signing_keys, Config) -> + KeyConfig = call_get_env(Config, key_config, []), + call_set_env(Config, key_config, KeyConfig), + Config; +end_per_group(get_empty_scope_prefix, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + Config; end_per_group(with_resource_server_id, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; +end_per_group(with_verify_aud_false, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, verify_aud), + Config; +end_per_group(with_verify_aud_false_for_resource_two, Config) -> + ResourceServers = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers, #{}), + Proplist = maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers, []), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + maps:put(?RABBITMQ_RESOURCE_TWO, proplists:delete(verify_aud, Proplist), ResourceServers)), + Config; +end_per_group(with_empty_scope_prefix_for_resource_one, Config) -> + ResourceServers = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers, #{}), + Proplist = maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers, []), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, + maps:put(?RABBITMQ_RESOURCE_ONE, proplists:delete(scope_prefix, Proplist), ResourceServers)), + Config; + +end_per_group(with_default_key, Config) -> + KeyConfig = application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, + proplists:delete(default_key, KeyConfig)), + Config; +end_per_group(with_algorithms, Config) -> + KeyConfig = 
application:get_env(rabbitmq_auth_backend_oauth2, key_config, []), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, + proplists:delete(algorithms, KeyConfig)), + Config; +end_per_group(with_algorithms_for_provider_A, Config) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders, []), + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + maps:put(<<"A">>, proplists:delete(algorithms, OAuthProvider),OAuthProviders)), + Config; +end_per_group(with_static_signing_keys_for_oauth_provider_A, Config) -> + OAuthProviders = application:get_env(rabbitmq_auth_backend_oauth2, oauth_providers, #{}), + OAuthProvider = maps:get(<<"A">>, OAuthProviders), + OAuthProvider0 = proplists:delete(signing_keys, OAuthProvider), + application:set_env(rabbitmq_auth_backend_oauth2, oauth_providers, + maps:put(<<"A">>, OAuthProvider0, OAuthProviders)), + Config; end_per_group(with_jwks_url, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, key_config), - Config; + KeyConfig = ?config(key_config_before_group_with_jwks_url, Config), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig), + Config; end_per_group(with_issuer, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, issuer), - stop_http_auth_server(), - Config; + KeyConfig = ?config(key_config_before_group_with_issuer, Config), + application:unset_env(rabbitmq_auth_backend_oauth2, issuer), + application:set_env(rabbitmq_auth_backend_oauth2, key_config, KeyConfig), + stop_http_auth_server(), + Config; end_per_group(with_oauth_providers_A_with_jwks_uri, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_oauth_providers_A_with_issuer, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + 
application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_oauth_providers_A_B_with_jwks_uri, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_oauth_providers_A_B_with_issuer, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_oauth_providers_A, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_oauth_providers_A_B, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, oauth_providers), + Config; end_per_group(with_default_oauth_provider_B, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; end_per_group(with_default_oauth_provider_A, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, default_oauth_provider), + Config; end_per_group(get_oauth_provider_for_resource_server_id, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; end_per_group(with_resource_servers_and_resource_server_id, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + Config; end_per_group(with_resource_servers, Config) -> - 
application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), + Config; end_per_group(with_different_oauth_provider_for_each_resource, Config) -> - {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), - Rabbit1 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers)), - Rabbit2 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers)), - ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), - application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), - Config; + {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), + Rabbit1 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_ONE, ResourceServers)), + Rabbit2 = proplists:delete(oauth_provider_id, maps:get(?RABBITMQ_RESOURCE_TWO, ResourceServers)), + ResourceServers1 = maps:update(?RABBITMQ_RESOURCE_ONE, Rabbit1, ResourceServers), + application:set_env(rabbitmq_auth_backend_oauth2, resource_servers, maps:update(?RABBITMQ_RESOURCE_TWO, Rabbit2, ResourceServers1)), + Config; end_per_group(inheritance_group, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), - application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), - application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_id), + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), - application:unset_env(rabbitmq_auth_backend_oauth2, key_config), + application:unset_env(rabbitmq_auth_backend_oauth2, key_config), - application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), - Config; + 
application:unset_env(rabbitmq_auth_backend_oauth2, resource_servers), + Config; + +end_per_group(with_root_scope_prefix, Config) -> + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + Config; end_per_group(_any, Config) -> - Config. + Config. init_per_testcase(get_preferred_username_claims, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, preferred_username_claims, [<<"username">>]), - Config; + application:set_env(rabbitmq_auth_backend_oauth2, preferred_username_claims, [<<"username">>]), + Config; init_per_testcase(get_additional_scopes_key_when_not_defined, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, extra_scopes_source), + Config; init_per_testcase(is_verify_aud_when_is_false, Config) -> - application:set_env(rabbitmq_auth_backend_oauth2, verify_aud, false), - Config; + application:set_env(rabbitmq_auth_backend_oauth2, verify_aud, false), + Config; +init_per_testcase(get_empty_scope_prefix, Config) -> + application:set_env(rabbitmq_auth_backend_oauth2, scope_prefix, <<"">>), + Config; init_per_testcase(get_scope_prefix_when_not_defined, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, scope_prefix), + Config; init_per_testcase(get_resource_server_type_when_not_defined, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_type), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, resource_server_type), + Config; init_per_testcase(has_scope_aliases_when_not_defined, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, scope_aliases), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, scope_aliases), + Config; init_per_testcase(_TestCase, Config) -> - Config. + Config. 
end_per_testcase(get_preferred_username_claims, Config) -> - application:unset_env(rabbitmq_auth_backend_oauth2, preferred_username_claims), - Config; + application:unset_env(rabbitmq_auth_backend_oauth2, preferred_username_claims), + Config; end_per_testcase(_Testcase, Config) -> - Config. + Config. %% ----- +call_set_env(Config, Par, Value) -> + rabbit_ct_broker_helpers:rpc(Config, 0, application, set_env, + [rabbitmq_auth_backend_oauth2, Par, Value]). + +call_get_env(Config, Par, Def) -> + rabbit_ct_broker_helpers:rpc(Config, 0, application, get_env, + [rabbitmq_auth_backend_oauth2, Par, Def]). + call_add_signing_key(Config, Args) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_key, Args). + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_key, Args). call_get_signing_keys(Config, Args) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_keys, Args). + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_keys, Args). +call_get_signing_keys(Config) -> + call_get_signing_keys(Config, []). call_get_signing_key(Config, Args) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_key, Args). + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, get_signing_key, Args). call_add_signing_keys(Config, Args) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_keys, Args). + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, add_signing_keys, Args). call_replace_signing_keys(Config, Args) -> - rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, replace_signing_keys, Args). 
- -add_signing_keys_for_top_level_resource_server(Config) -> - #{<<"mykey-1">> := <<"some key 1">>} = call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), - #{<<"mykey-1">> := <<"some key 1">>} = call_get_signing_keys(Config, []), - - #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = call_add_signing_key(Config, [<<"mykey-2">>, <<"some key 2">>]), - #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = call_get_signing_keys(Config, []), - - ?assertEqual(<<"some key 1">>, call_get_signing_key(Config, [<<"mykey-1">>, ?RABBITMQ])). - -add_signing_keys_for_top_specific_resource_server(Config) -> - #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_add_signing_key(Config, [<<"my-resource-server-3">>, <<"mykey-3-1">>, <<"some key 3-1">>]), - #{<<"mykey-4-1">> := <<"some key 4-1">>} = call_add_signing_key(Config, [<<"my-resource-server-4">>, <<"mykey-4-1">>, <<"some key 4-1">>]), - #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_get_signing_keys(Config, [<<"my-resource-server-3">>]), - #{<<"mykey-4-1">> := <<"some key 4-1">>} = call_get_signing_keys(Config, [<<"my-resource-server-4">>]), - - #{<<"mykey-3-1">> := <<"some key 3-1">>, <<"mykey-3-2">> := <<"some key 3-2">>} = call_add_signing_key(Config, [<<"my-resource-server-3">>, <<"mykey-3-2">>, <<"some key 3-2">>]), - - #{<<"mykey-1">> := <<"some key 1">>} = call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), - #{<<"mykey-1">> := <<"some key 1">>} = call_get_signing_keys(Config, []), + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_oauth2_config, replace_signing_keys, Args). 
+ + +add_signing_keys_for_root_oauth_provider(Config) -> + #{<<"mykey-1">> := <<"some key 1">>} = + call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + #{<<"mykey-1">> := <<"some key 1">>} = + call_get_signing_keys(Config), + + #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = + call_add_signing_key(Config, [<<"mykey-2">>, <<"some key 2">>]), + #{<<"mykey-1">> := <<"some key 1">>, <<"mykey-2">> := <<"some key 2">>} = + call_get_signing_keys(Config), + + ?assertEqual(<<"some key 1">>, + call_get_signing_key(Config, [<<"mykey-1">>])). + +add_signing_keys_for_specific_oauth_provider(Config) -> + #{<<"mykey-3-1">> := <<"some key 3-1">>} = + call_add_signing_key(Config, + [<<"mykey-3-1">>, <<"some key 3-1">>, <<"my-oauth-provider-3">>]), + #{<<"mykey-4-1">> := <<"some key 4-1">>} = + call_add_signing_key(Config, + [<<"mykey-4-1">>, <<"some key 4-1">>, <<"my-oauth-provider-4">>]), + #{<<"mykey-3-1">> := <<"some key 3-1">>} = + call_get_signing_keys(Config, [<<"my-oauth-provider-3">>]), + #{<<"mykey-4-1">> := <<"some key 4-1">>} = + call_get_signing_keys(Config, [<<"my-oauth-provider-4">>]), + + #{<<"mykey-3-1">> := <<"some key 3-1">>, + <<"mykey-3-2">> := <<"some key 3-2">>} = + call_add_signing_key(Config, [ + <<"mykey-3-2">>, <<"some key 3-2">>, <<"my-oauth-provider-3">>]), + + #{<<"mykey-1">> := <<"some key 1">>} = + call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + #{<<"mykey-1">> := <<"some key 1">>} = + call_get_signing_keys(Config, []), + + ?assertEqual(<<"some key 3-1">>, + call_get_signing_key(Config, [<<"mykey-3-1">> , <<"my-oauth-provider-3">>])). 
+ +replace_merge_root_static_keys_with_newly_added_keys(Config) -> + NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys]), + #{ <<"mykey-root-1">> := <<"some key root-1">>, + <<"mykey-root-2">> := <<"some key root-2">>, + <<"key-2">> := <<"some key 2">>, + <<"key-3">> := <<"some key 3">> + } = call_get_signing_keys(Config). +replace_merge_static_keys_with_newly_added_keys(Config) -> + NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys, <<"A">>]), + #{ <<"mykey-root-1">> := <<"some key root-1">>, + <<"mykey-root-2">> := <<"some key root-2">>, + <<"key-2">> := <<"some key 2">>, + <<"key-3">> := <<"some key 3">> + } = call_get_signing_keys(Config, [<<"A">>]). +replace_override_root_static_keys_with_newly_added_keys(Config) -> + NewKeys = #{<<"mykey-root-1">> => <<"new key root-1">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys]), + #{ <<"mykey-root-1">> := <<"new key root-1">>, + <<"mykey-root-2">> := <<"some key root-2">>, + <<"key-3">> := <<"some key 3">> + } = call_get_signing_keys(Config). +replace_override_static_keys_with_newly_added_keys(Config) -> + NewKeys = #{<<"mykey-root-1">> => <<"new key root-1">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys, <<"A">>]), + #{ <<"mykey-root-1">> := <<"new key root-1">>, + <<"mykey-root-2">> := <<"some key root-2">>, + <<"key-3">> := <<"some key 3">> + } = call_get_signing_keys(Config, [<<"A">>]). + +replace_signing_keys_for_root_oauth_provider(Config) -> + call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), + NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys]), + #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = + call_get_signing_keys(Config). 
+ +replace_signing_keys_for_specific_oauth_provider(Config) -> + OAuthProviderId = <<"my-oauth-provider-3">>, + #{<<"mykey-3-1">> := <<"some key 3-1">>} = + call_add_signing_key(Config, + [<<"mykey-3-1">>, <<"some key 3-1">>, OAuthProviderId]), + NewKeys = #{<<"key-2">> => <<"some key 2">>, + <<"key-3">> => <<"some key 3">>}, + call_replace_signing_keys(Config, [NewKeys, OAuthProviderId]), + #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = + call_get_signing_keys(Config, [OAuthProviderId]). - ?assertEqual(<<"some key 3-1">>, call_get_signing_key(Config, [<<"mykey-3-1">> , <<"my-resource-server-3">>])). -replace_signing_keys_for_top_level_resource_server(Config) -> - call_add_signing_key(Config, [<<"mykey-1">>, <<"some key 1">>]), - NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, - call_replace_signing_keys(Config, [NewKeys]), - #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = call_get_signing_keys(Config, []). +get_default_resource_server_id_returns_error(_Config) -> + {error, _} = rabbit_oauth2_config:get_default_resource_server_id(). -replace_signing_keys_for_specific_resource_server(Config) -> - ResourceServerId = <<"my-resource-server-3">>, - #{<<"mykey-3-1">> := <<"some key 3-1">>} = call_add_signing_key(Config, [ResourceServerId, <<"mykey-3-1">>, <<"some key 3-1">>]), - NewKeys = #{<<"key-2">> => <<"some key 2">>, <<"key-3">> => <<"some key 3">>}, - call_replace_signing_keys(Config, [ResourceServerId, NewKeys]), - #{<<"key-2">> := <<"some key 2">>, <<"key-3">> := <<"some key 3">>} = call_get_signing_keys(Config, [ResourceServerId]). +get_resource_server_id_for_rabbit_audience_returns_rabbit(_Config) -> + ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_resource_server_id_for_audience(?RABBITMQ)). +get_resource_server_id_for_none_audience_returns_rabbit(_Config) -> + ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_resource_server_id_for_audience(none)). 
+get_resource_server_id_for_unknown_audience_returns_rabbit(_Config) -> + ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_resource_server_id_for_audience(<<"unknown">>)). -get_default_resource_server_id_returns_error(_Config) -> - {error, _} = rabbit_oauth2_config:get_default_resource_server_id(). +get_resource_server_id_for_none_audience_should_fail(_Config) -> + ?assertEqual({error, no_matching_aud_found}, rabbit_oauth2_config:get_resource_server_id_for_audience(none)). +get_resource_server_id_for_unknown_audience_should_fail(_Config) -> + ?assertEqual({error, no_matching_aud_found}, rabbit_oauth2_config:get_resource_server_id_for_audience(<<"unknown">>)). get_default_resource_server_id(_Config) -> - ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_default_resource_server_id()). + ?assertEqual(?RABBITMQ, rabbit_oauth2_config:get_default_resource_server_id()). get_allowed_resource_server_ids_returns_empty_list(_Config) -> - [] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + [] = rabbit_oauth2_config:get_allowed_resource_server_ids(). get_allowed_resource_server_ids_returns_resource_server_id(_Config) -> - [?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + [?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). get_allowed_resource_server_ids_returns_all_resource_servers_ids(_Config) -> - [ <<"rabbitmq1">>, <<"rabbitmq2">>, ?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). + [ <<"rabbitmq1">>, <<"rabbitmq2">>, ?RABBITMQ] = rabbit_oauth2_config:get_allowed_resource_server_ids(). get_allowed_resource_server_ids_returns_resource_servers_ids(_Config) -> - [<<"rabbitmq-0">>, <<"rabbitmq-1">>, <<"rabbitmq1">>, <<"rabbitmq2">> ] = - lists:sort(rabbit_oauth2_config:get_allowed_resource_server_ids()). + [<<"rabbitmq-0">>, <<"rabbitmq-1">>, <<"rabbitmq1">>, <<"rabbitmq2">> ] = + lists:sort(rabbit_oauth2_config:get_allowed_resource_server_ids()). 
index_resource_servers_by_id_else_by_key(_Config) -> - {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"0">>), - {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq-0">>]), - {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq-0">>). + {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"0">>), + {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq-0">>]), + {ok, <<"rabbitmq-0">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq-0">>). find_audience_in_resource_server_ids_returns_key_not_found(_Config) -> - {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ). + {error, no_matching_aud_found} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ). find_audience_in_resource_server_ids_returns_found_too_many(_Config) -> - {error, only_one_resource_server_as_audience_found_many} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"rabbitmq1">>]). + {error, only_one_resource_server_as_audience_found_many} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"rabbitmq1">>]). find_audience_in_resource_server_ids_found_one_resource_servers(_Config) -> - {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq1">>), - {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq1">>, <<"other">>]). + {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq1">>), + {ok, <<"rabbitmq1">>} = rabbit_oauth2_config:find_audience_in_resource_server_ids([<<"rabbitmq1">>, <<"other">>]). 
find_audience_in_resource_server_ids_found_resource_server_id(_Config) -> - {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ), - {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"other">>]). + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(?RABBITMQ), + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids([?RABBITMQ, <<"other">>]). find_audience_in_resource_server_ids_using_binary_audience(_Config) -> - {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq other">>). - -get_key_config(_Config) -> - RootKeyConfig = rabbit_oauth2_config:get_key_config(<<"rabbitmq-2">>), - ?assertEqual(<<"https://oauth-for-rabbitmq">>, proplists:get_value(jwks_url, RootKeyConfig)), - - KeyConfig = rabbit_oauth2_config:get_key_config(<<"rabbitmq1">>), - ?assertEqual(<<"https://oauth-for-rabbitmq1">>, proplists:get_value(jwks_url, KeyConfig)). + {ok, ?RABBITMQ} = rabbit_oauth2_config:find_audience_in_resource_server_ids(<<"rabbitmq other">>). get_additional_scopes_key(_Config) -> - ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key()), - ?assertEqual({ok, <<"extra-scope-1">>}, rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq1">> )), - ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)), - ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key(?RABBITMQ)). + ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key()), + ?assertEqual({ok, <<"extra-scope-1">>}, rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq1">> )), + ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)), + ?assertEqual({ok, <<"roles">>}, rabbit_oauth2_config:get_additional_scopes_key(?RABBITMQ)). 
get_additional_scopes_key_when_not_defined(_Config) -> - ?assertEqual({error, not_found}, rabbit_oauth2_config:get_additional_scopes_key()), - ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)). + ?assertEqual({error, not_found}, rabbit_oauth2_config:get_additional_scopes_key()), + ?assertEqual(rabbit_oauth2_config:get_additional_scopes_key(), rabbit_oauth2_config:get_additional_scopes_key(<<"rabbitmq2">>)). is_verify_aud(_Config) -> - ?assertEqual(true, rabbit_oauth2_config:is_verify_aud()), - ?assertEqual(rabbit_oauth2_config:is_verify_aud(?RABBITMQ), rabbit_oauth2_config:is_verify_aud()), - ?assertEqual(false, rabbit_oauth2_config:is_verify_aud(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). + ?assertEqual(true, rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(?RABBITMQ), rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). +is_verify_aud_for_resource_one_returns_false(_Config) -> + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud(?RABBITMQ_RESOURCE_ONE)). + +is_verify_aud_for_resource_two_returns_true(_Config) -> + ?assertEqual(true, rabbit_oauth2_config:is_verify_aud(?RABBITMQ_RESOURCE_TWO)). is_verify_aud_when_is_false(_Config) -> - ?assertEqual(false, rabbit_oauth2_config:is_verify_aud()), - ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud()), + ?assertEqual(rabbit_oauth2_config:is_verify_aud(), rabbit_oauth2_config:is_verify_aud(<<"rabbitmq2">>)). 
+ +is_verify_aud_for_resource_one_returns_true(_Config) -> + ?assertEqual(true, rabbit_oauth2_config:is_verify_aud(?RABBITMQ_RESOURCE_ONE)). +is_verify_aud_for_resource_two_returns_false(_Config) -> + ?assertEqual(false, rabbit_oauth2_config:is_verify_aud(?RABBITMQ_RESOURCE_TWO)). get_default_preferred_username_claims(_Config) -> - ?assertEqual(rabbit_oauth2_config:get_default_preferred_username_claims(), rabbit_oauth2_config:get_preferred_username_claims()). + ?assertEqual(rabbit_oauth2_config:get_default_preferred_username_claims(), rabbit_oauth2_config:get_preferred_username_claims()). get_preferred_username_claims(_Config) -> - ?assertEqual([<<"username">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), - rabbit_oauth2_config:get_preferred_username_claims()), - ?assertEqual([<<"email-address">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), - rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:get_preferred_username_claims(), - rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq2">>)). + ?assertEqual([<<"username">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims()), + ?assertEqual([<<"email-address">>] ++ rabbit_oauth2_config:get_default_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_preferred_username_claims(), + rabbit_oauth2_config:get_preferred_username_claims(<<"rabbitmq2">>)). get_scope_prefix_when_not_defined(_Config) -> - ?assertEqual(<<"rabbitmq.">>, rabbit_oauth2_config:get_scope_prefix()), - ?assertEqual(<<"rabbitmq2.">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). + ?assertEqual(<<"rabbitmq.">>, rabbit_oauth2_config:get_scope_prefix()), + ?assertEqual(<<"rabbitmq2.">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). 
+ +get_empty_scope_prefix(_Config) -> + ?assertEqual(<<"">>, rabbit_oauth2_config:get_scope_prefix()), + ?assertEqual(<<"">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). get_scope_prefix(_Config) -> - ?assertEqual(<<"some-prefix-">>, rabbit_oauth2_config:get_scope_prefix()), - ?assertEqual(<<"my-prefix:">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:get_scope_prefix(), rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). + ?assertEqual(<<"some-prefix-">>, rabbit_oauth2_config:get_scope_prefix()), + ?assertEqual(<<"my-prefix:">>, rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_scope_prefix(), rabbit_oauth2_config:get_scope_prefix(<<"rabbitmq2">>)). + +get_scope_prefix_for_resource_one_returns_default_scope_prefix(_Config) -> + ?assertEqual(undefined, application:get_env(rabbitmq_auth_backend_oauth2, scope_prefix)), + ?assertEqual(append_paths(?RABBITMQ_RESOURCE_ONE, <<".">>), + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_ONE)). +get_scope_prefix_for_resource_one_returns_root_scope_prefix(_Config) -> + {ok, Prefix} = application:get_env(rabbitmq_auth_backend_oauth2, scope_prefix), + ?assertEqual(rabbit_oauth2_config:get_scope_prefix(), + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_ONE)), + ?assertEqual(Prefix, + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_ONE)). +get_scope_prefix_for_resource_one_returns_empty_scope_prefix(_Config) -> + ?assertEqual(<<"">>, + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_ONE)). +get_scope_prefix_for_resource_two_returns_root_scope_prefix(_Config) -> + {ok, Prefix} = application:get_env(rabbitmq_auth_backend_oauth2, scope_prefix), + ?assertEqual(rabbit_oauth2_config:get_scope_prefix(), + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_TWO)), + ?assertEqual(Prefix, + rabbit_oauth2_config:get_scope_prefix(?RABBITMQ_RESOURCE_TWO)). 
get_resource_server_type_when_not_defined(_Config) -> - ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type()), - ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). + ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type()), + ?assertEqual(<<>>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). get_resource_server_type(_Config) -> - ?assertEqual(<<"rabbitmq-type">>, rabbit_oauth2_config:get_resource_server_type()), - ?assertEqual(<<"my-type">>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:get_resource_server_type(), rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). + ?assertEqual(<<"rabbitmq-type">>, rabbit_oauth2_config:get_resource_server_type()), + ?assertEqual(<<"my-type">>, rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_resource_server_type(), rabbit_oauth2_config:get_resource_server_type(<<"rabbitmq2">>)). has_scope_aliases_when_not_defined(_Config) -> - ?assertEqual(false, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), - ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). + ?assertEqual(false, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), + ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). has_scope_aliases(_Config) -> - ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), - ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). 
+ ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(?RABBITMQ)), + ?assertEqual(true, rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:has_scope_aliases(?RABBITMQ), rabbit_oauth2_config:has_scope_aliases(<<"rabbitmq2">>)). get_scope_aliases(_Config) -> - ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(?RABBITMQ)), - ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq1">>)), - ?assertEqual(rabbit_oauth2_config:get_scope_aliases(?RABBITMQ), rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq2">>)). - -get_oauth_provider_should_fail(_Config) -> - {error, _Message} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]). + ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(?RABBITMQ)), + ?assertEqual(#{}, rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq1">>)), + ?assertEqual(rabbit_oauth2_config:get_scope_aliases(?RABBITMQ), rabbit_oauth2_config:get_scope_aliases(<<"rabbitmq2">>)). + +get_default_key_should_fail(_Config) -> + {error, no_default_key_configured} = rabbit_oauth2_config:get_default_key(). +get_default_key(_Config) -> + {ok, <<"default-key">>} = rabbit_oauth2_config:get_default_key(). +get_default_key_for_provider_A_should_fail(_Config) -> + {error, no_default_key_configured} = rabbit_oauth2_config:get_default_key(<<"A">>). +get_default_key_for_provider_A(_Config) -> + {ok, <<"A-default-key">>} = rabbit_oauth2_config:get_default_key(<<"A">>). + +get_signing_keys(_Config) -> + #{<<"mykey-1-1">> := <<"some key 1-1">>, + <<"mykey-1-2">> := <<"some key 1-2">>} = rabbit_oauth2_config:get_signing_keys(), + <<"some key 1-1">> = rabbit_oauth2_config:get_signing_key(<<"mykey-1-1">>), + undefined = rabbit_oauth2_config:get_signing_key(<<"unknown">>). 
+get_signing_keys_for_oauth_provider_A(_Config) -> + #{<<"A-mykey-1-1">> := <<"A-some key 1-1">>, + <<"A-mykey-1-2">> := <<"A-some key 1-2">>} = rabbit_oauth2_config:get_signing_keys(<<"A">>), + <<"A-some key 1-1">> = rabbit_oauth2_config:get_signing_key(<<"A-mykey-1-1">>, <<"A">>), + undefined = rabbit_oauth2_config:get_signing_key(<<"unknown">>, <<"A">>). + +get_algorithms_should_return_undefined(_Config) -> + undefined = rabbit_oauth2_config:get_algorithms(). +get_algorithms(Config) -> + ?assertEqual(?config(algorithms, Config), rabbit_oauth2_config:get_algorithms()). +get_algorithms_for_provider_A_should_return_undefined(_Config) -> + undefined = rabbit_oauth2_config:get_algorithms(<<"A">>). +get_algorithms_for_provider_A(Config) -> + ?assertEqual(?config(algorithms, Config), rabbit_oauth2_config:get_algorithms(<<"A">>)). + +get_oauth_provider_root_with_jwks_uri_should_fail(_Config) -> + root = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {error, _Message} = rabbit_oauth2_config:get_oauth_provider(root, [jwks_uri]). +get_oauth_provider_A_with_jwks_uri_should_fail(_Config) -> + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {error, _Message} = rabbit_oauth2_config:get_oauth_provider(<<"A">>, [jwks_uri]). get_oauth_provider_should_return_root_oauth_provider_with_jwks_uri(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri). + root = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(root, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri). 
get_oauth_provider_for_both_resources_should_return_root_oauth_provider(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri). + root = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_ONE), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(root, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), + root = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_TWO). get_oauth_provider_for_resource_one_should_return_oauth_provider_A(_Config) -> - {ok, ResourceServers} = application:get_env(rabbitmq_auth_backend_oauth2, resource_servers), - ct:log("ResourceServers : ~p", [ResourceServers]), - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_ONE), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"A">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). 
get_oauth_provider_for_both_resources_should_return_oauth_provider_A(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_ONE, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_ONE), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"A">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_TWO). get_oauth_provider_for_resource_two_should_return_oauth_provider_B(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ_RESOURCE_TWO, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). + <<"B">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ_RESOURCE_TWO), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"B">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). get_oauth_provider_should_return_root_oauth_provider_with_all_discovered_endpoints(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), - ?assertEqual(build_url_to_oauth_provider(<<"/">>), OAuthProvider#oauth_provider.issuer). 
+ root = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(root, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/">>), OAuthProvider#oauth_provider.issuer). append_paths(Path1, Path2) -> - erlang:iolist_to_binary([Path1, Path2]). + erlang:iolist_to_binary([Path1, Path2]). get_oauth_provider_should_return_oauth_provider_B_with_jwks_uri(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). + <<"B">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"B">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri). get_oauth_provider_should_return_oauth_provider_B_with_all_discovered_endpoints(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri), - ?assertEqual(build_url_to_oauth_provider(<<"/B">>), OAuthProvider#oauth_provider.issuer). + <<"B">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"B">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/B/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/B">>), OAuthProvider#oauth_provider.issuer). 
get_oauth_provider_should_return_oauth_provider_A_with_jwks_uri(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"A">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri). get_oauth_provider_should_return_oauth_provider_A_with_all_discovered_endpoints(_Config) -> - {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider_for_resource_server_id(?RABBITMQ, [jwks_uri]), - ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), - ?assertEqual(build_url_to_oauth_provider(<<"/A">>), OAuthProvider#oauth_provider.issuer). + <<"A">> = rabbit_oauth2_config:get_oauth_provider_id_for_resource_server_id(?RABBITMQ), + {ok, OAuthProvider} = rabbit_oauth2_config:get_oauth_provider(<<"A">>, [jwks_uri]), + ?assertEqual(build_url_to_oauth_provider(<<"/A/keys">>), OAuthProvider#oauth_provider.jwks_uri), + ?assertEqual(build_url_to_oauth_provider(<<"/A">>), OAuthProvider#oauth_provider.issuer). get_openid_configuration_expectations() -> [ {get_root_openid_configuration, @@ -675,31 +984,31 @@ get_openid_configuration_expectations() -> ]. 
start_https_oauth_server(Port, CertsDir, Expectations) when is_list(Expectations) -> - Dispatch = cowboy_router:compile([ - {'_', [{Path, oauth2_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} - ]), - ct:log("start_https_oauth_server (port:~p) with expectation list : ~p -> dispatch: ~p", [Port, Expectations, Dispatch]), - {ok, Pid} = cowboy:start_tls( - mock_http_auth_listener, + Dispatch = cowboy_router:compile([ + {'_', [{Path, oauth2_http_mock, Expected} || #{request := #{path := Path}} = Expected <- Expectations ]} + ]), + ct:log("start_https_oauth_server (port:~p) with expectation list : ~p -> dispatch: ~p", [Port, Expectations, Dispatch]), + {ok, Pid} = cowboy:start_tls( + mock_http_auth_listener, [{port, Port}, {certfile, filename:join([CertsDir, "server", "cert.pem"])}, {keyfile, filename:join([CertsDir, "server", "key.pem"])} ], #{env => #{dispatch => Dispatch}}), - ct:log("Started on Port ~p and pid ~p", [ranch:get_port(mock_http_auth_listener), Pid]). + ct:log("Started on Port ~p and pid ~p", [ranch:get_port(mock_http_auth_listener), Pid]). build_url_to_oauth_provider(Path) -> - uri_string:recompose(#{scheme => "https", + uri_string:recompose(#{scheme => "https", host => "localhost", port => rabbit_data_coercion:to_integer(?AUTH_PORT), path => Path}). stop_http_auth_server() -> - cowboy:stop_listener(mock_http_auth_listener). + cowboy:stop_listener(mock_http_auth_listener). -spec ssl_options(ssl:verify_type(), boolean(), file:filename()) -> list(). 
ssl_options(PeerVerification, FailIfNoPeerCert, CaCertFile) -> - [{verify, PeerVerification}, + [{verify, PeerVerification}, {depth, 10}, {fail_if_no_peer_cert, FailIfNoPeerCert}, {crl_check, false}, diff --git a/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE.erl new file mode 100644 index 000000000000..58e69c334d83 --- /dev/null +++ b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE.erl @@ -0,0 +1,183 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_oauth2_schema_SUITE). + +-compile(export_all). + +-include_lib("rabbit_common/include/rabbit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + + +all() -> + [ + test_without_oauth_providers, + test_with_one_oauth_provider, + test_with_many_oauth_providers, + test_oauth_providers_attributes, + test_oauth_providers_attributes_with_invalid_uri, + test_oauth_providers_algorithms, + test_oauth_providers_https, + test_oauth_providers_https_with_missing_cacertfile, + test_oauth_providers_signing_keys, + test_without_resource_servers, + test_with_one_resource_server, + test_with_many_resource_servers, + test_resource_servers_attributes + + ]. + + +test_without_oauth_providers(_) -> + #{} = rabbit_oauth2_schema:translate_oauth_providers([]). + +test_without_resource_servers(_) -> + #{} = rabbit_oauth2_schema:translate_resource_servers([]). 
+ +test_with_one_oauth_provider(_) -> + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://rabbit"} + ], + #{<<"keycloak">> := [{issuer, <<"https://rabbit">>}] + } = rabbit_oauth2_schema:translate_oauth_providers(Conf). + +test_with_one_resource_server(_) -> + Conf = [{["auth_oauth2","resource_servers","rabbitmq1","id"],"rabbitmq1"} + ], + #{<<"rabbitmq1">> := [{id, <<"rabbitmq1">>}] + } = rabbit_oauth2_schema:translate_resource_servers(Conf). + +test_with_many_oauth_providers(_) -> + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","uaa","issuer"],"https://uaa"} + ], + #{<<"keycloak">> := [{issuer, <<"https://keycloak">>} + ], + <<"uaa">> := [{issuer, <<"https://uaa">>} + ] + } = rabbit_oauth2_schema:translate_oauth_providers(Conf). + + +test_with_many_resource_servers(_) -> + Conf = [{["auth_oauth2","resource_servers","rabbitmq1","id"],"rabbitmq1"}, + {["auth_oauth2","resource_servers","rabbitmq2","id"],"rabbitmq2"} + ], + #{<<"rabbitmq1">> := [{id, <<"rabbitmq1">>} + ], + <<"rabbitmq2">> := [{id, <<"rabbitmq2">>} + ] + } = rabbit_oauth2_schema:translate_resource_servers(Conf). + +test_oauth_providers_attributes(_) -> + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","default_key"],"token-key"} + ], + #{<<"keycloak">> := [{default_key, <<"token-key">>}, + {issuer, <<"https://keycloak">>} + ] + } = sort_settings(rabbit_oauth2_schema:translate_oauth_providers(Conf)). 
+ +test_resource_servers_attributes(_) -> + Conf = [{["auth_oauth2","resource_servers","rabbitmq1","id"],"rabbitmq1xxx"}, + {["auth_oauth2","resource_servers","rabbitmq1","scope_prefix"],"somescope."}, + {["auth_oauth2","resource_servers","rabbitmq1","additional_scopes_key"],"roles"}, + {["auth_oauth2","resource_servers","rabbitmq1","preferred_username_claims","1"],"userid"}, + {["auth_oauth2","resource_servers","rabbitmq1","preferred_username_claims","2"],"groupid"} + ], + #{<<"rabbitmq1xxx">> := [{additional_scopes_key, <<"roles">>}, + {id, <<"rabbitmq1xxx">>}, + {preferred_username_claims, [<<"userid">>, <<"groupid">>]}, + {scope_prefix, <<"somescope.">>} + ] + } = sort_settings(rabbit_oauth2_schema:translate_resource_servers(Conf)), + + Conf2 = [ + {["auth_oauth2","resource_servers","rabbitmq1","scope_prefix"],"somescope."}, + {["auth_oauth2","resource_servers","rabbitmq1","additional_scopes_key"],"roles"}, + {["auth_oauth2","resource_servers","rabbitmq1","preferred_username_claims","1"],"userid"}, + {["auth_oauth2","resource_servers","rabbitmq1","preferred_username_claims","2"],"groupid"} + ], + #{<<"rabbitmq1">> := [{additional_scopes_key, <<"roles">>}, + {id, <<"rabbitmq1">>}, + {preferred_username_claims, [<<"userid">>, <<"groupid">>]}, + {scope_prefix, <<"somescope.">>} + ] + } = sort_settings(rabbit_oauth2_schema:translate_resource_servers(Conf2)). + +test_oauth_providers_attributes_with_invalid_uri(_) -> + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"http://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","default_key"],"token-key"} + ], + try sort_settings(rabbit_oauth2_schema:translate_oauth_providers(Conf)) of + _ -> {throw, should_have_failed} + catch + _ -> ok + end. 
+ +test_oauth_providers_algorithms(_) -> + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","algorithms","2"],"HS256"}, + {["auth_oauth2","oauth_providers","keycloak","algorithms","1"],"RS256"} + ], + #{<<"keycloak">> := [{algorithms, [<<"RS256">>, <<"HS256">>]}, + {issuer, <<"https://keycloak">>} + ] + } = sort_settings(rabbit_oauth2_schema:translate_oauth_providers(Conf)). + +test_oauth_providers_https(Conf) -> + + CuttlefishConf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","https","verify"],verify_none}, + {["auth_oauth2","oauth_providers","keycloak","https","peer_verification"],verify_peer}, + {["auth_oauth2","oauth_providers","keycloak","https","depth"],2}, + {["auth_oauth2","oauth_providers","keycloak","https","hostname_verification"],wildcard}, + {["auth_oauth2","oauth_providers","keycloak","https","crl_check"],false}, + {["auth_oauth2","oauth_providers","keycloak","https","fail_if_no_peer_cert"],true}, + {["auth_oauth2","oauth_providers","keycloak","https","cacertfile"],cert_filename(Conf)} + ], + #{<<"keycloak">> := [{https, [{verify, verify_none}, + {peer_verification, verify_peer}, + {depth, 2}, + {hostname_verification, wildcard}, + {crl_check, false}, + {fail_if_no_peer_cert, true}, + {cacertfile, _CaCertFile} + ]}, + {issuer, <<"https://keycloak">>} + ] + } = sort_settings(rabbit_oauth2_schema:translate_oauth_providers(CuttlefishConf)). + +test_oauth_providers_https_with_missing_cacertfile(_) -> + + Conf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","https","cacertfile"],"/non-existent.pem"} + ], + try sort_settings(rabbit_oauth2_schema:translate_oauth_providers(Conf)) of + _ -> {throw, should_have_failed} + catch + _ -> ok + end. 
+ +test_oauth_providers_signing_keys(Conf) -> + CuttlefishConf = [{["auth_oauth2","oauth_providers","keycloak","issuer"],"https://keycloak"}, + {["auth_oauth2","oauth_providers","keycloak","signing_keys","2"], cert_filename(Conf)}, + {["auth_oauth2","oauth_providers","keycloak","signing_keys","1"], cert_filename(Conf)} + ], + #{<<"keycloak">> := [{issuer, <<"https://keycloak">>}, + {signing_keys, SigningKeys} + ] + } = sort_settings(rabbit_oauth2_schema:translate_oauth_providers(CuttlefishConf)), + ct:log("SigningKey: ~p", [SigningKeys]), + #{<<"1">> := {pem, <<"I'm not a certificate">>}, + <<"2">> := {pem, <<"I'm not a certificate">>} + } = SigningKeys. + +cert_filename(Conf) -> + string:concat(?config(data_dir, Conf), "certs/cert.pem"). + +sort_settings(MapOfListOfSettings) -> + maps:map(fun(_K,List) -> + lists:sort(fun({K1,_}, {K2,_}) -> K1 < K2 end, List) end, MapOfListOfSettings). diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/cacert.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem rename to deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/cacert.pem diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/cert.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem rename to deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/cert.pem diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/key.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem rename to 
deps/rabbitmq_auth_backend_oauth2/test/rabbit_oauth2_schema_SUITE_data/certs/key.pem diff --git a/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl index 9e1b8159e345..e17a76281411 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/system_SUITE.erl @@ -43,7 +43,11 @@ groups() -> test_failed_connection_with_a_non_token, test_failed_connection_with_a_token_with_insufficient_vhost_permission, test_failed_connection_with_a_token_with_insufficient_resource_permission, - more_than_one_resource_server_id_not_allowed_in_one_token + more_than_one_resource_server_id_not_allowed_in_one_token, + mqtt_expired_token, + mqtt_expirable_token, + web_mqtt_expirable_token, + amqp_expirable_token ]}, {token_refresh, [], [ @@ -422,15 +426,124 @@ mqtt(Config) -> {ok, Pub} = emqtt:start_link([{clientid, <<"mqtt-publisher">>} | Opts]), {ok, _} = emqtt:connect(Pub), {ok, _} = emqtt:publish(Pub, Topic, Payload, at_least_once), - receive - {publish, #{client_pid := Sub, - topic := Topic, - payload := Payload}} -> ok + receive {publish, #{client_pid := Sub, + topic := Topic, + payload := Payload}} -> ok after 1000 -> ct:fail("no publish received") end, ok = emqtt:disconnect(Sub), ok = emqtt:disconnect(Pub). +mqtt_expired_token(Config) -> + {_Algo, Token} = generate_expired_token(Config), + Opts = [{port, rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt)}, + {proto_ver, v5}, + {username, <<"">>}, + {password, Token}], + ClientId = atom_to_binary(?FUNCTION_NAME), + {ok, C} = emqtt:start_link([{clientid, ClientId} | Opts]), + true = unlink(C), + ?assertMatch({error, {bad_username_or_password, _}}, + emqtt:connect(C)). + +mqtt_expirable_token(Config) -> + mqtt_expirable_token0(tcp_port_mqtt, + [], + fun emqtt:connect/1, + Config). 
+ +web_mqtt_expirable_token(Config) -> + mqtt_expirable_token0(tcp_port_web_mqtt, + [{ws_path, "/ws"}], + fun emqtt:ws_connect/1, + Config). + +mqtt_expirable_token0(Port, AdditionalOpts, Connect, Config) -> + Topic = <<"test/topic">>, + Payload = <<"mqtt-test-message">>, + + Seconds = 4, + Millis = Seconds * 1000, + {_Algo, Token} = generate_expirable_token(Config, + [<<"rabbitmq.configure:*/*/*">>, + <<"rabbitmq.write:*/*/*">>, + <<"rabbitmq.read:*/*/*">>], + Seconds), + + Opts = [{port, rabbit_ct_broker_helpers:get_node_config(Config, 0, Port)}, + {proto_ver, v5}, + {username, <<"">>}, + {password, Token}] ++ AdditionalOpts, + {ok, Sub} = emqtt:start_link([{clientid, <<"my subscriber">>} | Opts]), + {ok, _} = Connect(Sub), + {ok, _, [1]} = emqtt:subscribe(Sub, Topic, at_least_once), + {ok, Pub} = emqtt:start_link([{clientid, <<"my publisher">>} | Opts]), + {ok, _} = Connect(Pub), + {ok, _} = emqtt:publish(Pub, Topic, Payload, at_least_once), + receive {publish, #{client_pid := Sub, + topic := Topic, + payload := Payload}} -> ok + after 1000 -> ct:fail("no publish received") + end, + + %% reason code "Maximum connect time" defined in + %% https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208 + ReasonCode = 16#A0, + true = unlink(Sub), + true = unlink(Pub), + + %% In 4 seconds from now, we expect that RabbitMQ disconnects us because our token expired. + receive {disconnected, ReasonCode, _} -> ok + after Millis * 2 -> ct:fail("missing DISCONNECT packet from server") + end, + receive {disconnected, ReasonCode, _} -> ok + after Millis * 2 -> ct:fail("missing DISCONNECT packet from server") + end. + +amqp_expirable_token(Config) -> + {ok, _} = application:ensure_all_started(rabbitmq_amqp_client), + + Seconds = 4, + Millis = Seconds * 1000, + {_Algo, Token} = generate_expirable_token(Config, + [<<"rabbitmq.configure:*/*">>, + <<"rabbitmq.write:*/*">>, + <<"rabbitmq.read:*/*">>], + Seconds), + + %% Send and receive a message via AMQP 1.0. 
+ QName = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(QName), + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + OpnConf = #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"">>, Token}}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {ok, _} = rabbitmq_amqp_client:declare_queue(LinkPair, QName, #{}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"my sender">>, Address), + receive {amqp10_event, {link, Sender, credited}} -> ok + after 5000 -> ct:fail({missing_event, ?LINE}) + end, + Body = <<"hey">>, + Msg0 = amqp10_msg:new(<<"tag">>, Body), + ok = amqp10_client:send_msg(Sender, Msg0), + {ok, Receiver} = amqp10_client:attach_receiver_link(Session, <<"my receiver">>, Address), + {ok, Msg} = amqp10_client:get_msg(Receiver), + ?assertEqual([Body], amqp10_msg:body(Msg)), + + %% In 4 seconds from now, we expect that RabbitMQ disconnects us because our token expired. + receive {amqp10_event, + {connection, Connection, + {closed, {unauthorized_access, <<"credential expired">>}}}} -> + ok + after Millis * 2 -> + ct:fail("server did not close our connection") + end. 
+ test_successful_connection_with_complex_claim_as_a_map(Config) -> {_Algo, Token} = generate_valid_token_with_extra_fields( Config, diff --git a/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl index 2efc81f0fe98..c8b3f296e213 100644 --- a/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl +++ b/deps/rabbitmq_auth_backend_oauth2/test/unit_SUITE.erl @@ -59,6 +59,7 @@ groups() -> test_successful_access_with_a_token_that_uses_multiple_scope_aliases_in_extra_scope_source_field, test_post_process_token_payload_complex_claims, test_successful_access_with_a_token_that_uses_single_scope_alias_in_scope_field_and_custom_scope_prefix + ]} ]. @@ -1128,7 +1129,7 @@ test_incorrect_kid(_) -> Username = <<"username">>, Jwk = ?UTIL_MOD:fixture_jwk(), application:set_env(rabbitmq_auth_backend_oauth2, resource_server_id, <<"rabbitmq">>), - Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk, AltKid), + Token = ?UTIL_MOD:sign_token_hs(?UTIL_MOD:token_with_sub(?UTIL_MOD:fixture_token(), Username), Jwk, AltKid, true), ?assertMatch({refused, "Authentication using an OAuth 2/JWT token failed: ~tp", [{error,{missing_oauth_provider_attributes, [issuer]}}]}, rabbit_auth_backend_oauth2:user_login_authentication(Username, #{password => Token})). @@ -1269,7 +1270,7 @@ test_validate_payload_resource_server_id_mismatch(_) -> rabbit_auth_backend_oauth2:validate_payload(?RESOURCE_SERVER_ID, EmptyAud, ?DEFAULT_SCOPE_PREFIX)). 
test_validate_payload_with_scope_prefix(_) -> - Scenarios = [ { <<>>, + Scenarios = [ { <<"">>, #{<<"aud">> => [?RESOURCE_SERVER_ID], <<"scope">> => [<<"foo">>, <<"foo.bar">>, <<"foo.other.third">> ]}, [<<"foo">>, <<"foo.bar">>, <<"foo.other.third">> ] diff --git a/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel b/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel index 778774f9e63b..6127cccd64ec 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel +++ b/deps/rabbitmq_auth_mechanism_ssl/BUILD.bazel @@ -1,17 +1,22 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") load("@rules_erlang//:xref2.bzl", "xref") load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") load( "//:rabbitmq.bzl", "BROKER_VERSION_REQUIREMENTS_ANY", "RABBITMQ_DIALYZER_OPTS", "assert_suites", "rabbitmq_app", + "rabbitmq_integration_suite", ) load( ":app.bzl", "all_beam_files", "all_srcs", "all_test_beam_files", + "test_suite_beam_files", ) APP_NAME = "rabbitmq_auth_mechanism_ssl" @@ -26,7 +31,7 @@ APP_ENV = """[ all_beam_files(name = "all_beam_files") -all_test_beam_files() +all_test_beam_files(name = "all_test_beam_files") all_srcs(name = "all_srcs") @@ -70,6 +75,28 @@ dialyze( target = ":erlang_app", ) +rabbitmq_home( + name = "broker-for-tests-home", + testonly = True, + plugins = [ + ":test_erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + testonly = True, + home = ":broker-for-tests-home", +) + +rabbitmq_integration_suite( + name = "system_SUITE", + shard_count = 1, + runtime_deps = [ + "//deps/amqp10_client:erlang_app", + ], +) + assert_suites() alias( @@ -77,3 +104,10 @@ alias( actual = ":erlang_app", visibility = ["//visibility:public"], ) + +test_suite_beam_files(name = "test_suite_beam_files") + +eunit( + name = "eunit", + target = ":test_erlang_app", +) diff --git a/deps/rabbitmq_auth_mechanism_ssl/Makefile b/deps/rabbitmq_auth_mechanism_ssl/Makefile index 
9b540fdaf716..f6705d7c3a6a 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/Makefile +++ b/deps/rabbitmq_auth_mechanism_ssl/Makefile @@ -14,6 +14,7 @@ endef LOCAL_DEPS = public_key DEPS = rabbit_common rabbit +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp10_client DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk diff --git a/deps/rabbitmq_auth_mechanism_ssl/README.md b/deps/rabbitmq_auth_mechanism_ssl/README.md index 522ebb193cd1..68aff0e462c0 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/README.md +++ b/deps/rabbitmq_auth_mechanism_ssl/README.md @@ -18,7 +18,7 @@ present a client certificate. ## Usage This mechanism must also be enabled in RabbitMQ's configuration file, -see [Authentication Mechanisms](https://www.rabbitmq.com/authentication.html) and +see [Authentication Mechanisms](https://www.rabbitmq.com/docs/access-control/) and [Configuration](https://www.rabbitmq.com/configure.html) guides for more details. 
diff --git a/deps/rabbitmq_auth_mechanism_ssl/app.bzl b/deps/rabbitmq_auth_mechanism_ssl/app.bzl index 6a95279a2cff..335857be922e 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/app.bzl +++ b/deps/rabbitmq_auth_mechanism_ssl/app.bzl @@ -75,4 +75,11 @@ def all_test_beam_files(name = "all_test_beam_files"): ) def test_suite_beam_files(name = "test_suite_beam_files"): - pass + erlang_bytecode( + name = "system_SUITE_beam_files", + testonly = True, + srcs = ["test/system_SUITE.erl"], + outs = ["test/system_SUITE.beam"], + app_name = "rabbitmq_auth_mechanism_ssl", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl index 6fc78d9bdeb3..11a7e79ee700 100644 --- a/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl +++ b/deps/rabbitmq_auth_mechanism_ssl/src/rabbit_auth_mechanism_ssl.erl @@ -23,7 +23,9 @@ {cleanup, {rabbit_registry, unregister, [auth_mechanism, <<"EXTERNAL">>]}}]}). --record(state, {username = undefined}). +-record(state, { + username = undefined :: undefined | rabbit_types:username() | {refused, none, string(), [term()]} + }). description() -> [{description, <<"TLS peer verification-based authentication plugin. Used in combination with the EXTERNAL SASL mechanism.">>}]. diff --git a/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl b/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl new file mode 100644 index 000000000000..402704fbfe89 --- /dev/null +++ b/deps/rabbitmq_auth_mechanism_ssl/test/system_SUITE.erl @@ -0,0 +1,124 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 VMware, Inc. or its affiliates. All rights reserved. + +-module(system_SUITE). + +-compile([export_all, + nowarn_export_all]). 
+ +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +all() -> + [{group, external_enforced}]. + +groups() -> + [ + {external_enforced, [shuffle], + [external_succeeds, + anonymous_fails] + } + ]. + +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + Config. + +end_per_suite(Config) -> + Config. + +init_per_group(_Group, Config0) -> + %% Command `deps/rabbitmq_ct_helpers/tools/tls-certs$ make` + %% will put our hostname as common name in the client cert. + Config1 = rabbit_ct_helpers:merge_app_env( + Config0, + {rabbit, + [ + %% Enforce EXTERNAL disallowing other mechanisms. + {auth_mechanisms, ['EXTERNAL']}, + {ssl_cert_login_from, common_name} + ]}), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), + {ok, UserString} = inet:gethostname(), + User = unicode:characters_to_binary(UserString), + ok = rabbit_ct_broker_helpers:add_user(Config, User), + Vhost = <<"test vhost">>, + ok = rabbit_ct_broker_helpers:add_vhost(Config, Vhost), + [{test_vhost, Vhost}, + {test_user, User}] ++ Config. + +end_per_group(_Group, Config) -> + ok = rabbit_ct_broker_helpers:delete_user(Config, ?config(test_user, Config)), + ok = rabbit_ct_broker_helpers:delete_vhost(Config, ?config(test_vhost, Config)), + rabbit_ct_helpers:run_teardown_steps( + Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + ok = set_permissions(Config, <<>>, <<>>, <<"^some vhost permission">>), + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + ok = clear_permissions(Config), + rabbit_ct_helpers:testcase_finished(Config, Testcase). 
+ +external_succeeds(Config) -> + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp_tls), + Host = ?config(rmq_hostname, Config), + Vhost = ?config(test_vhost, Config), + CACertFile = ?config(rmq_certsdir, Config) ++ "/testca/cacert.pem", + CertFile = ?config(rmq_certsdir, Config) ++ "/client/cert.pem", + KeyFile = ?config(rmq_certsdir, Config) ++ "/client/key.pem", + OpnConf = #{address => Host, + port => Port, + container_id => atom_to_binary(?FUNCTION_NAME), + hostname => <<"vhost:", Vhost/binary>>, + sasl => external, + tls_opts => {secure_port, [{cacertfile, CACertFile}, + {certfile, CertFile}, + {keyfile, KeyFile}]} + }, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, opened}} -> ok + after 5000 -> ct:fail(missing_opened) + end, + ok = amqp10_client:close_connection(Connection). + +anonymous_fails(Config) -> + Mechansim = anon, + OpnConf0 = connection_config(Config, <<"/">>), + OpnConf = OpnConf0#{sasl => Mechansim}, + {ok, Connection} = amqp10_client:open_connection(OpnConf), + receive {amqp10_event, {connection, Connection, {closed, Reason}}} -> + ?assertEqual({sasl_not_supported, Mechansim}, Reason) + after 5000 -> ct:fail(missing_closed) + end. + +connection_config(Config, Vhost) -> + Host = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + hostname => <<"vhost:", Vhost/binary>>}. + +set_permissions(Config, ConfigurePerm, WritePerm, ReadPerm) -> + ok = rabbit_ct_broker_helpers:set_permissions(Config, + ?config(test_user, Config), + ?config(test_vhost, Config), + ConfigurePerm, + WritePerm, + ReadPerm). + +clear_permissions(Config) -> + User = ?config(test_user, Config), + Vhost = ?config(test_vhost, Config), + ok = rabbit_ct_broker_helpers:clear_permissions(Config, User, Vhost). 
diff --git a/deps/rabbitmq_aws/Makefile b/deps/rabbitmq_aws/Makefile index 29089276c9b1..3647e0dfd5c1 100644 --- a/deps/rabbitmq_aws/Makefile +++ b/deps/rabbitmq_aws/Makefile @@ -9,7 +9,8 @@ endef LOCAL_DEPS = crypto inets ssl xmerl public_key BUILD_DEPS = rabbit_common -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk TEST_DEPS = meck include ../../rabbitmq-components.mk diff --git a/deps/rabbitmq_cli/Makefile b/deps/rabbitmq_cli/Makefile index a76d414f08f0..185b1407c893 100644 --- a/deps/rabbitmq_cli/Makefile +++ b/deps/rabbitmq_cli/Makefile @@ -11,7 +11,7 @@ dep_temp = hex 0.4.7 dep_x509 = hex 0.8.8 DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk VERBOSE_TEST ?= true MAX_CASES ?= 1 @@ -113,7 +113,12 @@ rel:: $(ESCRIPTS) @: tests:: $(ESCRIPTS) - $(gen_verbose) $(MIX_TEST) $(TEST_FILE) + $(verbose) $(MAKE) -C ../../ install-cli + $(verbose) $(MAKE) -C ../../ run-background-broker PLUGINS="rabbit rabbitmq_federation rabbitmq_stomp rabbitmq_stream_management amqp_client" + $(gen_verbose) $(MIX_TEST) $(TEST_FILE); \ + RES=$$?; \ + $(MAKE) -C ../../ stop-node; \ + exit $$RES .PHONY: test diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex index 9f4211d89491..da124ae55564 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decode_command.ex @@ -86,6 +86,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Failed to decrypt the value. Things to check: is the passphrase correct? 
Are the cipher and hash algorithms the same as those used for encryption?"} end @@ -109,6 +110,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} end @@ -117,7 +119,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do def formatter(), do: RabbitMQ.CLI.Formatters.Erlang def banner(_, _) do - "Decrypting value..." + "Decrypting an advanced.config (Erlang term) value..." end def usage, @@ -125,7 +127,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do def usage_additional() do [ - ["", "config value to decode"], + ["", "advanced.config (Erlang term) value to decode"], ["", "passphrase to use with the config value encryption key"], ["--cipher ", "cipher suite to use"], ["--hash ", "hashing function to use"], @@ -141,7 +143,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.DecodeCommand do def help_section(), do: :configuration - def description(), do: "Decrypts an encrypted configuration value" + def description(), do: "Decrypts an encrypted advanced.config value" # # Implementation diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex new file mode 100644 index 000000000000..6ac5958a96a1 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/decrypt_conf_value_command.ex @@ -0,0 +1,172 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +alias RabbitMQ.CLI.Core.Helpers + +defmodule RabbitMQ.CLI.Ctl.Commands.DecryptConfValueCommand do + alias RabbitMQ.CLI.Core.{DocGuide, Input} + + @behaviour RabbitMQ.CLI.CommandBehaviour + use RabbitMQ.CLI.DefaultOutput + + def switches() do + [ + cipher: :string, + hash: :string, + iterations: :integer + ] + end + + @atomized_keys [:cipher, :hash] + @prefix "encrypted:" + + def distribution(_), do: :none + + def merge_defaults(args, opts) do + with_defaults = + Map.merge( + %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }, + opts + ) + + {args, Helpers.atomize_values(with_defaults, @atomized_keys)} + end + + def validate(args, _) when length(args) < 1 do + {:validation_failure, {:not_enough_args, "Please provide a value to decode and a passphrase"}} + end + + def validate(args, _) when length(args) > 2 do + {:validation_failure, :too_many_args} + end + + def validate(_args, opts) do + case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do + {false, _, _} -> + {:validation_failure, {:bad_argument, "The requested cipher is not supported"}} + + {_, false, _} -> + {:validation_failure, {:bad_argument, "The requested hash is not supported"}} + + {_, _, false} -> + {:validation_failure, + {:bad_argument, + "The requested number of iterations is incorrect (must be a positive integer)"}} + + {true, true, true} -> + :ok + end + end + + def run([value], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Passphrase: ", opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + term_to_decrypt = + case term_value do + prefixed_val when is_bitstring(prefixed_val) or is_list(prefixed_val) -> + tag_input_value_with_encrypted(prefixed_val) + + {:encrypted, _} = encrypted -> + encrypted + + _ -> + {:encrypted, term_value} + end + 
+ result = :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, term_to_decrypt) + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, + "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} + end + end + end + + def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do + try do + term_value = Helpers.evaluate_input_as_term(value) + + term_to_decrypt = + case term_value do + prefixed_val when is_bitstring(prefixed_val) or is_list(prefixed_val) -> + tag_input_value_with_encrypted(prefixed_val) + + {:encrypted, _} = encrypted -> + encrypted + + _ -> + {:encrypted, term_value} + end + + result = :rabbit_pbe.decrypt_term(cipher, hash, iterations, passphrase, term_to_decrypt) + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, + "Failed to decrypt the value. Things to check: is the passphrase correct? Are the cipher and hash algorithms the same as those used for encryption?"} + end + end + + def formatter(), do: RabbitMQ.CLI.Formatters.Erlang + + def banner(_, _) do + "Decrypting a rabbitmq.conf string value..." 
+ end + + def usage, + do: "decrypt_conf_value value passphrase [--cipher ] [--hash ] [--iterations ]" + + def usage_additional() do + [ + ["", "a double-quoted rabbitmq.conf string value to decode"], + ["", "passphrase to use with the config value encryption key"], + ["--cipher ", "cipher suite to use"], + ["--hash ", "hashing function to use"], + ["--iterations ", "number of iteration to apply"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.configuration() + ] + end + + def help_section(), do: :configuration + + def description(), do: "Decrypts an encrypted configuration value" + + # + # Implementation + # + + defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher) + + defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash) + + defp tag_input_value_with_encrypted(value) when is_bitstring(value) or is_list(value) do + bin_val = :rabbit_data_coercion.to_binary(value) + untagged_val = String.replace_prefix(bin_val, @prefix, "") + + {:encrypted, untagged_val} + end + defp tag_input_value_with_encrypted(value) do + {:encrypted, value} + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex index d4405f322891..c974cc3e1cd5 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/enable_feature_flag_command.ex @@ -7,54 +7,75 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand do @behaviour RabbitMQ.CLI.CommandBehaviour - def merge_defaults(args, opts), do: {args, opts} + def switches(), do: [experimental: :boolean, opt_in: :boolean] + def aliases(), do: [e: :experimental, o: :opt_in] - def validate([], _), do: {:validation_failure, :not_enough_args} - def validate([_ | _] = args, _) when length(args) > 1, do: {:validation_failure, :too_many_args} + def merge_defaults(args, opts), 
do: { args, Map.merge(%{experimental: false, opt_in: false}, opts) } - def validate([""], _), - do: {:validation_failure, {:bad_argument, "feature_flag cannot be an empty string."}} + def validate([], _opts), do: {:validation_failure, :not_enough_args} + def validate([_ | _] = args, _opts) when length(args) > 1, do: {:validation_failure, :too_many_args} - def validate([_], _), do: :ok + def validate([""], _opts), + do: {:validation_failure, {:bad_argument, "feature flag (or group) name cannot be an empty string"}} + + def validate([_], _opts), do: :ok use RabbitMQ.CLI.Core.RequiresRabbitAppRunning + def run(["all"], %{node: node_name, opt_in: opt_in, experimental: experimental}) do + has_opted_in = (opt_in || experimental) + enable_all(node_name, has_opted_in) + end + + def run(["all"], %{node: node_name, opt_in: has_opted_in}) do + enable_all(node_name, has_opted_in) + end + + def run(["all"], %{node: node_name, experimental: has_opted_in}) do + enable_all(node_name, has_opted_in) + end + def run(["all"], %{node: node_name}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable_all, []) do - # Server does not support feature flags, consider none are available. - # See rabbitmq/rabbitmq-cli#344 for context. MK. 
- {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported} - {:badrpc, _} = err -> err - other -> other - end + enable_all(node_name, false) + end + + + def run([feature_flag], %{node: node_name, opt_in: opt_in, experimental: experimental}) do + has_opted_in = (opt_in || experimental) + enable_one(node_name, feature_flag, has_opted_in) + end + + def run([feature_flag], %{node: node_name, opt_in: has_opted_in}) do + enable_one(node_name, feature_flag, has_opted_in) + end + + def run([feature_flag], %{node: node_name, experimental: has_opted_in}) do + enable_one(node_name, feature_flag, has_opted_in) end def run([feature_flag], %{node: node_name}) do - case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable, [ - String.to_atom(feature_flag) - ]) do - # Server does not support feature flags, consider none are available. - # See rabbitmq/rabbitmq-cli#344 for context. MK. - {:badrpc, {:EXIT, {:undef, _}}} -> {:error, :unsupported} - {:badrpc, _} = err -> err - other -> other - end + enable_one(node_name, feature_flag, false) end + def output({:error, :unsupported}, %{node: node_name}) do {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), - "This feature flag is not supported by node #{node_name}"} + "This feature flag is not supported by node #{node_name}"} end use RabbitMQ.CLI.DefaultOutput - def usage, do: "enable_feature_flag " + def usage, do: "enable_feature_flag [--opt-in] " def usage_additional() do [ [ "", "name of the feature flag to enable, or \"all\" to enable all supported flags" + ], + [ + "--opt-in", + "required to enable certain feature flags (those with vast scope or maturing)" ] ] end @@ -67,4 +88,39 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand do def banner(["all"], _), do: "Enabling all feature flags ..." def banner([feature_flag], _), do: "Enabling feature flag \"#{feature_flag}\" ..." 
+ + # + # Implementation + # + + defp enable_all(node_name, has_opted_in) do + case has_opted_in do + true -> + msg = "`--opt-in` (aliased as `--experimental`) flag is not allowed when enabling all feature flags.\nUse --opt-in with a specific feature flag name if to enable an opt-in flag" + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), msg} + _ -> + case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable_all, []) do + {:badrpc, _} = err -> err + other -> other + end + end + end + + defp enable_one(node_name, feature_flag, has_opted_in) do + case {has_opted_in, :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :get_stability, [ + String.to_atom(feature_flag) + ])} do + {_, {:badrpc, _} = err} -> err + {false, :experimental} -> + msg = "Feature flag #{feature_flag} requires the user to explicitly opt-in.\nUse --opt-in with a specific feature flag name if to enable an opt-in flag" + {:error, RabbitMQ.CLI.Core.ExitCodes.exit_usage(), msg} + _ -> + case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :enable, [ + String.to_atom(feature_flag) + ]) do + {:badrpc, _} = err -> err + other -> other + end + end + end end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex index ae69e44b72d0..8eb43e688c91 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encode_command.ex @@ -77,6 +77,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error during cipher operation"} end end @@ -99,6 +100,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error during cipher operation"} end end @@ -115,6 +117,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do {:ok, result} catch _, _ -> + IO.inspect(__STACKTRACE__) {:error, "Error 
during cipher operation"} end end @@ -122,7 +125,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do def formatter(), do: RabbitMQ.CLI.Formatters.Erlang def banner(_, _) do - "Encrypting value ..." + "Encrypting value to be used in advanced.config..." end def usage, @@ -130,7 +133,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do def usage_additional() do [ - ["", "config value to encode"], + ["", "value to encode, to be used in advanced.config"], ["", "passphrase to use with the config value encryption key"], ["--cipher ", "cipher suite to use"], ["--hash ", "hashing function to use"], @@ -146,7 +149,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.EncodeCommand do def help_section(), do: :configuration - def description(), do: "Encrypts a sensitive configuration value" + def description(), do: "Encrypts a sensitive configuration value to be used in the advanced.config file" # # Implementation diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex new file mode 100644 index 000000000000..914ad7debeb2 --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/encrypt_conf_value_command.ex @@ -0,0 +1,157 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule RabbitMQ.CLI.Ctl.Commands.EncryptConfValueCommand do + alias RabbitMQ.CLI.Core.{DocGuide, Helpers, Input} + + @behaviour RabbitMQ.CLI.CommandBehaviour + use RabbitMQ.CLI.DefaultOutput + + def switches() do + [ + cipher: :string, + hash: :string, + iterations: :integer + ] + end + + @atomized_keys [:cipher, :hash] + + def distribution(_), do: :none + + def merge_defaults(args, opts) do + with_defaults = + Map.merge( + %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }, + opts + ) + + {args, Helpers.atomize_values(with_defaults, @atomized_keys)} + end + + def validate(args, _) when length(args) > 2 do + {:validation_failure, :too_many_args} + end + + def validate(_args, opts) do + case {supports_cipher(opts.cipher), supports_hash(opts.hash), opts.iterations > 0} do + {false, _, _} -> + {:validation_failure, {:bad_argument, "The requested cipher is not supported."}} + + {_, false, _} -> + {:validation_failure, {:bad_argument, "The requested hash is not supported"}} + + {_, _, false} -> + {:validation_failure, {:bad_argument, "The requested number of iterations is incorrect"}} + + {true, true, true} -> + :ok + end + end + + def run([], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Value to encode: ", opts) do + :eof -> + {:error, :not_enough_args} + + value -> + case Input.consume_single_line_string_with_prompt("Passphrase: ", opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + {:error, "Error during cipher operation"} + end + end + end + end + + def run([value], %{cipher: cipher, hash: hash, iterations: iterations} = opts) do + case Input.consume_single_line_string_with_prompt("Passphrase: ", 
opts) do + :eof -> + {:error, :not_enough_args} + + passphrase -> + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, "Error during cipher operation"} + end + end + end + + def run([value, passphrase], %{cipher: cipher, hash: hash, iterations: iterations}) do + try do + term_value = Helpers.evaluate_input_as_term(value) + + {:encrypted, result} = + :rabbit_pbe.encrypt_term(cipher, hash, iterations, passphrase, term_value) + + {:ok, result} + catch + _, _ -> + IO.inspect(__STACKTRACE__) + {:error, "Error during cipher operation"} + end + end + + def formatter(), do: RabbitMQ.CLI.Formatters.EncryptedConfValue + + def banner(_, _) do + "Encrypting value to be used in rabbitmq.conf..." + end + + def usage, + do: "encrypt_conf_value value passphrase [--cipher ] [--hash ] [--iterations ]" + + def usage_additional() do + [ + ["", "config value to encode"], + ["", "passphrase to use with the config value encryption key"], + ["--cipher ", "cipher suite to use"], + ["--hash ", "hashing function to use"], + ["--iterations ", "number of iteration to apply"] + ] + end + + def usage_doc_guides() do + [ + DocGuide.configuration() + ] + end + + def help_section(), do: :configuration + + def description(), do: "Encrypts a sensitive configuration value to be used in the advanced.config file" + + # + # Implementation + # + + defp supports_cipher(cipher), do: Enum.member?(:rabbit_pbe.supported_ciphers(), cipher) + + defp supports_hash(hash), do: Enum.member?(:rabbit_pbe.supported_hashes(), hash) +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex deleted file mode 100644 index 3cc58a8c9127..000000000000 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/hipe_compile_command.ex +++ 
/dev/null @@ -1,99 +0,0 @@ -## This Source Code Form is subject to the terms of the Mozilla Public -## License, v. 2.0. If a copy of the MPL was not distributed with this -## file, You can obtain one at https://mozilla.org/MPL/2.0/. -## -## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. - -defmodule RabbitMQ.CLI.Ctl.Commands.HipeCompileCommand do - @moduledoc """ - HiPE support has been deprecated since Erlang/OTP 22 (mid-2019) and - won't be a part of Erlang/OTP 24. - - Therefore this command is DEPRECATED and is no-op. - """ - - alias RabbitMQ.CLI.Core.{DocGuide, Validators} - import RabbitMQ.CLI.Core.CodePath - - @behaviour RabbitMQ.CLI.CommandBehaviour - - # - # API - # - - def distribution(_), do: :none - - use RabbitMQ.CLI.Core.MergesNoDefaults - - def validate([], _), do: {:validation_failure, :not_enough_args} - - def validate([target_dir], opts) do - :ok - |> Validators.validate_step(fn -> - case acceptable_path?(target_dir) do - true -> :ok - false -> {:error, {:bad_argument, "Target directory path cannot be blank"}} - end - end) - |> Validators.validate_step(fn -> - case File.dir?(target_dir) do - true -> - :ok - - false -> - case File.mkdir_p(target_dir) do - :ok -> - :ok - - {:error, perm} when perm == :eperm or perm == :eacces -> - {:error, - {:bad_argument, - "Cannot create target directory #{target_dir}: insufficient permissions"}} - end - end - end) - |> Validators.validate_step(fn -> require_rabbit(opts) end) - end - - def validate(_, _), do: {:validation_failure, :too_many_args} - - def run([_target_dir], _opts) do - :ok - end - - use RabbitMQ.CLI.DefaultOutput - - def usage, do: "hipe_compile " - - def usage_additional do - [ - ["", "Target directory for HiPE-compiled modules"] - ] - end - - def usage_doc_guides() do - [ - DocGuide.configuration(), - DocGuide.erlang_versions() - ] - end - - def help_section(), do: :deprecated - - def description() 
do - "DEPRECATED. This command is a no-op. HiPE is no longer supported by modern Erlang versions" - end - - def banner([_target_dir], _) do - "This command is a no-op. HiPE is no longer supported by modern Erlang versions" - end - - # - # Implementation - # - - # Accepts any non-blank path - defp acceptable_path?(value) do - String.length(String.trim(value)) != 0 - end -end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex index c5a362e8859c..faa92cfbb879 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_connections_command.ex @@ -17,7 +17,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand do @info_keys ~w(pid name port host peer_port peer_host ssl ssl_protocol ssl_key_exchange ssl_cipher ssl_hash peer_cert_subject peer_cert_issuer peer_cert_validity state - channels protocol auth_mechanism user vhost timeout frame_max + channels protocol auth_mechanism user vhost container_id timeout frame_max channel_max client_properties recv_oct recv_cnt send_oct send_cnt send_pend connected_at)a @@ -79,7 +79,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListConnectionsCommand do def help_section(), do: :observability_and_health_checks - def description(), do: "Lists AMQP 0.9.1 connections for the node" + def description(), do: "Lists AMQP connections for the node" def banner(_, _), do: "Listing connections ..." 
end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex index 35e1f2f78402..6eb3242bfbcd 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/diagnostics/commands/metadata_store_status_command.ex @@ -14,7 +14,12 @@ defmodule RabbitMQ.CLI.Diagnostics.Commands.MetadataStoreStatusCommand do use RabbitMQ.CLI.Core.RequiresRabbitAppRunning def run([] = _args, %{node: node_name}) do - :rabbit_misc.rpc_call(node_name, :rabbit_khepri, :status, []) + case :rabbit_misc.rpc_call(node_name, :rabbit_feature_flags, :is_enabled, [:khepri_db]) do + true -> + :rabbit_misc.rpc_call(node_name, :rabbit_khepri, :status, []) + false -> + [[{<<"Metadata Store">>, "mnesia"}]] + end end use RabbitMQ.CLI.DefaultOutput diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex new file mode 100644 index 000000000000..7eabc77b3a7a --- /dev/null +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/formatters/encrypted_conf_value.ex @@ -0,0 +1,26 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +## Prints values from a command as strings(if possible) +defmodule RabbitMQ.CLI.Formatters.EncryptedConfValue do + alias RabbitMQ.CLI.Core.Helpers + alias RabbitMQ.CLI.Formatters.FormatterHelpers + + @behaviour RabbitMQ.CLI.FormatterBehaviour + + def format_output(output, _) do + Helpers.string_or_inspect("encrypted:#{output}") + end + + def format_stream(stream, options) do + Stream.map( + stream, + FormatterHelpers.without_errors_1(fn el -> + format_output(el, options) + end) + ) + end +end diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex index 9c9c03a748ba..ba0d24974a77 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/streams/commands/add_replica_command.ex @@ -25,10 +25,10 @@ defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommand do to_atom(node) ]) do {:error, :classic_queue_not_supported} -> - {:error, "Cannot add replicas to a classic queue"} + {:error, "Cannot add replicas to classic queues"} {:error, :quorum_queue_not_supported} -> - {:error, "Cannot add replicas to a quorum queue"} + {:error, "Cannot add replicas to quorum queues"} other -> other @@ -37,11 +37,11 @@ defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommand do use RabbitMQ.CLI.DefaultOutput - def usage, do: "add_replica [--vhost ] " + def usage, do: "add_replica [--vhost ] " def usage_additional do [ - ["", "stream queue name"], + ["", "stream name"], ["", "node to add a new replica on"] ] end @@ -54,11 +54,11 @@ defmodule RabbitMQ.CLI.Streams.Commands.AddReplicaCommand do def help_section, do: :replication - def description, do: "Adds a stream queue replica on the given node." + def description, do: "Adds a stream replica on the given node" def banner([name, node], _) do [ - "Adding a replica for queue #{name} on node #{node}..." + "Adding a replica for stream #{name} on node #{node}..." 
] end end diff --git a/deps/rabbitmq_cli/test/core/json_stream_test.exs b/deps/rabbitmq_cli/test/core/json_stream_test.exs index ccbe0c54b65f..0d736fb8af61 100644 --- a/deps/rabbitmq_cli/test/core/json_stream_test.exs +++ b/deps/rabbitmq_cli/test/core/json_stream_test.exs @@ -12,6 +12,8 @@ defmodule JsonStreamTest do test "format_output map with atom keys is converted to JSON object" do assert @formatter.format_output(%{a: :apple, b: :beer}, %{}) == "{\"a\":\"apple\",\"b\":\"beer\"}" + or @formatter.format_output(%{a: :apple, b: :beer}, %{}) == + "{\"b\":\"beer\",\"a\":\"apple\"}" end test "format_output map with binary keys is converted to JSON object" do diff --git a/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs b/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs new file mode 100644 index 000000000000..e6dff24dbc21 --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/decrypt_conf_value_command_test.exs @@ -0,0 +1,83 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
+ +defmodule DecryptConfValueCommandTest do + use ExUnit.Case, async: false + @command RabbitMQ.CLI.Ctl.Commands.DecryptConfValueCommand + + setup _context do + {:ok, + opts: %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }} + end + + test "validate: providing exactly 2 positional arguments passes", context do + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: providing no positional arguments fails", context do + assert match?( + {:validation_failure, {:not_enough_args, _}}, + @command.validate([], context[:opts]) + ) + end + + test "validate: providing one positional argument passes", context do + assert :ok == @command.validate(["value"], context[:opts]) + end + + test "validate: providing three or more positional argument fails", context do + assert match?( + {:validation_failure, :too_many_args}, + @command.validate(["value", "secret", "incorrect"], context[:opts]) + ) + end + + test "validate: hash and cipher must be supported", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{hash: :funny_hash}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}) + ) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: number of iterations must greater than 0", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0})) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + 
@command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1})) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end +end diff --git a/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs index 2608751f404a..635eaa07800b 100644 --- a/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs +++ b/deps/rabbitmq_cli/test/ctl/enable_feature_flag_test.exs @@ -10,6 +10,8 @@ defmodule EnableFeatureFlagCommandTest do @command RabbitMQ.CLI.Ctl.Commands.EnableFeatureFlagCommand @feature_flag :ff_from_enable_ff_testsuite + @experimental_flag :ff_from_enable_ff_testsuite_experimental + @usage_exit_code RabbitMQ.CLI.Core.ExitCodes.exit_usage() setup_all do RabbitMQ.CLI.Core.Distribution.start() @@ -22,6 +24,11 @@ defmodule EnableFeatureFlagCommandTest do desc: ~c"My feature flag", provided_by: :EnableFeatureFlagCommandTest, stability: :stable + }, + @experimental_flag => %{ + desc: ~c"An **experimental** feature!", + provided_by: :EnableFeatureFlagCommandTest, + stability: :experimental } } @@ -35,7 +42,9 @@ defmodule EnableFeatureFlagCommandTest do { :ok, - opts: %{node: get_rabbit_hostname()}, feature_flag: @feature_flag + opts: %{node: get_rabbit_hostname(), experimental: false}, + feature_flag: @feature_flag, + experimental_flag: @experimental_flag } end @@ -58,11 +67,36 @@ defmodule EnableFeatureFlagCommandTest do assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag]) end - test "run: attempt to use an unreachable node returns a nodedown" do - opts = %{node: :jake@thedog, timeout: 200} + test "run: attempt to use an unreachable node with --opt-in returns a nodedown" do + opts = %{node: :jake@thedog, timeout: 200, opt_in: false} assert match?({:badrpc, _}, @command.run(["na"], opts)) end + test "run: attempt to use an unreachable node with --experimental returns a nodedown" do + opts = %{node: :jake@thedog, timeout: 200, experimental: false} + 
assert match?({:badrpc, _}, @command.run(["na"], opts)) + end + + test "run: enabling an experimental flag requires '--opt-in'", context do + experimental_flag = Atom.to_string(context[:experimental_flag]) + assert match?( + {:error, @usage_exit_code, _}, + @command.run([experimental_flag], context[:opts]) + ) + opts = Map.put(context[:opts], :opt_in, true) + assert @command.run([experimental_flag], opts) == :ok + end + + test "run: enabling an experimental flag accepts '--experimental'", context do + experimental_flag = Atom.to_string(context[:experimental_flag]) + assert match?( + {:error, @usage_exit_code, _}, + @command.run([experimental_flag], context[:opts]) + ) + opts = Map.put(context[:opts], :experimental, true) + assert @command.run([experimental_flag], opts) == :ok + end + test "run: enabling the same feature flag twice is idempotent", context do enable_feature_flag(context[:feature_flag]) assert @command.run([Atom.to_string(context[:feature_flag])], context[:opts]) == :ok @@ -75,6 +109,18 @@ defmodule EnableFeatureFlagCommandTest do assert list_feature_flags(:enabled) |> Map.has_key?(context[:feature_flag]) end + test "run: enabling all feature flags with '--opt-in' returns an error", context do + enable_feature_flag(context[:feature_flag]) + opts = Map.put(context[:opts], :opt_in, true) + assert match?({:error, @usage_exit_code, _}, @command.run(["all"], opts)) + end + + test "run: enabling all feature flags with '--experimental' returns an error", context do + enable_feature_flag(context[:feature_flag]) + opts = Map.put(context[:opts], :experimental, true) + assert match?({:error, @usage_exit_code, _}, @command.run(["all"], opts)) + end + test "banner", context do assert @command.banner([context[:feature_flag]], context[:opts]) =~ ~r/Enabling feature flag \"#{context[:feature_flag]}\" \.\.\./ diff --git a/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs b/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs new file mode 100644 
index 000000000000..e65f3b99a22a --- /dev/null +++ b/deps/rabbitmq_cli/test/ctl/encrypt_conf_value_command_test.exs @@ -0,0 +1,78 @@ +## This Source Code Form is subject to the terms of the Mozilla Public +## License, v. 2.0. If a copy of the MPL was not distributed with this +## file, You can obtain one at https://mozilla.org/MPL/2.0/. +## +## Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +defmodule EncryptConfValueCommandTest do + use ExUnit.Case, async: false + + @command RabbitMQ.CLI.Ctl.Commands.EncryptConfValueCommand + + setup _context do + {:ok, + opts: %{ + cipher: :rabbit_pbe.default_cipher(), + hash: :rabbit_pbe.default_hash(), + iterations: :rabbit_pbe.default_iterations() + }} + end + + test "validate: providing exactly 2 positional arguments passes", context do + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end + + test "validate: providing zero or one positional argument passes", context do + assert :ok == @command.validate([], context[:opts]) + assert :ok == @command.validate(["value"], context[:opts]) + end + + test "validate: providing three or more positional argument fails", context do + assert match?( + {:validation_failure, :too_many_args}, + @command.validate(["value", "secret", "incorrect"], context[:opts]) + ) + end + + test "validate: hash and cipher must be supported", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{hash: :funny_hash}) + ) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate( + ["value", "secret"], + Map.merge(context[:opts], %{cipher: :funny_cipher, hash: :funny_hash}) + ) + ) + + assert :ok == 
@command.validate(["value", "secret"], context[:opts]) + end + + test "validate: number of iterations must greater than 0", context do + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: 0})) + ) + + assert match?( + {:validation_failure, {:bad_argument, _}}, + @command.validate(["value", "secret"], Map.merge(context[:opts], %{iterations: -1})) + ) + + assert :ok == @command.validate(["value", "secret"], context[:opts]) + end +end diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app index 94f286b72257..8ea87019ad7d 100644 --- a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app +++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app @@ -6,5 +6,5 @@ {applications, [kernel,stdlib,rabbit]}, {mod, {mock_rabbitmq_plugins_01_app, []}}, {env, []}, - {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0", "4.0.0"]} + {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0", "4.0.0", "4.1.0"]} ]}. diff --git a/deps/rabbitmq_codegen/Makefile b/deps/rabbitmq_codegen/Makefile index 55d72ed88a1e..a2f6c0be813f 100644 --- a/deps/rabbitmq_codegen/Makefile +++ b/deps/rabbitmq_codegen/Makefile @@ -8,35 +8,3 @@ clean: distclean: clean find . -regex '.*\(~\|#\|\.swp\)' -exec rm {} \; - -# Upstream URL for the current project. 
-RABBITMQ_COMPONENT_REPO_NAME := rabbitmq-codegen -RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git -RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git - -# Current URL for the current project. If this is not a Git clone, -# default to the upstream Git repository. -ifneq ($(wildcard .git),) -git_origin_fetch_url := $(shell git config remote.origin.url) -git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url) -RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url) -RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url) -else -RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL) -RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL) -endif - -.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \ - show-current-git-fetch-url show-current-git-push-url - -show-upstream-git-fetch-url: - @echo $(RABBITMQ_UPSTREAM_FETCH_URL) - -show-upstream-git-push-url: - @echo $(RABBITMQ_UPSTREAM_PUSH_URL) - -show-current-git-fetch-url: - @echo $(RABBITMQ_CURRENT_FETCH_URL) - -show-current-git-push-url: - @echo $(RABBITMQ_CURRENT_PUSH_URL) diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl index 5b2daa4819fc..83a3ac208e6d 100644 --- a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl +++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange.erl @@ -20,8 +20,8 @@ ]). -export([ - khepri_consistent_hash_path/0, - khepri_consistent_hash_path/1 + khepri_consistent_hash_path/1, + khepri_consistent_hash_path/2 ]). -define(HASH_RING_STATE_TABLE, rabbit_exchange_type_consistent_hash_ring_state). 
@@ -222,7 +222,8 @@ delete_binding_in_khepri(#binding{source = S, destination = D}, DeleteFun) -> khepri_consistent_hash_path(#exchange{name = Name}) -> khepri_consistent_hash_path(Name); khepri_consistent_hash_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, exchange_type_consistent_hash_ring_state, VHost, Name]. + khepri_consistent_hash_path(VHost, Name). -khepri_consistent_hash_path() -> - [?MODULE, exchange_type_consistent_hash_ring_state]. +khepri_consistent_hash_path(VHost, Name) -> + ExchangePath = rabbit_db_exchange:khepri_exchange_path(VHost, Name), + ExchangePath ++ [consistent_hash_ring_state]. diff --git a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl index 39cc14fc929f..2f86802ae583 100644 --- a/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl +++ b/deps/rabbitmq_consistent_hash_exchange/src/rabbit_db_ch_exchange_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -93,7 +94,8 @@ delete_from_khepri(?HASH_RING_STATE_TABLE = Table, Key, State) -> end, State). 
clear_data_in_khepri(?HASH_RING_STATE_TABLE) -> - Path = rabbit_db_ch_exchange:khepri_consistent_hash_path(), + Path = rabbit_db_ch_exchange:khepri_consistent_hash_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; diff --git a/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl index 16f7ccb1fd66..85a76358df5e 100644 --- a/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl +++ b/deps/rabbitmq_consistent_hash_exchange/test/rabbit_exchange_type_consistent_hash_SUITE.erl @@ -244,21 +244,21 @@ amqp_dead_letter(Config) -> Msg1 = case Seq rem 2 of 0 -> amqp10_msg:set_message_annotations( - #{<<"k1">> => Seq}, Msg0); + #{<<"x-k1">> => Seq}, Msg0); 1 -> Msg0 end, Msg2 = case Seq rem 3 of 0 -> amqp10_msg:set_application_properties( - #{<<"k2">> => Seq}, Msg1); + #{<<"x-k2">> => Seq}, Msg1); _ -> Msg1 end, Msg = case Seq rem 4 of 0 -> amqp10_msg:set_delivery_annotations( - #{<<"k3">> => Seq}, Msg2); + #{<<"x-k3">> => Seq}, Msg2); _ -> Msg2 end, diff --git a/deps/rabbitmq_ct_client_helpers/Makefile b/deps/rabbitmq_ct_client_helpers/Makefile index c61e87a82a34..84b5238fb08e 100644 --- a/deps/rabbitmq_ct_client_helpers/Makefile +++ b/deps/rabbitmq_ct_client_helpers/Makefile @@ -3,8 +3,7 @@ PROJECT_DESCRIPTION = Common Test helpers for RabbitMQ (client-side helpers) DEPS = rabbit_common rabbitmq_ct_helpers amqp_client -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk PLT_APPS = common_test diff --git a/deps/rabbitmq_ct_helpers/BUILD.bazel b/deps/rabbitmq_ct_helpers/BUILD.bazel index 5509595668cd..1002b4289a8a 100644 --- a/deps/rabbitmq_ct_helpers/BUILD.bazel +++ b/deps/rabbitmq_ct_helpers/BUILD.bazel @@ -45,6 +45,7 @@ rabbitmq_app( "//deps/rabbit_common:erlang_app", 
"@meck//:erlang_app", "@proper//:erlang_app", + "@ra//:erlang_app", ], ) diff --git a/deps/rabbitmq_ct_helpers/Makefile b/deps/rabbitmq_ct_helpers/Makefile index 2e1f19839036..405118580fc8 100644 --- a/deps/rabbitmq_ct_helpers/Makefile +++ b/deps/rabbitmq_ct_helpers/Makefile @@ -2,7 +2,13 @@ PROJECT = rabbitmq_ct_helpers PROJECT_DESCRIPTION = Common Test helpers for RabbitMQ DEPS = rabbit_common proper inet_tcp_proxy meck -TEST_DEPS = rabbit +LOCAL_DEPS = common_test eunit inets +#TEST_DEPS = rabbit + +# We are calling one function from 'rabbit' so we need it in the PLT. +# But really this should be a full dependency; or we don't use the +# function anymore; or move it to rabbit_common. @todo +dialyze: DEPS += rabbit XREF_IGNORE = [ \ {'Elixir.OptionParser',split,1}, \ @@ -10,10 +16,9 @@ XREF_IGNORE = [ \ dep_inet_tcp_proxy = git https://github.com/rabbitmq/inet_tcp_proxy master -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-tools.mk +# As this is a helper application we don't need other plugins; +# however we can run a test broker in the test suites. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl index de51925db73a..31a80a159040 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl @@ -40,7 +40,8 @@ wait_for_async_command(Node) -> command_with_output(Command, Node, Args, Opts) -> Formatted = format_command(Command, Node, Args, Opts), - CommandResult = 'Elixir.RabbitMQCtl':exec_command( + Mod = 'Elixir.RabbitMQCtl', %% To silence a Dialyzer warning. 
+ CommandResult = Mod:exec_command( Formatted, fun(Output,_,_) -> Output end), ct:pal("Executed command ~tp against node ~tp~nResult: ~tp~n", [Formatted, Node, CommandResult]), CommandResult. @@ -50,7 +51,8 @@ format_command(Command, Node, Args, Opts) -> [Command, format_args(Args), format_options([{"--node", Node} | Opts])]), - 'Elixir.OptionParser':split(iolist_to_binary(Formatted)). + Mod = 'Elixir.OptionParser', %% To silence a Dialyzer warning. + Mod:split(iolist_to_binary(Formatted)). format_args(Args) -> iolist_to_binary([ io_lib:format("~tp ", [Arg]) || Arg <- Args ]). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl index 9f8ff9e6f932..b01ea002842e 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl @@ -170,7 +170,8 @@ test_writer/1, user/1, - configured_metadata_store/1 + configured_metadata_store/1, + await_metadata_store_consistent/2 ]). %% Internal functions exported to be used by rpc:call/4. @@ -392,7 +393,7 @@ wait_for_rabbitmq_nodes(Config, Starting, NodeConfigs, Clustered) -> NodeConfigs1 = [NC || {_, NC} <- NodeConfigs], Config1 = rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}), - stop_rabbitmq_nodes(Config1), + _ = stop_rabbitmq_nodes(Config1), Error; {Pid, I, NodeConfig} when NodeConfigs =:= [] -> wait_for_rabbitmq_nodes(Config, Starting -- [Pid], @@ -488,11 +489,15 @@ init_tcp_port_numbers(Config, NodeConfig, I) -> update_tcp_ports_in_rmq_config(NodeConfig2, ?TCP_PORTS_LIST). tcp_port_base_for_broker(Config, I, PortsCount) -> + tcp_port_base_for_broker0(Config, I, PortsCount). 
+ +tcp_port_base_for_broker0(Config, I, PortsCount) -> + Base0 = persistent_term:get(rabbit_ct_tcp_port_base, ?TCP_PORTS_BASE), Base = case rabbit_ct_helpers:get_config(Config, tcp_ports_base) of undefined -> - ?TCP_PORTS_BASE; + Base0; {skip_n_nodes, N} -> - tcp_port_base_for_broker1(?TCP_PORTS_BASE, N, PortsCount); + tcp_port_base_for_broker1(Base0, N, PortsCount); B -> B end, @@ -628,7 +633,14 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> true -> lists:nth(I + 1, WithPlugins0); false -> WithPlugins0 end, - CanUseSecondary = (I + 1) rem 2 =:= 0, + ForceUseSecondary = rabbit_ct_helpers:get_config( + Config, force_secondary_umbrella, undefined), + CanUseSecondary = case ForceUseSecondary of + undefined -> + (I + 1) rem 2 =:= 0; + Override when is_boolean(Override) -> + Override + end, UseSecondaryUmbrella = case ?config(secondary_umbrella, Config) of false -> false; _ -> CanUseSecondary @@ -660,25 +672,9 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> DistArg = re:replace(DistModS, "_dist$", "", [{return, list}]), "-pa \"" ++ DistModPath ++ "\" -proto_dist " ++ DistArg end, - %% Set the net_ticktime. - CurrentTicktime = case net_kernel:get_net_ticktime() of - {ongoing_change_to, T} -> T; - T -> T - end, - StartArgs1 = case rabbit_ct_helpers:get_config(Config, net_ticktime) of - undefined -> - case CurrentTicktime of - 60 -> ok; - _ -> net_kernel:set_net_ticktime(60) - end, - StartArgs0; - Ticktime -> - case CurrentTicktime of - Ticktime -> ok; - _ -> net_kernel:set_net_ticktime(Ticktime) - end, - StartArgs0 ++ " -kernel net_ticktime " ++ integer_to_list(Ticktime) - end, + %% Set the net_ticktime to 5s for all nodes (including CT via CT_OPTS). + %% A lower tick time helps trigger distribution failures faster. 
+ StartArgs1 = StartArgs0 ++ " -kernel net_ticktime 5", ExtraArgs0 = [], ExtraArgs1 = case rabbit_ct_helpers:get_config(Config, rmq_plugins_dir) of undefined -> @@ -745,7 +741,6 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> {"RABBITMQ_SERVER_START_ARGS=~ts", [StartArgs1]}, {"RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=+S 2 +sbwt very_short +A 24 ~ts", [AdditionalErlArgs]}, "RABBITMQ_LOG=debug", - "RMQCTL_WAIT_TIMEOUT=180", {"TEST_TMPDIR=~ts", [PrivDir]} | ExtraArgs], Cmd = ["start-background-broker" | MakeVars], @@ -761,6 +756,7 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> _ -> AbortCmd = ["stop-node" | MakeVars], _ = rabbit_ct_helpers:make(Config, SrcDir, AbortCmd), + %% @todo Need to stop all nodes in the cluster, not just the one node. {skip, "Failed to initialize RabbitMQ"} end; RunCmd -> @@ -920,7 +916,7 @@ wait_for_node_handling(Procs, Fun, T0, Results) -> move_nonworking_nodedir_away(NodeConfig) -> ConfigFile = ?config(erlang_node_config_filename, NodeConfig), ConfigDir = filename:dirname(ConfigFile), - case os:getenv("RABBITMQ_CT_HELPERS_DELETE_UNUSED_NODES") =/= false + ok = case os:getenv("RABBITMQ_CT_HELPERS_DELETE_UNUSED_NODES") =/= false andalso ?OTP_RELEASE >= 23 of true -> file:del_dir_r(ConfigDir); @@ -984,12 +980,36 @@ enable_khepri_metadata_store(Config, FFs0) -> case enable_feature_flag(C, FF) of ok -> C; - Skip -> + {skip, _} = Skip -> ct:pal("Enabling metadata store failed: ~p", [Skip]), Skip end end, Config, FFs). +%% Waits until the metadata store replica on Node is up to date with the leader. 
+await_metadata_store_consistent(Config, Node) -> + case configured_metadata_store(Config) of + mnesia -> + ok; + {khepri, _} -> + RaClusterName = rabbit_khepri:get_ra_cluster_name(), + Leader = rpc(Config, Node, ra_leaderboard, lookup_leader, [RaClusterName]), + LastAppliedLeader = ra_last_applied(Leader), + + NodeName = get_node_config(Config, Node, nodename), + ServerId = {RaClusterName, NodeName}, + rabbit_ct_helpers:eventually( + ?_assert( + begin + LastApplied = ra_last_applied(ServerId), + is_integer(LastApplied) andalso LastApplied >= LastAppliedLeader + end)) + end. + +ra_last_applied(ServerId) -> + #{last_applied := LastApplied} = ra:key_metrics(ServerId), + LastApplied. + rewrite_node_config_file(Config, Node) -> NodeConfig = get_node_config(Config, Node), I = if @@ -1114,7 +1134,7 @@ stop_rabbitmq_node(Config, NodeConfig) -> {"RABBITMQ_NODENAME_FOR_PATHS=~ts", [InitialNodename]} ], Cmd = ["stop-node" | MakeVars], - case rabbit_ct_helpers:get_config(Config, rabbitmq_run_cmd) of + _ = case rabbit_ct_helpers:get_config(Config, rabbitmq_run_cmd) of undefined -> rabbit_ct_helpers:make(Config, SrcDir, Cmd); RunCmd -> @@ -1893,10 +1913,8 @@ restart_node(Config, Node) -> stop_node(Config, Node) -> NodeConfig = get_node_config(Config, Node), - case stop_rabbitmq_node(Config, NodeConfig) of - {skip, _} = Error -> Error; - _ -> ok - end. + _ = stop_rabbitmq_node(Config, NodeConfig), + ok. stop_node_after(Config, Node, Sleep) -> timer:sleep(Sleep), @@ -1919,7 +1937,7 @@ kill_node(Config, Node) -> _ -> rabbit_misc:format("kill -9 ~ts", [Pid]) end, - os:cmd(Cmd), + _ = os:cmd(Cmd), await_os_pid_death(Pid). kill_node_after(Config, Node, Sleep) -> @@ -2210,7 +2228,7 @@ if_cover(F) -> os:getenv("COVERAGE") } of {false, false} -> ok; - _ -> F() + _ -> _ = F(), ok end. 
setup_meck(Config) -> diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl index 2f68bc364302..09c9b6108734 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl @@ -24,11 +24,15 @@ init_schemas(App, Config) -> run_snippets(Config) -> {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)), ct:pal("Loaded config schema snippets: ~tp", [Snippets]), - lists:map( - fun({N, S, C, P}) -> ok = test_snippet(Config, {snippet_id(N), S, []}, C, P); - ({N, S, A, C, P}) -> ok = test_snippet(Config, {snippet_id(N), S, A}, C, P) - end, - Snippets), + lists:foreach( + fun({N, S, C, P}) -> + ok = test_snippet(Config, {snippet_id(N), S, []}, C, P, true); + ({N, S, A, C, P}) -> + ok = test_snippet(Config, {snippet_id(N), S, A}, C, P, true); + ({N, S, A, C, P, nosort}) -> + ok = test_snippet(Config, {snippet_id(N), S, A}, C, P, false) + end, + Snippets), ok. snippet_id(N) when is_integer(N) -> @@ -40,7 +44,7 @@ snippet_id(A) when is_atom(A) -> snippet_id(L) when is_list(L) -> L. 
-test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins) -> +test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins, Sort) -> {ConfFile, AdvancedFile} = write_snippet(Config, Snippet), %% We ignore the rabbit -> log portion of the config on v3.9+, where the lager %% dependency has been dropped @@ -50,8 +54,12 @@ test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins) -> _ -> generate_config(ConfFile, AdvancedFile) end, - Gen = deepsort(Generated), - Exp = deepsort(Expected), + {Exp, Gen} = case Sort of + true -> + {deepsort(Expected), deepsort(Generated)}; + false -> + {Expected, Generated} + end, case Exp of Gen -> ok; _ -> @@ -62,12 +70,12 @@ test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins) -> write_snippet(Config, {Name, Conf, Advanced}) -> ResultsDir = ?config(results_dir, Config), - file:make_dir(filename:join(ResultsDir, Name)), + _ = file:make_dir(filename:join(ResultsDir, Name)), ConfFile = filename:join([ResultsDir, Name, "config.conf"]), AdvancedFile = filename:join([ResultsDir, Name, "advanced.config"]), - file:write_file(ConfFile, Conf), - rabbit_file:write_term_file(AdvancedFile, [Advanced]), + ok = file:write_file(ConfFile, Conf), + ok = rabbit_file:write_term_file(AdvancedFile, [Advanced]), {ConfFile, AdvancedFile}. generate_config(ConfFile, AdvancedFile) -> diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl index 801de565d125..d9e34cf38fa6 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl @@ -155,10 +155,10 @@ redirect_logger_to_ct_logs(Config) -> ct:pal( ?LOW_IMPORTANCE, "Configuring logger to send logs to common_test logs"), - logger:set_handler_config(cth_log_redirect, level, debug), + ok = logger:set_handler_config(cth_log_redirect, level, debug), %% Let's use the same format as RabbitMQ itself. 
- logger:set_handler_config( + ok = logger:set_handler_config( cth_log_redirect, formatter, rabbit_prelaunch_early_logging:default_file_formatter(#{})), @@ -170,7 +170,7 @@ redirect_logger_to_ct_logs(Config) -> cth_log_redirect_any_domains, cth_log_redirect_any_domains, LogCfg), - logger:remove_handler(default), + ok = logger:remove_handler(default), ct:pal( ?LOW_IMPORTANCE, @@ -686,7 +686,6 @@ load_elixir(Config) -> ElixirLibDir -> ct:pal(?LOW_IMPORTANCE, "Elixir lib dir: ~ts~n", [ElixirLibDir]), true = code:add_pathz(ElixirLibDir), - application:load(elixir), {ok, _} = application:ensure_all_started(elixir), Config end. @@ -947,7 +946,7 @@ port_receive_loop(Port, Stdout, Options, Until, DumpTimer) -> end, receive {Port, {exit_status, X}} -> - timer:cancel(DumpTimer), + _ = timer:cancel(DumpTimer), DropStdout = lists:member(drop_stdout, Options) orelse Stdout =:= "", if diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl index b98cb0dd862a..490ccda377f7 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl @@ -499,7 +499,7 @@ spawn_terraform_vms(Config) -> rabbit_ct_helpers:register_teardown_steps( Config1, teardown_steps()); _ -> - destroy_terraform_vms(Config), + _ = destroy_terraform_vms(Config), {skip, "Terraform failed to spawn VM"} end. @@ -520,7 +520,7 @@ destroy_terraform_vms(Config) -> ] ++ TfVarFlags ++ [ TfConfigDir ], - rabbit_ct_helpers:exec(Cmd, [{env, Env}]), + {ok, _} = rabbit_ct_helpers:exec(Cmd, [{env, Env}]), Config. 
terraform_var_flags(Config) -> @@ -696,7 +696,7 @@ ensure_instance_count(Config, TRef) -> poll_vms(Config) end; true -> - timer:cancel(TRef), + _ = timer:cancel(TRef), rabbit_ct_helpers:set_config(Config, {terraform_poll_done, true}) end; @@ -760,7 +760,7 @@ initialize_ct_peers(Config, NodenamesMap, IPAddrsMap) -> set_inet_hosts(Config) -> CTPeers = get_ct_peer_entries(Config), inet_db:set_lookup([file, native]), - [begin + _ = [begin Hostname = ?config(hostname, CTPeerConfig), IPAddr = ?config(ipaddr, CTPeerConfig), inet_db:add_host(IPAddr, [Hostname]), @@ -831,7 +831,7 @@ wait_for_ct_peers(Config, [CTPeer | Rest] = CTPeers, TRef) -> end end; wait_for_ct_peers(Config, [], TRef) -> - timer:cancel(TRef), + _ = timer:cancel(TRef), Config. set_ct_peers_code_path(Config) -> @@ -864,7 +864,7 @@ download_dirs(Config) -> ?MODULE, prepare_dirs_to_download_archives, [Config]), - inets:start(), + _ = inets:start(), download_dirs(Config, ConfigsPerCTPeer). download_dirs(_, [{skip, _} = Error | _]) -> @@ -964,7 +964,7 @@ add_archive_to_list(Config, Archive) -> start_http_server(Config) -> PrivDir = ?config(priv_dir, Config), {ok, Hostname} = inet:gethostname(), - inets:start(), + _ = inets:start(), Options = [{port, 0}, {server_name, Hostname}, {server_root, PrivDir}, @@ -1021,7 +1021,8 @@ do_setup_ct_logs_proxies(Nodes) -> [begin user_io_proxy(Node), ct_logs_proxy(Node) - end || Node <- Nodes]. + end || Node <- Nodes], + ok. user_io_proxy(Node) -> ok = setup_proxy(Node, user). 
diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl index 6424df081608..20b833194624 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl @@ -107,7 +107,7 @@ uri_base_from(Config, Node) -> uri_base_from(Config, Node, Base) -> Port = mgmt_port(Config, Node), Prefix = get_uri_prefix(Config), - Uri = rabbit_mgmt_format:print("http://localhost:~w~ts/~ts", [Port, Prefix, Base]), + Uri = list_to_binary(lists:flatten(io_lib:format("http://localhost:~w~ts/~ts", [Port, Prefix, Base]))), binary_to_list(Uri). get_uri_prefix(Config) -> @@ -250,10 +250,13 @@ assert_code(CodeExp, CodeAct, Type, Path, Body) -> end. decode(?OK, _Headers, ResBody) -> - JSON = rabbit_data_coercion:to_binary(ResBody), - atomize_map_keys(rabbit_json:decode(JSON)); + decode_body(ResBody); decode(_, Headers, _ResBody) -> Headers. +decode_body(ResBody) -> + JSON = rabbit_data_coercion:to_binary(ResBody), + atomize_map_keys(rabbit_json:decode(JSON)). + atomize_map_keys(L) when is_list(L) -> [atomize_map_keys(I) || I <- L]; atomize_map_keys(M) when is_map(M) -> @@ -265,7 +268,10 @@ atomize_map_keys(I) -> %% @todo There wasn't a specific order before; now there is; maybe we shouldn't have one? 
assert_list(Exp, Act) -> - case length(Exp) == length(Act) of + %% allow actual map to include keys we do not assert on + %% but not the other way around: we may want to only assert on a subset + %% of keys + case length(Act) >= length(Exp) of true -> ok; false -> error({expected, Exp, actual, Act}) end, diff --git a/deps/rabbitmq_ct_helpers/tools/tls-certs/Makefile b/deps/rabbitmq_ct_helpers/tools/tls-certs/Makefile index 6f6fec5fd680..5071bedb62da 100644 --- a/deps/rabbitmq_ct_helpers/tools/tls-certs/Makefile +++ b/deps/rabbitmq_ct_helpers/tools/tls-certs/Makefile @@ -52,11 +52,11 @@ $(DIR)/testca/cacert.pem: $(DIR)/%/cert.pem: $(DIR)/testca/cacert.pem $(gen_verbose) mkdir -p $(DIR)/$(TARGET) $(verbose) { ( cd $(DIR)/$(TARGET) && \ + sed -e 's/@HOSTNAME@/$(HOSTNAME)/g' $(CURDIR)/openssl.cnf.in > $(CURDIR)/openssl.cnf && \ openssl genrsa -out key.pem 2048 && \ - openssl req -new -key key.pem -out req.pem -outform PEM \ + openssl req -config $(CURDIR)/openssl.cnf -new -key key.pem -out req.pem -outform PEM \ -subj /C=UK/ST=England/CN=$(HOSTNAME)/O=$(TARGET)/L=$$$$/ -nodes && \ cd ../testca && \ - sed -e 's/@HOSTNAME@/$(HOSTNAME)/g' $(CURDIR)/openssl.cnf.in > $(CURDIR)/openssl.cnf && \ openssl ca -config $(CURDIR)/openssl.cnf -in ../$(TARGET)/req.pem -out \ ../$(TARGET)/cert.pem -notext -batch -extensions \ $(TARGET)_ca_extensions && \ diff --git a/deps/rabbitmq_ct_helpers/tools/tls-certs/openssl.cnf.in b/deps/rabbitmq_ct_helpers/tools/tls-certs/openssl.cnf.in index b4b4019b2e81..dba9bf7446cb 100644 --- a/deps/rabbitmq_ct_helpers/tools/tls-certs/openssl.cnf.in +++ b/deps/rabbitmq_ct_helpers/tools/tls-certs/openssl.cnf.in @@ -49,6 +49,7 @@ keyUsage = keyCertSign, cRLSign [ client_ca_extensions ] basicConstraints = CA:false keyUsage = digitalSignature,keyEncipherment +subjectAltName = @client_alt_names [ server_ca_extensions ] basicConstraints = CA:false @@ -59,3 +60,6 @@ subjectAltName = @server_alt_names [ server_alt_names ] DNS.1 = @HOSTNAME@ DNS.2 = localhost + +[ 
client_alt_names ] +DNS.1 = rabbit_client_id diff --git a/deps/rabbitmq_event_exchange/test/system_SUITE.erl b/deps/rabbitmq_event_exchange/test/system_SUITE.erl index 3cd01a79e852..76d9199a586c 100644 --- a/deps/rabbitmq_event_exchange/test/system_SUITE.erl +++ b/deps/rabbitmq_event_exchange/test/system_SUITE.erl @@ -21,6 +21,7 @@ all() -> authentication, audit_queue, audit_exchange, + audit_exchange_internal_parameter, audit_binding, audit_vhost, audit_vhost_deletion, @@ -28,7 +29,6 @@ all() -> audit_connection, audit_direct_connection, audit_consumer, - audit_vhost_internal_parameter, audit_parameter, audit_policy, audit_vhost_limit, @@ -272,13 +272,19 @@ audit_consumer(Config) -> rabbit_ct_client_helpers:close_channel(Ch), ok. -audit_vhost_internal_parameter(Config) -> +audit_exchange_internal_parameter(Config) -> Ch = declare_event_queue(Config, <<"parameter.*">>), - User = <<"Bugs Bunny">>, - Vhost = <<"test-vhost">>, - rabbit_ct_broker_helpers:add_vhost(Config, 0, Vhost, User), - rabbit_ct_broker_helpers:delete_vhost(Config, 0, Vhost, User), + X = <<"exchange.audited-for-parameters">>, + #'exchange.declare_ok'{} = + amqp_channel:call(Ch, #'exchange.declare'{exchange = X, + type = <<"topic">>}), + #'exchange.delete_ok'{} = + amqp_channel:call(Ch, #'exchange.delete'{exchange = X}), + + User = proplists:get_value(rmq_username, Config), + %% Exchange deletion sets and clears a runtime parameter which acts as a + %% kind of lock: receive_user_in_event(<<"parameter.set">>, User), receive_user_in_event(<<"parameter.cleared">>, User), diff --git a/deps/rabbitmq_federation_prometheus/BUILD.bazel b/deps/rabbitmq_federation_prometheus/BUILD.bazel new file mode 100644 index 000000000000..b6a8c641f149 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/BUILD.bazel @@ -0,0 +1,117 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", 
"rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") +load( + "//:rabbitmq.bzl", + "BROKER_VERSION_REQUIREMENTS_ANY", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "rabbitmq_federation_prometheus" + +APP_DESCRIPTION = "Prometheus extension for the Federation plugin" + +APP_ENV = """[ +]""" + +all_srcs(name = "all_srcs") + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +test_suite_beam_files(name = "test_suite_beam_files") + +# gazelle:erlang_app_extra_app crypto + +# gazelle:erlang_app_dep rabbit +# gazelle:erlang_app_dep rabbitmq_prometheus + +# gazelle:erlang_app_dep_exclude prometheus + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_env = APP_ENV, + app_extra_keys = BROKER_VERSION_REQUIREMENTS_ANY, + app_module = "rabbit_federation_prometheus_app", + app_name = APP_NAME, + beam_files = [":beam_files"], + extra_apps = [ + "crypto", + ], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/rabbit:erlang_app", + "//deps/rabbitmq_federation:erlang_app", + "//deps/rabbitmq_prometheus:erlang_app", + ], +) + +xref( + name = "xref", + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + ignore_warnings = True, + libs = ["@rules_elixir//elixir"], # keep + plt = "//:base_plt", +) + +dialyze( + name = "dialyze", + dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +eunit( + name = "eunit", + target = ":test_erlang_app", +) + +rabbitmq_home( + name = "broker-for-tests-home", + plugins = [ + "//deps/rabbit:erlang_app", + ":erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + home = ":broker-for-tests-home", +) + 
+rabbitmq_integration_suite( + name = "prometheus_rabbitmq_federation_collector_SUITE", + size = "small", + additional_beam = [ + ], +) + +assert_suites() + +alias( + name = "rabbitmq_federation_prometheus", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) diff --git a/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md b/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md new file mode 120000 index 000000000000..a3613c99f0b0 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +../../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md b/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md new file mode 120000 index 000000000000..f939e75f21a8 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/CONTRIBUTING.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/LICENSE b/deps/rabbitmq_federation_prometheus/LICENSE new file mode 100644 index 000000000000..46e08bb41d0b --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/LICENSE @@ -0,0 +1 @@ +This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. \ No newline at end of file diff --git a/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ new file mode 100644 index 000000000000..14e2f777f6c3 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/LICENSE-MPL-RabbitMQ @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. 
"Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/deps/rabbitmq_federation_prometheus/Makefile b/deps/rabbitmq_federation_prometheus/Makefile new file mode 100644 index 000000000000..3d069be8ed41 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/Makefile @@ -0,0 +1,16 @@ +PROJECT = rabbitmq_federation_prometheus +PROJECT_DESCRIPTION = Exposes rabbitmq_federation metrics to Prometheus +PROJECT_MOD = rabbit_federation_prometheus_app + +define PROJECT_APP_EXTRA_KEYS + {broker_version_requirements, []} +endef + +DEPS = rabbit_common rabbit rabbitmq_federation rabbitmq_prometheus +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters + +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk + +include ../../rabbitmq-components.mk +include ../../erlang.mk diff --git a/deps/rabbitmq_federation_prometheus/README.md b/deps/rabbitmq_federation_prometheus/README.md new file mode 100644 index 000000000000..2651c440499b --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/README.md @@ -0,0 +1,16 @@ +# RabbitMQ Federation Prometheus + +This plugin adds Federation metrics to prometheus + +## Installation + +This plugin ships with RabbitMQ. Like all other plugins, it must be enabled +before it can be used: + +```bash +[sudo] rabbitmq-plugins enable rabbitmq_federation_prometheus +``` + +## License + +See [LICENSE](./LICENSE). 
diff --git a/deps/rabbitmq_federation_prometheus/app.bzl b/deps/rabbitmq_federation_prometheus/app.bzl new file mode 100644 index 000000000000..405196d21119 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/app.bzl @@ -0,0 +1,89 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_federation_prometheus", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + + filegroup( + name = "priv", + ) + + filegroup( + name = "srcs", + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + ) + filegroup( + name = "private_hdrs", + ) + filegroup( + name = "public_hdrs", + ) + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = [ + "src/rabbit_federation_prometheus_app.erl", + "src/rabbit_federation_prometheus_collector.erl", + "src/rabbit_federation_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_federation_prometheus", + dest = "test", + erlc_opts = "//:test_erlc_opts", + 
deps = ["@prometheus//:erlang_app"], + ) + +def test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "prometheus_rabbitmq_federation_collector_SUITE_beam_files", + testonly = True, + srcs = ["test/prometheus_rabbitmq_federation_collector_SUITE.erl"], + outs = ["test/prometheus_rabbitmq_federation_collector_SUITE.beam"], + app_name = "rabbitmq_federation_prometheus", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "@prometheus//:erlang_app"], + ) diff --git a/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl new file mode 100644 index 000000000000..fda59b4620e8 --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_app.erl @@ -0,0 +1,27 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_federation_prometheus_app). + +-behavior(application). + +-export([start/0, stop/0, start/2, stop/1]). + +start(normal, []) -> + {ok, _} = application:ensure_all_started(prometheus), + _ = rabbit_federation_prometheus_collector:start(), + rabbit_federation_prometheus_sup:start_link(). + +stop(_State) -> + _ = rabbit_federation_prometheus_collector:stop(), + ok. + + +start() -> + _ = rabbit_federation_prometheus_collector:start(). + +stop() -> ok. 
+ diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_federation_collector.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl similarity index 73% rename from deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_federation_collector.erl rename to deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl index c00209177d38..12db4594ddac 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_federation_collector.erl +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_collector.erl @@ -2,22 +2,28 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(prometheus_rabbitmq_federation_collector). +-module(rabbit_federation_prometheus_collector). + +-behaviour(prometheus_collector). + +-export([start/0, stop/0]). -export([deregister_cleanup/1, collect_mf/2]). -import(prometheus_model_helpers, [create_mf/4]). --behaviour(prometheus_collector). +%%==================================================================== +%% Collector API +%%==================================================================== --define(METRICS, [{rabbitmq_federation_links, gauge, - "Current number of federation links."}, - ]). +start() -> + {ok, _} = application:ensure_all_started(prometheus), + prometheus_registry:register_collector(?MODULE). -%% API exports --export([]). +stop() -> + prometheus_registry:deregister_collector(?MODULE). 
%%==================================================================== %% Collector API @@ -32,7 +38,7 @@ collect_mf(_Registry, Callback) -> %% update with will take Init and put into Acc, wuthout calling fun maps:update_with(proplists:get_value(status, S), fun(C) -> C + 1 end, 1, Acc) end, #{}, Status), - Metrics = [{rabbitmq_federation_links, gauge, "Current number of federation links.", + Metrics = [{rabbitmq_federation_links, gauge, "Number of federation links", [{[{status, S}], C} || {S, C} <- maps:to_list(StatusGroups)]}], _ = [add_metric_family(Metric, Callback) || Metric <- Metrics], ok. diff --git a/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl new file mode 100644 index 000000000000..e9106c29b31f --- /dev/null +++ b/deps/rabbitmq_federation_prometheus/src/rabbit_federation_prometheus_sup.erl @@ -0,0 +1,20 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_federation_prometheus_sup). + +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link(?MODULE, []). + +init(_Args) -> + SupFlags = #{strategy => one_for_one, intensity => 1, period => 5}, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. 
diff --git a/deps/rabbitmq_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl b/deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl similarity index 55% rename from deps/rabbitmq_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl rename to deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl index e379d1a47b87..5a15a0ffb4d9 100644 --- a/deps/rabbitmq_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl +++ b/deps/rabbitmq_federation_prometheus/test/prometheus_rabbitmq_federation_collector_SUITE.erl @@ -14,21 +14,21 @@ -compile(export_all). -define(ONE_RUNNING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, - help = "Current number of federation links.", + help = "Number of federation links", type = 'GAUGE', metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, value = <<"running">>}], gauge = #'Gauge'{value = 1}}]}). -define(TWO_RUNNING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, - help = "Current number of federation links.", + help = "Number of federation links", type = 'GAUGE', metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, value = <<"running">>}], gauge = #'Gauge'{value = 2}}]}). -define(ONE_RUNNING_ONE_STARTING_METRIC, #'MetricFamily'{name = <<"rabbitmq_federation_links">>, - help = "Current number of federation links.", + help = "Number of federation links", type = 'GAUGE', metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, value = <<"running">>}], @@ -37,12 +37,6 @@ value = <<"starting">>}], gauge = #'Gauge'{value = 1}}]}). --import(rabbit_federation_test_util, - [expect/3, expect_empty/2, - set_upstream/4, clear_upstream/3, set_upstream_set/4, - set_policy/5, clear_policy/3, - set_policy_upstream/5, set_policy_upstreams/4, - no_plugins/1, with_ch/3, q/2, maybe_declare_queue/3, delete_all/2]). 
all() -> [ @@ -71,7 +65,7 @@ init_per_suite(Config) -> rabbit_ct_helpers:run_setup_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps() ++ - [fun rabbit_federation_test_util:setup_federation/1]). + [fun setup_federation/1]). end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config, rabbit_ct_client_helpers:teardown_steps() ++ @@ -120,7 +114,7 @@ single_link_then_second_added(Config) -> get_metrics(Config)), 500, 5) - + end, delete_all(Ch, [q(<<"fed.downstream2">>, [{<<"x-queue-type">>, longstr, <<"classic">>}])]) @@ -147,5 +141,163 @@ upstream_downstream() -> get_metrics(Config) -> rabbit_ct_broker_helpers:rpc(Config, 0, - rabbitmq_prometheus_collector_test_proxy, collect_mf, - [default, prometheus_rabbitmq_federation_collector]). + ?MODULE, collect_mf, + [default, rabbit_federation_prometheus_collector]). + + + + +setup_federation(Config) -> + setup_federation_with_upstream_params(Config, []). + +setup_federation_with_upstream_params(Config, ExtraParams) -> + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream">>, <<"localhost">>, [ + {<<"uri">>, rabbit_ct_broker_helpers:node_uri(Config, 0)}, + {<<"consumer-tag">>, <<"fed.tag">>} + ] ++ ExtraParams), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream">>, <<"local5673">>, [ + {<<"uri">>, <<"amqp://localhost:1">>} + ] ++ ExtraParams), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream">>}, + {<<"queue">>, <<"upstream">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream2">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream2">>}, + {<<"queue">>, <<"upstream2">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"localhost">>, [ + [{<<"upstream">>, 
<<"localhost">>}] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream12">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream">>}, + {<<"queue">>, <<"upstream">>} + ], [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"upstream2">>}, + {<<"queue">>, <<"upstream2">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"one">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"one">>}, + {<<"queue">>, <<"one">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"two">>, [ + [ + {<<"upstream">>, <<"localhost">>}, + {<<"exchange">>, <<"two">>}, + {<<"queue">>, <<"two">>} + ] + ]), + + rabbit_ct_broker_helpers:set_parameter(Config, 0, + <<"federation-upstream-set">>, <<"upstream5673">>, [ + [ + {<<"upstream">>, <<"local5673">>}, + {<<"exchange">>, <<"upstream">>} + ] + ]), + + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed">>, <<"^fed\.">>, [{<<"federation-upstream-set">>, <<"upstream">>}], + 0, <<"all">>, <<"acting-user">>]), + + rabbit_ct_broker_helpers:rpc( + Config, 0, rabbit_policy, set, + [<<"/">>, <<"fed12">>, <<"^fed12\.">>, [{<<"federation-upstream-set">>, <<"upstream12">>}], + 2, <<"all">>, <<"acting-user">>]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"one">>, <<"^two$">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"one">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"two">>, <<"^one$">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"two">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"hare">>, <<"^hare\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"upstream5673">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"all">>, <<"^all\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"all">>}]), + + rabbit_ct_broker_helpers:set_policy(Config, 0, + <<"new">>, 
<<"^new\.">>, <<"all">>, [ + {<<"federation-upstream-set">>, <<"new-set">>}]), + Config. + +with_ch(Config, Fun, Methods) -> + Ch = rabbit_ct_client_helpers:open_channel(Config), + declare_all(Config, Ch, Methods), + %% Clean up queues even after test failure. + try + Fun(Ch) + after + delete_all(Ch, Methods), + rabbit_ct_client_helpers:close_channel(Ch) + end, + ok. + +declare_all(Config, Ch, Methods) -> [maybe_declare_queue(Config, Ch, Op) || Op <- Methods]. +delete_all(Ch, Methods) -> + [delete_queue(Ch, Q) || #'queue.declare'{queue = Q} <- Methods]. + +maybe_declare_queue(Config, Ch, Method) -> + OneOffCh = rabbit_ct_client_helpers:open_channel(Config), + try + amqp_channel:call(OneOffCh, Method#'queue.declare'{passive = true}) + catch exit:{{shutdown, {server_initiated_close, ?NOT_FOUND, _Message}}, _} -> + amqp_channel:call(Ch, Method) + after + catch rabbit_ct_client_helpers:close_channel(OneOffCh) + end. + +delete_queue(Ch, Q) -> + amqp_channel:call(Ch, #'queue.delete'{queue = Q}). + +q(Name) -> + q(Name, []). + +q(Name, undefined) -> + q(Name, []); +q(Name, Args) -> + #'queue.declare'{queue = Name, + durable = true, + arguments = Args}. + +-define(PD_KEY, metric_families). +collect_mf(Registry, Collector) -> + put(?PD_KEY, []), + Collector:collect_mf(Registry, fun(MF) -> put(?PD_KEY, [MF | get(?PD_KEY)]) end), + MFs = lists:reverse(get(?PD_KEY)), + erase(?PD_KEY), + MFs. diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl index 999003be7285..36d922d76347 100644 --- a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl +++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange.erl @@ -20,8 +20,8 @@ ]). -export([ - khepri_jms_topic_exchange_path/0, - khepri_jms_topic_exchange_path/1 + khepri_jms_topic_exchange_path/1, + khepri_jms_topic_exchange_path/2 ]). 
-rabbit_mnesia_tables_to_khepri_db( @@ -233,7 +233,8 @@ remove_items(Dict, [Key | Keys]) -> remove_items(dict:erase(Key, Dict), Keys). %% ------------------------------------------------------------------- khepri_jms_topic_exchange_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, jms_topic_exchange, VHost, Name]. + khepri_jms_topic_exchange_path(VHost, Name). -khepri_jms_topic_exchange_path() -> - [?MODULE, jms_topic_exchange]. +khepri_jms_topic_exchange_path(VHost, Name) -> + ExchangePath = rabbit_db_exchange:khepri_exchange_path(VHost, Name), + ExchangePath ++ [jms_topic]. diff --git a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl index 13b28f791951..39834199d357 100644 --- a/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl +++ b/deps/rabbitmq_jms_topic_exchange/src/rabbit_db_jms_exchange_m2k_converter.erl @@ -10,6 +10,7 @@ -behaviour(mnesia_to_khepri_converter). -include_lib("kernel/include/logger.hrl"). +-include_lib("khepri/include/khepri.hrl"). -include_lib("khepri_mnesia_migration/src/kmm_logging.hrl"). -include_lib("rabbit_common/include/rabbit.hrl"). @@ -91,7 +92,8 @@ delete_from_khepri(?JMS_TOPIC_TABLE = Table, Key, State) -> end, State). 
clear_data_in_khepri(?JMS_TOPIC_TABLE) -> - Path = rabbit_db_jms_exchange:khepri_jms_topic_exchange_path(), + Path = rabbit_db_jms_exchange:khepri_jms_topic_exchange_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), case rabbit_khepri:delete(Path) of ok -> ok; diff --git a/deps/rabbitmq_management/app.bzl b/deps/rabbitmq_management/app.bzl index 1f8429e9f7e4..7fd01cd065c8 100644 --- a/deps/rabbitmq_management/app.bzl +++ b/deps/rabbitmq_management/app.bzl @@ -98,6 +98,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl", + "src/rabbit_mgmt_wm_quorum_queue_status.erl", "src/rabbit_mgmt_wm_rebalance_queues.erl", "src/rabbit_mgmt_wm_redirect.erl", "src/rabbit_mgmt_wm_reset.erl", @@ -230,6 +231,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl", + "src/rabbit_mgmt_wm_quorum_queue_status.erl", "src/rabbit_mgmt_wm_rebalance_queues.erl", "src/rabbit_mgmt_wm_redirect.erl", "src/rabbit_mgmt_wm_reset.erl", @@ -453,6 +455,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_mgmt_wm_quorum_queue_replicas_delete_member.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_grow.erl", "src/rabbit_mgmt_wm_quorum_queue_replicas_shrink.erl", + "src/rabbit_mgmt_wm_quorum_queue_status.erl", "src/rabbit_mgmt_wm_rebalance_queues.erl", "src/rabbit_mgmt_wm_redirect.erl", "src/rabbit_mgmt_wm_reset.erl", diff --git a/deps/rabbitmq_management/bin/rabbitmqadmin b/deps/rabbitmq_management/bin/rabbitmqadmin index a5977ed36e94..d00f5377ff17 100755 --- a/deps/rabbitmq_management/bin/rabbitmqadmin +++ b/deps/rabbitmq_management/bin/rabbitmqadmin @@ -134,7 +134,7 @@ DECLARABLE = { 'optional': {'destination_type': 'queue', 'routing_key': '', 'arguments': {}}}, 
'vhost': {'mandatory': ['name'], - 'optional': {'tracing': None}}, + 'optional': {'tracing': None, 'default_queue_type': None}}, 'user': {'mandatory': ['name', ['password', 'password_hash'], 'tags'], 'optional': {'hashing_algorithm': None}}, 'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'], diff --git a/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema index 1ee80516e200..83c32b3022ac 100644 --- a/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema +++ b/deps/rabbitmq_management/priv/schema/rabbitmq_management.schema @@ -87,7 +87,7 @@ end}. {mapping, "management.ssl.cacertfile", "rabbitmq_management.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "management.ssl.password", "rabbitmq_management.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "management.ssl.verify", "rabbitmq_management.ssl_config.verify", [ {datatype, {enum, [verify_peer, verify_none]}}]}. @@ -295,7 +295,7 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity", [{datatype, string}]}. 
diff --git a/deps/rabbitmq_management/priv/www/js/charts.js b/deps/rabbitmq_management/priv/www/js/charts.js index 97c66c40a745..fc11f20a1878 100644 --- a/deps/rabbitmq_management/priv/www/js/charts.js +++ b/deps/rabbitmq_management/priv/www/js/charts.js @@ -15,9 +15,7 @@ function message_rates(id, stats) { ['Get (auto ack)', 'get_no_ack'], ['Get (empty)', 'get_empty'], ['Unroutable (return)', 'return_unroutable'], - ['Unroutable (drop)', 'drop_unroutable'], - ['Disk read', 'disk_reads'], - ['Disk write', 'disk_writes']]; + ['Unroutable (drop)', 'drop_unroutable']]; return rates_chart_or_text(id, stats, items, fmt_rate, fmt_rate_axis, true, 'Message rates', 'message-rates'); } diff --git a/deps/rabbitmq_management/priv/www/js/global.js b/deps/rabbitmq_management/priv/www/js/global.js index 2b92175742b1..295e36454ff2 100644 --- a/deps/rabbitmq_management/priv/www/js/global.js +++ b/deps/rabbitmq_management/priv/www/js/global.js @@ -108,7 +108,8 @@ var ALL_COLUMNS = ['rate-redeliver', 'redelivered', false], ['rate-ack', 'ack', true]]}, 'connections': - {'Overview': [['user', 'User name', true], + {'Overview': [['container_id', 'Container ID', true], + ['user', 'User name', true], ['state', 'State', true]], 'Details': [['ssl', 'TLS', true], ['ssl_info', 'TLS details', false], @@ -179,7 +180,7 @@ const QUEUE_EXTRA_CONTENT_REQUESTS = []; // All help ? popups var HELP = { 'delivery-limit': - 'The number of allowed unsuccessful delivery attempts. Once a message has been delivered unsuccessfully more than this many times it will be dropped or dead-lettered, depending on the queue configuration.', + 'The number of allowed unsuccessful delivery attempts. Once a message has been delivered unsuccessfully more than this many times it will be dropped or dead-lettered, depending on the queue configuration. The default is always 20. 
A value of -1 or lower sets the limit to "unlimited".', 'exchange-auto-delete': 'If yes, the exchange will delete itself after at least one queue or exchange has been bound to this one, and then all queues or exchanges have been unbound.', @@ -232,9 +233,6 @@ var HELP = { 'queue-max-age': 'Sets the data retention for stream queues in time units
    (Y=Years, M=Months, D=Days, h=hours, m=minutes, s=seconds).
    E.g. "1h" configures the stream to only keep the last 1 hour of received messages.

    (Sets the x-max-age argument.)', - 'queue-version': - 'Set the queue version. Defaults to version 1.
    Version 1 has a journal-based index that embeds small messages.
    Version 2 has a different index which improves memory usage and performance in many scenarios, as well as a per-queue store for messages that were previously embedded.
    (Sets the "x-queue-version" argument.)', - 'queue-overflow': 'Sets the
    queue overflow behaviour. This determines what happens to messages when the maximum length of a queue is reached. Valid values are drop-head, reject-publish or reject-publish-dlx. The quorum queue type only supports drop-head and reject-publish.', @@ -253,9 +251,15 @@ var HELP = { 'queue-messages': '

    Message counts.

    Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.

    ', + 'queue-messages-stream': + '

    Approximate message counts.

    Note that streams store some entries that are not user messages such as offset tracking data which is included in this count. Thus this value will never be completely correct.

    ', + 'queue-dead-lettered': 'Applies to messages dead-lettered with dead-letter-strategy at-least-once.', + 'queue-delivery-limit': + 'The number of times a message can be returned to this queue before it is dead-lettered (if configured) or dropped.', + 'queue-message-body-bytes': '

    The sum total of the sizes of the message bodies in this queue. This only counts message bodies; it does not include message properties (including headers) or metadata used by the queue.

    Note that "in memory" and "persistent" are not mutually exclusive; persistent messages can be in memory as well as on disc, and transient messages can be paged out if memory is tight. Non-durable queues will consider all messages to be transient.

    If a message is routed to multiple queues on publication, its body will be stored only once (in memory and on disk) and shared between queues. The value shown here does not take account of this effect.

    ', @@ -442,10 +446,6 @@ var HELP = {
    Rate at which empty queues are hit in response to basic.get.
    \
    Return
    \
    Rate at which basic.return is sent to publishers for unroutable messages published with the \'mandatory\' flag set.
    \ -
    Disk read
    \ -
    Rate at which queues read messages from disk.
    \ -
    Disk write
    \ -
    Rate at which queues write messages to disk.
    \ \

    \ Note that the last two items originate in queues rather than \ @@ -586,7 +586,10 @@ var HELP = {

    Rate at which queues are created. Declaring a queue that already exists counts for a "Declared" event, but not for a "Created" event.
    \
    Deleted
    \
    Rate at which queues are deleted.
    \ - ' + ', + + 'container-id': + 'Name of the client application as sent from client to RabbitMQ in the "container-id" field of the AMQP 1.0 open frame.' }; diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs index f834b02fb5e0..07ee18ae5043 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/connection.ejs @@ -17,11 +17,20 @@ <% if (connection.client_properties.connection_name) { %> - Client-provided name + Client-provided connection name <%= fmt_string(connection.client_properties.connection_name) %> <% } %> +<% if (connection.container_id) { %> + + Container ID + + + <%= fmt_string(connection.container_id) %> + +<% } %> + Username <%= fmt_string(connection.user) %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs index 464894d20876..470aa3577fbe 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/connections.ejs @@ -26,6 +26,9 @@ <% if (nodes_interesting) { %> <%= fmt_sort('Node', 'node') %> <% } %> +<% if (show_column('connections', 'container_id')) { %> + Container ID +<% } %> <% if (show_column('connections', 'user')) { %> <%= fmt_sort('User name', 'user') %> <% } %> @@ -84,7 +87,9 @@ <% if(connection.client_properties) { %> <%= link_conn(connection.name) %> - <%= fmt_string(short_conn(connection.client_properties.connection_name)) %> + <% if (connection.client_properties.connection_name) { %> + <%= fmt_string(short_conn(connection.client_properties.connection_name)) %> + <% } %> <% } else { %> <%= link_conn(connection.name) %> @@ -92,6 +97,13 @@ <% if (nodes_interesting) { %> <%= fmt_node(connection.node) %> <% } %> +<% if (show_column('connections', 'container_id')) { %> + + <% if (connection.container_id) { %> + <%= fmt_string(connection.container_id) %> + <% } %> + 
+<% } %> <% if (show_column('connections', 'user')) { %> <%= fmt_string(connection.user) %> <% } %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs index ee704e453806..a2ed48ad4573 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs @@ -13,7 +13,7 @@

    <% } %>
    -

    All Feature Flags

    +

    Feature Flags

    <%= filter_ui(feature_flags) %>
    @@ -30,6 +30,9 @@ <% for (var i = 0; i < feature_flags.length; i++) { var feature_flag = feature_flags[i]; + if (feature_flag.stability == "experimental") { + continue; + } var state_color = "grey"; if (feature_flag.state == "enabled") { state_color = "green"; @@ -76,3 +79,76 @@
    + + + +
    +

    Opt-in Feature Flags

    +
    +<% if (feature_flags.length > 0) { %> +

    +These feature flags opt-in. + +These flags can be enabled in production deployments after an appropriate amount of testing in non-production environments. +

    + + + + + + + + + + <% + for (var i = 0; i < feature_flags.length; i++) { + var feature_flag = feature_flags[i]; + if (feature_flag.stability != "experimental") { + continue; + } + var state_color = "grey"; + if (feature_flag.state == "enabled") { + state_color = "green"; + } else if (feature_flag.state == "disabled") { + state_color = "yellow"; + } else if (feature_flag.state == "unsupported") { + state_color = "red"; + } + %> + > + + + + + <% } %> + +
    <%= fmt_sort('Name', 'name') %><%= fmt_sort('State', 'state') %>Description
    <%= fmt_string(feature_flag.name) %> + <% if (feature_flag.state == "disabled") { %> +
    + +
    +
    +
    + + +
    + + <% } else { %> + + <%= fmt_string(feature_flag.state) %> + + <% } %> +
    +

    <%= fmt_string(feature_flag.desc) %>

    + <% if (feature_flag.doc_url) { %> +

    [Learn more]

    + <% } %> +
    +<% } else { %> +

    ... no feature_flags ...

    +<% } %> +
    +
    + diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs index cf191f97ee10..54ee48189620 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/policies.ejs @@ -111,12 +111,6 @@ Consumer Timeout | Leader locator
    - - Queues [Classic] - - Version | - - Queues [Quorum] @@ -275,7 +269,6 @@ Max length bytes | Message TTL | | - Version
    Length limit overflow behaviour
    diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs index c402ce4875d8..7f2c9e131a55 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queue.ejs @@ -92,23 +92,39 @@ <%= fmt_string(queue.consumer_details.length) %> <% } %> - <% if (!is_stream(queue)) { %> + <% if (is_classic(queue)) { %> Consumer capacity <%= fmt_percent(queue.consumer_capacity) %> <% } %> + <% if(queue.hasOwnProperty('publishers')) { %> + + Publishers + <%= fmt_string(queue.publishers) %> + + <% } %> <% if (is_quorum(queue)) { %> Open files <%= fmt_table_short(queue.open_files) %> + <% if (queue.hasOwnProperty('delivery_limit')) { %> + + Delivery limit + <%= fmt_string(queue.delivery_limit) %> + + <% } %> <% } %> <% if (is_stream(queue)) { %> Readers <%= fmt_table_short(queue.readers) %> + + Segments + <%= fmt_string(queue.segments) %> + <% } %> @@ -116,10 +132,14 @@ Total + <% if (!is_stream(queue)) { %> Ready Unacked + <% } %> <% if (is_quorum(queue)) { %> - In memory ready + High priority + Normal priority + Returned Dead-lettered @@ -133,20 +153,32 @@ Messages + <% if (is_stream(queue)) { %> + + <% } else { %> + <% } %> <%= fmt_num_thousands(queue.messages) %> + <% if (!is_stream(queue)) { %> <%= fmt_num_thousands(queue.messages_ready) %> <%= fmt_num_thousands(queue.messages_unacknowledged) %> + <% } %> <% if (is_quorum(queue)) { %> - <%= fmt_num_thousands(queue.messages_ram) %> + <%= fmt_num_thousands(queue.messages_ready_high) %> + + + <%= fmt_num_thousands(queue.messages_ready_normal) %> + + + <%= fmt_num_thousands(queue.messages_ready_returned) %> <%= fmt_num_thousands(queue.messages_dlx) %> @@ -179,16 +211,22 @@ <%= fmt_bytes(queue.message_bytes_unacknowledged) %> - - <%= fmt_bytes(queue.message_bytes_ram) %> - <% } %> <% if (is_quorum(queue)) { %> + + + + + + <%= fmt_bytes(queue.message_bytes_dlx) %> <% } %> <% if (is_classic(queue)) { %> + + <%= 
fmt_bytes(queue.message_bytes_ram) %> + <%= fmt_bytes(queue.message_bytes_persistent) %> diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs index caba0efe3092..014b1a9a9686 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/queues.ejs @@ -325,7 +325,6 @@ Max length bytes
    <% if (queue_type == "classic") { %> Maximum priority - | Version <% } %> <% if (queue_type == "quorum") { %> Delivery limit diff --git a/deps/rabbitmq_management/selenium/bin/components/rabbitmq b/deps/rabbitmq_management/selenium/bin/components/rabbitmq deleted file mode 100644 index 1d36b6567fe8..000000000000 --- a/deps/rabbitmq_management/selenium/bin/components/rabbitmq +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash - -init_rabbitmq() { - RABBITMQ_CONFIG_DIR=${TEST_CONFIG_DIR} - RABBITMQ_DOCKER_IMAGE=${RABBITMQ_DOCKER_IMAGE:-rabbitmq} - - print "> RABBITMQ_CONFIG_DIR: ${RABBITMQ_CONFIG_DIR}" - print "> RABBITMQ_DOCKER_IMAGE: ${RABBITMQ_DOCKER_IMAGE}" - [[ -z "${OAUTH_SERVER_CONFIG_BASEDIR}" ]] || print "> OAUTH_SERVER_CONFIG_BASEDIR: ${OAUTH_SERVER_CONFIG_BASEDIR}" - [[ -z "${OAUTH_SERVER_CONFIG_DIR}" ]] || print "> OAUTH_SERVER_CONFIG_DIR: ${OAUTH_SERVER_CONFIG_DIR}" - -} -start_rabbitmq() { - if [[ "$PROFILES" == *"docker"* ]]; then - start_docker_rabbitmq - else - start_local_rabbitmq - fi -} - -start_local_rabbitmq() { - begin "Starting rabbitmq ..." - - init_rabbitmq - - RABBITMQ_SERVER_ROOT=$(realpath $TEST_DIR/../../../../) - MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" - MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" - - RABBITMQ_TEST_DIR="${RABBITMQ_CONFIG_DIR}" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_RABBITMQ_CONF - - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_RABBITMQ_CONF" - ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_ADVANCED_CONFIG - RESULT=$? 
- if [ $RESULT -eq 0 ]; then - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_ADVANCED_CONFIG" - gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ - RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ - RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF \ - RABBITMQ_ADVANCED_CONFIG_FILE=/tmp$MOUNT_ADVANCED_CONFIG - else - gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ - RABBITMQ_ENABLED_PLUGINS_FILE=${RABBITMQ_CONFIG_DIR}/enabled_plugins \ - RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF - fi - print "> RABBITMQ_TEST_DIR: ${RABBITMQ_CONFIG_DIR}" - - -} -start_docker_rabbitmq() { - begin "Starting rabbitmq in docker ..." - - init_rabbitmq - kill_container_if_exist rabbitmq - - mkdir -p $CONF_DIR/rabbitmq - MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" - MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" - - RABBITMQ_TEST_DIR="/var/rabbitmq" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/rabbitmq.conf - print "> EFFECTIVE RABBITMQ_CONFIG_FILE: $CONF_DIR/rabbitmq/rabbitmq.conf" - ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /$CONF_DIR/rabbitmq/advanced.config - RESULT=$? 
- if [ $RESULT -eq 0 ]; then - print "> EFFECTIVE ADVANCED_CONFIG_FILE: $CONF_DIR/rabbitmq/advanced.config" - EXTRA_MOUNTS="-v $CONF_DIR/rabbitmq/advanced.config:${MOUNT_ADVANCED_CONFIG}:ro " - fi - if [ -f ${RABBITMQ_CONFIG_DIR}/enabled_plugins ]; then - EXTRA_MOUNTS="$EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/enabled_plugins:/etc/rabbitmq/enabled_plugins " - fi - if [ -d ${RABBITMQ_CONFIG_DIR}/certs ]; then - EXTRA_MOUNTS=" $EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/certs:/var/rabbitmq/certs " - fi - if [ -d ${RABBITMQ_CONFIG_DIR}/imports ]; then - EXTRA_MOUNTS="$EXTRA_MOUNTS -v ${RABBITMQ_CONFIG_DIR}/imports:/var/rabbitmq/imports " - fi - - print "> RABBITMQ_TEST_DIR: /var/rabbitmq" - - docker run \ - --detach \ - --name rabbitmq \ - --net ${DOCKER_NETWORK} \ - -p 5672:5672 \ - -p 5671:5671 \ - -p 15672:15672 \ - -p 15671:15671 \ - -v ${RABBITMQ_CONFIG_DIR}/logging.conf:/etc/rabbitmq/conf.d/logging.conf:ro \ - -v $CONF_DIR/rabbitmq/rabbitmq.conf:${MOUNT_RABBITMQ_CONF}:ro \ - -v ${TEST_DIR}:/config \ - ${EXTRA_MOUNTS} \ - ${RABBITMQ_DOCKER_IMAGE} - - wait_for_message rabbitmq "Server startup complete" - end "RabbitMQ ready" -} diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local b/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local deleted file mode 100644 index 5e033cd289d9..000000000000 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local +++ /dev/null @@ -1 +0,0 @@ -export IMPORT_DIR=deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak deleted file mode 100644 index a1e2d5d596c2..000000000000 --- a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.devkeycloak +++ /dev/null @@ -1,2 +0,0 @@ -export DEVKEYCLOAK_URL=https://localhost:8442/realms/dev -export 
DEVKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak b/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak deleted file mode 100644 index e267b558cd49..000000000000 --- a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local.prodkeycloak +++ /dev/null @@ -1,2 +0,0 @@ -export PRODKEYCLOAK_URL=https://localhost:8443/realms/prod -export PRODKEYCLOAK_CA_CERT=deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local b/deps/rabbitmq_management/selenium/test/oauth/env.local deleted file mode 100644 index d61f528c4e4a..000000000000 --- a/deps/rabbitmq_management/selenium/test/oauth/env.local +++ /dev/null @@ -1 +0,0 @@ -export OAUTH_SERVER_CONFIG_BASEDIR=deps/rabbitmq_management/selenium/test diff --git a/deps/rabbitmq_management/selenium/test/vhosts/Makefile b/deps/rabbitmq_management/selenium/test/vhosts/Makefile deleted file mode 100644 index 84446d1c122d..000000000000 --- a/deps/rabbitmq_management/selenium/test/vhosts/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -.ONESHELL:# single shell invocation for all lines in the recipe -SHELL = bash# we depend on bash expansion for e.g. queue patterns - -.DEFAULT_GOAL = help -RABBITMQ_SERVER_ROOT = ../../../../../ - - -### TARGETS ### - -help: - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -start-rabbitmq: ## Start RabbitMQ - @(docker kill rabbitmq >/dev/null 2>&1 && docker rm rabbitmq) - @(gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ - RABBITMQ_ENABLED_PLUGINS="rabbitmq_management" \ - RABBITMQ_CONFIG_FILE=deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.config) - -test: ## Run tests interactively e.g. 
make test [TEST=landing.js] - @(RABBITMQ_URL=http://localhost:15672 RUN_LOCAL=true SCREENSHOTS_DIR=${PWD}/../../screens npm test $(PWD)/$(TEST)) diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl index ef73bd7cfca8..726a4291cf0f 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_dispatcher.erl @@ -154,6 +154,7 @@ dispatcher() -> {"/queues/quorum/:vhost/:queue/replicas/delete", rabbit_mgmt_wm_quorum_queue_replicas_delete_member, []}, {"/queues/quorum/replicas/on/:node/grow", rabbit_mgmt_wm_quorum_queue_replicas_grow, []}, {"/queues/quorum/replicas/on/:node/shrink", rabbit_mgmt_wm_quorum_queue_replicas_shrink, []}, + {"/queues/quorum/:vhost/:queue/status", rabbit_mgmt_wm_quorum_queue_status, []}, {"/bindings", rabbit_mgmt_wm_bindings, [all]}, {"/bindings/:vhost", rabbit_mgmt_wm_bindings, [all]}, {"/bindings/:vhost/e/:source/:dtype/:destination", rabbit_mgmt_wm_bindings, [source_destination]}, diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl index f9b3e0e81a79..335081c7ad55 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_definitions.erl @@ -107,8 +107,7 @@ vhost_definitions(ReqData, VHost, Context) -> export_binding(B, QNames)], {ok, Vsn} = application:get_key(rabbit, vsn), Parameters = [strip_vhost( - rabbit_mgmt_format:parameter( - rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(P))) + rabbit_mgmt_format:parameter(P)) || P <- rabbit_runtime_parameters:list(VHost)], rabbit_mgmt_util:reply( [{rabbit_version, rabbit_data_coercion:to_binary(Vsn)}] ++ diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl index 8f6230fc55e7..22f7536ef5c6 100644 --- 
a/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_limits.erl @@ -45,13 +45,21 @@ limits(ReqData, Context) -> none -> User = Context#context.user, VisibleVhosts = rabbit_mgmt_util:list_visible_vhosts_names(User), - [ [{vhost, VHost}, {value, Value}] - || {VHost, Value} <- rabbit_vhost_limit:list(), - lists:member(VHost, VisibleVhosts) ]; + [ + #{ + vhost => VHost, + value => rabbit_data_coercion:to_map(Value) + } || {VHost, Value} <- rabbit_vhost_limit:list(), lists:member(VHost, VisibleVhosts) + ]; VHost when is_binary(VHost) -> case rabbit_vhost_limit:list(VHost) of [] -> []; - Value -> [[{vhost, VHost}, {value, Value}]] + Value -> [ + #{ + vhost => VHost, + value => rabbit_data_coercion:to_map(Value) + } + ] end end. %%-------------------------------------------------------------------- diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl index 0cdca8dc072f..a30430261a56 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameter.erl @@ -40,8 +40,7 @@ resource_exists(ReqData, Context) -> end, ReqData, Context}. to_json(ReqData, Context) -> - rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter( - rabbit_mgmt_wm_parameters:fix_shovel_publish_properties(parameter(ReqData))), + rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(parameter(ReqData)), ReqData, Context). accept_content(ReqData0, Context = #context{user = User}) -> diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl index d6eac0ff6553..cf0ddb357470 100644 --- a/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_parameters.erl @@ -9,7 +9,6 @@ -export([init/2, to_json/2, content_types_provided/2, is_authorized/2, resource_exists/2, basic/1]). 
--export([fix_shovel_publish_properties/1]). -export([variances/2]). -include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). @@ -40,25 +39,6 @@ is_authorized(ReqData, Context) -> %%-------------------------------------------------------------------- -%% Hackish fix to make sure we return a JSON object instead of an empty list -%% when the publish-properties value is empty. Should be removed in 3.7.0 -%% when we switch to a new JSON library. -fix_shovel_publish_properties(P) -> - case lists:keyfind(component, 1, P) of - {_, <<"shovel">>} -> - case lists:keytake(value, 1, P) of - {value, {_, Values}, P2} -> - case lists:keytake(<<"publish-properties">>, 1, Values) of - {_, {_, []}, Values2} -> - P2 ++ [{value, Values2 ++ [{<<"publish-properties">>, empty_struct}]}]; - _ -> - P - end; - _ -> P - end; - _ -> P - end. - basic(ReqData) -> Raw = case rabbit_mgmt_util:id(component, ReqData) of none -> rabbit_runtime_parameters:list(); @@ -72,5 +52,5 @@ basic(ReqData) -> end, case Raw of not_found -> not_found; - _ -> [rabbit_mgmt_format:parameter(fix_shovel_publish_properties(P)) || P <- Raw] + _ -> [rabbit_mgmt_format:parameter(P) || P <- Raw] end. diff --git a/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_status.erl b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_status.erl new file mode 100644 index 000000000000..abde4acc417b --- /dev/null +++ b/deps/rabbitmq_management/src/rabbit_mgmt_wm_quorum_queue_status.erl @@ -0,0 +1,66 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +%% An HTTP API counterpart of 'rabbitmq-diagnostics check_if_node_is_quorum_critical' +-module(rabbit_mgmt_wm_quorum_queue_status). 
+ +-export([init/2, to_json/2, content_types_provided/2, is_authorized/2, allowed_methods/2]). +-export([resource_exists/2]). +-export([variances/2]). + +-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). + +%%-------------------------------------------------------------------- + +init(Req, _State) -> + {cowboy_rest, rabbit_mgmt_headers:set_common_permission_headers(Req, ?MODULE), #context{}}. + +variances(Req, Context) -> + {[<<"accept-encoding">>, <<"origin">>], Req, Context}. + +allowed_methods(ReqData, Context) -> + {[<<"GET">>, <<"OPTIONS">>], ReqData, Context}. + +content_types_provided(ReqData, Context) -> + {rabbit_mgmt_util:responder_map(to_json), ReqData, Context}. + +resource_exists(ReqData, Context) -> + {case queue(ReqData) of + not_found -> false; + _ -> true + end, ReqData, Context}. + +to_json(ReqData, Context) -> + case queue(ReqData) of + {error, Reason} -> + failure(Reason, ReqData, Context); + Res -> + rabbit_mgmt_util:reply(Res, ReqData, Context) + end. + +queue(ReqData) -> + case rabbit_mgmt_util:vhost(ReqData) of + not_found -> not_found; + VHost -> queue(VHost, rabbit_mgmt_util:id(queue, ReqData)) + end. + +queue(VHost, QName) -> + Name = rabbit_misc:r(VHost, queue, QName), + case rabbit_amqqueue:lookup(Name) of + {ok, _Q} -> rabbit_quorum_queue:status(VHost, QName); + {error, not_found} -> not_found + end. + + +failure(Reason, ReqData, Context) -> + {Response, ReqData1, Context1} = rabbit_mgmt_util:reply([{status, failed}, + {reason, Reason}], + ReqData, Context), + {stop, cowboy_req:reply(503, #{}, Response, ReqData1), Context1}. + +is_authorized(ReqData, Context) -> + rabbit_mgmt_util:is_authorized(ReqData, Context). 
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl index 7d354bae1e2f..d517cb4810a8 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_SUITE.erl @@ -21,6 +21,7 @@ eventually/1]). -import(rabbit_mgmt_test_util, [assert_list/2, assert_item/2, test_item/2, assert_keys/2, assert_no_keys/2, + decode_body/1, http_get/2, http_get/3, http_get/5, http_get_no_auth/3, http_get_no_decode/5, @@ -198,6 +199,7 @@ all_tests() -> [ user_limit_set_test, config_environment_test, disabled_qq_replica_opers_test, + qq_status_test, list_deprecated_features_test, list_used_deprecated_features_test ]. @@ -1150,21 +1152,24 @@ queues_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, + arguments => #{'x-queue-type' => <<"classic">>} + }, #{name => <<"foo">>, vhost => <<"downvhost">>, state => <<"stopped">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}], DownQueues), + arguments => #{'x-queue-type' => <<"classic">>} + }], DownQueues), assert_item(#{name => <<"foo">>, vhost => <<"downvhost">>, state => <<"stopped">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, DownQueue), + arguments => #{'x-queue-type' => <<"classic">>} + }, DownQueue), http_put(Config, "/queues/badvhost/bar", Good, ?NOT_FOUND), http_put(Config, "/queues/%2F/bar", @@ -1186,21 +1191,21 @@ queues_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}, + arguments => #{'x-queue-type' => <<"classic">>}, storage_version => 2}, #{name => <<"foo">>, vhost => <<"/">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}, + arguments => #{'x-queue-type' => <<"classic">>}, storage_version => 2}], Queues), assert_item(#{name => <<"foo">>, vhost => <<"/">>, durable => true, auto_delete => false, exclusive => false, 
- arguments => #{}, + arguments => #{'x-queue-type' => <<"classic">>}, storage_version => 2}, Queue), http_delete(Config, "/queues/%2F/foo", {group, '2xx'}), @@ -2240,7 +2245,8 @@ exclusive_queue_test(Config) -> durable => false, auto_delete => false, exclusive => true, - arguments => #{}}, Queue), + arguments => #{'x-queue-type' => <<"classic">>} + }, Queue), true end), amqp_channel:close(Ch), @@ -2807,7 +2813,7 @@ columns_test(Config) -> http_delete(Config, Path, [{group, '2xx'}, 404]), http_put(Config, Path, [{arguments, [{<<"x-message-ttl">>, TTL}]}], {group, '2xx'}), - Item = #{arguments => #{'x-message-ttl' => TTL}, name => <<"columns.test">>}, + Item = #{arguments => #{'x-message-ttl' => TTL, 'x-queue-type' => <<"classic">>}, name => <<"columns.test">>}, ?AWAIT( begin @@ -3409,13 +3415,14 @@ vhost_limits_list_test(Config) -> lists:map( fun(#{vhost := VHost, value := Val}) -> Param = [ {atom_to_binary(K, utf8),V} || {K,V} <- maps:to_list(Val) ], + ct:pal("Setting limits of virtual host '~ts' to ~tp", [VHost, Param]), ok = rabbit_ct_broker_helpers:set_parameter(Config, 0, VHost, <<"vhost-limits">>, <<"limits">>, Param) end, Expected), - Expected = http_get(Config, "/vhost-limits", ?OK), - Limits1 = http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK), - Limits2 = http_get(Config, "/vhost-limits/limit_test_vhost_2", ?OK), + ?assertEqual(lists:usort(Expected), lists:usort(http_get(Config, "/vhost-limits", ?OK))), + ?assertEqual(Limits1, http_get(Config, "/vhost-limits/limit_test_vhost_1", ?OK)), + ?assertEqual(Limits2, http_get(Config, "/vhost-limits/limit_test_vhost_2", ?OK)), NoVhostUser = <<"no_vhost_user">>, rabbit_ct_broker_helpers:add_user(Config, NoVhostUser), @@ -3866,6 +3873,28 @@ disabled_qq_replica_opers_test(Config) -> http_delete(Config, "/queues/quorum/replicas/on/" ++ Nodename ++ "/shrink", ?METHOD_NOT_ALLOWED), passed. 
+qq_status_test(Config) -> + QQArgs = [{durable, true}, {arguments, [{'x-queue-type', 'quorum'}]}], + http_get(Config, "/queues/%2f/qq_status", ?NOT_FOUND), + http_put(Config, "/queues/%2f/qq_status", QQArgs, {group, '2xx'}), + [MapRes] = http_get(Config, "/queues/quorum/%2f/qq_status/status", ?OK), + Keys = ['Commit Index','Last Applied','Last Log Index', + 'Last Written','Machine Version','Membership','Node Name', + 'Raft State','Snapshot Index','Term'], + ?assertEqual(lists:sort(Keys), lists:sort(maps:keys(MapRes))), + http_delete(Config, "/queues/%2f/qq_status", {group, '2xx'}), + + + CQArgs = [{durable, true}], + http_get(Config, "/queues/%2F/cq_status", ?NOT_FOUND), + http_put(Config, "/queues/%2F/cq_status", CQArgs, {group, '2xx'}), + ResBody = http_get_no_decode(Config, "/queues/quorum/%2f/cq_status/status", "guest", "guest", 503), + ?assertEqual(#{reason => <<"classic_queue_not_supported">>, + status => <<"failed">>}, decode_body(ResBody)), + http_delete(Config, "/queues/%2f/cq_status", {group, '2xx'}), + passed. + + list_deprecated_features_test(Config) -> Desc = "This is a deprecated feature", DocUrl = "https://rabbitmq.com/", diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl index 71c532ead6f5..d8277d34da72 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_http_health_checks_SUITE.erl @@ -198,8 +198,11 @@ is_quorum_critical_test(Config) -> Body = http_get_failed(Config, "/health/checks/node-is-quorum-critical"), ?assertEqual(<<"failed">>, maps:get(<<"status">>, Body)), ?assertEqual(true, maps:is_key(<<"reason">>, Body)), - [Queue] = maps:get(<<"queues">>, Body), - ?assertEqual(QName, maps:get(<<"name">>, Queue)), + Queues = maps:get(<<"queues">>, Body), + ?assert(lists:any( + fun(Item) -> + QName =:= maps:get(<<"name">>, Item) + end, Queues)), passed. 
diff --git a/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl index 7fe227d8f357..812c4d2e60fe 100644 --- a/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl +++ b/deps/rabbitmq_management/test/rabbit_mgmt_only_http_SUITE.erl @@ -381,21 +381,23 @@ queues_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, + arguments => #{'x-queue-type' => <<"classic">>} + }, #{name => <<"foo">>, vhost => <<"downvhost">>, state => <<"stopped">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}], DownQueues), + arguments => #{'x-queue-type' => <<"classic">>} + }], DownQueues), assert_item(#{name => <<"foo">>, vhost => <<"downvhost">>, state => <<"stopped">>, durable => true, auto_delete => false, exclusive => false, - arguments => #{}}, DownQueue), + arguments => #{'x-queue-type' => <<"classic">>}}, DownQueue), http_put(Config, "/queues/badvhost/bar", Good, ?NOT_FOUND), http_put(Config, "/queues/%2F/bar", @@ -418,7 +420,7 @@ queues_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}, + arguments => #{'x-queue-type' => <<"classic">>}, node => NodeBin}, #{name => <<"foo">>, vhost => <<"/">>, @@ -495,7 +497,7 @@ queues_enable_totals_test(Config) -> durable => true, auto_delete => false, exclusive => false, - arguments => #{}, + arguments => #{'x-queue-type' => <<"classic">>}, node => NodeBin, messages => 1, messages_ready => 1, @@ -882,7 +884,8 @@ exclusive_queue_test(Config) -> durable => false, auto_delete => false, exclusive => true, - arguments => #{}}, Queue), + arguments => #{'x-queue-type' => <<"classic">>} + }, Queue), amqp_channel:close(Ch), close_connection(Conn), passed. 
@@ -1514,7 +1517,7 @@ columns_test(Config) -> http_delete(Config, Path, [{group, '2xx'}, 404]), http_put(Config, Path, [{arguments, [{<<"x-message-ttl">>, TTL}]}], {group, '2xx'}), - Item = #{arguments => #{'x-message-ttl' => TTL}, name => <<"columns.test">>}, + Item = #{arguments => #{'x-message-ttl' => TTL, 'x-queue-type' => <<"classic">>}, name => <<"columns.test">>}, timer:sleep(2000), [Item] = http_get(Config, "/queues?columns=arguments.x-message-ttl,name", ?OK), Item = http_get(Config, "/queues/%2F/columns.test?columns=arguments.x-message-ttl,name", ?OK), diff --git a/deps/rabbitmq_mqtt/BUILD.bazel b/deps/rabbitmq_mqtt/BUILD.bazel index 9d7d3e88e43b..b994ca7e59aa 100644 --- a/deps/rabbitmq_mqtt/BUILD.bazel +++ b/deps/rabbitmq_mqtt/BUILD.bazel @@ -26,10 +26,7 @@ APP_DESCRIPTION = "RabbitMQ MQTT Adapter" APP_MODULE = "rabbit_mqtt" APP_ENV = """[ - {default_user, <<"guest">>}, - {default_pass, <<"guest">>}, {ssl_cert_login,false}, - %% To satisfy an unfortunate expectation from popular MQTT clients. 
{allow_anonymous, true}, {vhost, <<"/">>}, {exchange, <<"amq.topic">>}, @@ -52,7 +49,8 @@ APP_ENV = """[ {mailbox_soft_limit, 200}, {max_packet_size_unauthenticated, 65536}, %% 256 MB is upper limit defined by MQTT spec - {max_packet_size_authenticated, 268435455}, + %% We set 16 MB as defined in deps/rabbit/Makefile max_message_size + {max_packet_size_authenticated, 16777216}, {topic_alias_maximum, 16} ] """ @@ -138,7 +136,7 @@ rabbitmq_integration_suite( "test/rabbit_auth_backend_mqtt_mock.beam", "test/util.beam", ], - shard_count = 14, + shard_count = 18, runtime_deps = [ "@emqtt//:erlang_app", "@meck//:erlang_app", @@ -228,13 +226,13 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "shared_SUITE", + name = "mqtt_shared_SUITE", size = "large", additional_beam = [ ":test_util_beam", ":test_event_recorder_beam", ], - shard_count = 10, + shard_count = 5, runtime_deps = [ "//deps/rabbitmq_management_agent:erlang_app", "@emqtt//:erlang_app", @@ -249,7 +247,7 @@ rabbitmq_integration_suite( additional_beam = [ ":test_util_beam", ], - shard_count = 4, + shard_count = 2, runtime_deps = [ "@emqtt//:erlang_app", "@gun//:erlang_app", diff --git a/deps/rabbitmq_mqtt/Makefile b/deps/rabbitmq_mqtt/Makefile index eb1d6b657356..63427c949327 100644 --- a/deps/rabbitmq_mqtt/Makefile +++ b/deps/rabbitmq_mqtt/Makefile @@ -4,10 +4,7 @@ PROJECT_MOD = rabbit_mqtt define PROJECT_ENV [ - {default_user, <<"guest">>}, - {default_pass, <<"guest">>}, {ssl_cert_login,false}, - %% To satisfy an unfortunate expectation from popular MQTT clients. 
{allow_anonymous, true}, {vhost, <<"/">>}, {exchange, <<"amq.topic">>}, @@ -30,7 +27,8 @@ define PROJECT_ENV {mailbox_soft_limit, 200}, {max_packet_size_unauthenticated, 65536}, %% 256 MB is upper limit defined by MQTT spec - {max_packet_size_authenticated, 268435455}, + %% We set 16 MB as defined in deps/rabbit/Makefile max_message_size + {max_packet_size_authenticated, 16777216}, {topic_alias_maximum, 16} ] endef @@ -45,7 +43,7 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = ssl DEPS = ranch rabbit_common rabbit amqp10_common -TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_web_mqtt amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp_client rabbitmq_stomp rabbitmq_stream +TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp_client rabbitmq_stomp rabbitmq_stream PLT_APPS += rabbitmqctl elixir @@ -61,3 +59,77 @@ include ../../erlang.mk clean:: if test -d test/java_SUITE_data; then cd test/java_SUITE_data && $(MAKE) clean; fi + +# Parallel CT. +# +# @todo Move most of this in common files. 
+ +define ct_master.erl + StartOpts = #{ + host => "localhost", + connection => standard_io, + args => ["-hidden"] + }, + {ok, Pid1, _} = peer:start(StartOpts#{name => "rabbit_shard1"}), + {ok, Pid2, _} = peer:start(StartOpts#{name => "rabbit_shard2"}), + {ok, Pid3, _} = peer:start(StartOpts#{name => "rabbit_shard3"}), + {ok, Pid4, _} = peer:start(StartOpts#{name => "rabbit_shard4"}), + peer:call(Pid1, net_kernel, set_net_ticktime, [5]), + peer:call(Pid2, net_kernel, set_net_ticktime, [5]), + peer:call(Pid3, net_kernel, set_net_ticktime, [5]), + peer:call(Pid4, net_kernel, set_net_ticktime, [5]), + peer:call(Pid1, persistent_term, put, [rabbit_ct_tcp_port_base, 23000]), + peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), + peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), + peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), + ct_master:run("$1"), + peer:stop(Pid4), + peer:stop(Pid3), + peer:stop(Pid2), + peer:stop(Pid1), + halt() +endef + +PARALLEL_CT_SET_1_A = auth retainer +PARALLEL_CT_SET_1_B = cluster command config config_schema mc_mqtt packet_prop \ + processor protocol_interop proxy_protocol rabbit_mqtt_confirms reader util +PARALLEL_CT_SET_1_C = java v5 +PARALLEL_CT_SET_1_D = mqtt_shared + +PARALLEL_CT_SUITES = $(PARALLEL_CT_SET_1_A) $(PARALLEL_CT_SET_1_B) $(PARALLEL_CT_SET_1_C) $(PARALLEL_CT_SET_1_D) + +ifneq ($(filter-out $(PARALLEL_CT_SUITES),$(CT_SUITES)),) +$(error Some test suites in CT_SUITES but not configured for CI: $(filter-out $(PARALLEL_CT_SUITES),$(CT_SUITES))) +endif + +define tpl_parallel_ct_test_spec +{logdir, "$(CT_LOGS_DIR)"}. +{logdir, master, "$(CT_LOGS_DIR)"}. +{create_priv_dir, all_nodes, auto_per_run}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. +{node, shard4, 'rabbit_shard4@localhost'}. + +{define, 'Set1', [$(call comma_list,$(addsuffix _SUITE,$1))]}. 
+{define, 'Set2', [$(call comma_list,$(addsuffix _SUITE,$2))]}. +{define, 'Set3', [$(call comma_list,$(addsuffix _SUITE,$3))]}. +{define, 'Set4', [$(call comma_list,$(addsuffix _SUITE,$4))]}. + +{suites, shard1, "test/", 'Set1'}. +{suites, shard2, "test/", 'Set2'}. +{suites, shard3, "test/", 'Set3'}. +{suites, shard4, "test/", 'Set4'}. +endef + +define parallel_ct_set_target +tpl_parallel_ct_test_spec_set_$1 = $$(call tpl_parallel_ct_test_spec,$(PARALLEL_CT_SET_$(1)_A),$(PARALLEL_CT_SET_$(1)_B),$(PARALLEL_CT_SET_$(1)_C),$(PARALLEL_CT_SET_$(1)_D)) + +parallel-ct-set-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(verbose) $$(call core_render,tpl_parallel_ct_test_spec_set_$(1),ct.set-$(1).spec) + $$(call erlang,$$(call ct_master.erl,ct.set-$(1).spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) +endef + +$(foreach set,1,$(eval $(call parallel_ct_set_target,$(set)))) diff --git a/deps/rabbitmq_mqtt/app.bzl b/deps/rabbitmq_mqtt/app.bzl index 87d17a12e46d..86830f4f9c7a 100644 --- a/deps/rabbitmq_mqtt/app.bzl +++ b/deps/rabbitmq_mqtt/app.bzl @@ -255,15 +255,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "shared_SUITE_beam_files", - testonly = True, - srcs = ["test/shared_SUITE.erl"], - outs = ["test/shared_SUITE.beam"], - app_name = "rabbitmq_mqtt", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], - ) erlang_bytecode( name = "test_event_recorder_beam", testonly = True, @@ -329,3 +320,12 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp10_common:erlang_app", "//deps/amqp_client:erlang_app", "//deps/rabbitmq_stomp:erlang_app"], ) + erlang_bytecode( + name = "mqtt_shared_SUITE_beam_files", + testonly = True, + srcs = ["test/mqtt_shared_SUITE.erl"], + outs = ["test/mqtt_shared_SUITE.beam"], + app_name = "rabbitmq_mqtt", + 
erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_ct_helpers:erlang_app"], + ) diff --git a/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema index cef29eeb4eaf..b69e2b06075c 100644 --- a/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema +++ b/deps/rabbitmq_mqtt/priv/schema/rabbitmq_mqtt.schema @@ -6,35 +6,8 @@ %% ---------------------------------------------------------------------------- % {rabbitmq_mqtt, -% [%% Set the default user name and password. Will be used as the default login -%% if a connecting client provides no other login details. -%% -%% Please note that setting this will allow clients to connect without -%% authenticating! -%% -%% {default_user, <<"guest">>}, -%% {default_pass, <<"guest">>}, - -{mapping, "mqtt.default_user", "rabbitmq_mqtt.default_user", [ - {datatype, string} -]}. - -{mapping, "mqtt.default_pass", "rabbitmq_mqtt.default_pass", [ - {datatype, string} -]}. - -{translation, "rabbitmq_mqtt.default_user", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("mqtt.default_user", Conf)) -end}. - -{translation, "rabbitmq_mqtt.default_pass", -fun(Conf) -> - list_to_binary(cuttlefish:conf_get("mqtt.default_pass", Conf)) -end}. - -%% Enable anonymous access. If this is set to false, clients MUST provide -%% login information in order to connect. See the default_user/default_pass +% [%% Enable anonymous access. If this is set to false, clients MUST provide +%% login information in order to connect. See the anonymous_login_user/anonymous_login_pass %% configuration elements for managing logins without authentication. %% %% {allow_anonymous, true}, @@ -183,6 +156,20 @@ end}. {datatype, {enum, [true, false]}}]}. +{mapping, "mqtt.ssl_cert_client_id_from", "rabbitmq_mqtt.ssl_cert_client_id_from", [ + {datatype, {enum, [distinguished_name, subject_alternative_name]}} +]}. 
+ +{mapping, "mqtt.ssl_cert_login_san_type", "rabbitmq_mqtt.ssl_cert_login_san_type", [ + {datatype, {enum, [dns, ip, email, uri, other_name]}} +]}. + +{mapping, "mqtt.ssl_cert_login_san_index", "rabbitmq_mqtt.ssl_cert_login_san_index", [ + {datatype, integer}, {validators, ["non_negative_integer"]} +]}. + + + %% TCP/Socket options (as per the broker configuration). %% %% {tcp_listen_options, [{backlog, 128}, diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl index c5ea59abedea..694b31687262 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt.erl @@ -87,7 +87,8 @@ init_global_counters(ProtoVer) -> rabbit_global_counters:init([Proto]), rabbit_global_counters:init([Proto, {queue_type, rabbit_classic_queue}]), rabbit_global_counters:init([Proto, {queue_type, rabbit_quorum_queue}]), - rabbit_global_counters:init([Proto, {queue_type, ?QUEUE_TYPE_QOS_0}]). + rabbit_global_counters:init([Proto, {queue_type, ?QUEUE_TYPE_QOS_0}]), + rabbit_msg_size_metrics:init(ProtoVer). persist_static_configuration() -> rabbit_mqtt_util:init_sparkplug(), @@ -112,6 +113,8 @@ persist_static_configuration() -> {ok, MaxSizeAuth} = application:get_env(?APP_NAME, max_packet_size_authenticated), assert_valid_max_packet_size(MaxSizeAuth), + {ok, MaxMsgSize} = application:get_env(rabbit, max_message_size), + ?assert(MaxSizeAuth =< MaxMsgSize), ok = persistent_term:put(?PERSISTENT_TERM_MAX_PACKET_SIZE_AUTHENTICATED, MaxSizeAuth). assert_valid_max_packet_size(Val) -> diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl index eeea5b8a8295..939d82b0d9e8 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_processor.erl @@ -42,6 +42,12 @@ -define(QUEUE_TTL_KEY, <<"x-expires">>). -define(DEFAULT_EXCHANGE_NAME, <<>>). +-ifdef(TEST). +-define(SILENT_CLOSE_DELAY, 10). +-else. +-define(SILENT_CLOSE_DELAY, 3_000). 
+-endif. + -type send_fun() :: fun((iodata()) -> ok). -type session_expiry_interval() :: non_neg_integer() | infinity. -type subscriptions() :: #{topic_filter() => #mqtt_subscription_opts{}}. @@ -176,9 +182,9 @@ process_connect( Result0 = maybe ok ?= check_extended_auth(ConnectProps), - {ok, ClientId} ?= ensure_client_id(ClientId0, CleanStart, ProtoVer), - {ok, {Username1, Password}} ?= check_credentials(Username0, Password0, SslLoginName, PeerIp), - + {ok, ClientId1} ?= extract_client_id_from_certificate(ClientId0, Socket), + {ok, ClientId} ?= ensure_client_id(ClientId1, CleanStart, ProtoVer), + {ok, Username1, Password} ?= check_credentials(Username0, Password0, SslLoginName, PeerIp), {VHostPickedUsing, {VHost, Username2}} = get_vhost(Username1, SslLoginName, Port), ?LOG_DEBUG("MQTT connection ~s picked vhost using ~s", [ConnName0, VHostPickedUsing]), ok ?= check_vhost_exists(VHost, Username2, PeerIp), @@ -189,6 +195,7 @@ process_connect( ok ?= check_user_connection_limit(Username), {ok, AuthzCtx} ?= check_vhost_access(VHost, User, ClientId, PeerIp), ok ?= check_user_loopback(Username, PeerIp), + ok ?= ensure_credential_expiry_timer(User, PeerIp), rabbit_core_metrics:auth_attempt_succeeded(PeerIp, Username, mqtt), ok = register_client_id(VHost, ClientId, CleanStart, WillProps), {ok, WillMsg} ?= make_will_msg(Packet), @@ -384,6 +391,7 @@ process_request(?PUBLISH, {ok, Topic, Props, State1} -> EffectiveQos = maybe_downgrade_qos(Qos), rabbit_global_counters:messages_received(ProtoVer, 1), + rabbit_msg_size_metrics:observe(ProtoVer, iolist_size(Payload)), State = maybe_increment_publisher(State1), Msg = #mqtt_msg{retain = Retain, qos = EffectiveQos, @@ -619,20 +627,40 @@ check_extended_auth(_) -> check_credentials(Username, Password, SslLoginName, PeerIp) -> case creds(Username, Password, SslLoginName) of + {ok, _, _} = Ok -> + Ok; nocreds -> - auth_attempt_failed(PeerIp, <<>>), ?LOG_ERROR("MQTT login failed: no credentials provided"), + auth_attempt_failed(PeerIp, 
<<>>), {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; {invalid_creds, {undefined, Pass}} when is_binary(Pass) -> - auth_attempt_failed(PeerIp, <<>>), ?LOG_ERROR("MQTT login failed: no username is provided"), + auth_attempt_failed(PeerIp, <<>>), {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; {invalid_creds, {User, _Pass}} when is_binary(User) -> - auth_attempt_failed(PeerIp, User), ?LOG_ERROR("MQTT login failed for user '~s': no password provided", [User]), - {error, ?RC_BAD_USER_NAME_OR_PASSWORD}; - {UserBin, PassBin} -> - {ok, {UserBin, PassBin}} + auth_attempt_failed(PeerIp, User), + {error, ?RC_BAD_USER_NAME_OR_PASSWORD} + end. + +%% Extract client_id from the certificate provided it was configured to do so and +%% it is possible to extract it else returns the client_id passed as parameter +-spec extract_client_id_from_certificate(client_id(), rabbit_net:socket()) -> {ok, client_id()} | {error, reason_code()}. +extract_client_id_from_certificate(Client0, Socket) -> + case extract_ssl_cert_client_id_settings() of + none -> {ok, Client0}; + SslClientIdSettings -> + case ssl_client_id(Socket, SslClientIdSettings) of + none -> + {ok, Client0}; + Client0 -> + {ok, Client0}; + Other -> + ?LOG_ERROR( + "MQTT login failed: client_id in the certificate (~tp) does not match the client-provided ID (~p)", + [Other, Client0]), + {error, ?RC_CLIENT_IDENTIFIER_NOT_VALID} + end end. 
-spec ensure_client_id(client_id(), boolean(), protocol_version()) -> @@ -742,7 +770,9 @@ handle_clean_start(_, QoS, State = #state{cfg = #cfg{clean_start = true}}) -> ok -> {ok, SessPresent, State}; {error, access_refused} -> - {error, ?RC_NOT_AUTHORIZED} + {error, ?RC_NOT_AUTHORIZED}; + {error, _Reason} -> + {error, ?RC_IMPLEMENTATION_SPECIFIC_ERROR} end end; handle_clean_start(SessPresent, QoS, @@ -964,7 +994,8 @@ clear_will_msg(#state{cfg = #cfg{vhost = Vhost, QName = #resource{virtual_host = Vhost, kind = queue, name = QNameBin}, case delete_queue(QName, State) of ok -> ok; - {error, access_refused} -> {error, ?RC_NOT_AUTHORIZED} + {error, access_refused} -> {error, ?RC_NOT_AUTHORIZED}; + {error, _Reason} -> {error, ?RC_IMPLEMENTATION_SPECIFIC_ERROR} end. make_will_msg(#mqtt_packet_connect{will_flag = false}) -> @@ -997,8 +1028,8 @@ check_vhost_exists(VHost, Username, PeerIp) -> true -> ok; false -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: virtual host '~s' does not exist", [VHost]), + auth_attempt_failed(PeerIp, Username), {error, ?RC_BAD_USER_NAME_OR_PASSWORD} end. @@ -1022,25 +1053,18 @@ check_vhost_alive(VHost) -> end. check_user_login(VHost, Username, Password, ClientId, PeerIp, ConnName) -> - AuthProps = case Password of - none -> - %% SSL user name provided. - %% Authenticating using username only. 
- []; - _ -> - [{password, Password}, - {vhost, VHost}, - {client_id, ClientId}] - end, + AuthProps = [{vhost, VHost}, + {client_id, ClientId}, + {password, Password}], case rabbit_access_control:check_user_login(Username, AuthProps) of {ok, User = #user{username = Username1}} -> notify_auth_result(user_authentication_success, Username1, ConnName), {ok, User}; {refused, Username, Msg, Args} -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: access refused for user '~s':" ++ Msg, [Username | Args]), notify_auth_result(user_authentication_failure, Username, ConnName), + auth_attempt_failed(PeerIp, Username), {error, ?RC_BAD_USER_NAME_OR_PASSWORD} end. @@ -1069,9 +1093,9 @@ check_vhost_access(VHost, User = #user{username = Username}, ClientId, PeerIp) - ok -> {ok, AuthzCtx} catch exit:#amqp_error{name = not_allowed} -> - auth_attempt_failed(PeerIp, Username), ?LOG_ERROR("MQTT connection failed: access refused for user '~s' to vhost '~s'", [Username, VHost]), + auth_attempt_failed(PeerIp, Username), {error, ?RC_NOT_AUTHORIZED} end. @@ -1080,12 +1104,33 @@ check_user_loopback(Username, PeerIp) -> ok -> ok; not_allowed -> + ?LOG_WARNING("MQTT login failed: user '~s' can only connect via localhost", + [Username]), auth_attempt_failed(PeerIp, Username), - ?LOG_WARNING( - "MQTT login failed: user '~s' can only connect via localhost", [Username]), {error, ?RC_NOT_AUTHORIZED} end. 
+
+ensure_credential_expiry_timer(User = #user{username = Username}, PeerIp) ->
+    case rabbit_access_control:expiry_timestamp(User) of
+        never ->
+            ok;
+        Ts when is_integer(Ts) ->
+            Time = (Ts - os:system_time(second)) * 1000,
+            ?LOG_DEBUG("Credential expires in ~b ms from now "
+                       "(absolute timestamp = ~b seconds since epoch)",
+                       [Time, Ts]),
+            case Time > 0 of
+                true ->
+                    _TimerRef = erlang:send_after(Time, self(), credential_expired),
+                    ok;
+                false ->
+                    ?LOG_WARNING("Credential expired ~b ms ago", [abs(Time)]),
+                    auth_attempt_failed(PeerIp, Username),
+                    {error, ?RC_NOT_AUTHORIZED}
+            end
+    end.
+
 get_vhost(UserBin, none, Port) ->
     get_vhost_no_ssl(UserBin, Port);
 get_vhost(UserBin, SslLogin, Port) ->
@@ -1173,34 +1218,43 @@ get_vhost_from_port_mapping(Port, Mapping) ->
     Res.
 
 creds(User, Pass, SSLLoginName) ->
-    DefaultUser = rabbit_mqtt_util:env(default_user),
-    DefaultPass = rabbit_mqtt_util:env(default_pass),
-    {ok, Anon} = application:get_env(?APP_NAME, allow_anonymous),
-    {ok, TLSAuth} = application:get_env(?APP_NAME, ssl_cert_login),
-    HaveDefaultCreds = Anon =:= true andalso
-                       is_binary(DefaultUser) andalso
-                       is_binary(DefaultPass),
-    CredentialsProvided = User =/= undefined orelse Pass =/= undefined,
-    CorrectCredentials = is_binary(User) andalso is_binary(Pass) andalso Pass =/= <<>>,
+    CredentialsProvided = User =/= undefined orelse Pass =/= undefined,
+    ValidCredentials = is_binary(User) andalso is_binary(Pass) andalso Pass =/= <<>>,
+    {ok, TLSAuth} = application:get_env(?APP_NAME, ssl_cert_login),
     SSLLoginProvided = TLSAuth =:= true andalso SSLLoginName =/= none,
-    case {CredentialsProvided, CorrectCredentials, SSLLoginProvided, HaveDefaultCreds} of
-        %% Username and password take priority
-        {true, true, _, _} -> {User, Pass};
-        %% Either username or password is provided
-        {true, false, _, _} -> {invalid_creds, {User, Pass}};
-        %% rabbitmq_mqtt.ssl_cert_login is true. SSL user name provided.
-        %% Authenticating using username only.
- {false, false, true, _} -> {SSLLoginName, none}; - %% Anonymous connection uses default credentials - {false, false, false, true} -> {DefaultUser, DefaultPass}; - _ -> nocreds + case {CredentialsProvided, ValidCredentials, SSLLoginProvided} of + {true, true, _} -> + %% Username and password take priority + {ok, User, Pass}; + {true, false, _} -> + %% Either username or password is provided + {invalid_creds, {User, Pass}}; + {false, false, true} -> + %% rabbitmq_mqtt.ssl_cert_login is true. SSL user name provided. + %% Authenticating using username only. + {ok, SSLLoginName, none}; + {false, false, false} -> + {ok, AllowAnon} = application:get_env(?APP_NAME, allow_anonymous), + case AllowAnon of + true -> + case rabbit_auth_mechanism_anonymous:credentials() of + {ok, _, _} = Ok -> + Ok; + error -> + nocreds + end; + false -> + nocreds + end; + _ -> + nocreds end. -spec auth_attempt_failed(inet:ip_address(), binary()) -> ok. auth_attempt_failed(PeerIp, Username) -> - rabbit_core_metrics:auth_attempt_failed(PeerIp, Username, mqtt). + rabbit_core_metrics:auth_attempt_failed(PeerIp, Username, mqtt), + timer:sleep(?SILENT_CLOSE_DELAY). maybe_downgrade_qos(?QOS_0) -> ?QOS_0; maybe_downgrade_qos(?QOS_1) -> ?QOS_1; @@ -1273,8 +1327,10 @@ ensure_queue(QoS, State) -> case delete_queue(QName, State) of ok -> create_queue(QoS, State); - {error, access_refused} = E -> - E + {error, _} = Err -> + Err; + {protocol_error, _, _, _} = Err -> + {error, Err} end; {error, not_found} -> create_queue(QoS, State) @@ -1779,7 +1835,10 @@ maybe_delete_mqtt_qos0_queue(_) -> ok. -spec delete_queue(rabbit_amqqueue:name(), state()) -> - ok | {error, access_refused}. + ok | + {error, access_refused} | + {error, timeout} | + {protocol_error, Type :: atom(), Reason :: string(), Args :: term()}. 
 delete_queue(QName,
              #state{auth_state = #auth_state{
                                     user = User = #user{username = Username},
@@ -1791,8 +1850,12 @@ delete_queue(QName,
      fun (Q) ->
              case check_resource_access(User, QName, configure, AuthzCtx) of
                  ok ->
-                     {ok, _N} = rabbit_queue_type:delete(Q, false, false, Username),
-                     ok;
+                     case rabbit_queue_type:delete(Q, false, false, Username) of
+                         {ok, _} ->
+                             ok;
+                         Err ->
+                             Err
+                     end;
                  Err ->
                      Err
              end
@@ -2255,6 +2318,37 @@ ssl_login_name(Sock) ->
         nossl                -> none
     end.
 
+-spec extract_ssl_cert_client_id_settings() -> none | rabbit_ssl:ssl_cert_login_type().
+extract_ssl_cert_client_id_settings() ->
+    case application:get_env(?APP_NAME, ssl_cert_client_id_from) of
+        {ok, Mode} ->
+            case Mode of
+                subject_alternative_name -> extract_client_id_san_type(Mode);
+                _ -> {Mode, undefined, undefined}
+            end;
+        undefined -> none
+    end.
+
+extract_client_id_san_type(Mode) ->
+    {Mode,
+     application:get_env(?APP_NAME, ssl_cert_login_san_type, dns),
+     application:get_env(?APP_NAME, ssl_cert_login_san_index, 0)
+    }.
+
+
+-spec ssl_client_id(rabbit_net:socket(), rabbit_ssl:ssl_cert_login_type()) ->
+    none | binary().
+ssl_client_id(Sock, SslClientIdSettings) ->
+    case rabbit_net:peercert(Sock) of
+        {ok, C} -> case rabbit_ssl:peer_cert_auth_name(SslClientIdSettings, C) of
+                       unsafe    -> none;
+                       not_found -> none;
+                       Name      -> Name
+                   end;
+        {error, no_peercert} -> none;
+        nossl -> none
+    end.
+
 -spec proto_integer_to_atom(protocol_version()) -> protocol_version_atom().
 proto_integer_to_atom(3) ->
     ?MQTT_PROTO_V3;
diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl
index 920276966c6c..77e59848bec8 100644
--- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl
+++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_qos0_queue.erl
@@ -70,8 +70,10 @@ is_stateful() ->
 
 -spec declare(amqqueue:amqqueue(), node()) ->
     {'new' | 'existing' | 'owner_died', amqqueue:amqqueue()} |
-    {'absent', amqqueue:amqqueue(), rabbit_amqqueue:absent_reason()}.
+ {'absent', amqqueue:amqqueue(), rabbit_amqqueue:absent_reason()} | + {protocol_error, internal_error, string(), [string()]}. declare(Q0, _Node) -> + QName = amqqueue:get_name(Q0), Q1 = case amqqueue:get_pid(Q0) of none -> %% declaring process becomes the queue @@ -86,7 +88,7 @@ declare(Q0, _Node) -> Opts = amqqueue:get_options(Q), ActingUser = maps:get(user, Opts, ?UNKNOWN_USER), rabbit_event:notify(queue_created, - [{name, amqqueue:get_name(Q)}, + [{name, QName}, {durable, true}, {auto_delete, false}, {exclusive, true}, @@ -94,6 +96,11 @@ declare(Q0, _Node) -> {arguments, amqqueue:get_arguments(Q)}, {user_who_performed_action, ActingUser}]), {new, Q}; + {error, timeout} -> + {protocol_error, internal_error, + "Could not declare ~ts because the metadata store operation " + "timed out", + [rabbit_misc:rs(QName)]}; Other -> Other end. @@ -102,12 +109,17 @@ declare(Q0, _Node) -> boolean(), boolean(), rabbit_types:username()) -> - rabbit_types:ok(non_neg_integer()). + rabbit_types:ok(non_neg_integer()) | + rabbit_types:error(timeout). delete(Q, _IfUnused, _IfEmpty, ActingUser) -> QName = amqqueue:get_name(Q), log_delete(QName, amqqueue:get_exclusive_owner(Q)), - ok = rabbit_amqqueue:internal_delete(Q, ActingUser), - {ok, 0}. + case rabbit_amqqueue:internal_delete(Q, ActingUser) of + ok -> + {ok, 0}; + {error, timeout} = Err -> + Err + end. 
-spec deliver([{amqqueue:amqqueue(), stateless}], Msg :: mc:state(), diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl index eb7f70a937ab..94925d75fb9c 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_reader.erl @@ -71,39 +71,34 @@ close_connection(Pid, Reason) -> init(Ref) -> process_flag(trap_exit, true), logger:set_process_metadata(#{domain => ?RMQLOG_DOMAIN_CONN ++ [mqtt]}), - ProxyProtocolEnabled = application:get_env(?APP_NAME, proxy_protocol, false), - case rabbit_networking:handshake(Ref, ProxyProtocolEnabled) of + {ok, Sock} = rabbit_networking:handshake(Ref, + application:get_env(?APP_NAME, proxy_protocol, false)), + RealSocket = rabbit_net:unwrap_socket(Sock), + case rabbit_net:connection_string(Sock, inbound) of + {ok, ConnStr} -> + ConnName = rabbit_data_coercion:to_binary(ConnStr), + ?LOG_DEBUG("MQTT accepting TCP connection ~tp (~ts)", [self(), ConnName]), + _ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), + LoginTimeout = application:get_env(?APP_NAME, login_timeout, 10_000), + erlang:send_after(LoginTimeout, self(), login_timeout), + State0 = #state{socket = RealSocket, + proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock), + conn_name = ConnName, + await_recv = false, + connection_state = running, + conserve = false, + parse_state = rabbit_mqtt_packet:init_state()}, + State1 = control_throttle(State0), + State = rabbit_event:init_stats_timer(State1, #state.stats_timer), + gen_server:enter_loop(?MODULE, [], State); + {error, Reason = enotconn} -> + ?LOG_INFO("MQTT could not get connection string: ~s", [Reason]), + rabbit_net:fast_close(RealSocket), + ignore; {error, Reason} -> - ?LOG_ERROR("MQTT could not establish connection: ~s", [Reason]), - {stop, Reason}; - {ok, Sock} -> - RealSocket = rabbit_net:unwrap_socket(Sock), - case rabbit_net:connection_string(Sock, inbound) of - {ok, ConnStr} -> - ConnName = 
rabbit_data_coercion:to_binary(ConnStr), - ?LOG_DEBUG("MQTT accepting TCP connection ~tp (~ts)", [self(), ConnName]), - _ = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}), - LoginTimeout = application:get_env(?APP_NAME, login_timeout, 10_000), - erlang:send_after(LoginTimeout, self(), login_timeout), - State0 = #state{socket = RealSocket, - proxy_socket = rabbit_net:maybe_get_proxy_socket(Sock), - conn_name = ConnName, - await_recv = false, - connection_state = running, - conserve = false, - parse_state = rabbit_mqtt_packet:init_state()}, - State1 = control_throttle(State0), - State = rabbit_event:init_stats_timer(State1, #state.stats_timer), - gen_server:enter_loop(?MODULE, [], State); - {error, Reason = enotconn} -> - ?LOG_INFO("MQTT could not get connection string: ~s", [Reason]), - rabbit_net:fast_close(RealSocket), - ignore; - {error, Reason} -> - ?LOG_ERROR("MQTT could not get connection string: ~p", [Reason]), - rabbit_net:fast_close(RealSocket), - {stop, Reason} - end + ?LOG_ERROR("MQTT could not get connection string: ~p", [Reason]), + rabbit_net:fast_close(RealSocket), + {stop, Reason} end. 
handle_call({info, InfoItems}, _From, State) -> @@ -120,16 +115,9 @@ handle_cast({duplicate_id, SendWill}, rabbit_mqtt_processor:send_disconnect(?RC_SESSION_TAKEN_OVER, PState), {stop, {shutdown, duplicate_id}, {SendWill, State}}; -handle_cast(decommission_node, - State = #state{ proc_state = PState, - conn_name = ConnName }) -> - ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts' as its node is about" - " to be decommissioned", - [ConnName, rabbit_mqtt_processor:info(client_id, PState)]), - {stop, {shutdown, decommission_node}, State}; - handle_cast({close_connection, Reason}, - State = #state{conn_name = ConnName, proc_state = PState}) -> + State = #state{conn_name = ConnName, + proc_state = PState}) -> ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts', reason: ~ts", [ConnName, rabbit_mqtt_processor:info(client_id, PState), Reason]), case Reason of @@ -217,6 +205,14 @@ handle_info({keepalive, Req}, State = #state{proc_state = PState, {stop, Reason, State} end; +handle_info(credential_expired, + State = #state{conn_name = ConnName, + proc_state = PState}) -> + ?LOG_WARNING("MQTT disconnecting client ~tp with client ID '~ts' because credential expired", + [ConnName, rabbit_mqtt_processor:info(client_id, PState)]), + rabbit_mqtt_processor:send_disconnect(?RC_MAXIMUM_CONNECT_TIME, PState), + {stop, {shutdown, {disconnect, server_initiated}}, State}; + handle_info(login_timeout, State = #state{proc_state = connect_packet_unprocessed, conn_name = ConnName}) -> %% The connection is also closed if the CONNECT packet happens to @@ -246,7 +242,7 @@ handle_info({'DOWN', _MRef, process, QPid, _Reason}, State) -> {noreply, State, ?HIBERNATE_AFTER}; handle_info({shutdown, Explanation} = Reason, State = #state{conn_name = ConnName}) -> - %% rabbitmq_management plugin requests to close connection. + %% rabbitmq_management plugin or CLI command requests to close connection. 
?LOG_INFO("MQTT closing connection ~tp: ~p", [ConnName, Explanation]), {stop, Reason, State}; diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl index 2bdacebb58e2..943960ccffd5 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_sup.erl @@ -29,7 +29,7 @@ init([{Listeners, SslListeners0}]) -> end, %% Use separate process group scope per RabbitMQ node. This achieves a local-only %% process group which requires less memory with millions of connections. - PgScope = list_to_atom(io_lib:format("~s_~s", [?PG_SCOPE, node()])), + PgScope = rabbit:pg_local_scope(?PG_SCOPE), persistent_term:put(?PG_SCOPE, PgScope), {ok, {#{strategy => one_for_all, diff --git a/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl index e47d5e443eae..b8c65cb7e54c 100644 --- a/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl +++ b/deps/rabbitmq_mqtt/src/rabbit_mqtt_util.erl @@ -141,10 +141,10 @@ env(Key) -> undefined -> undefined end. -coerce_env_value(default_pass, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(default_user, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(vhost, Val) -> rabbit_data_coercion:to_binary(Val); -coerce_env_value(_, Val) -> Val. +coerce_env_value(vhost, Val) -> + rabbit_data_coercion:to_binary(Val); +coerce_env_value(_, Val) -> + Val. -spec table_lookup(rabbit_framing:amqp_table() | undefined, binary()) -> tuple() | undefined. 
diff --git a/deps/rabbitmq_mqtt/test/auth_SUITE.erl b/deps/rabbitmq_mqtt/test/auth_SUITE.erl index b7c6f33f405d..685cd7efaf29 100644 --- a/deps/rabbitmq_mqtt/test/auth_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/auth_SUITE.erl @@ -68,6 +68,13 @@ sub_groups() -> ssl_user_vhost_parameter_mapping_vhost_does_not_exist, ssl_user_cert_vhost_mapping_takes_precedence_over_port_vhost_mapping ]}, + {ssl_user_with_client_id_in_cert_san_dns, [], + [client_id_from_cert_san_dns, + invalid_client_id_from_cert_san_dns + ]}, + {ssl_user_with_client_id_in_cert_dn, [], + [client_id_from_cert_dn + ]}, {no_ssl_user, [shuffle], [anonymous_auth_failure, user_credentials_auth, @@ -123,15 +130,20 @@ init_per_group(authz, Config0) -> User = <<"mqtt-user">>, Password = <<"mqtt-password">>, VHost = <<"mqtt-vhost">>, - MqttConfig = {rabbitmq_mqtt, [{default_user, User} - ,{default_pass, Password} - ,{allow_anonymous, true} - ,{vhost, VHost} - ,{exchange, <<"amq.topic">>} - ]}, - Config = rabbit_ct_helpers:run_setup_steps(rabbit_ct_helpers:merge_app_env(Config0, MqttConfig), - rabbit_ct_broker_helpers:setup_steps() ++ - rabbit_ct_client_helpers:setup_steps()), + Env = [{rabbitmq_mqtt, + [{allow_anonymous, true}, + {vhost, VHost}, + {exchange, <<"amq.topic">>} + ]}, + {rabbit, + [{anonymous_login_user, User}, + {anonymous_login_pass, Password} + ]}], + Config1 = rabbit_ct_helpers:merge_app_env(Config0, Env), + Config = rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()), rabbit_ct_broker_helpers:add_user(Config, User, Password), rabbit_ct_broker_helpers:add_vhost(Config, VHost), [Log|_] = rpc(Config, 0, rabbit, log_locations, []), @@ -189,14 +201,27 @@ mqtt_config(no_ssl_user) -> mqtt_config(client_id_propagation) -> {rabbitmq_mqtt, [{ssl_cert_login, true}, {allow_anonymous, true}]}; +mqtt_config(ssl_user_with_client_id_in_cert_san_dns) -> + {rabbitmq_mqtt, [{ssl_cert_login, true}, + {allow_anonymous, false}, + 
{ssl_cert_client_id_from, subject_alternative_name}, + {ssl_cert_client_id_san_type, dns}]}; +mqtt_config(ssl_user_with_client_id_in_cert_dn) -> + {rabbitmq_mqtt, [{ssl_cert_login, true}, + {allow_anonymous, false}, + {ssl_cert_client_id_from, distinguished_name} + ]}; mqtt_config(_) -> undefined. -auth_config(client_id_propagation) -> +auth_config(T) when T == client_id_propagation; + T == ssl_user_with_client_id_in_cert_san_dns; + T == ssl_user_with_client_id_in_cert_dn -> {rabbit, [ {auth_backends, [rabbit_auth_backend_mqtt_mock]} ] }; + auth_config(_) -> undefined. @@ -287,9 +312,24 @@ init_per_testcase(T, Config) v4 -> {skip, "Will Delay Interval is an MQTT 5.0 feature"}; v5 -> testcase_started(Config, T) end; +init_per_testcase(T, Config) + when T =:= client_id_propagation; + T =:= invalid_client_id_from_cert_san_dns; + T =:= client_id_from_cert_san_dns; + T =:= client_id_from_cert_dn -> + SetupProcess = setup_rabbit_auth_backend_mqtt_mock(Config), + rabbit_ct_helpers:set_config(Config, {mock_setup_process, SetupProcess}); + init_per_testcase(Testcase, Config) -> testcase_started(Config, Testcase). +get_client_cert_subject(Config) -> + CertsDir = ?config(rmq_certsdir, Config), + CertFile = filename:join([CertsDir, "client", "cert.pem"]), + {ok, CertBin} = file:read_file(CertFile), + [{'Certificate', Cert, not_encrypted}] = public_key:pem_decode(CertBin), + iolist_to_binary(rpc(Config, 0, rabbit_ssl, peer_cert_subject, [Cert])). 
+ set_cert_user_on_default_vhost(Config) -> CertsDir = ?config(rmq_certsdir, Config), CertFile = filename:join([CertsDir, "client", "cert.pem"]), @@ -399,6 +439,15 @@ end_per_testcase(T, Config) when T == queue_bind_permission; file:write_file(?config(log_location, Config), <<>>), rabbit_ct_helpers:testcase_finished(Config, T); + +end_per_testcase(T, Config) + when T =:= client_id_propagation; + T =:= invalid_client_id_from_cert_san_dns; + T =:= client_id_from_cert_san_dns; + T =:= client_id_from_cert_dn -> + SetupProcess = ?config(mock_setup_process, Config), + SetupProcess ! stop; + end_per_testcase(Testcase, Config) -> rabbit_ct_helpers:testcase_finished(Config, Testcase). @@ -412,7 +461,6 @@ anonymous_auth_success(Config) -> anonymous_auth_failure(Config) -> expect_authentication_failure(fun connect_anonymous/1, Config). - ssl_user_auth_success(Config) -> expect_successful_connection(fun connect_ssl/1, Config). @@ -451,6 +499,36 @@ user_credentials_auth(Config) -> fun(Conf) -> connect_user(<<"non-existing-vhost:guest">>, <<"guest">>, Conf) end, Config). +client_id_from_cert_san_dns(Config) -> + ExpectedClientId = <<"rabbit_client_id">>, % Found in the client's certificate as SAN type CLIENT_ID + MqttClientId = ExpectedClientId, + {ok, C} = connect_ssl(MqttClientId, Config), + {ok, _} = emqtt:connect(C), + [{authentication, AuthProps}] = rpc(Config, 0, + rabbit_auth_backend_mqtt_mock, + get, + [authentication]), + ?assertEqual(ExpectedClientId, proplists:get_value(client_id, AuthProps)), + ok = emqtt:disconnect(C). + +client_id_from_cert_dn(Config) -> + ExpectedClientId = get_client_cert_subject(Config), % subject = distinguished_name + MqttClientId = ExpectedClientId, + {ok, C} = connect_ssl(MqttClientId, Config), + {ok, _} = emqtt:connect(C), + [{authentication, AuthProps}] = rpc(Config, 0, + rabbit_auth_backend_mqtt_mock, + get, + [authentication]), + ?assertEqual(ExpectedClientId, proplists:get_value(client_id, AuthProps)), + ok = emqtt:disconnect(C). 
+ +invalid_client_id_from_cert_san_dns(Config) -> + MqttClientId = <<"other_client_id">>, + {ok, C} = connect_ssl(MqttClientId, Config), + ?assertMatch({error, _}, emqtt:connect(C)), + unlink(C). + ssl_user_vhost_parameter_mapping_success(Config) -> expect_successful_connection(fun connect_ssl/1, Config). @@ -502,6 +580,9 @@ connect_anonymous(Config, ClientId) -> {proto_ver, ?config(mqtt_version, Config)}]). connect_ssl(Config) -> + connect_ssl(<<"simpleClient">>, Config). + +connect_ssl(ClientId, Config) -> CertsDir = ?config(rmq_certsdir, Config), SSLConfig = [{cacertfile, filename:join([CertsDir, "testca", "cacert.pem"])}, {certfile, filename:join([CertsDir, "client", "cert.pem"])}, @@ -510,12 +591,12 @@ connect_ssl(Config) -> P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_mqtt_tls), emqtt:start_link([{host, "localhost"}, {port, P}, - {clientid, <<"simpleClient">>}, + {clientid, ClientId}, {proto_ver, ?config(mqtt_version, Config)}, {ssl, true}, {ssl_opts, SSLConfig}]). -client_id_propagation(Config) -> +setup_rabbit_auth_backend_mqtt_mock(Config) -> ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config, rabbit_auth_backend_mqtt_mock), %% setup creates the ETS table required for the mqtt auth mock @@ -526,11 +607,13 @@ client_id_propagation(Config) -> rpc(Config, 0, rabbit_auth_backend_mqtt_mock, setup, [Self]) end), %% the setup process will notify us - SetupProcess = receive + receive {ok, SP} -> SP after 3000 -> ct:fail("timeout waiting for rabbit_auth_backend_mqtt_mock:setup/1") - end, + end. + +client_id_propagation(Config) -> ClientId = <<"client-id-propagation">>, {ok, C} = connect_user(<<"fake-user">>, <<"fake-password">>, Config, ClientId), @@ -561,11 +644,8 @@ client_id_propagation(Config) -> VariableMap = maps:get(variable_map, TopicContext), ?assertEqual(ClientId, maps:get(<<"client_id">>, VariableMap)), - ok = emqtt:disconnect(C), - - SetupProcess ! stop, - - ok. + emqtt:disconnect(C). 
+ %% These tests try to cover all operations that are listed in the %% table in https://www.rabbitmq.com/access-control.html#authorisation diff --git a/deps/rabbitmq_mqtt/test/command_SUITE.erl b/deps/rabbitmq_mqtt/test/command_SUITE.erl index 528c4b0b1b97..864727077c40 100644 --- a/deps/rabbitmq_mqtt/test/command_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/command_SUITE.erl @@ -85,12 +85,6 @@ run(Config) -> %% No connections [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), - %% Open a WebMQTT connection, command won't list it - WebMqttConfig = [{websocket, true} | Config], - _C0 = connect(<<"simpleWebMqttClient">>, WebMqttConfig, [{ack_timeout, 1}]), - - [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), - %% Open a connection C1 = connect(<<"simpleClient">>, Config, [{ack_timeout, 1}]), diff --git a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets index df1a3f3a57f5..92c1b2f29c7e 100644 --- a/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets +++ b/deps/rabbitmq_mqtt/test/config_schema_SUITE_data/rabbitmq_mqtt.snippets @@ -1,7 +1,5 @@ [{defaults, "listeners.tcp.default = 5672 - mqtt.default_user = guest - mqtt.default_pass = guest mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic @@ -20,9 +18,7 @@ mqtt.topic_alias_maximum = 16", [{rabbit,[{tcp_listeners,[5672]}]}, {rabbitmq_mqtt, - [{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,86400}, @@ -99,10 +95,24 @@ "ssl_cert_login_from = common_name", [{rabbit,[{ssl_cert_login_from,common_name}]}], [rabbitmq_mqtt]}, + {ssl_cert_client_id_from_common_name, + "mqtt.ssl_cert_client_id_from = distinguished_name", + [{rabbitmq_mqtt,[{ssl_cert_client_id_from,distinguished_name}]}], + [rabbitmq_mqtt]}, + {ssl_cert_login_dns_san_type, + 
"mqtt.ssl_cert_login_san_type = dns", + [{rabbitmq_mqtt,[{ssl_cert_login_san_type,dns}]}], + [rabbitmq_mqtt]}, + {ssl_cert_login_other_name_san_type, + "mqtt.ssl_cert_login_san_type = other_name", + [{rabbitmq_mqtt,[{ssl_cert_login_san_type,other_name}]}], + [rabbitmq_mqtt]}, + {ssl_cert_login_san_index, + "mqtt.ssl_cert_login_san_index = 0", + [{rabbitmq_mqtt,[{ssl_cert_login_san_index,0}]}], + [rabbitmq_mqtt]}, {proxy_protocol, "listeners.tcp.default = 5672 - mqtt.default_user = guest - mqtt.default_pass = guest mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic @@ -111,9 +121,7 @@ mqtt.proxy_protocol = true", [{rabbit,[{tcp_listeners,[5672]}]}, {rabbitmq_mqtt, - [{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,infinity}, @@ -121,9 +129,7 @@ {proxy_protocol,true}]}], [rabbitmq_mqtt]}, {prefetch_retained_msg_store, - "mqtt.default_user = guest - mqtt.default_pass = guest - mqtt.allow_anonymous = true + "mqtt.allow_anonymous = true mqtt.vhost = / mqtt.exchange = amq.topic mqtt.max_session_expiry_interval_seconds = 1800 @@ -136,9 +142,7 @@ mqtt.listeners.ssl = none mqtt.listeners.tcp.default = 1883", [{rabbitmq_mqtt, - [{default_user,<<"guest">>}, - {default_pass,<<"guest">>}, - {allow_anonymous,true}, + [{allow_anonymous,true}, {vhost,<<"/">>}, {exchange,<<"amq.topic">>}, {max_session_expiry_interval_seconds,1800}, diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml index a2864258d020..450edf13d401 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml @@ -15,11 +15,11 @@ [1.2.5,) [1.2.5,) - 5.21.0 - 5.10.3 + 5.22.0 + 5.11.1 3.26.3 1.2.13 - 3.3.1 + 3.5.0 2.1.1 2.4.21 3.12.1 diff --git a/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl b/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl index 
9a0d9de6447a..14d88f357602 100644 --- a/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/mc_mqtt_SUITE.erl @@ -265,7 +265,7 @@ amqp_to_mqtt_reply_to(_Config) -> amqp_to_mqtt_footer(_Config) -> Body = <<"hey">>, - Footer = #'v1_0.footer'{content = [{{symbol, <<"key">>}, {utf8, <<"value">>}}]}, + Footer = #'v1_0.footer'{content = [{{symbol, <<"x-key">>}, {utf8, <<"value">>}}]}, %% We can translate, but lose the footer. #mqtt_msg{payload = Payload} = amqp_to_mqtt([#'v1_0.data'{content = Body}, Footer]), ?assertEqual(<<"hey">>, iolist_to_binary(Payload)). @@ -404,8 +404,6 @@ amqp_mqtt(_Config) -> durable = true}, MAC = [ {{symbol, <<"x-stream-filter">>}, {utf8, <<"apple">>}}, - thead2(list, [utf8(<<"l">>)]), - thead2(map, [{utf8(<<"k">>), utf8(<<"v">>)}]), thead2('x-list', list, [utf8(<<"l">>)]), thead2('x-map', map, [{utf8(<<"k">>), utf8(<<"v">>)}]) ], diff --git a/deps/rabbitmq_mqtt/test/shared_SUITE.erl b/deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl similarity index 97% rename from deps/rabbitmq_mqtt/test/shared_SUITE.erl rename to deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl index a63e1a83ffe9..aa6735fb202e 100644 --- a/deps/rabbitmq_mqtt/test/shared_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl @@ -10,7 +10,9 @@ %% %% In other words, this test suite should not contain any test case that is executed %% only with a particular plugin or particular MQTT version. --module(shared_SUITE). +%% +%% When adding a test case here the same function must be defined in web_mqtt_shared_SUITE. +-module(mqtt_shared_SUITE). -compile([export_all, nowarn_export_all]). @@ -27,7 +29,8 @@ rpc_all/4, get_node_config/3, drain_node/2, - revive_node/2 + revive_node/2, + await_metadata_store_consistent/2 ]). -import(rabbit_ct_helpers, [eventually/3, @@ -52,23 +55,13 @@ -define(RC_SESSION_TAKEN_OVER, 16#8E). all() -> - [{group, mqtt}, - {group, web_mqtt}]. + [{group, mqtt}]. %% The code being tested under v3 and v4 is almost identical. 
%% To save time in CI, we therefore run only a very small subset of tests in v3. groups() -> [ {mqtt, [], - [{cluster_size_1, [], - [{v3, [], cluster_size_1_tests_v3()}, - {v4, [], cluster_size_1_tests()}, - {v5, [], cluster_size_1_tests()}]}, - {cluster_size_3, [], - [{v4, [], cluster_size_3_tests()}, - {v5, [], cluster_size_3_tests()}]} - ]}, - {web_mqtt, [], [{cluster_size_1, [], [{v3, [], cluster_size_1_tests_v3()}, {v4, [], cluster_size_1_tests()}, @@ -87,9 +80,12 @@ cluster_size_1_tests_v3() -> cluster_size_1_tests() -> [ global_counters %% must be the 1st test case + ,message_size_metrics ,block_only_publisher ,many_qos1_messages ,session_expiry + ,cli_close_all_connections + ,cli_close_all_user_connections ,management_plugin_connection ,management_plugin_enable ,disconnect @@ -170,9 +166,6 @@ end_per_suite(Config) -> init_per_group(mqtt, Config) -> rabbit_ct_helpers:set_config(Config, {websocket, false}); -init_per_group(web_mqtt, Config) -> - rabbit_ct_helpers:set_config(Config, {websocket, true}); - init_per_group(Group, Config) when Group =:= v3; Group =:= v4; @@ -213,8 +206,6 @@ init_per_testcase(Testcase, Config) -> init_per_testcase0(Testcase, Config). init_per_testcase0(Testcase, Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:enable_plugin(Config, N, rabbitmq_web_mqtt) || N <- Nodes], rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(T, Config) @@ -688,6 +679,34 @@ global_counters(Config) -> messages_unroutable_returned_total => 1}, get_global_counters(Config, ProtoVer))). 
+message_size_metrics(Config) -> + Protocol = case ?config(mqtt_version, Config) of + v4 -> mqtt311; + v5 -> mqtt50 + end, + BucketsBefore = rpc(Config, rabbit_msg_size_metrics, raw_buckets, [Protocol]), + + Topic = ClientId = atom_to_binary(?FUNCTION_NAME), + C = connect(ClientId, Config), + {ok, _, [0]} = emqtt:subscribe(C, Topic, qos0), + Payload1B = <<255>>, + Payload500B = binary:copy(Payload1B, 500), + Payload5KB = binary:copy(Payload1B, 5_000), + Payload2MB = binary:copy(Payload1B, 2_000_000), + Payloads = [Payload2MB, Payload5KB, Payload500B, Payload1B, Payload500B], + [ok = emqtt:publish(C, Topic, P, qos0) || P <- Payloads], + ok = expect_publishes(C, Topic, Payloads), + + BucketsAfter = rpc(Config, rabbit_msg_size_metrics, raw_buckets, [Protocol]), + ?assertEqual( + [{100, 1}, + {1000, 2}, + {10_000, 1}, + {10_000_000, 1}], + rabbit_msg_size_metrics:diff_raw_buckets(BucketsAfter, BucketsBefore)), + + ok = emqtt:disconnect(C). + pubsub(Config) -> Topic0 = <<"t/0">>, Topic1 = <<"t/1">>, @@ -828,7 +847,9 @@ delete_create_queue(Config) -> timer:sleep(2), delete_queue(Ch, [CQ1, QQ]), %% Give queues some time to be fully deleted - timer:sleep(2000), + %% TODO: wait longer for quorum queues in mixed mode as it can take longer + %% for deletion to complete, delete timeout is 5s so we need to exceed that + timer:sleep(6000), %% We expect confirms for all messages. %% Confirm here does not mean that messages made it ever to the deleted queues. @@ -1128,6 +1149,7 @@ rabbit_mqtt_qos0_queue_kill_node(Config) -> SubscriberId = <<"subscriber">>, Sub0 = connect(SubscriberId, Config, 0, []), {ok, _, [0]} = emqtt:subscribe(Sub0, Topic1, qos0), + ok = await_metadata_store_consistent(Config, 2), ok = emqtt:publish(Pub, Topic1, <<"m0">>, qos0), ok = expect_publishes(Sub0, Topic1, [<<"m0">>]), @@ -1161,6 +1183,24 @@ rabbit_mqtt_qos0_queue_kill_node(Config) -> ok = rabbit_ct_broker_helpers:start_node(Config, 1), ?assertEqual([], rpc(Config, rabbit_db_binding, get_all, [])). 
+cli_close_all_connections(Config) -> + ClientId = atom_to_binary(?FUNCTION_NAME), + C = connect(ClientId, Config), + process_flag(trap_exit, true), + {ok, String} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, 0, ["close_all_connections", "bye"]), + ?assertEqual(match, re:run(String, "Closing .* reason: bye", [{capture, none}])), + ok = await_exit(C). + +cli_close_all_user_connections(Config) -> + ClientId = atom_to_binary(?FUNCTION_NAME), + C = connect(ClientId, Config), + process_flag(trap_exit, true), + {ok, String} = rabbit_ct_broker_helpers:rabbitmqctl( + Config, 0, ["close_all_user_connections","guest", "bye"]), + ?assertEqual(match, re:run(String, "Closing .* reason: bye", [{capture, none}])), + ok = await_exit(C). + %% Test that MQTT connection can be listed and closed via the rabbitmq_management plugin. management_plugin_connection(Config) -> KeepaliveSecs = 99, @@ -1393,7 +1433,7 @@ block(Config) -> puback_timeout = publish_qos1_timeout(C, Topic, <<"Still blocked">>, 1000), %% Unblock - rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]), + rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]), ok = expect_publishes(C, Topic, [<<"Not blocked yet">>, <<"Now blocked">>, <<"Still blocked">>]), @@ -1434,7 +1474,7 @@ block_only_publisher(Config) -> ?assertEqual(puback_timeout, publish_qos1_timeout(Con, Topic, <<"from Con 2">>, 500)), ?assertEqual(pong, emqtt:ping(Sub)), - rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]), + rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]), %% Let it unblock timer:sleep(100), diff --git a/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl b/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl index d85fc4fb1b14..249e335e2afd 100644 --- a/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/protocol_interop_SUITE.erl @@ -338,7 +338,7 @@ amqp_mqtt_amqp(Config) -> properties := Props = #{'Correlation-Data' := Correlation} } = MqttMsg, 
case rabbit_ct_broker_helpers:is_feature_flag_enabled( - Config, message_containers_store_amqp_v1) of + Config, 'rabbitmq_4.0.0') of true -> ?assertEqual({ok, ResponseTopic}, maps:find('Response-Topic', Props)); @@ -430,7 +430,7 @@ amqp_mqtt(Qos, Config) -> } = MqttMsg1, ?assertEqual([Body1], amqp10_framing:decode_bin(Payload1)), case rabbit_ct_broker_helpers:is_feature_flag_enabled( - Config, message_containers_store_amqp_v1) of + Config, 'rabbitmq_4.0.0') of true -> ?assertEqual({ok, <<"message/vnd.rabbitmq.amqp">>}, maps:find('Content-Type', Props)); diff --git a/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app index c4083ec5fc81..287c59cfe230 100644 --- a/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app +++ b/deps/rabbitmq_mqtt/test/rabbitmq_mqtt.app @@ -4,9 +4,7 @@ {modules, []}, {registered, []}, {mod, {rabbit_mqtt, []}}, - {env, [{default_user, "guest_user"}, - {default_pass, "guest_pass"}, - {ssl_cert_login,false}, + {env, [{ssl_cert_login,false}, {allow_anonymous, true}, {vhost, "/"}, {exchange, "amq.topic"}, diff --git a/deps/rabbitmq_mqtt/test/util_SUITE.erl b/deps/rabbitmq_mqtt/test/util_SUITE.erl index a4a343c1eb94..3b16c8e68824 100644 --- a/deps/rabbitmq_mqtt/test/util_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/util_SUITE.erl @@ -18,8 +18,6 @@ groups() -> [ {tests, [parallel], [ coerce_vhost, - coerce_default_user, - coerce_default_pass, mqtt_amqp_topic_translation ] } @@ -36,12 +34,6 @@ end_per_suite(Config) -> coerce_vhost(_) -> ?assertEqual(<<"/">>, rabbit_mqtt_util:env(vhost)). -coerce_default_user(_) -> - ?assertEqual(<<"guest_user">>, rabbit_mqtt_util:env(default_user)). - -coerce_default_pass(_) -> - ?assertEqual(<<"guest_pass">>, rabbit_mqtt_util:env(default_pass)). 
- mqtt_amqp_topic_translation(_) -> ok = application:set_env(rabbitmq_mqtt, sparkplug, true), ok = rabbit_mqtt_util:init_sparkplug(), diff --git a/deps/rabbitmq_mqtt/test/v5_SUITE.erl b/deps/rabbitmq_mqtt/test/v5_SUITE.erl index 475b9450af9a..72df49577639 100644 --- a/deps/rabbitmq_mqtt/test/v5_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/v5_SUITE.erl @@ -42,16 +42,11 @@ -define(RC_TOPIC_ALIAS_INVALID, 16#94). all() -> - [{group, mqtt}, - {group, web_mqtt}]. + [{group, mqtt}]. groups() -> [ {mqtt, [], - [{cluster_size_1, [shuffle], cluster_size_1_tests()}, - {cluster_size_3, [shuffle], cluster_size_3_tests()} - ]}, - {web_mqtt, [], [{cluster_size_1, [shuffle], cluster_size_1_tests()}, {cluster_size_3, [shuffle], cluster_size_3_tests()} ]} @@ -153,9 +148,6 @@ end_per_suite(Config) -> init_per_group(mqtt, Config) -> rabbit_ct_helpers:set_config(Config, {websocket, false}); -init_per_group(web_mqtt, Config) -> - rabbit_ct_helpers:set_config(Config, {websocket, true}); - init_per_group(Group, Config0) -> Nodes = case Group of cluster_size_1 -> 1; @@ -198,8 +190,6 @@ init_per_testcase(T, Config) -> init_per_testcase0(T, Config). init_per_testcase0(Testcase, Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:enable_plugin(Config, N, rabbitmq_web_mqtt) || N <- Nodes], rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(T, Config) diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl index 9a0fc9da426f..082c5c09c7bc 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl @@ -87,7 +87,7 @@ callback_mode() -> [state_functions, state_enter]. 
terminate(Reason, State, Data) -> rabbit_log:debug("etcd v3 API client will terminate in state ~tp, reason: ~tp", [State, Reason]), - disconnect(?ETCD_CONN_NAME, Data), + _ = disconnect(?ETCD_CONN_NAME, Data), rabbit_log:debug("etcd v3 API client has disconnected"), rabbit_log:debug("etcd v3 API client: total number of connections to etcd is ~tp", [length(eetcd_conn_sup:info())]), ok. @@ -157,13 +157,13 @@ recover(internal, start, Data = #statem_data{endpoints = Endpoints, connection_m }}; {error, Errors} -> [rabbit_log:error("etcd peer discovery: failed to connect to endpoint ~tp: ~tp", [Endpoint, Err]) || {Endpoint, Err} <- Errors], - ensure_disconnected(?ETCD_CONN_NAME, Data), + _ = ensure_disconnected(?ETCD_CONN_NAME, Data), Actions = [{state_timeout, reconnection_interval(), recover}], {keep_state, reset_statem_data(Data), Actions} end; recover(state_timeout, _PrevState, Data) -> rabbit_log:debug("etcd peer discovery: connection entered a reconnection delay state"), - ensure_disconnected(?ETCD_CONN_NAME, Data), + _ = ensure_disconnected(?ETCD_CONN_NAME, Data), {next_state, recover, reset_statem_data(Data)}; recover({call, From}, Req, _Data) -> rabbit_log:error("etcd v3 API: client received a call ~tp while not connected, will do nothing", [Req]), diff --git a/deps/rabbitmq_prelaunch/Makefile b/deps/rabbitmq_prelaunch/Makefile index 38c4b940ab3e..ee82d02d3c39 100644 --- a/deps/rabbitmq_prelaunch/Makefile +++ b/deps/rabbitmq_prelaunch/Makefile @@ -3,9 +3,9 @@ PROJECT_DESCRIPTION = RabbitMQ prelaunch setup PROJECT_VERSION = 4.0.0 PROJECT_MOD = rabbit_prelaunch_app -DEPS = rabbit_common cuttlefish thoas +DEPS = rabbit_common cuttlefish thoas osiris systemd -PLT_APPS += runtime_tools eunit osiris systemd +PLT_APPS += runtime_tools eunit DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk diff --git a/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl index c2f27226a1c5..07fcd86a7f10 100644 --- 
a/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl @@ -11,9 +11,8 @@ generate_config_from_cuttlefish_files/3, decrypt_config/1]). --ifdef(TEST). +%% Only used in tests. -export([decrypt_config/2]). --endif. %% These can be removed when we only support OTP-26+. -ignore_xref([{user_drv, whereis_group, 0}, diff --git a/deps/rabbitmq_prometheus/BUILD.bazel b/deps/rabbitmq_prometheus/BUILD.bazel index 64a4325d234d..b0d71c0cda52 100644 --- a/deps/rabbitmq_prometheus/BUILD.bazel +++ b/deps/rabbitmq_prometheus/BUILD.bazel @@ -52,7 +52,6 @@ rabbitmq_app( priv = [":priv"], deps = [ "//deps/rabbit:erlang_app", - "//deps/rabbitmq_federation:erlang_app", "//deps/rabbitmq_management_agent:erlang_app", "//deps/rabbitmq_web_dispatch:erlang_app", "@accept//:erlang_app", @@ -99,15 +98,6 @@ rabbitmq_integration_suite( flaky = True, ) -rabbitmq_integration_suite( - name = "prometheus_rabbitmq_federation_collector_SUITE", - size = "small", - additional_beam = [ - "//deps/rabbitmq_federation:test/rabbit_federation_test_util.beam", #keep - "test/rabbitmq_prometheus_collector_test_proxy.beam", #keep - ], -) - assert_suites() alias( diff --git a/deps/rabbitmq_prometheus/Makefile b/deps/rabbitmq_prometheus/Makefile index abfb4195f722..8380e81b9a7b 100644 --- a/deps/rabbitmq_prometheus/Makefile +++ b/deps/rabbitmq_prometheus/Makefile @@ -9,7 +9,7 @@ endef PROJECT := rabbitmq_prometheus PROJECT_DESCRIPTION = Prometheus metrics for RabbitMQ PROJECT_MOD := rabbit_prometheus_app -DEPS = accept cowboy rabbit rabbitmq_management_agent prometheus rabbitmq_web_dispatch rabbitmq_federation +DEPS = accept cowboy rabbit rabbitmq_management_agent prometheus rabbitmq_web_dispatch BUILD_DEPS = amqp_client rabbit_common rabbitmq_management TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters diff --git a/deps/rabbitmq_prometheus/app.bzl b/deps/rabbitmq_prometheus/app.bzl index d3078b96bf8f..3084d1ced302 100644 --- 
a/deps/rabbitmq_prometheus/app.bzl +++ b/deps/rabbitmq_prometheus/app.bzl @@ -13,8 +13,8 @@ def all_beam_files(name = "all_beam_files"): "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", - "src/collectors/prometheus_rabbitmq_federation_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl", "src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", "src/rabbit_prometheus_handler.erl", @@ -44,8 +44,8 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", - "src/collectors/prometheus_rabbitmq_federation_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl", "src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", "src/rabbit_prometheus_handler.erl", @@ -86,8 +86,8 @@ def all_srcs(name = "all_srcs"): "src/collectors/prometheus_rabbitmq_alarm_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_core_metrics_collector.erl", "src/collectors/prometheus_rabbitmq_dynamic_collector.erl", - "src/collectors/prometheus_rabbitmq_federation_collector.erl", "src/collectors/prometheus_rabbitmq_global_metrics_collector.erl", + "src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl", "src/rabbit_prometheus_app.erl", "src/rabbit_prometheus_dispatcher.erl", "src/rabbit_prometheus_handler.erl", @@ -125,15 +125,7 @@ def test_suite_beam_files(name = "test_suite_beam_files"): "//deps/rabbitmq_ct_helpers:erlang_app", ], ) - erlang_bytecode( - name = "prometheus_rabbitmq_federation_collector_SUITE_beam_files", 
- testonly = True, - srcs = ["test/prometheus_rabbitmq_federation_collector_SUITE.erl"], - outs = ["test/prometheus_rabbitmq_federation_collector_SUITE.beam"], - app_name = "rabbitmq_prometheus", - erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "@prometheus//:erlang_app"], - ) + erlang_bytecode( name = "rabbitmq_prometheus_collector_test_proxy_beam_files", testonly = True, diff --git a/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh index b5994f87a73a..83f88b1b4b40 100755 --- a/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh +++ b/deps/rabbitmq_prometheus/docker/docker-entrypoint.sh @@ -55,7 +55,6 @@ rabbitConfigKeys=( default_pass default_user default_vhost - hipe_compile vm_memory_high_watermark ) fileConfigKeys=( @@ -267,7 +266,7 @@ rabbit_env_config() { local val="${!var:-}" local rawVal="$val" case "$conf" in - fail_if_no_peer_cert|hipe_compile) + fail_if_no_peer_cert) case "${val,,}" in false|no|0|'') rawVal='false' ;; true|yes|1|*) rawVal='true' ;; diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-metrics.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-metrics.conf index a253d823d19e..c8ea2c3a7063 100644 --- a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-metrics.conf +++ b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-metrics.conf @@ -5,7 +5,6 @@ management.listener.port = 15672 management.listener.ssl = false vm_memory_high_watermark.absolute = 768MiB -vm_memory_high_watermark_paging_ratio = 0.2 cluster_name = rabbitmq-dist-metrics diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf index 94d6aaab01bf..19c08a7c6aa9 100644 --- a/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf +++ b/deps/rabbitmq_prometheus/docker/rabbitmq-dist-tls.conf @@ -5,7 +5,6 @@ management.listener.port = 15672 management.listener.ssl = false vm_memory_high_watermark.absolute = 4GiB 
-vm_memory_high_watermark_paging_ratio = 0.9 disk_free_limit.absolute = 2048MiB cluster_name = rabbitmq-dist-tls diff --git a/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf b/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf index b276485b2722..82d548fd34bd 100644 --- a/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf +++ b/deps/rabbitmq_prometheus/docker/rabbitmq-overview.conf @@ -5,7 +5,6 @@ management.listener.port = 15672 management.listener.ssl = false vm_memory_high_watermark.absolute = 768MiB -vm_memory_high_watermark_paging_ratio = 0.2 cluster_name = rabbitmq-overview diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl index 848e6c764fde..ac2a64383989 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl @@ -489,6 +489,14 @@ label({RemoteAddress, Username, Protocol}) when is_binary(RemoteAddress), is_bin V =/= <<>> end, [{remote_address, RemoteAddress}, {username, Username}, {protocol, atom_to_binary(Protocol, utf8)}]); +label({ + #resource{kind=queue, virtual_host=VHost, name=QName}, + #resource{kind=exchange, name=ExName} + }) -> + %% queue_exchange_metrics {queue_id, exchange_id} + <<"vhost=\"", (escape_label_value(VHost))/binary, "\",", + "exchange=\"", (escape_label_value(ExName))/binary, "\",", + "queue=\"", (escape_label_value(QName))/binary, "\"">>; label({I1, I2}) -> case {label(I1), label(I2)} of {<<>>, L} -> L; @@ -640,6 +648,19 @@ get_data(Table, false, VHostsFilter) when Table == channel_exchange_metrics; _ -> [Result] end; +get_data(ra_metrics = Table, true, _) -> + ets:foldl( + fun ({#resource{kind = queue}, _, _, _, _, _, _} = Row, Acc) -> + %% Metrics for QQ records use the queue resource as the table + %% key. 
The queue name and vhost will be rendered as tags. + [Row | Acc]; + ({ClusterName, _, _, _, _, _, _} = Row, Acc) when is_atom(ClusterName) -> + %% Other Ra clusters like Khepri and the stream coordinator use + %% the cluster name as the metrics key. Transform this into a + %% value that can be rendered as a "raft_cluster" tag. + Row1 = setelement(1, Row, #{<<"raft_cluster">> => atom_to_binary(ClusterName, utf8)}), + [Row1 | Acc] + end, [], Table); get_data(exchange_metrics = Table, true, VHostsFilter) when is_map(VHostsFilter)-> ets:foldl(fun ({#resource{kind = exchange, virtual_host = VHost}, _, _, _, _, _} = Row, Acc) when diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl index af2073737724..0e7b027b8503 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_global_metrics_collector.erl @@ -29,22 +29,16 @@ register() -> ok = prometheus_registry:register_collector(?MODULE). -deregister_cleanup(_) -> ok. +deregister_cleanup(_) -> + ok. collect_mf(_Registry, Callback) -> - _ = maps:fold( - fun (Name, #{type := Type, help := Help, values := Values}, Acc) -> - Callback( - create_mf(?METRIC_NAME(Name), - Help, - Type, - maps:to_list(Values))), - Acc - end, - ok, - rabbit_global_counters:prometheus_format() - ). - -%% =================================================================== -%% Private functions -%% =================================================================== + maps:foreach( + fun(Name, #{type := Type, help := Help, values := Values}) -> + Callback( + create_mf(?METRIC_NAME(Name), + Help, + Type, + maps:to_list(Values))) + end, + rabbit_global_counters:prometheus_format()). 
diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl new file mode 100644 index 000000000000..54a349547744 --- /dev/null +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_message_size_metrics_collector.erl @@ -0,0 +1,33 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(prometheus_rabbitmq_message_size_metrics_collector). + +-behaviour(prometheus_collector). +-include_lib("prometheus/include/prometheus.hrl"). + +-export([register/0, + deregister_cleanup/1, + collect_mf/2]). + +-define(METRIC_NAME_PREFIX, "rabbitmq_"). + +register() -> + ok = prometheus_registry:register_collector(?MODULE). + +deregister_cleanup(_) -> + ok. + +collect_mf(_Registry, Callback) -> + maps:foreach( + fun(Name, #{type := Type, + help := Help, + values := Values}) -> + MetricsFamily = prometheus_model_helpers:create_mf( + ?METRIC_NAME(Name), Help, Type, Values), + Callback(MetricsFamily) + end, + rabbit_msg_size_metrics:prometheus_format()). 
diff --git a/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl index e8b5a1d0de3f..2b07be760098 100644 --- a/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl +++ b/deps/rabbitmq_prometheus/src/rabbit_prometheus_dispatcher.erl @@ -16,9 +16,9 @@ build_dispatcher() -> prometheus_registry:register_collectors([ prometheus_rabbitmq_core_metrics_collector, prometheus_rabbitmq_global_metrics_collector, + prometheus_rabbitmq_message_size_metrics_collector, prometheus_rabbitmq_alarm_metrics_collector, prometheus_rabbitmq_dynamic_collector, - prometheus_rabbitmq_federation_collector, prometheus_process_collector]), prometheus_registry:register_collectors('per-object', [ prometheus_vm_system_info_collector, @@ -28,7 +28,8 @@ build_dispatcher() -> prometheus_vm_statistics_collector, prometheus_vm_msacc_collector, prometheus_rabbitmq_core_metrics_collector, - prometheus_rabbitmq_global_metrics_collector + prometheus_rabbitmq_global_metrics_collector, + prometheus_rabbitmq_message_size_metrics_collector ]), prometheus_registry:register_collectors('detailed', [ prometheus_rabbitmq_core_metrics_collector diff --git a/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl index 1a9c514391be..a0c64ebc6c5d 100644 --- a/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl +++ b/deps/rabbitmq_prometheus/test/rabbit_prometheus_http_SUITE.erl @@ -38,13 +38,15 @@ groups() -> aggregated_metrics_test, specific_erlang_metrics_present_test, global_metrics_present_test, - global_metrics_single_metric_family_test + global_metrics_single_metric_family_test, + message_size_metrics_present ]}, {per_object_metrics, [], [ globally_configure_per_object_metrics_test, specific_erlang_metrics_present_test, global_metrics_present_test, - global_metrics_single_metric_family_test + global_metrics_single_metric_family_test, + 
message_size_metrics_present ]}, {per_object_endpoint_metrics, [], [ endpoint_per_object_metrics, @@ -490,6 +492,35 @@ global_metrics_present_test(Config) -> ?assertEqual(match, re:run(Body, "^rabbitmq_global_publishers{", [{capture, none}, multiline])), ?assertEqual(match, re:run(Body, "^rabbitmq_global_consumers{", [{capture, none}, multiline])). +message_size_metrics_present(Config) -> + {_Headers, Body} = http_get_with_pal(Config, [], 200), + + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"100\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"1000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"10000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"100000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"1000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"10000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"10000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"50000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"100000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp091\",le=\"\\+Inf\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_count{protocol=\"amqp091\"}", [{capture, none}, 
multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_sum{protocol=\"amqp091\"}", [{capture, none}, multiline])), + + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"100\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"1000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"10000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"100000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"1000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"10000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"10000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"50000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"100000000\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_bucket{protocol=\"amqp10\",le=\"\\+Inf\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_count{protocol=\"amqp10\"}", [{capture, none}, multiline])), + ?assertEqual(match, re:run(Body, "^rabbitmq_message_size_bytes_sum{protocol=\"amqp10\"}", [{capture, none}, multiline])). 
+ global_metrics_single_metric_family_test(Config) -> {_Headers, Body} = http_get_with_pal(Config, [], 200), {match, MetricFamilyMatches} = re:run(Body, "TYPE rabbitmq_global_messages_acknowledged_total", [global]), diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl index c50fc93a189f..a6eeef97a751 100644 --- a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl +++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange.erl @@ -16,11 +16,11 @@ get/1, insert/3, delete/0, - delete/1 + delete/1, + delete_in_khepri/0 ]). --export([khepri_recent_history_path/1, - khepri_recent_history_path/0]). +-export([khepri_recent_history_path/1]). -rabbit_mnesia_tables_to_khepri_db( [{?RH_TABLE, rabbit_db_rh_exchange_m2k_converter}]). @@ -150,7 +150,9 @@ delete_in_mnesia() -> end. delete_in_khepri() -> - rabbit_khepri:delete(khepri_recent_history_path()). + Path = khepri_recent_history_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR), + rabbit_khepri:delete(Path). delete(XName) -> rabbit_khepri:handle_fallback( @@ -165,14 +167,16 @@ delete_in_mnesia(XName) -> end). delete_in_khepri(XName) -> - rabbit_khepri:delete(khepri_recent_history_path(XName)). + Path = khepri_recent_history_path(XName), + rabbit_khepri:delete(Path). %% ------------------------------------------------------------------- %% paths %% ------------------------------------------------------------------- -khepri_recent_history_path() -> - [?MODULE, recent_history_exchange]. - khepri_recent_history_path(#resource{virtual_host = VHost, name = Name}) -> - [?MODULE, recent_history_exchange, VHost, Name]. + khepri_recent_history_path(VHost, Name). + +khepri_recent_history_path(VHost, Name) -> + ExchangePath = rabbit_db_exchange:khepri_exchange_path(VHost, Name), + ExchangePath ++ [recent_history]. 
diff --git a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl index c3e17dd525d8..17dec8c39e01 100644 --- a/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl +++ b/deps/rabbitmq_recent_history_exchange/src/rabbit_db_rh_exchange_m2k_converter.erl @@ -90,8 +90,7 @@ delete_from_khepri(?RH_TABLE = Table, Key, State) -> end, State). clear_data_in_khepri(?RH_TABLE) -> - Path = rabbit_db_rh_exchange:khepri_recent_history_path(), - case rabbit_khepri:delete(Path) of + case rabbit_db_rh_exchange:delete_in_khepri() of ok -> ok; Error -> diff --git a/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl index 2e05ddb30eba..124805a4e6d2 100644 --- a/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl +++ b/deps/rabbitmq_recent_history_exchange/test/system_SUITE.erl @@ -58,11 +58,15 @@ end_per_suite(Config) -> rabbit_ct_helpers:run_teardown_steps(Config). 
init_per_group(mnesia_store, Config) -> - rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}]); + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + {khepri, _} -> {skip, "These tests target Mnesia"}; + _ -> Config + end; init_per_group(khepri_store, Config) -> - rabbit_ct_helpers:set_config( - Config, - [{metadata_store, {khepri, [khepri_db]}}]); + case rabbit_ct_broker_helpers:configured_metadata_store(Config) of + mnesia -> {skip, "These tests target Khepri"}; + _ -> Config + end; init_per_group(_, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, ?MODULE}, diff --git a/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl index eafe5e15a1ff..af140b76a03e 100644 --- a/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl +++ b/deps/rabbitmq_shovel/src/rabbit_amqp10_shovel.erl @@ -74,8 +74,13 @@ connect_source(State = #{name := Name, ack_mode := AckMode, source := #{uris := [Uri | _], source_address := Addr} = Src}) -> + SndSettleMode = case AckMode of + no_ack -> settled; + on_publish -> unsettled; + on_confirm -> unsettled + end, AttachFun = fun amqp10_client:attach_receiver_link/5, - {Conn, Sess, LinkRef} = connect(Name, AckMode, Uri, "receiver", Addr, Src, + {Conn, Sess, LinkRef} = connect(Name, SndSettleMode, Uri, "receiver", Addr, Src, AttachFun), State#{source => Src#{current => #{conn => Conn, session => Sess, @@ -87,8 +92,13 @@ connect_dest(State = #{name := Name, ack_mode := AckMode, dest := #{uris := [Uri | _], target_address := Addr} = Dst}) -> + SndSettleMode = case AckMode of + no_ack -> settled; + on_publish -> settled; + on_confirm -> unsettled + end, AttachFun = fun amqp10_client:attach_sender_link_sync/5, - {Conn, Sess, LinkRef} = connect(Name, AckMode, Uri, "sender", Addr, Dst, + {Conn, Sess, LinkRef} = connect(Name, SndSettleMode, Uri, "sender", Addr, Dst, AttachFun), %% wait for link credit here as if there are messages waiting we may try 
%% to forward before we've received credit @@ -99,7 +109,7 @@ connect_dest(State = #{name := Name, link => LinkRef, uri => Uri}}}. -connect(Name, AckMode, Uri, Postfix, Addr, Map, AttachFun) -> +connect(Name, SndSettleMode, Uri, Postfix, Addr, Map, AttachFun) -> {ok, Config0} = amqp10_client:parse_uri(Uri), %% As done for AMQP 0.9.1, exclude AMQP 1.0 shovel connections from maintenance mode %% to prevent crashes and errors being logged by the shovel plugin when a node gets drained. @@ -113,16 +123,11 @@ connect(Name, AckMode, Uri, Postfix, Addr, Map, AttachFun) -> LinkName0 = gen_unique_name(Name, Postfix), rabbit_data_coercion:to_binary(LinkName0) end, - % mixed settlement mode covers all the ack_modes - SettlementMode = case AckMode of - no_ack -> settled; - _ -> unsettled - end, % needs to be sync, i.e. awaits the 'attach' event as % else we may try to use the link before it is ready Durability = maps:get(durability, Map, unsettled_state), {ok, LinkRef} = AttachFun(Sess, LinkName, Addr, - SettlementMode, + SndSettleMode, Durability), {Conn, Sess, LinkRef}. diff --git a/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl index 90ff9cb725f5..5fca473c6671 100644 --- a/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl +++ b/deps/rabbitmq_shovel/src/rabbit_shovel_status.erl @@ -14,6 +14,7 @@ report_blocked_status/2, remove/1, status/0, + status/1, lookup/1, cluster_status/0, cluster_status_with_nodes/0, @@ -70,7 +71,9 @@ remove(Name) -> %% format without a feature flag. -spec status() -> [status_tuple()]. status() -> - gen_server:call(?SERVER, status, infinity). + status(infinity). +status(Timeout) -> + gen_server:call(?SERVER, status, Timeout). -spec cluster_status() -> [status_tuple()]. cluster_status() -> @@ -229,4 +232,3 @@ blocked_status_to_info(#entry{info = {running, Info}, {running, Info ++ [{blocked_status, BlockedStatus}]}; blocked_status_to_info(#entry{info = Info}) -> Info. 
- diff --git a/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl index 18b5ef3595e6..9c624f6e8219 100644 --- a/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl +++ b/deps/rabbitmq_shovel/test/amqp10_dynamic_SUITE.erl @@ -121,12 +121,12 @@ test_amqp10_destination(Config, Src, Dest, Sess, Protocol, ProtocolSrc) -> end}, {<<"dest-message-annotations">>, case MapConfig of - true -> - #{<<"message-ann-key">> => - <<"message-ann-value">>}; - _ -> - [{<<"message-ann-key">>, - <<"message-ann-value">>}] + true -> + #{<<"x-message-ann-key">> => + <<"message-ann-value">>}; + _ -> + [{<<"x-message-ann-key">>, + <<"message-ann-value">>}] end}]), Msg = publish_expect(Sess, Src, Dest, <<"tag1">>, <<"hello">>), AppProps = amqp10_msg:application_properties(Msg), @@ -138,7 +138,7 @@ test_amqp10_destination(Config, Src, Dest, Sess, Protocol, ProtocolSrc) -> <<"app-prop-key">> := <<"app-prop-value">>}), (AppProps)), ?assertEqual(undefined, maps:get(<<"delivery_mode">>, AppProps, undefined)), - ?assertMatch((#{<<"message-ann-key">> := <<"message-ann-value">>}), + ?assertMatch((#{<<"x-message-ann-key">> := <<"message-ann-value">>}), (amqp10_msg:message_annotations(Msg))). 
simple_amqp10_src(Config) -> diff --git a/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl b/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl index c4051ae3bba6..57afc089d160 100644 --- a/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl +++ b/deps/rabbitmq_shovel/test/rolling_upgrade_SUITE.erl @@ -257,12 +257,14 @@ child_id_format(Config) -> mnesia -> ok; khepri -> - Path = rabbit_db_msup:khepri_mirrored_supervisor_path(), + Pattern = rabbit_db_msup:khepri_mirrored_supervisor_path( + ?KHEPRI_WILDCARD_STAR, ?KHEPRI_WILDCARD_STAR_STAR), + Path = rabbit_db_msup:khepri_mirrored_supervisor_path( + rabbit_shovel_dyn_worker_sup_sup, {VHost, ShovelName}), + ct:pal("Pattern=~0p~nPath=~0p", [Pattern, Path]), ?assertMatch( - {ok, - #{[rabbit_db_msup, mirrored_supervisor_childspec, - rabbit_shovel_dyn_worker_sup_sup, VHost, ShovelName] := _}}, + {ok, #{Path := _}}, rabbit_ct_broker_helpers:rpc( Config, NewNode, rabbit_khepri, list, - [Path ++ [?KHEPRI_WILDCARD_STAR_STAR]])) + [Pattern])) end. 
diff --git a/deps/rabbitmq_shovel_management/app.bzl b/deps/rabbitmq_shovel_management/app.bzl index 0ca17b66892d..3c338cf4f318 100644 --- a/deps/rabbitmq_shovel_management/app.bzl +++ b/deps/rabbitmq_shovel_management/app.bzl @@ -9,7 +9,8 @@ def all_beam_files(name = "all_beam_files"): erlang_bytecode( name = "other_beam", srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], hdrs = [":public_and_private_hdrs"], @@ -33,7 +34,8 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], hdrs = [":public_and_private_hdrs"], @@ -72,7 +74,8 @@ def all_srcs(name = "all_srcs"): filegroup( name = "srcs", srcs = [ - "src/rabbit_shovel_mgmt.erl", + "src/rabbit_shovel_mgmt_shovel.erl", + "src/rabbit_shovel_mgmt_shovels.erl", "src/rabbit_shovel_mgmt_util.erl", ], ) diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl similarity index 83% rename from deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl rename to deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl index 2c414bded340..f41e70d0b84a 100644 --- a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt.erl +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovel.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(rabbit_shovel_mgmt). +-module(rabbit_shovel_mgmt_shovel). -behaviour(rabbit_mgmt_extension). @@ -19,9 +19,9 @@ -include_lib("amqp_client/include/amqp_client.hrl"). -include("rabbit_shovel_mgmt.hrl"). 
-dispatcher() -> [{"/shovels", ?MODULE, []}, - {"/shovels/:vhost", ?MODULE, []}, - {"/shovels/vhost/:vhost/:name", ?MODULE, []}, +-define(COMPONENT, <<"shovel">>). + +dispatcher() -> [{"/shovels/vhost/:vhost/:name", ?MODULE, []}, {"/shovels/vhost/:vhost/:name/restart", ?MODULE, []}]. web_ui() -> [{javascript, <<"shovel.js">>}]. @@ -42,20 +42,25 @@ resource_exists(ReqData, Context) -> not_found -> false; VHost -> - case rabbit_mgmt_util:id(name, ReqData) of + case name(ReqData) of none -> true; Name -> - %% Deleting or restarting a shovel case get_shovel_node(VHost, Name, ReqData, Context) of undefined -> rabbit_log:error("Shovel with the name '~ts' was not found on virtual host '~ts'. " "It may be failing to connect and report its status.", [Name, VHost]), - case is_restart(ReqData) of - true -> false; - %% this is a deletion attempt, it can continue and idempotently try to - %% delete the shovel - false -> true + case cowboy_req:method(ReqData) of + <<"DELETE">> -> + %% Deleting or restarting a shovel + case is_restart(ReqData) of + true -> false; + %% this is a deletion attempt, it can continue and idempotently try to + %% delete the shovel + false -> true + end; + _ -> + false end; _ -> true @@ -65,8 +70,9 @@ resource_exists(ReqData, Context) -> {Reply, ReqData, Context}. to_json(ReqData, Context) -> - rabbit_mgmt_util:reply_list( - filter_vhost_req(rabbit_shovel_mgmt_util:status(ReqData, Context), ReqData), ReqData, Context). + Shovel = parameter(ReqData), + rabbit_mgmt_util:reply(rabbit_mgmt_format:parameter(Shovel), + ReqData, Context). is_authorized(ReqData, Context) -> rabbit_mgmt_util:is_authorized_monitor(ReqData, Context). @@ -115,6 +121,19 @@ delete_resource(ReqData, #context{user = #user{username = Username}}=Context) -> %%-------------------------------------------------------------------- +name(ReqData) -> rabbit_mgmt_util:id(name, ReqData). 
+ +parameter(ReqData) -> + VHostName = rabbit_mgmt_util:vhost(ReqData), + Name = name(ReqData), + if + VHostName =/= not_found andalso + Name =/= none -> + rabbit_runtime_parameters:lookup(VHostName, ?COMPONENT, Name); + true -> + not_found + end. + is_restart(ReqData) -> Path = cowboy_req:path(ReqData), case string:find(Path, "/restart", trailing) of @@ -122,13 +141,6 @@ is_restart(ReqData) -> _ -> true end. -filter_vhost_req(List, ReqData) -> - case rabbit_mgmt_util:vhost(ReqData) of - none -> List; - VHost -> [I || I <- List, - pget(vhost, I) =:= VHost] - end. - get_shovel_node(VHost, Name, ReqData, Context) -> Shovels = rabbit_shovel_mgmt_util:status(ReqData, Context), Match = find_matching_shovel(VHost, Name, Shovels), diff --git a/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl new file mode 100644 index 000000000000..ca5a5f528556 --- /dev/null +++ b/deps/rabbitmq_shovel_management/src/rabbit_shovel_mgmt_shovels.erl @@ -0,0 +1,57 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(rabbit_shovel_mgmt_shovels). + +-behaviour(rabbit_mgmt_extension). + +-export([dispatcher/0, web_ui/0]). +-export([init/2, to_json/2, resource_exists/2, content_types_provided/2, + is_authorized/2, allowed_methods/2]). + +-import(rabbit_misc, [pget/2]). + +-include_lib("rabbitmq_management_agent/include/rabbit_mgmt_records.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include("rabbit_shovel_mgmt.hrl"). + +dispatcher() -> [{"/shovels", ?MODULE, []}, + {"/shovels/:vhost", ?MODULE, []}]. + +web_ui() -> [{javascript, <<"shovel.js">>}]. 
+ +%%-------------------------------------------------------------------- + +init(Req, _Opts) -> + {cowboy_rest, rabbit_mgmt_cors:set_headers(Req, ?MODULE), #context{}}. + +content_types_provided(ReqData, Context) -> + {[{<<"application/json">>, to_json}], ReqData, Context}. + +allowed_methods(ReqData, Context) -> + {[<<"HEAD">>, <<"GET">>, <<"OPTIONS">>], ReqData, Context}. + +resource_exists(ReqData, Context) -> + Reply = case rabbit_mgmt_util:vhost(ReqData) of + not_found -> false; + _Found -> true + end, + {Reply, ReqData, Context}. + +to_json(ReqData, Context) -> + rabbit_mgmt_util:reply_list( + filter_vhost_req(rabbit_shovel_mgmt_util:status(ReqData, Context), ReqData), ReqData, Context). + +is_authorized(ReqData, Context) -> + rabbit_mgmt_util:is_authorized_monitor(ReqData, Context). + +filter_vhost_req(List, ReqData) -> + case rabbit_mgmt_util:vhost(ReqData) of + none -> List; + VHost -> [I || I <- List, + pget(vhost, I) =:= VHost] + end. diff --git a/deps/rabbitmq_shovel_management/test/http_SUITE.erl b/deps/rabbitmq_shovel_management/test/http_SUITE.erl index 07d294086a5f..d4e93c91ebf9 100644 --- a/deps/rabbitmq_shovel_management/test/http_SUITE.erl +++ b/deps/rabbitmq_shovel_management/test/http_SUITE.erl @@ -8,6 +8,7 @@ -module(http_SUITE). -include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). -include_lib("rabbit_common/include/rabbit_framing.hrl"). -include_lib("rabbitmq_ct_helpers/include/rabbit_mgmt_test.hrl"). 
@@ -27,6 +28,10 @@ groups() -> [ {dynamic_shovels, [], [ start_and_list_a_dynamic_amqp10_shovel, + start_and_get_a_dynamic_amqp10_shovel, + start_and_get_a_dynamic_amqp091_shovel_with_publish_properties, + start_and_get_a_dynamic_amqp091_shovel_with_missing_publish_properties, + start_and_get_a_dynamic_amqp091_shovel_with_empty_publish_properties, create_and_delete_a_dynamic_shovel_that_successfully_connects, create_and_delete_a_dynamic_shovel_that_fails_to_connect ]}, @@ -124,25 +129,33 @@ start_inets(Config) -> %% ------------------------------------------------------------------- start_and_list_a_dynamic_amqp10_shovel(Config) -> - Port = integer_to_binary( - rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), - remove_all_dynamic_shovels(Config, <<"/">>), - ID = {<<"/">>, <<"dynamic-amqp10-1">>}, + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, await_shovel_removed(Config, ID), - http_put(Config, "/parameters/shovel/%2f/dynamic-amqp10-1", - #{value => #{'src-protocol' => <<"amqp10">>, - 'src-uri' => <<"amqp://localhost:", Port/binary>>, - 'src-address' => <<"test">>, - 'dest-protocol' => <<"amqp10">>, - 'dest-uri' => <<"amqp://localhost:", Port/binary>>, - 'dest-address' => <<"test2">>, - 'dest-properties' => #{}, - 'dest-application-properties' => #{}, - 'dest-message-annotations' => #{}}}, ?CREATED), + declare_shovel(Config, Name), + await_shovel_startup(Config, ID), + Shovels = list_shovels(Config), + ?assert(lists:any( + fun(M) -> + maps:get(name, M) =:= Name + end, Shovels)), + delete_shovel(Config, <<"dynamic-amqp10-await-startup-1">>), + ok. 
+ +start_and_get_a_dynamic_amqp10_shovel(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_shovel(Config, Name), await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), ok. @@ -157,6 +170,48 @@ start_and_list_a_dynamic_amqp10_shovel(Config) -> vhost := <<"v">>, type := <<"dynamic">>}). +start_and_get_a_dynamic_amqp091_shovel_with_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel_with_publish_properties(Config, Name), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. + +start_and_get_a_dynamic_amqp091_shovel_with_missing_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel(Config, Name), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. + +start_and_get_a_dynamic_amqp091_shovel_with_empty_publish_properties(Config) -> + remove_all_dynamic_shovels(Config, <<"/">>), + Name = rabbit_data_coercion:to_binary(?FUNCTION_NAME), + ID = {<<"/">>, Name}, + await_shovel_removed(Config, ID), + + declare_amqp091_shovel_with_publish_properties(Config, Name, #{}), + await_shovel_startup(Config, ID), + Sh = get_shovel(Config, Name), + ?assertEqual(Name, maps:get(name, Sh)), + delete_shovel(Config, Name), + + ok. 
+ start_static_shovels(Config) -> http_put(Config, "/users/admin", #{password => <<"admin">>, tags => <<"administrator">>}, ?CREATED), @@ -317,14 +372,89 @@ assert_item(ExpI, ActI) -> ExpI = maps:with(maps:keys(ExpI), ActI), ok. +list_shovels(Config) -> + list_shovels(Config, "%2F"). + +list_shovels(Config, VirtualHost) -> + Path = io_lib:format("/shovels/~s", [VirtualHost]), + http_get(Config, Path, ?OK). + +get_shovel(Config, Name) -> + get_shovel(Config, "%2F", Name). + +get_shovel(Config, VirtualHost, Name) -> + Path = io_lib:format("/shovels/vhost/~s/~s", [VirtualHost, Name]), + http_get(Config, Path, ?OK). + delete_shovel(Config, Name) -> - Path = io_lib:format("/shovels/vhost/%2F/~s", [Name]), + delete_shovel(Config, "%2F", Name). + +delete_shovel(Config, VirtualHost, Name) -> + Path = io_lib:format("/shovels/vhost/~s/~s", [VirtualHost, Name]), http_delete(Config, Path, ?NO_CONTENT). remove_all_dynamic_shovels(Config, VHost) -> rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_runtime_parameters, clear_vhost, [VHost, <<"CT tests">>]). +declare_shovel(Config, Name) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + 'src-protocol' => <<"amqp10">>, + 'src-uri' => <<"amqp://localhost:", Port/binary>>, + 'src-address' => <<"test">>, + 'dest-protocol' => <<"amqp10">>, + 'dest-uri' => <<"amqp://localhost:", Port/binary>>, + 'dest-address' => <<"test2">>, + 'dest-properties' => #{}, + 'dest-application-properties' => #{}, + 'dest-message-annotations' => #{} + } + }, ?CREATED). 
+ +declare_amqp091_shovel(Config, Name) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + <<"src-protocol">> => <<"amqp091">>, + <<"src-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"src-queue">> => <<"amqp091.src.test">>, + <<"src-delete-after">> => <<"never">>, + <<"dest-protocol">> => <<"amqp091">>, + <<"dest-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"dest-queue">> => <<"amqp091.dest.test">> + } + }, ?CREATED). + +declare_amqp091_shovel_with_publish_properties(Config, Name) -> + Props = #{ + <<"delivery_mode">> => 2, + <<"app_id">> => <<"shovel_management:http_SUITE">> + }, + declare_amqp091_shovel_with_publish_properties(Config, Name, Props). + +declare_amqp091_shovel_with_publish_properties(Config, Name, Props) -> + Port = integer_to_binary( + rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp)), + http_put(Config, io_lib:format("/parameters/shovel/%2f/~ts", [Name]), + #{ + value => #{ + <<"src-protocol">> => <<"amqp091">>, + <<"src-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"src-queue">> => <<"amqp091.src.test">>, + <<"src-delete-after">> => <<"never">>, + <<"dest-protocol">> => <<"amqp091">>, + <<"dest-uri">> => <<"amqp://localhost:", Port/binary>>, + <<"dest-queue">> => <<"amqp091.dest.test">>, + <<"dest-publish-properties">> => Props + } + }, ?CREATED). + await_shovel_startup(Config, Name) -> await_shovel_startup(Config, Name, 10_000). 
diff --git a/deps/rabbitmq_shovel_prometheus/BUILD.bazel b/deps/rabbitmq_shovel_prometheus/BUILD.bazel new file mode 100644 index 000000000000..d34bd895525a --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/BUILD.bazel @@ -0,0 +1,115 @@ +load("@rules_erlang//:eunit2.bzl", "eunit") +load("@rules_erlang//:xref2.bzl", "xref") +load("@rules_erlang//:dialyze.bzl", "dialyze", "plt") +load("//:rabbitmq_home.bzl", "rabbitmq_home") +load("//:rabbitmq_run.bzl", "rabbitmq_run") +load( + "//:rabbitmq.bzl", + "BROKER_VERSION_REQUIREMENTS_ANY", + "RABBITMQ_DIALYZER_OPTS", + "assert_suites", + "rabbitmq_app", + "rabbitmq_integration_suite", +) +load( + ":app.bzl", + "all_beam_files", + "all_srcs", + "all_test_beam_files", + "test_suite_beam_files", +) + +APP_NAME = "rabbitmq_shovel_prometheus" + +APP_DESCRIPTION = "Prometheus extension for the Shovel plugin" + +APP_ENV = """[ +]""" + +all_srcs(name = "all_srcs") + +all_beam_files(name = "all_beam_files") + +all_test_beam_files(name = "all_test_beam_files") + +test_suite_beam_files(name = "test_suite_beam_files") + +# gazelle:erlang_app_extra_app crypto +# gazelle:erlang_app_dep rabbit +# gazelle:erlang_app_dep rabbitmq_prometheus +# gazelle:erlang_app_dep_exclude prometheus + +rabbitmq_app( + name = "erlang_app", + srcs = [":all_srcs"], + hdrs = [":public_hdrs"], + app_description = APP_DESCRIPTION, + app_env = APP_ENV, + app_extra_keys = BROKER_VERSION_REQUIREMENTS_ANY, + app_module = "rabbit_shovel_prometheus_app", + app_name = APP_NAME, + beam_files = [":beam_files"], + extra_apps = [ + "crypto", + ], + license_files = [":license_files"], + priv = [":priv"], + deps = [ + "//deps/rabbit:erlang_app", + "//deps/rabbitmq_prometheus:erlang_app", + "//deps/rabbitmq_shovel:erlang_app", + ], +) + +xref( + name = "xref", + target = ":erlang_app", +) + +plt( + name = "deps_plt", + for_target = ":erlang_app", + ignore_warnings = True, + libs = ["@rules_elixir//elixir"], # keep + plt = "//:base_plt", +) + +dialyze( + name = "dialyze", + 
dialyzer_opts = RABBITMQ_DIALYZER_OPTS, + plt = ":deps_plt", + target = ":erlang_app", +) + +eunit( + name = "eunit", + target = ":test_erlang_app", +) + +rabbitmq_home( + name = "broker-for-tests-home", + plugins = [ + "//deps/rabbit:erlang_app", + ":erlang_app", + ], +) + +rabbitmq_run( + name = "rabbitmq-for-tests-run", + home = ":broker-for-tests-home", +) + +rabbitmq_integration_suite( + name = "prometheus_rabbitmq_shovel_collector_SUITE", + size = "small", + additional_beam = [ + ], +) + +assert_suites() + +alias( + name = "rabbitmq_shovel_prometheus", + actual = ":erlang_app", + visibility = ["//visibility:public"], +) diff --git a/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md b/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md new file mode 120000 index 000000000000..a3613c99f0b0 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/CODE_OF_CONDUCT.md @@ -0,0 +1 @@ +../../CODE_OF_CONDUCT.md \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md b/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md new file mode 120000 index 000000000000..f939e75f21a8 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/CONTRIBUTING.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/LICENSE b/deps/rabbitmq_shovel_prometheus/LICENSE new file mode 100644 index 000000000000..46e08bb41d0b --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/LICENSE @@ -0,0 +1 @@ +This package is licensed under the MPL 2.0. For the MPL 2.0, please see LICENSE-MPL-RabbitMQ. \ No newline at end of file diff --git a/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ b/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ new file mode 100644 index 000000000000..14e2f777f6c3 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/LICENSE-MPL-RabbitMQ @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. 
"Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. 
Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. 
* +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. 
+Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. 
If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/deps/rabbitmq_shovel_prometheus/Makefile b/deps/rabbitmq_shovel_prometheus/Makefile new file mode 100644 index 000000000000..f448bde8c6ca --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/Makefile @@ -0,0 +1,16 @@ +PROJECT = rabbitmq_shovel_prometheus +PROJECT_DESCRIPTION = Exposes rabbitmq_shovel metrics to Prometheus +PROJECT_MOD = rabbit_shovel_prometheus_app + +define PROJECT_APP_EXTRA_KEYS + {broker_version_requirements, []} +endef + +DEPS = rabbit_common rabbit rabbitmq_shovel rabbitmq_prometheus +TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers eunit_formatters + +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk + +include ../../rabbitmq-components.mk +include ../../erlang.mk diff --git a/deps/rabbitmq_shovel_prometheus/README.md b/deps/rabbitmq_shovel_prometheus/README.md new file mode 100644 index 000000000000..0a1b6882f9e3 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/README.md @@ -0,0 +1,16 @@ +# RabbitMQ Shovel Prometheus + +This plugin adds Shovel metrics to prometheus + +## Installation + +This plugin ships with RabbitMQ. Like all other plugins, it must be enabled +before it can be used: + +```bash +[sudo] rabbitmq-plugins enable rabbitmq_shovel_prometheus +``` + +## License + +See [LICENSE](./LICENSE). 
diff --git a/deps/rabbitmq_shovel_prometheus/app.bzl b/deps/rabbitmq_shovel_prometheus/app.bzl new file mode 100644 index 000000000000..b79594dc27a4 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/app.bzl @@ -0,0 +1,89 @@ +load("@rules_erlang//:erlang_bytecode2.bzl", "erlang_bytecode") +load("@rules_erlang//:filegroup.bzl", "filegroup") + +def all_beam_files(name = "all_beam_files"): + filegroup( + name = "beam_files", + srcs = [":other_beam"], + ) + erlang_bytecode( + name = "other_beam", + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_shovel_prometheus", + dest = "ebin", + erlc_opts = "//:erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def all_srcs(name = "all_srcs"): + filegroup( + name = "all_srcs", + srcs = [":public_and_private_hdrs", ":srcs"], + ) + filegroup( + name = "public_and_private_hdrs", + srcs = [":private_hdrs", ":public_hdrs"], + ) + + filegroup( + name = "priv", + ) + + filegroup( + name = "srcs", + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + ) + filegroup( + name = "private_hdrs", + ) + filegroup( + name = "public_hdrs", + ) + filegroup( + name = "license_files", + srcs = [ + "LICENSE", + "LICENSE-MPL-RabbitMQ", + ], + ) + +def all_test_beam_files(name = "all_test_beam_files"): + filegroup( + name = "test_beam_files", + testonly = True, + srcs = [":test_other_beam"], + ) + erlang_bytecode( + name = "test_other_beam", + testonly = True, + srcs = [ + "src/rabbit_shovel_prometheus_app.erl", + "src/rabbit_shovel_prometheus_collector.erl", + "src/rabbit_shovel_prometheus_sup.erl", + ], + hdrs = [":public_and_private_hdrs"], + app_name = "rabbitmq_shovel_prometheus", + dest = "test", + erlc_opts = "//:test_erlc_opts", + deps = ["@prometheus//:erlang_app"], + ) + +def 
test_suite_beam_files(name = "test_suite_beam_files"): + erlang_bytecode( + name = "prometheus_rabbitmq_shovel_collector_SUITE_beam_files", + testonly = True, + srcs = ["test/prometheus_rabbitmq_shovel_collector_SUITE.erl"], + outs = ["test/prometheus_rabbitmq_shovel_collector_SUITE.beam"], + app_name = "rabbitmq_shovel_prometheus", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", "@prometheus//:erlang_app"], + ) diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl new file mode 100644 index 000000000000..662ff4a73b30 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_app.erl @@ -0,0 +1,27 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_app). + +-behavior(application). + +-export([start/0, stop/0, start/2, stop/1]). + +start(normal, []) -> + {ok, _} = application:ensure_all_started(prometheus), + _ = rabbit_shovel_prometheus_collector:start(), + rabbit_shovel_prometheus_sup:start_link(). + +stop(_State) -> + _ = rabbit_shovel_prometheus_collector:stop(), + ok. + + +start() -> + _ = rabbit_shovel_prometheus_collector:start(). + +stop() -> ok. + diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl new file mode 100644 index 000000000000..acdc6d9df736 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_collector.erl @@ -0,0 +1,51 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. 
If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_collector). + +-behaviour(prometheus_collector). + +-export([start/0, stop/0]). +-export([deregister_cleanup/1, + collect_mf/2]). + +-import(prometheus_model_helpers, [create_mf/4]). + +%%==================================================================== +%% Collector API +%%==================================================================== + +start() -> + {ok, _} = application:ensure_all_started(prometheus), + prometheus_registry:register_collector(?MODULE). + +stop() -> + prometheus_registry:deregister_collector(?MODULE). + +deregister_cleanup(_) -> ok. + +collect_mf(_Registry, Callback) -> + Status = rabbit_shovel_status:status(500), + {StaticStatusGroups, DynamicStatusGroups} = lists:foldl(fun({_,static,{S, _}, _}, {SMap, DMap}) -> + {maps:update_with(S, fun(C) -> C + 1 end, 1, SMap), DMap}; + ({_,dynamic,{S, _}, _}, {SMap, DMap}) -> + {SMap, maps:update_with(S, fun(C) -> C + 1 end, 1, DMap)} + end, {#{}, #{}}, Status), + + Metrics = [{rabbitmq_shovel_dynamic, gauge, "Number of dynamic shovels", + [{[{status, S}], C} || {S, C} <- maps:to_list(DynamicStatusGroups)]}, + {rabbitmq_shovel_static, gauge, "Number of static shovels", + [{[{status, S}], C} || {S, C} <- maps:to_list(StaticStatusGroups)]} + ], + _ = [add_metric_family(Metric, Callback) || Metric <- Metrics], + ok. + +add_metric_family({Name, Type, Help, Metrics}, Callback) -> + Callback(create_mf(Name, Help, Type, Metrics)). 
+ +%%==================================================================== +%% Private Parts +%%==================================================================== diff --git a/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl new file mode 100644 index 000000000000..433c016af9f7 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/src/rabbit_shovel_prometheus_sup.erl @@ -0,0 +1,20 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% +-module(rabbit_shovel_prometheus_sup). + +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link(?MODULE, []). + +init(_Args) -> + SupFlags = #{strategy => one_for_one, intensity => 1, period => 5}, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. diff --git a/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl b/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl new file mode 100644 index 000000000000..3aa9efe93168 --- /dev/null +++ b/deps/rabbitmq_shovel_prometheus/test/prometheus_rabbitmq_shovel_collector_SUITE.erl @@ -0,0 +1,279 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(prometheus_rabbitmq_shovel_collector_SUITE). + +-include_lib("eunit/include/eunit.hrl"). 
+-include_lib("common_test/include/ct.hrl"). +-include_lib("amqp_client/include/amqp_client.hrl"). +-include_lib("prometheus/include/prometheus_model.hrl"). + +-compile(export_all). + +-define(DYN_RUNNING_METRIC(Gauge), + #'MetricFamily'{name = <<"rabbitmq_shovel_dynamic">>, + help = "Number of dynamic shovels",type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = Gauge}, + counter = undefined,summary = undefined,untyped = undefined, + histogram = undefined,timestamp_ms = undefined}]}). + +-define(STAT_RUNNING_METRIC(Gauge), + #'MetricFamily'{name = <<"rabbitmq_shovel_static">>, + help = "Number of static shovels",type = 'GAUGE', + metric = [#'Metric'{label = [#'LabelPair'{name = <<"status">>, + value = <<"running">>}], + gauge = #'Gauge'{value = Gauge}, + counter = undefined,summary = undefined,untyped = undefined, + histogram = undefined,timestamp_ms = undefined}]}). + +-define(EMPTY_DYN_METRIC, #'MetricFamily'{name = <<"rabbitmq_shovel_dynamic">>, + help = "Number of dynamic shovels",type = 'GAUGE', + metric = []}). + +-define(EMPTY_STAT_METRIC, #'MetricFamily'{name = <<"rabbitmq_shovel_static">>, + help = "Number of static shovels",type = 'GAUGE', + metric = []}). + + +all() -> + [ + {group, non_parallel_tests} + ]. + +groups() -> + [ + {non_parallel_tests, [], [ + dynamic, + static, + mix + ]} + ]. + +suite() -> + [{timetrap, {minutes, 5}}]. + +%% ------------------------------------------------------------------- +%% Testsuite setup/teardown. 
+%% ------------------------------------------------------------------- +init_per_suite(Config) -> + rabbit_ct_helpers:log_environment(), + Config1 = rabbit_ct_helpers:set_config(Config, [ + {rmq_nodename_suffix, ?MODULE}, + {ignored_crashes, [ + "server_initiated_close,404", + "writer,send_failed,closed" + ]} + ]), + rabbit_ct_helpers:run_setup_steps(Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_suite(Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_group(_, Config) -> + Config. + +end_per_group(_, Config) -> + Config. + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_finished(Config, Testcase). + +%% ------------------------------------------------------------------- +%% Test cases +%% ------------------------------------------------------------------- + +dynamic(Config) -> + create_dynamic_shovel(Config, <<"test">>), + running = get_shovel_status(Config, <<"test">>), + [?DYN_RUNNING_METRIC(1), ?EMPTY_STAT_METRIC] = get_metrics(Config), + create_dynamic_shovel(Config, <<"test2">>), + running = get_shovel_status(Config, <<"test2">>), + [?DYN_RUNNING_METRIC(2), ?EMPTY_STAT_METRIC] = get_metrics(Config), + clear_param(Config, <<"test">>), + clear_param(Config, <<"test2">>), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. + +static(Config) -> + create_static_shovel(Config, static_shovel), + [?EMPTY_DYN_METRIC, ?STAT_RUNNING_METRIC(1)] = get_metrics(Config), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_shovel, + []), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. 
+ + +mix(Config) -> + create_dynamic_shovel(Config, <<"test">>), + running = get_shovel_status(Config, <<"test">>), + create_static_shovel(Config, static_shovel), + + [?DYN_RUNNING_METRIC(1), ?STAT_RUNNING_METRIC(1)] = get_metrics(Config), + + clear_param(Config, <<"test">>), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, clear_shovel, + []), + [?EMPTY_DYN_METRIC, ?EMPTY_STAT_METRIC] = get_metrics(Config), + ok. + +%% ------------------------------------------------------------------- +%% Internal +%% ------------------------------------------------------------------- + +get_metrics(Config) -> + rabbit_ct_broker_helpers:rpc(Config, 0, + ?MODULE, collect_mf, + [default, rabbit_shovel_prometheus_collector]). + +create_static_shovel(Config, Name) -> + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Shovel = [{Name, + [{source, + [{uris, [rabbit_misc:format("amqp://~ts:~b", + [Hostname, Port])]}, + + {declarations, [ {'exchange.declare', + [ {exchange, <<"my_fanout">>}, + {type, <<"fanout">>}, + durable + ]}, + {'queue.declare', + [{arguments, + [{<<"x-message-ttl">>, long, 60000}]}]}, + {'queue.bind', + [ {exchange, <<"my_fanout">>}, + {queue, <<>>} + ]} + ]}, + {queue, <<>>}] + }, + {destination, + [ {protocol, amqp091}, + {uris, ["amqp://"]}, + {declarations, [ {'exchange.declare', + [ {exchange, <<"my_direct">>}, + {type, <<"direct">>}, + durable + ]} + ]}, + {publish_properties, [ {delivery_mode, 2} ]}, + {add_forward_headers, true}, + {publish_fields, [ {exchange, <<"my_direct">>}, + {routing_key, <<"from_shovel">>} + ]} + ]}, + {ack_mode, on_confirm}, + {reconnect_delay, 5} + + ]}], + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, setup_shovel, + [Shovel, Name]). 
+ +setup_shovel(ShovelConfig, Name) -> + _ = application:stop(rabbitmq_shovel), + application:set_env(rabbitmq_shovel, shovels, ShovelConfig, infinity), + ok = application:start(rabbitmq_shovel), + await_shovel(Name, static). + +clear_shovel() -> + _ = application:stop(rabbitmq_shovel), + application:unset_env(rabbitmq_shovel, shovels, infinity), + ok = application:start(rabbitmq_shovel). + +make_uri(Config, Node) -> + Hostname = ?config(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), + list_to_binary(lists:flatten(io_lib:format("amqp://~ts:~b", + [Hostname, Port]))). + +create_dynamic_shovel(Config, Name) -> + Node = 0, + QueueNode = 0, + Uri = make_uri(Config, QueueNode), + Value = [{<<"src-queue">>, <<"src">>}, + {<<"dest-queue">>, <<"dest">>}], + ok = rabbit_ct_broker_helpers:rpc( + Config, + Node, + rabbit_runtime_parameters, + set, [ + <<"/">>, <<"shovel">>, Name, [{<<"src-uri">>, Uri}, + {<<"dest-uri">>, [Uri]} | + Value], none]), + ok = rabbit_ct_broker_helpers:rpc(Config, 0, ?MODULE, await_shovel, + [Name, dynamic]). + +await_shovel(Name, Type) -> + Ret = await(fun() -> + Status = shovels_from_status(running, Type), + lists:member(Name, Status) + end, 30_000), + Ret. + +shovels_from_status(ExpectedState, dynamic) -> + S = rabbit_shovel_status:status(), + [N || {{<<"/">>, N}, dynamic, {State, _}, _} <- S, State == ExpectedState]; +shovels_from_status(ExpectedState, static) -> + S = rabbit_shovel_status:status(), + [N || {N, static, {State, _}, _} <- S, State == ExpectedState]. + +get_shovel_status(Config, Name) -> + get_shovel_status(Config, 0, Name). + +get_shovel_status(Config, Node, Name) -> + S = rabbit_ct_broker_helpers:rpc( + Config, Node, rabbit_shovel_status, lookup, [{<<"/">>, Name}]), + case S of + not_found -> + not_found; + _ -> + {Status, Info} = proplists:get_value(info, S), + proplists:get_value(blocked_status, Info, Status) + end. 
+ +await(Pred) -> + case Pred() of + true -> ok; + false -> timer:sleep(100), + await(Pred) + end. + +await(_Pred, Timeout) when Timeout =< 0 -> + error(await_timeout); +await(Pred, Timeout) -> + case Pred() of + true -> ok; + Other when Timeout =< 100 -> + error({await_timeout, Other}); + _ -> timer:sleep(100), + await(Pred, Timeout - 100) + end. + +clear_param(Config, Name) -> + clear_param(Config, 0, Name). + +clear_param(Config, Node, Name) -> + rabbit_ct_broker_helpers:rpc(Config, Node, + rabbit_runtime_parameters, clear, [<<"/">>, <<"shovel">>, Name, <<"acting-user">>]). + +-define(PD_KEY, metric_families). +collect_mf(Registry, Collector) -> + put(?PD_KEY, []), + Collector:collect_mf(Registry, fun(MF) -> put(?PD_KEY, [MF | get(?PD_KEY)]) end), + MFs = lists:reverse(get(?PD_KEY)), + erase(?PD_KEY), + MFs. diff --git a/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl index 4338af7e0091..a0283dea2044 100644 --- a/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl +++ b/deps/rabbitmq_stomp/include/rabbit_stomp_headers.hrl @@ -30,6 +30,7 @@ -define(HEADER_X_STREAM_FILTER, "x-stream-filter"). -define(HEADER_X_STREAM_MATCH_UNFILTERED, "x-stream-match-unfiltered"). -define(HEADER_PRIORITY, "priority"). +-define(HEADER_X_PRIORITY, "x-priority"). -define(HEADER_RECEIPT, "receipt"). -define(HEADER_REDELIVERED, "redelivered"). -define(HEADER_REPLY_TO, "reply-to"). diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl index 7eab1bdcc6f8..50a1b68fabf8 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_processor.erl @@ -718,7 +718,8 @@ do_subscribe(Destination, DestHdr, Frame, subscribe_arguments(Frame) -> subscribe_arguments([?HEADER_X_STREAM_OFFSET, ?HEADER_X_STREAM_FILTER, - ?HEADER_X_STREAM_MATCH_UNFILTERED], Frame, []). + ?HEADER_X_STREAM_MATCH_UNFILTERED, + ?HEADER_X_PRIORITY], Frame, []). 
subscribe_arguments([], _Frame , Acc) -> Acc; @@ -749,6 +750,14 @@ subscribe_argument(?HEADER_X_STREAM_MATCH_UNFILTERED, Frame, Acc) -> [{list_to_binary(?HEADER_X_STREAM_MATCH_UNFILTERED), bool, MU}] ++ Acc; not_found -> Acc + end; +subscribe_argument(?HEADER_X_PRIORITY, Frame, Acc) -> + Priority = rabbit_stomp_frame:integer_header(Frame, ?HEADER_X_PRIORITY), + case Priority of + {ok, P} -> + [{list_to_binary(?HEADER_X_PRIORITY), byte, P}] ++ Acc; + not_found -> + Acc end. check_subscription_access(Destination = {topic, _Topic}, diff --git a/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl index ccf7af95f24a..7bb9b8986bf6 100644 --- a/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl +++ b/deps/rabbitmq_stomp/src/rabbit_stomp_reader.erl @@ -63,55 +63,51 @@ close_connection(Pid, Reason) -> init([SupHelperPid, Ref, Configuration]) -> process_flag(trap_exit, true), - ProxyProtocolEnabled = application:get_env(rabbitmq_stomp, proxy_protocol, false), - case rabbit_networking:handshake(Ref, ProxyProtocolEnabled) of + {ok, Sock} = rabbit_networking:handshake(Ref, + application:get_env(rabbitmq_stomp, proxy_protocol, false)), + RealSocket = rabbit_net:unwrap_socket(Sock), + + case rabbit_net:connection_string(Sock, inbound) of + {ok, ConnStr} -> + ConnName = rabbit_data_coercion:to_binary(ConnStr), + ProcInitArgs = processor_args(Configuration, Sock), + ProcState = rabbit_stomp_processor:initial_state(Configuration, + ProcInitArgs), + + rabbit_log_connection:info("accepting STOMP connection ~tp (~ts)", + [self(), ConnName]), + + ParseState = rabbit_stomp_frame:initial_state(), + _ = register_resource_alarm(), + + LoginTimeout = application:get_env(rabbitmq_stomp, login_timeout, 10_000), + MaxFrameSize = application:get_env(rabbitmq_stomp, max_frame_size, ?DEFAULT_MAX_FRAME_SIZE), + erlang:send_after(LoginTimeout, self(), login_timeout), + + gen_server2:enter_loop(?MODULE, [], + rabbit_event:init_stats_timer( + 
run_socket(control_throttle( + #reader_state{socket = RealSocket, + conn_name = ConnName, + parse_state = ParseState, + processor_state = ProcState, + heartbeat_sup = SupHelperPid, + heartbeat = {none, none}, + max_frame_size = MaxFrameSize, + current_frame_size = 0, + state = running, + conserve_resources = false, + recv_outstanding = false})), #reader_state.stats_timer), + {backoff, 1000, 1000, 10000}); + {error, enotconn} -> + rabbit_net:fast_close(RealSocket), + terminate(shutdown, undefined); {error, Reason} -> - rabbit_log_connection:error( - "STOMP could not establish connection: ~s", [Reason]), - {stop, Reason}; - {ok, Sock} -> - RealSocket = rabbit_net:unwrap_socket(Sock), - case rabbit_net:connection_string(Sock, inbound) of - {ok, ConnStr} -> - ConnName = rabbit_data_coercion:to_binary(ConnStr), - ProcInitArgs = processor_args(Configuration, Sock), - ProcState = rabbit_stomp_processor:initial_state(Configuration, - ProcInitArgs), - - rabbit_log_connection:info("accepting STOMP connection ~tp (~ts)", - [self(), ConnName]), - - ParseState = rabbit_stomp_frame:initial_state(), - _ = register_resource_alarm(), - - LoginTimeout = application:get_env(rabbitmq_stomp, login_timeout, 10_000), - MaxFrameSize = application:get_env(rabbitmq_stomp, max_frame_size, ?DEFAULT_MAX_FRAME_SIZE), - erlang:send_after(LoginTimeout, self(), login_timeout), - - gen_server2:enter_loop(?MODULE, [], - rabbit_event:init_stats_timer( - run_socket(control_throttle( - #reader_state{socket = RealSocket, - conn_name = ConnName, - parse_state = ParseState, - processor_state = ProcState, - heartbeat_sup = SupHelperPid, - heartbeat = {none, none}, - max_frame_size = MaxFrameSize, - current_frame_size = 0, - state = running, - conserve_resources = false, - recv_outstanding = false})), #reader_state.stats_timer), - {backoff, 1000, 1000, 10000}); - {error, enotconn} -> - rabbit_net:fast_close(RealSocket), - terminate(shutdown, undefined); - {error, Reason} -> - 
rabbit_net:fast_close(RealSocket), - terminate({network_error, Reason}, undefined) - end + rabbit_net:fast_close(RealSocket), + terminate({network_error, Reason}, undefined) end. + handle_call({info, InfoItems}, _From, State) -> Infos = lists:map( fun(InfoItem) -> diff --git a/deps/rabbitmq_stomp/test/system_SUITE.erl b/deps/rabbitmq_stomp/test/system_SUITE.erl index caf6de6ddc93..c583f2102d1b 100644 --- a/deps/rabbitmq_stomp/test/system_SUITE.erl +++ b/deps/rabbitmq_stomp/test/system_SUITE.erl @@ -17,7 +17,9 @@ -include("rabbit_stomp_headers.hrl"). -define(QUEUE, <<"TestQueue">>). +-define(QUEUE_QQ, <<"TestQueueQQ">>). -define(DESTINATION, "/amq/queue/TestQueue"). +-define(DESTINATION_QQ, "/amq/queue/TestQueueQQ"). all() -> [{group, version_to_group_name(V)} || V <- ?SUPPORTED_VERSIONS]. @@ -28,6 +30,7 @@ groups() -> publish_unauthorized_error, subscribe_error, subscribe, + subscribe_with_x_priority, unsubscribe_ack, subscribe_ack, send, @@ -161,6 +164,44 @@ subscribe(Config) -> {ok, _Client2, _, [<<"hello">>]} = stomp_receive(Client1, "MESSAGE"), ok. 
+subscribe_with_x_priority(Config) -> + Version = ?config(version, Config), + StompPort = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_stomp), + Channel = ?config(amqp_channel, Config), + ClientA = ?config(stomp_client, Config), + #'queue.declare_ok'{} = + amqp_channel:call(Channel, #'queue.declare'{queue = ?QUEUE_QQ, + durable = true, + arguments = [{<<"x-queue-type">>, longstr, <<"quorum">>}, + {<<"x-single-active-consumer">>, bool, true} + ]}), + + %% subscribe and wait for receipt + rabbit_stomp_client:send( + ClientA, "SUBSCRIBE", [{"destination", ?DESTINATION_QQ}, {"receipt", "foo"}]), + {ok, _ClientA1, _, _} = stomp_receive(ClientA, "RECEIPT"), + + %% subscribe with a higher priority and wait for receipt + {ok, ClientB} = rabbit_stomp_client:connect(Version, StompPort), + rabbit_stomp_client:send( + ClientB, "SUBSCRIBE", [{"destination", ?DESTINATION_QQ}, + {"receipt", "foo"}, + {"x-priority", 10} + ]), + {ok, ClientB1, _, _} = stomp_receive(ClientB, "RECEIPT"), + + %% send from amqp + Method = #'basic.publish'{exchange = <<"">>, routing_key = ?QUEUE_QQ}, + + amqp_channel:call(Channel, Method, #amqp_msg{props = #'P_basic'{}, + payload = <<"hello">>}), + + %% ClientB should receive the message since it has a higher priority + {ok, _ClientB2, _, [<<"hello">>]} = stomp_receive(ClientB1, "MESSAGE"), + #'queue.delete_ok'{} = + amqp_channel:call(Channel, #'queue.delete'{queue = ?QUEUE_QQ}), + ok. + unsubscribe_ack(Config) -> Channel = ?config(amqp_channel, Config), Client = ?config(stomp_client, Config), diff --git a/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl b/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl index e36d735f4a59..2d4dc7f2e85e 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_metrics_gc.erl @@ -32,7 +32,7 @@ -spec start_link() -> rabbit_types:ok_pid_or_error(). start_link() -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). 
+ gen_server:start_link({local, ?MODULE}, ?MODULE, [], [{hibernate_after, 0}]). init(_) -> Interval = diff --git a/deps/rabbitmq_stream/src/rabbit_stream_reader.erl b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl index d736b35212fd..ffada5519745 100644 --- a/deps/rabbitmq_stream/src/rabbit_stream_reader.erl +++ b/deps/rabbitmq_stream/src/rabbit_stream_reader.erl @@ -136,13 +136,10 @@ init([KeepaliveSup, heartbeat := Heartbeat, transport := ConnTransport}]) -> process_flag(trap_exit, true), - ProxyProtocolEnabled = - application:get_env(rabbitmq_stream, proxy_protocol, false), - %% Note: - %% This function could return an error if the handshake times out. - %% It is less likely to happen here as compared to MQTT, so - %% crashing with a `badmatch` seems appropriate. - {ok, Sock} = rabbit_networking:handshake(Ref, ProxyProtocolEnabled), + {ok, Sock} = + rabbit_networking:handshake(Ref, + application:get_env(rabbitmq_stream, + proxy_protocol, false)), RealSocket = rabbit_net:unwrap_socket(Sock), case rabbit_net:connection_string(Sock, inbound) of {ok, ConnStr} -> diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl index 7152396aa49a..06792b4e739d 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE.erl @@ -63,7 +63,8 @@ groups() -> offset_lag_calculation, test_super_stream_duplicate_partitions, authentication_error_should_close_with_delay, - unauthorized_vhost_access_should_close_with_delay + unauthorized_vhost_access_should_close_with_delay, + sasl_anonymous ]}, %% Run `test_global_counters` on its own so the global metrics are %% initialised to 0 for each testcase @@ -249,6 +250,16 @@ test_stream(Config) -> test_server(gen_tcp, Stream, Config), ok. 
+sasl_anonymous(Config) -> + Port = get_port(gen_tcp, Config), + Opts = get_opts(gen_tcp), + {ok, S} = gen_tcp:connect("localhost", Port, Opts), + C0 = rabbit_stream_core:init(0), + C1 = test_peer_properties(gen_tcp, S, C0), + C2 = sasl_handshake(gen_tcp, S, C1), + C3 = test_anonymous_sasl_authenticate(gen_tcp, S, C2), + _C = tune(gen_tcp, S, C3). + test_update_secret(Config) -> Transport = gen_tcp, {S, C0} = connect_and_authenticate(Transport, Config), @@ -1150,17 +1161,20 @@ test_authenticate(Transport, S, C0, Username, Password) -> sasl_handshake(Transport, S, C0) -> SaslHandshakeFrame = request(sasl_handshake), ok = Transport:send(S, SaslHandshakeFrame), - Plain = <<"PLAIN">>, - AmqPlain = <<"AMQPLAIN">>, {Cmd, C1} = receive_commands(Transport, S, C0), case Cmd of {response, _, {sasl_handshake, ?RESPONSE_CODE_OK, Mechanisms}} -> - ?assertEqual([AmqPlain, Plain], lists:sort(Mechanisms)); + ?assertEqual([<<"AMQPLAIN">>, <<"ANONYMOUS">>, <<"PLAIN">>], + lists:sort(Mechanisms)); _ -> ct:fail("invalid cmd ~tp", [Cmd]) end, C1. +test_anonymous_sasl_authenticate(Transport, S, C) -> + Res = sasl_authenticate(Transport, S, C, <<"ANONYMOUS">>, <<>>), + expect_successful_authentication(Res). + test_plain_sasl_authenticate(Transport, S, C1, Username) -> test_plain_sasl_authenticate(Transport, S, C1, Username, Username). @@ -1175,6 +1189,7 @@ expect_successful_authentication({SaslAuth, C2} = _SaslReponse) -> ?assertEqual({response, 2, {sasl_authenticate, ?RESPONSE_CODE_OK}}, SaslAuth), C2. 
+ expect_unsuccessful_authentication({SaslAuth, C2} = _SaslReponse, ExpectedError) -> ?assertEqual({response, 2, {sasl_authenticate, ExpectedError}}, SaslAuth), diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml index 9ad65e76e692..8b2eb333c783 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml @@ -27,11 +27,11 @@ [0.12.0-SNAPSHOT,) - 5.10.3 + 5.11.1 3.26.3 1.2.13 3.12.1 - 3.3.1 + 3.5.0 2.43.0 1.17.0 UTF-8 diff --git a/deps/rabbitmq_stream_common/Makefile b/deps/rabbitmq_stream_common/Makefile index 914a868f1c7c..a6b7c71ae117 100644 --- a/deps/rabbitmq_stream_common/Makefile +++ b/deps/rabbitmq_stream_common/Makefile @@ -7,13 +7,12 @@ define PROJECT_ENV endef -DEPS = +DEPS = osiris TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers -PLT_APPS = osiris - DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_stream_management/priv/www/js/tmpl/streamConnection.ejs b/deps/rabbitmq_stream_management/priv/www/js/tmpl/streamConnection.ejs index 571293bf4837..1a5f873dc3e0 100644 --- a/deps/rabbitmq_stream_management/priv/www/js/tmpl/streamConnection.ejs +++ b/deps/rabbitmq_stream_management/priv/www/js/tmpl/streamConnection.ejs @@ -17,7 +17,7 @@ <% if (connection.client_properties.connection_name) { %> - Client-provided name + Client-provided connection name <%= fmt_string(connection.client_properties.connection_name) %> <% } %> diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml index 5796f0c6f74c..b67c00419339 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml @@ -27,11 +27,11 @@ [0.12.0-SNAPSHOT,) - 5.10.3 + 5.11.1 3.26.3 1.2.13 3.12.1 - 3.3.1 + 3.5.0 2.43.0 1.18.1 4.12.0 diff --git a/deps/rabbitmq_trust_store/Makefile b/deps/rabbitmq_trust_store/Makefile index 77440b74080d..58b73990da58 100644 --- a/deps/rabbitmq_trust_store/Makefile +++ b/deps/rabbitmq_trust_store/Makefile @@ -10,7 +10,7 @@ define PROJECT_ENV endef DEPS = rabbit_common rabbit -LOCAL_DEPS += ssl crypto public_key inets +LOCAL_DEPS = ssl crypto public_key inets ## We need the Cowboy's test utilities TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client ct_helper trust_store_http dep_ct_helper = git https://github.com/extend/ct_helper.git master diff --git a/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema index d3d33c251ccc..d9cc4a2afa51 100644 --- a/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema +++ b/deps/rabbitmq_trust_store/priv/schema/rabbitmq_trust_store.schema @@ -124,7 
+124,7 @@ end}. [{datatype, {enum, [true, false]}}]}. {mapping, "trust_store.ssl_options.password", "rabbitmq_trust_store.ssl_options.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {mapping, "trust_store.ssl_options.psk_identity", "rabbitmq_trust_store.ssl_options.psk_identity", [{datatype, string}]}. diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl index 5e0aee535451..a5f0e59dbaf8 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl @@ -65,8 +65,8 @@ join_url(BaseUrl, CertPath) -> string:strip(rabbit_data_coercion:to_list(CertPath), left, $/). init(Config) -> - inets:start(httpc, [{profile, ?PROFILE}]), - _ = application:ensure_all_started(ssl), + _ = inets:start(httpc, [{profile, ?PROFILE}]), + {ok, _} = application:ensure_all_started(ssl), Options = proplists:get_value(proxy_options, Config, []), httpc:set_options(Options, ?PROFILE). diff --git a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets index d45f48ecef45..b8d7f0457e3d 100644 --- a/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets +++ b/deps/rabbitmq_trust_store/test/config_schema_SUITE_data/rabbitmq_trust_store.snippets @@ -24,5 +24,5 @@ {url,"https://example.com"}, {ssl_options, [{certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {password,"i_am_password"}]}]}], + {password,<<"i_am_password">>}]}]}], [rabbitmq_trust_store]}]. 
diff --git a/deps/rabbitmq_web_mqtt/BUILD.bazel b/deps/rabbitmq_web_mqtt/BUILD.bazel index f9561e14ffaf..49b62e9f1aa8 100644 --- a/deps/rabbitmq_web_mqtt/BUILD.bazel +++ b/deps/rabbitmq_web_mqtt/BUILD.bazel @@ -103,11 +103,11 @@ eunit( broker_for_integration_suites() rabbitmq_integration_suite( - name = "config_schema_SUITE", + name = "web_mqtt_config_schema_SUITE", ) rabbitmq_integration_suite( - name = "command_SUITE", + name = "web_mqtt_command_SUITE", additional_beam = [ "test/rabbit_web_mqtt_test_util.beam", ], @@ -117,7 +117,7 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "proxy_protocol_SUITE", + name = "web_mqtt_proxy_protocol_SUITE", additional_beam = [ "test/src/rabbit_ws_test_util.beam", "test/src/rfc6455_client.beam", @@ -125,7 +125,23 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "system_SUITE", + name = "web_mqtt_shared_SUITE", + additional_beam = [ + "test/src/rabbit_ws_test_util.beam", + "test/src/rfc6455_client.beam", + ], +) + +rabbitmq_integration_suite( + name = "web_mqtt_system_SUITE", + additional_beam = [ + "test/src/rabbit_ws_test_util.beam", + "test/src/rfc6455_client.beam", + ], +) + +rabbitmq_integration_suite( + name = "web_mqtt_v5_SUITE", additional_beam = [ "test/src/rabbit_ws_test_util.beam", "test/src/rfc6455_client.beam", diff --git a/deps/rabbitmq_web_mqtt/Makefile b/deps/rabbitmq_web_mqtt/Makefile index 9919e7cb82cd..812d467f1911 100644 --- a/deps/rabbitmq_web_mqtt/Makefile +++ b/deps/rabbitmq_web_mqtt/Makefile @@ -19,7 +19,7 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = ssl DEPS = rabbit_common rabbit cowboy rabbitmq_mqtt -TEST_DEPS = emqtt rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management +TEST_DEPS = emqtt rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_stomp rabbitmq_consistent_hash_exchange PLT_APPS += rabbitmqctl elixir cowlib @@ -34,3 +34,9 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include 
../../erlang.mk + +# We are using mqtt_shared_SUITE from rabbitmq_mqtt. +CT_OPTS += -pa ../rabbitmq_mqtt/test/ + +test-build:: + $(verbose) $(MAKE) -C ../rabbitmq_mqtt test-dir diff --git a/deps/rabbitmq_web_mqtt/app.bzl b/deps/rabbitmq_web_mqtt/app.bzl index 17ab4ecacb84..d7a5de02fdde 100644 --- a/deps/rabbitmq_web_mqtt/app.bzl +++ b/deps/rabbitmq_web_mqtt/app.bzl @@ -93,60 +93,76 @@ def all_srcs(name = "all_srcs"): def test_suite_beam_files(name = "test_suite_beam_files"): erlang_bytecode( - name = "config_schema_SUITE_beam_files", + name = "test_src_rabbit_ws_test_util_beam", testonly = True, - srcs = ["test/config_schema_SUITE.erl"], - outs = ["test/config_schema_SUITE.beam"], + srcs = ["test/src/rabbit_ws_test_util.erl"], + outs = ["test/src/rabbit_ws_test_util.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) erlang_bytecode( - name = "proxy_protocol_SUITE_beam_files", + name = "test_src_rfc6455_client_beam", testonly = True, - srcs = ["test/proxy_protocol_SUITE.erl"], - outs = ["test/proxy_protocol_SUITE.beam"], + srcs = ["test/src/rfc6455_client.erl"], + outs = ["test/src/rfc6455_client.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) + erlang_bytecode( - name = "system_SUITE_beam_files", + name = "test_rabbit_web_mqtt_test_util_beam", testonly = True, - srcs = ["test/system_SUITE.erl"], - outs = ["test/system_SUITE.beam"], + srcs = ["test/rabbit_web_mqtt_test_util.erl"], + outs = ["test/rabbit_web_mqtt_test_util.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "test_src_rabbit_ws_test_util_beam", + name = "web_mqtt_command_SUITE_beam_files", testonly = True, - srcs = ["test/src/rabbit_ws_test_util.erl"], - outs = ["test/src/rabbit_ws_test_util.beam"], + srcs = ["test/web_mqtt_command_SUITE.erl"], + outs = ["test/web_mqtt_command_SUITE.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp_client:erlang_app", 
"//deps/rabbitmq_mqtt:erlang_app"], ) erlang_bytecode( - name = "test_src_rfc6455_client_beam", + name = "web_mqtt_config_schema_SUITE_beam_files", testonly = True, - srcs = ["test/src/rfc6455_client.erl"], - outs = ["test/src/rfc6455_client.beam"], + srcs = ["test/web_mqtt_config_schema_SUITE.erl"], + outs = ["test/web_mqtt_config_schema_SUITE.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) erlang_bytecode( - name = "command_SUITE_beam_files", + name = "web_mqtt_proxy_protocol_SUITE_beam_files", testonly = True, - srcs = ["test/command_SUITE.erl"], - outs = ["test/command_SUITE.beam"], + srcs = ["test/web_mqtt_proxy_protocol_SUITE.erl"], + outs = ["test/web_mqtt_proxy_protocol_SUITE.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", - deps = ["//deps/amqp_client:erlang_app", "//deps/rabbitmq_mqtt:erlang_app"], ) erlang_bytecode( - name = "test_rabbit_web_mqtt_test_util_beam", + name = "web_mqtt_shared_SUITE_beam_files", testonly = True, - srcs = ["test/rabbit_web_mqtt_test_util.erl"], - outs = ["test/rabbit_web_mqtt_test_util.beam"], + srcs = ["test/web_mqtt_shared_SUITE.erl"], + outs = ["test/web_mqtt_shared_SUITE.beam"], + app_name = "rabbitmq_web_mqtt", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "web_mqtt_system_SUITE_beam_files", + testonly = True, + srcs = ["test/web_mqtt_system_SUITE.erl"], + outs = ["test/web_mqtt_system_SUITE.beam"], + app_name = "rabbitmq_web_mqtt", + erlc_opts = "//:test_erlc_opts", + ) + erlang_bytecode( + name = "web_mqtt_v5_SUITE_beam_files", + testonly = True, + srcs = ["test/web_mqtt_v5_SUITE.erl"], + outs = ["test/web_mqtt_v5_SUITE.beam"], app_name = "rabbitmq_web_mqtt", erlc_opts = "//:test_erlc_opts", ) diff --git a/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema index 91d6b1878239..e4afd579d4b7 100644 --- a/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema +++ 
b/deps/rabbitmq_web_mqtt/priv/schema/rabbitmq_web_mqtt.schema @@ -56,7 +56,7 @@ {mapping, "web_mqtt.ssl.cacertfile", "rabbitmq_web_mqtt.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "web_mqtt.ssl.password", "rabbitmq_web_mqtt.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. {translation, "rabbitmq_web_mqtt.ssl_config", diff --git a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl index 176a29e86842..67e99400b500 100644 --- a/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl +++ b/deps/rabbitmq_web_mqtt/src/rabbit_web_mqtt_handler.erl @@ -176,8 +176,9 @@ websocket_info({'$gen_cast', {duplicate_id, SendWill}}, rabbit_mqtt_processor:send_disconnect(?RC_SESSION_TAKEN_OVER, ProcState), defer_close(?CLOSE_NORMAL, SendWill), {[], State}; -websocket_info({'$gen_cast', {close_connection, Reason}}, State = #state{proc_state = ProcState, - conn_name = ConnName}) -> +websocket_info({'$gen_cast', {close_connection, Reason}}, + State = #state{proc_state = ProcState, + conn_name = ConnName}) -> ?LOG_WARNING("Web MQTT disconnecting client with ID '~s' (~p), reason: ~s", [rabbit_mqtt_processor:info(client_id, ProcState), ConnName, Reason]), case Reason of @@ -215,6 +216,14 @@ websocket_info({keepalive, Req}, State = #state{proc_state = ProcState, [ConnName, Reason]), stop(State) end; +websocket_info(credential_expired, + State = #state{proc_state = ProcState, + conn_name = ConnName}) -> + ?LOG_WARNING("Web MQTT disconnecting client with ID '~s' (~p) because credential expired", + [rabbit_mqtt_processor:info(client_id, ProcState), ConnName]), + rabbit_mqtt_processor:send_disconnect(?RC_MAXIMUM_CONNECT_TIME, ProcState), + defer_close(?CLOSE_NORMAL), + {[], State}; websocket_info(emit_stats, State) -> {[], emit_stats(State), hibernate}; websocket_info({{'DOWN', _QName}, _MRef, process, _Pid, _Reason} = Evt, diff --git 
a/deps/rabbitmq_web_mqtt/test/command_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl similarity index 93% rename from deps/rabbitmq_web_mqtt/test/command_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl index c526d8c4f217..04d50f7fb582 100644 --- a/deps/rabbitmq_web_mqtt/test/command_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. --module(command_SUITE). +-module(web_mqtt_command_SUITE). -compile([export_all, nowarn_export_all]). -include_lib("eunit/include/eunit.hrl"). @@ -16,6 +16,7 @@ [connect/3, connect/4]). -define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand'). +-define(MQTT_COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand'). all() -> [ @@ -93,12 +94,16 @@ run(BaseConfig) -> [] = 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), %% Open a WebMQTT connection - C2 = connect(<<"simpleWebMqttClient">>, Config, [{ack_timeout, 1}]), timer:sleep(200), + %% WebMQTT CLI should list only WebMQTT connection. [[{client_id, <<"simpleWebMqttClient">>}]] = - 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + + %% MQTT CLI should list only MQTT connection. 
+ [[{client_id, <<"simpleMqttClient">>}]] = + 'Elixir.Enum':to_list(?MQTT_COMMAND:run([<<"client_id">>], Opts)), C3 = connect(<<"simpleWebMqttClient1">>, Config, [{ack_timeout, 1}]), timer:sleep(200), diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl similarity index 97% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl index 694d7ea5a25a..7b280eccfc1b 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(config_schema_SUITE). +-module(web_mqtt_config_schema_SUITE). -compile(export_all). diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cert.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cert.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/key.pem new file mode 100644 index 000000000000..eaf6b67806ce --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/key.pem @@ -0,0 +1 @@ +I'm not a certificate diff --git 
a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets similarity index 78% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets index f8ef2916f6ef..4d592eee3124 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets @@ -73,28 +73,28 @@ {ssl_with_listener, "web_mqtt.ssl.listener = 127.0.0.2:15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem - web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme", [{rabbitmq_web_mqtt, [{ssl_config, [{ip,"127.0.0.2"}, {port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}]}]}], + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, + {password,<<"changeme">>}]}]}], [rabbitmq_web_mqtt]}, {ssl, "web_mqtt.ssl.ip = 127.0.0.2 web_mqtt.ssl.port = 15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = 
test/config_schema_SUITE_data/certs/key.pem - web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme web_mqtt.ssl.versions.tls1_2 = tlsv1.2 @@ -105,10 +105,10 @@ {ip,"127.0.0.2"}, {port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, + {password,<<"changeme">>}, {versions,['tlsv1.2','tlsv1.1']} ]}]}], @@ -117,9 +117,9 @@ {ssl_ciphers, "web_mqtt.ssl.port = 15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem - web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme web_mqtt.ssl.honor_cipher_order = true @@ -142,10 +142,10 @@ [{ssl_config, [{port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + 
{keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, + {password,<<"changeme">>}, {honor_cipher_order, true}, {honor_ecc_order, true}, diff --git a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl similarity index 99% rename from deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl index d13426342d30..7f9e9adb2f8d 100644 --- a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(proxy_protocol_SUITE). +-module(web_mqtt_proxy_protocol_SUITE). -compile([export_all, nowarn_export_all]). diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl new file mode 100644 index 000000000000..f3818b34ee06 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl @@ -0,0 +1,101 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% This test suite uses test cases shared by rabbitmq_mqtt. +-module(web_mqtt_shared_SUITE). +-compile([export_all, + nowarn_export_all]). + +all() -> + mqtt_shared_SUITE:all(). + +groups() -> + mqtt_shared_SUITE:groups(). + +suite() -> + mqtt_shared_SUITE:suite(). + +init_per_suite(Config) -> + mqtt_shared_SUITE:init_per_suite(Config). 
+ +end_per_suite(Config) -> + mqtt_shared_SUITE:end_per_suite(Config). + +init_per_group(mqtt, Config) -> + %% This is the main difference with rabbitmq_mqtt. + rabbit_ct_helpers:set_config(Config, {websocket, true}); +init_per_group(Group, Config) -> + mqtt_shared_SUITE:init_per_group(Group, Config). + +end_per_group(Group, Config) -> + mqtt_shared_SUITE:end_per_group(Group, Config). + +init_per_testcase(Testcase, Config) -> + mqtt_shared_SUITE:init_per_testcase(Testcase, Config). + +end_per_testcase(Testcase, Config) -> + mqtt_shared_SUITE:end_per_testcase(Testcase, Config). + +global_counters(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +message_size_metrics(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +block_only_publisher(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +many_qos1_messages(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_expiry(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_close_all_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_close_all_user_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +management_plugin_connection(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +management_plugin_enable(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +pubsub_shared_connection(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +pubsub_separate_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +will_with_disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +will_without_disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +decode_basic_properties(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +quorum_queue_rejects(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +events(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +internal_event_handler(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). 
+non_clean_sess_reconnect_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_reconnect_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_reconnect_qos0_and_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_empty_client_id(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_same_topic_same_qos(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_same_topic_different_qos(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_multiple(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +large_message_mqtt_to_mqtt(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +large_message_amqp_to_mqtt(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +keepalive(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +keepalive_turned_off(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +block(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +amqp_to_mqtt_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_disconnect_client(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_node_restart(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_node_kill(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_status_connection_count(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +trace(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +trace_large_message(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +max_packet_size_unauthenticated(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +max_packet_size_authenticated(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +default_queue_type(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +incoming_message_interceptors(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +utf8(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +retained_message_conversion(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). 
+bind_exchange_to_exchange(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +bind_exchange_to_exchange_single_message(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +pubsub(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +queue_down_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +consuming_classic_queue_down(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +flow_quorum_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +flow_stream(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_mqtt_qos0_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_mqtt_qos0_queue_kill_node(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_list_queues(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +delete_create_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_reconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_takeover(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +duplicate_client_id(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +publish_to_all_queue_types_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +publish_to_all_queue_types_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +maintenance(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). diff --git a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl similarity index 99% rename from deps/rabbitmq_web_mqtt/test/system_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl index 35af6e923d28..3b01af7f1e06 100644 --- a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(system_SUITE). +-module(web_mqtt_system_SUITE). -include_lib("eunit/include/eunit.hrl"). 
diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl new file mode 100644 index 000000000000..5012ddd4d0b8 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl @@ -0,0 +1,114 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% This test suite uses test cases shared by rabbitmq_mqtt. +-module(web_mqtt_v5_SUITE). +-compile([export_all, + nowarn_export_all]). + +all() -> + v5_SUITE:all(). + +groups() -> + v5_SUITE:groups(). + +suite() -> + v5_SUITE:suite(). + +init_per_suite(Config) -> + v5_SUITE:init_per_suite(Config). + +end_per_suite(Config) -> + v5_SUITE:end_per_suite(Config). + +init_per_group(mqtt, Config) -> + %% This is the main difference with rabbitmq_mqtt. + rabbit_ct_helpers:set_config(Config, {websocket, true}); +init_per_group(Group, Config) -> + v5_SUITE:init_per_group(Group, Config). + +end_per_group(Group, Config) -> + v5_SUITE:end_per_group(Group, Config). + +init_per_testcase(Testcase, Config) -> + v5_SUITE:init_per_testcase(Testcase, Config). + +end_per_testcase(Testcase, Config) -> + v5_SUITE:end_per_testcase(Testcase, Config). + +client_set_max_packet_size_publish(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_set_max_packet_size_connack(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_set_max_packet_size_invalid(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry_will_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+session_expiry_classic_queue_disconnect_decrease(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_quorum_queue_disconnect_decrease(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_zero_to_non_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_non_zero_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_infinity_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_to_infinity(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_non_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_infinity_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_publish_qos2(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_rejects_publish(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_receive_maximum_min(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_receive_maximum_large(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +unsubscribe_success(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +unsubscribe_topic_not_found(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_no_local(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_no_local_wildcards(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_as_published(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_as_published_wildcards(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_handling(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier_amqp091(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier_at_most_once_dead_letter(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +at_most_once_dead_letter_detect_cycle(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+subscription_options_persisted(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify_qos1(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify_qos0(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_qos1(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_qos0(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_amqp091_pub(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +compatibility_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_unsubscribe(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v4_v5_no_queue_bind_permission(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +amqp091_cc_header(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_content_type(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_payload_format_indicator(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_response_topic_correlation_data(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_user_property(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +disconnect_with_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_qos2(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_greater_than_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_less_than_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_equals_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_session_expiry_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_reconnect_no_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_reconnect_with_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_session_takeover(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_message_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_message_expiry_publish_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+will_delay_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +retain_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_client_to_server(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_server_to_client(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_bidirectional(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_invalid(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_unknown(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_disallowed(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_disallowed_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +extended_auth(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +headers_exchange(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +consistent_hash_exchange(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_migrate_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_takeover_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_node_restart(Config) -> v5_SUITE:?FUNCTION_NAME(Config). diff --git a/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema index 273d30cb3a2b..c16e74837563 100644 --- a/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema +++ b/deps/rabbitmq_web_stomp/priv/schema/rabbitmq_web_stomp.schema @@ -65,7 +65,7 @@ {mapping, "web_stomp.ssl.cacertfile", "rabbitmq_web_stomp.ssl_config.cacertfile", [{datatype, string}, {validators, ["file_accessible"]}]}. {mapping, "web_stomp.ssl.password", "rabbitmq_web_stomp.ssl_config.password", - [{datatype, string}]}. + [{datatype, [tagged_binary, binary]}]}. 
{translation, "rabbitmq_web_stomp.ssl_config", diff --git a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets index 8a41ce031b90..fc901e2d05a4 100644 --- a/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets +++ b/deps/rabbitmq_web_stomp/test/config_schema_SUITE_data/rabbitmq_web_stomp.snippets @@ -79,7 +79,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}]}]}], + {password,<<"changeme">>}]}]}], [rabbitmq_web_stomp]}, {ssl, @@ -99,7 +99,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {versions,['tlsv1.2','tlsv1.1']} ]}]}], @@ -136,7 +136,7 @@ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, - {password,"changeme"}, + {password,<<"changeme">>}, {honor_cipher_order, true}, {honor_ecc_order, true}, diff --git a/deps/trust_store_http/Makefile b/deps/trust_store_http/Makefile index 341d187df719..fa7c17d9ac6e 100644 --- a/deps/trust_store_http/Makefile +++ b/deps/trust_store_http/Makefile @@ -10,7 +10,8 @@ LOCAL_DEPS = ssl DEPS = cowboy thoas DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/trust_store_http/src/trust_store_http.erl b/deps/trust_store_http/src/trust_store_http.erl index d32b647b547c..315196f8b042 100644 --- a/deps/trust_store_http/src/trust_store_http.erl +++ b/deps/trust_store_http/src/trust_store_http.erl @@ -5,8 +5,8 @@ main([]) -> io:format("~nStarting trust store server ~n", []), - application:ensure_all_started(trust_store_http), + {ok, _} = application:ensure_all_started(trust_store_http), io:format("~nTrust store server started on port ~tp ~n", [application:get_env(trust_store_http, port, undefined)]), user_drv:start(), - timer:sleep(infinity). \ No newline at end of file + timer:sleep(infinity). diff --git a/deps/trust_store_http/src/trust_store_http_app.erl b/deps/trust_store_http/src/trust_store_http_app.erl index 2fd861405a51..84a2b7e83d0a 100644 --- a/deps/trust_store_http/src/trust_store_http_app.erl +++ b/deps/trust_store_http/src/trust_store_http_app.erl @@ -15,7 +15,7 @@ start(_Type, _Args) -> {"/certs/[...]", cowboy_static, {dir, Directory, [{mimetypes, {<<"text">>, <<"html">>, []}}]}}]} ]), - case get_ssl_options() of + _ = case get_ssl_options() of undefined -> start_http(Dispatch, Port); SslOptions -> start_https(Dispatch, Port, SslOptions) end, diff --git a/deps/trust_store_http/src/trust_store_list_handler.erl b/deps/trust_store_http/src/trust_store_list_handler.erl index a09bf0306cfe..416dfc253d99 100644 --- a/deps/trust_store_http/src/trust_store_list_handler.erl +++ b/deps/trust_store_http/src/trust_store_list_handler.erl @@ -25,7 +25,7 @@ respond(Files, Req, State) -> respond_error(Reason, Req, State) -> Error = io_lib:format("Error listing certificates ~tp", [Reason]), logger:log(error, "~ts", [Error]), - Req2 = cowboy_req:reply(500, [], iolist_to_binary(Error), Req), + Req2 = cowboy_req:reply(500, #{}, iolist_to_binary(Error), Req), {ok, Req2, State}. 
json_encode(Files) -> @@ -40,7 +40,6 @@ cert_id(FileName, FileDate, FileHash) -> cert_path(FileName) -> iolist_to_binary(["/certs/", FileName]). --spec list_files(string()) -> [{string(), file:date_time(), integer()}]. list_files(Directory) -> case file:list_dir(Directory) of {ok, FileNames} -> diff --git a/erlang.mk b/erlang.mk index 0e11784cbbc9..1d2e3be2a9c4 100644 --- a/erlang.mk +++ b/erlang.mk @@ -801,7 +801,7 @@ pkg_cuttlefish_description = cuttlefish configuration abstraction pkg_cuttlefish_homepage = https://github.com/Kyorai/cuttlefish pkg_cuttlefish_fetch = git pkg_cuttlefish_repo = https://github.com/Kyorai/cuttlefish -pkg_cuttlefish_commit = master +pkg_cuttlefish_commit = main PACKAGES += damocles pkg_damocles_name = damocles diff --git a/moduleindex.yaml b/moduleindex.yaml index 39c0265ea927..ebadcd41d644 100755 --- a/moduleindex.yaml +++ b/moduleindex.yaml @@ -185,6 +185,9 @@ emqtt: - emqtt_ws enough: - enough +eunit_formatters: +- binomial_heap +- eunit_progress gen_batch_server: - gen_batch_server getopt: @@ -553,6 +556,7 @@ rabbit: - rabbit_amqqueue_sup_sup - rabbit_auth_backend_internal - rabbit_auth_mechanism_amqplain +- rabbit_auth_mechanism_anonymous - rabbit_auth_mechanism_cr_demo - rabbit_auth_mechanism_plain - rabbit_autoheal @@ -644,8 +648,10 @@ rabbit: - rabbit_fifo_dlx_sup - rabbit_fifo_dlx_worker - rabbit_fifo_index +- rabbit_fifo_q - rabbit_fifo_v0 - rabbit_fifo_v1 +- rabbit_fifo_v3 - rabbit_file - rabbit_global_counters - rabbit_guid @@ -664,6 +670,7 @@ rabbit: - rabbit_metrics - rabbit_mirror_queue_misc - rabbit_mnesia +- rabbit_msg_size_metrics - rabbit_msg_store - rabbit_msg_store_gc - rabbit_networking @@ -829,6 +836,7 @@ rabbitmq_auth_backend_oauth2: - rabbit_auth_backend_oauth2 - rabbit_auth_backend_oauth2_app - rabbit_oauth2_config +- rabbit_oauth2_schema - rabbit_oauth2_scope - uaa_jwks - uaa_jwt @@ -890,6 +898,10 @@ rabbitmq_federation: - rabbit_log_federation rabbitmq_federation_management: - rabbit_federation_mgmt 
+rabbitmq_federation_prometheus: +- rabbit_federation_prometheus_app +- rabbit_federation_prometheus_collector +- rabbit_federation_prometheus_sup rabbitmq_jms_topic_exchange: - rabbit_db_jms_exchange - rabbit_db_jms_exchange_m2k_converter @@ -978,6 +990,7 @@ rabbitmq_management: - rabbit_mgmt_wm_quorum_queue_replicas_delete_member - rabbit_mgmt_wm_quorum_queue_replicas_grow - rabbit_mgmt_wm_quorum_queue_replicas_shrink +- rabbit_mgmt_wm_quorum_queue_status - rabbit_mgmt_wm_rebalance_queues - rabbit_mgmt_wm_redirect - rabbit_mgmt_wm_reset @@ -1084,8 +1097,8 @@ rabbitmq_prometheus: - prometheus_rabbitmq_alarm_metrics_collector - prometheus_rabbitmq_core_metrics_collector - prometheus_rabbitmq_dynamic_collector -- prometheus_rabbitmq_federation_collector - prometheus_rabbitmq_global_metrics_collector +- prometheus_rabbitmq_message_size_metrics_collector - rabbit_prometheus_app - rabbit_prometheus_dispatcher - rabbit_prometheus_handler @@ -1122,8 +1135,13 @@ rabbitmq_shovel: - rabbit_shovel_worker - rabbit_shovel_worker_sup rabbitmq_shovel_management: -- rabbit_shovel_mgmt +- rabbit_shovel_mgmt_shovel +- rabbit_shovel_mgmt_shovels - rabbit_shovel_mgmt_util +rabbitmq_shovel_prometheus: +- rabbit_shovel_prometheus_app +- rabbit_shovel_prometheus_collector +- rabbit_shovel_prometheus_sup rabbitmq_stomp: - Elixir.RabbitMQ.CLI.Ctl.Commands.ListStompConnectionsCommand - rabbit_stomp diff --git a/packaging/docker-image/Dockerfile b/packaging/docker-image/Dockerfile index b74b68d5b468..5fe46736682d 100644 --- a/packaging/docker-image/Dockerfile +++ b/packaging/docker-image/Dockerfile @@ -146,7 +146,6 @@ RUN set -eux; \ --prefix="$ERLANG_INSTALL_PATH_PREFIX" \ --host="$hostArch" \ --build="$buildArch" \ - --disable-hipe \ --disable-sctp \ --disable-silent-rules \ --enable-builtin-zlib \ @@ -166,7 +165,6 @@ RUN set -eux; \ --without-et \ --without-eunit \ --without-ftp \ - --without-hipe \ --without-jinterface \ --without-megaco \ --without-observer \ @@ -329,4 +327,4 @@ RUN 
set eux; \ rm -rf /var/lib/apt/lists/*; \ rabbitmqadmin --version -EXPOSE 15671 15672 \ No newline at end of file +EXPOSE 15671 15672 diff --git a/plugins.mk b/plugins.mk index 7536c6705ae1..b822296da018 100644 --- a/plugins.mk +++ b/plugins.mk @@ -15,6 +15,7 @@ PLUGINS := rabbitmq_amqp1_0 \ rabbitmq_event_exchange \ rabbitmq_federation \ rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ rabbitmq_jms_topic_exchange \ rabbitmq_management \ rabbitmq_management_agent \ @@ -30,6 +31,7 @@ PLUGINS := rabbitmq_amqp1_0 \ rabbitmq_sharding \ rabbitmq_shovel \ rabbitmq_shovel_management \ + rabbitmq_shovel_prometheus \ rabbitmq_stomp \ rabbitmq_stream \ rabbitmq_stream_management \ diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index 2962d95b0b27..b6361f61d0cd 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -6,106 +6,34 @@ ifeq ($(.DEFAULT_GOAL),) endif # PROJECT_VERSION defaults to: -# 1. the version exported by rabbitmq-server-release; +# 1. the version exported by environment; # 2. the version stored in `git-revisions.txt`, if it exists; # 3. a version based on git-describe(1), if it is a Git clone; # 4. 0.0.0 +# +# Note that in the case where git-describe(1) is used +# (e.g. during development), running "git gc" may help +# improve the performance. 
PROJECT_VERSION := $(RABBITMQ_VERSION) ifeq ($(PROJECT_VERSION),) +ifneq ($(wildcard git-revisions.txt),) PROJECT_VERSION = $(shell \ -if test -f git-revisions.txt; then \ head -n1 git-revisions.txt | \ - awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \ -else \ + awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}') +else +PROJECT_VERSION = $(shell \ (git describe --dirty --abbrev=7 --tags --always --first-parent \ - 2>/dev/null || echo rabbitmq_v0_0_0) | \ - sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \ - -e 's/-/./g'; \ -fi) + 2>/dev/null || echo 0.0.0) | \ + sed -e 's/^v//' -e 's/_/./g' -e 's/-/+/' -e 's/-/./g') +endif endif # -------------------------------------------------------------------- # RabbitMQ components. # -------------------------------------------------------------------- -# For RabbitMQ repositories, we want to checkout branches which match -# the parent project. For instance, if the parent project is on a -# release tag, dependencies must be on the same release tag. If the -# parent project is on a topic branch, dependencies must be on the same -# topic branch or fallback to `stable` or `main` whichever was the -# base of the topic branch. 
- -dep_amqp_client = git_rmq-subfolder rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) main -dep_amqp10_client = git_rmq-subfolder rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) main -dep_oauth2_client = git_rmq-subfolder oauth2-client $(current_rmq_ref) $(base_rmq_ref) main -dep_amqp10_common = git_rmq-subfolder rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbit = git_rmq-subfolder rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbit_common = git_rmq-subfolder rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_amqp1_0 = git_rmq-subfolder rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_cache = git_rmq-subfolder rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_http = git_rmq-subfolder rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_ldap = git_rmq-subfolder rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_oauth2 = git_rmq-subfolder rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_mechanism_ssl = git_rmq-subfolder rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_aws = git_rmq-subfolder rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_cli = git_rmq-subfolder rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_codegen = git_rmq-subfolder rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_consistent_hash_exchange = git_rmq-subfolder rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_ct_client_helpers = git_rmq-subfolder 
rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_ct_helpers = git_rmq-subfolder rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_event_exchange = git_rmq-subfolder rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_federation = git_rmq-subfolder rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_federation_management = git_rmq-subfolder rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_topic_exchange = git_rmq-subfolder rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management = git_rmq-subfolder rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_agent = git_rmq-subfolder rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_mqtt = git_rmq-subfolder rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) main 
-dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_aws = git_rmq-subfolder rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_common = git_rmq-subfolder rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_consul = git_rmq-subfolder rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_etcd = git_rmq-subfolder rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_k8s = git_rmq-subfolder rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_prelaunch = git_rmq-subfolder rabbitmq-prelaunch $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_prometheus = git_rmq-subfolder rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_random_exchange = git_rmq-subfolder rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_recent_history_exchange = git_rmq-subfolder rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_sharding = git_rmq-subfolder rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_shovel = git_rmq-subfolder rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_shovel_management = git_rmq-subfolder rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stomp = git_rmq-subfolder rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stream = git_rmq-subfolder rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stream_common = git_rmq-subfolder rabbitmq-stream-common $(current_rmq_ref) 
$(base_rmq_ref) main -dep_rabbitmq_stream_management = git_rmq-subfolder rabbitmq-stream-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_top = git_rmq-subfolder rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_tracing = git_rmq-subfolder rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_trust_store = git_rmq-subfolder rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_dispatch = git_rmq-subfolder rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_stomp = git_rmq-subfolder rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_stomp_examples = git_rmq-subfolder rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_mqtt = git_rmq-subfolder rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_mqtt_examples = git_rmq-subfolder rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live main -dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master - # Third-party dependencies version pinning. 
# # We do that in this file, which is included by all projects, to ensure @@ -115,88 +43,113 @@ dep_accept = hex 0.3.5 dep_cowboy = hex 2.12.0 dep_cowlib = hex 2.13.0 dep_credentials_obfuscation = hex 3.4.0 -dep_cuttlefish = hex 3.1.0 +dep_cuttlefish = hex 3.4.0 dep_gen_batch_server = hex 0.8.8 dep_jose = hex 1.11.10 -dep_khepri = hex 0.14.0 -dep_khepri_mnesia_migration = hex 0.5.0 +dep_khepri = hex 0.16.0 +dep_khepri_mnesia_migration = hex 0.7.0 +dep_osiris = git https://github.com/rabbitmq/osiris v1.8.3 dep_prometheus = hex 4.11.0 -dep_ra = hex 2.13.5 +dep_ra = hex 2.14.0 dep_ranch = hex 2.1.0 dep_recon = hex 2.5.3 dep_redbug = hex 2.0.7 +dep_systemd = hex 0.6.1 dep_thoas = hex 1.0.0 -dep_observer_cli = hex 1.7.3 +dep_observer_cli = hex 1.7.5 dep_seshat = git https://github.com/rabbitmq/seshat v0.6.1 dep_stdout_formatter = hex 0.2.4 dep_sysmon_handler = hex 1.3.0 -RABBITMQ_COMPONENTS = amqp_client \ - amqp10_common \ - amqp10_client \ - oauth2_client \ - rabbit \ - rabbit_common \ - rabbitmq_amqp1_0 \ - rabbitmq_auth_backend_amqp \ - rabbitmq_auth_backend_cache \ - rabbitmq_auth_backend_http \ - rabbitmq_auth_backend_ldap \ - rabbitmq_auth_backend_oauth2 \ - rabbitmq_auth_mechanism_ssl \ - rabbitmq_aws \ - rabbitmq_boot_steps_visualiser \ - rabbitmq_cli \ - rabbitmq_codegen \ - rabbitmq_consistent_hash_exchange \ - rabbitmq_ct_client_helpers \ - rabbitmq_ct_helpers \ - rabbitmq_delayed_message_exchange \ - rabbitmq_dotnet_client \ - rabbitmq_event_exchange \ - rabbitmq_federation \ - rabbitmq_federation_management \ - rabbitmq_java_client \ - rabbitmq_jms_client \ - rabbitmq_jms_cts \ - rabbitmq_jms_topic_exchange \ - rabbitmq_lvc_exchange \ - rabbitmq_management \ - rabbitmq_management_agent \ - rabbitmq_management_exchange \ - rabbitmq_management_themes \ - rabbitmq_message_timestamp \ - rabbitmq_metronome \ - rabbitmq_mqtt \ - rabbitmq_objc_client \ - rabbitmq_peer_discovery_aws \ - rabbitmq_peer_discovery_common \ - rabbitmq_peer_discovery_consul \ - 
rabbitmq_peer_discovery_etcd \ - rabbitmq_peer_discovery_k8s \ - rabbitmq_prometheus \ - rabbitmq_random_exchange \ - rabbitmq_recent_history_exchange \ - rabbitmq_routing_node_stamp \ - rabbitmq_rtopic_exchange \ - rabbitmq_server_release \ - rabbitmq_sharding \ - rabbitmq_shovel \ - rabbitmq_shovel_management \ - rabbitmq_stomp \ - rabbitmq_stream \ - rabbitmq_stream_common \ - rabbitmq_stream_management \ - rabbitmq_toke \ - rabbitmq_top \ - rabbitmq_tracing \ - rabbitmq_trust_store \ - rabbitmq_web_dispatch \ - rabbitmq_web_mqtt \ - rabbitmq_web_mqtt_examples \ - rabbitmq_web_stomp \ - rabbitmq_web_stomp_examples \ - rabbitmq_website +# RabbitMQ applications found in the monorepo. +# +# Note that rabbitmq_server_release is not a real application +# but is the name used in the top-level Makefile. + +RABBITMQ_BUILTIN = \ + amqp10_client \ + amqp10_common \ + amqp_client \ + oauth2_client \ + rabbit \ + rabbit_common \ + rabbitmq_amqp1_0 \ + rabbitmq_amqp_client \ + rabbitmq_auth_backend_cache \ + rabbitmq_auth_backend_http \ + rabbitmq_auth_backend_ldap \ + rabbitmq_auth_backend_oauth2 \ + rabbitmq_auth_mechanism_ssl \ + rabbitmq_aws \ + rabbitmq_cli \ + rabbitmq_codegen \ + rabbitmq_consistent_hash_exchange \ + rabbitmq_ct_client_helpers \ + rabbitmq_ct_helpers \ + rabbitmq_event_exchange \ + rabbitmq_federation \ + rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ + rabbitmq_jms_topic_exchange \ + rabbitmq_management \ + rabbitmq_management_agent \ + rabbitmq_mqtt \ + rabbitmq_peer_discovery_aws \ + rabbitmq_peer_discovery_common \ + rabbitmq_peer_discovery_consul \ + rabbitmq_peer_discovery_etcd \ + rabbitmq_peer_discovery_k8s \ + rabbitmq_prelaunch \ + rabbitmq_prometheus \ + rabbitmq_random_exchange \ + rabbitmq_recent_history_exchange \ + rabbitmq_server_release \ + rabbitmq_sharding \ + rabbitmq_shovel \ + rabbitmq_shovel_management \ + rabbitmq_stomp \ + rabbitmq_stream \ + rabbitmq_stream_common \ + rabbitmq_stream_management \ + 
rabbitmq_top \ + rabbitmq_tracing \ + rabbitmq_trust_store \ + rabbitmq_web_dispatch \ + rabbitmq_web_mqtt \ + rabbitmq_web_mqtt_examples \ + rabbitmq_web_stomp \ + rabbitmq_web_stomp_examples \ + trust_store_http + +# Applications outside of the monorepo maintained by Team RabbitMQ. + +RABBITMQ_COMMUNITY = \ + rabbitmq_auth_backend_amqp \ + rabbitmq_boot_steps_visualiser \ + rabbitmq_delayed_message_exchange \ + rabbitmq_lvc_exchange \ + rabbitmq_management_exchange \ + rabbitmq_management_themes \ + rabbitmq_message_timestamp \ + rabbitmq_metronome \ + rabbitmq_routing_node_stamp \ + rabbitmq_rtopic_exchange + +community_dep = git git@github.com:rabbitmq/$1.git $(if $2,$2,main) +dep_rabbitmq_auth_backend_amqp = $(call community_dep,rabbitmq-auth-backend-amqp) +dep_rabbitmq_boot_steps_visualiser = $(call community_dep,rabbitmq-boot-steps-visualiser,master) +dep_rabbitmq_delayed_message_exchange = $(call community_dep,rabbitmq-delayed-message-exchange) +dep_rabbitmq_lvc_exchange = $(call community_dep,rabbitmq-lvc-exchange) +dep_rabbitmq_management_exchange = $(call community_dep,rabbitmq-management-exchange) +dep_rabbitmq_management_themes = $(call community_dep,rabbitmq-management-themes,master) +dep_rabbitmq_message_timestamp = $(call community_dep,rabbitmq-message-timestamp) +dep_rabbitmq_metronome = $(call community_dep,rabbitmq-metronome,master) +dep_rabbitmq_routing_node_stamp = $(call community_dep,rabbitmq-routing-node-stamp) +dep_rabbitmq_rtopic_exchange = $(call community_dep,rabbitmq-rtopic-exchange) + +# All RabbitMQ applications. + +RABBITMQ_COMPONENTS = $(RABBITMQ_BUILTIN) $(RABBITMQ_COMMUNITY) # Erlang.mk does not rebuild dependencies by default, once they were # compiled once, except for those listed in the `$(FORCE_REBUILD)` @@ -207,137 +160,10 @@ RABBITMQ_COMPONENTS = amqp_client \ FORCE_REBUILD = $(RABBITMQ_COMPONENTS) -# Several components have a custom erlang.mk/build.config, mainly -# to disable eunit. 
Therefore, we can't use the top-level project's -# erlang.mk copy. -NO_AUTOPATCH += $(RABBITMQ_COMPONENTS) - -ifeq ($(origin current_rmq_ref),undefined) -ifneq ($(wildcard .git),) -current_rmq_ref := $(shell (\ - ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\ - if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi)) -else -current_rmq_ref := main -endif -endif -export current_rmq_ref - -ifeq ($(origin base_rmq_ref),undefined) -ifneq ($(wildcard .git),) -possible_base_rmq_ref := main -ifeq ($(possible_base_rmq_ref),$(current_rmq_ref)) -base_rmq_ref := $(current_rmq_ref) -else -base_rmq_ref := $(shell \ - (git rev-parse --verify -q main >/dev/null && \ - git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \ - git merge-base --is-ancestor $$(git merge-base main HEAD) $(possible_base_rmq_ref) && \ - echo $(possible_base_rmq_ref)) || \ - echo main) -endif -else -base_rmq_ref := main -endif -endif -export base_rmq_ref - -# Repository URL selection. -# -# First, we infer other components' location from the current project -# repository URL, if it's a Git repository: -# - We take the "origin" remote URL as the base -# - The current project name and repository name is replaced by the -# target's properties: -# eg. rabbitmq-common is replaced by rabbitmq-codegen -# eg. rabbit_common is replaced by rabbitmq_codegen -# -# If cloning from this computed location fails, we fallback to RabbitMQ -# upstream which is GitHub. - -# Macro to transform eg. "rabbit_common" to "rabbitmq-common". -rmq_cmp_repo_name = $(word 2,$(dep_$(1))) - -# Upstream URL for the current project. 
-RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT)) -RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git -RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git - -# Current URL for the current project. If this is not a Git clone, -# default to the upstream Git repository. -ifneq ($(wildcard .git),) -git_origin_fetch_url := $(shell git config remote.origin.url) -git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url) -RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url) -RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url) -else -RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL) -RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL) -endif - -# Macro to replace the following pattern: -# 1. /foo.git -> /bar.git -# 2. /foo -> /bar -# 3. /foo/ -> /bar/ -subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3)))) - -# Macro to replace both the project's name (eg. "rabbit_common") and -# repository name (eg. "rabbitmq-common") by the target's equivalent. -# -# This macro is kept on one line because we don't want whitespaces in -# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell -# single-quoted string. 
-dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo)) - -dep_rmq_commits = $(if $(dep_$(1)), \ - $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \ - $(pkg_$(1)_commit)) - -define dep_fetch_git_rmq - fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \ - fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \ - if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \ - git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \ - fetch_url="$$$$fetch_url1"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \ - elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \ - fetch_url="$$$$fetch_url2"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \ - fi; \ - cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \ - $(foreach ref,$(call dep_rmq_commits,$(1)), \ - git checkout -q $(ref) >/dev/null 2>&1 || \ - ) \ - (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \ - 1>&2 && false) ) && \ - (test "$$$$fetch_url" = "$$$$push_url" || \ - git remote set-url --push origin "$$$$push_url") -endef - -define dep_fetch_git_rmq-subfolder - fetch_url1='https://github.com/rabbitmq/rabbitmq-server.git'; \ - fetch_url2='git@github.com:rabbitmq/rabbitmq-server.git'; \ - if [ ! 
-d $(ERLANG_MK_TMP)/rabbitmq-server ]; then \ - if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \ - git clone -q -n -- "$$$$fetch_url1" $(ERLANG_MK_TMP)/rabbitmq-server; then \ - fetch_url="$$$$fetch_url1"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),rabbitmq-server)'; \ - elif git clone -q -n -- "$$$$fetch_url2" $(ERLANG_MK_TMP)/rabbitmq-server; then \ - fetch_url="$$$$fetch_url2"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),rabbitmq-server)'; \ - fi; \ - fi; \ - cd $(ERLANG_MK_TMP)/rabbitmq-server && ( \ - $(foreach ref,$(call dep_rmq_commits,$(1)), \ - git checkout -q $(ref) >/dev/null 2>&1 || \ - ) \ - (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \ - 1>&2 && false) ) && \ - (test "$$$$fetch_url" = "$$$$push_url" || \ - git remote set-url --push origin "$$$$push_url") - ln -s $(ERLANG_MK_TMP)/rabbitmq-server/deps/$(call dep_name,$(1)) \ - $(DEPS_DIR)/$(call dep_name,$(1)); -endef +# We disable autopatching for community plugins as they sit in +# their own repository and we want to avoid polluting the git +# status with changes that should not be committed. +NO_AUTOPATCH += $(RABBITMQ_COMMUNITY) # -------------------------------------------------------------------- # Component distribution. @@ -350,7 +176,7 @@ prepare-dist:: @: # -------------------------------------------------------------------- -# Monorepo-specific settings. +# RabbitMQ-specific settings. 
# -------------------------------------------------------------------- # If the top-level project is a RabbitMQ component, we override diff --git a/rabbitmq.bzl b/rabbitmq.bzl index 8c51a2b16f71..c338031934d6 100644 --- a/rabbitmq.bzl +++ b/rabbitmq.bzl @@ -55,6 +55,7 @@ ALL_PLUGINS = [ "//deps/rabbitmq_event_exchange:erlang_app", "//deps/rabbitmq_federation:erlang_app", "//deps/rabbitmq_federation_management:erlang_app", + "//deps/rabbitmq_federation_prometheus:erlang_app", "//deps/rabbitmq_jms_topic_exchange:erlang_app", "//deps/rabbitmq_management:erlang_app", "//deps/rabbitmq_mqtt:erlang_app", @@ -68,6 +69,7 @@ ALL_PLUGINS = [ "//deps/rabbitmq_sharding:erlang_app", "//deps/rabbitmq_shovel:erlang_app", "//deps/rabbitmq_shovel_management:erlang_app", + "//deps/rabbitmq_shovel_prometheus:erlang_app", "//deps/rabbitmq_stomp:erlang_app", "//deps/rabbitmq_stream:erlang_app", "//deps/rabbitmq_stream_management:erlang_app", @@ -189,6 +191,7 @@ def rabbitmq_suite( "COVERDATA_TO_LCOV_APPS_DIRS": "deps:deps/rabbit/apps", }.items() + test_env.items()), deps = [":test_erlang_app"] + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs ) return name @@ -259,6 +262,7 @@ def rabbitmq_integration_suite( ":rabbitmq-for-tests-run", ] + tools, deps = assumed_deps + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs ) @@ -286,14 +290,15 @@ def rabbitmq_integration_suite( "RABBITMQCTL": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmqctl".format(package), "RABBITMQ_PLUGINS": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-plugins".format(package), "RABBITMQ_QUEUES": "$TEST_SRCDIR/$TEST_WORKSPACE/{}/broker-for-tests-home/sbin/rabbitmq-queues".format(package), - "RABBITMQ_RUN_SECONDARY": "$(location @rabbitmq-server-generic-unix-3.13//:rabbitmq-run)", + "RABBITMQ_RUN_SECONDARY": "$(location @rabbitmq-server-generic-unix-4.0//:rabbitmq-run)", "LANG": "C.UTF-8", }.items() + 
test_env.items()), tools = [ ":rabbitmq-for-tests-run", - "@rabbitmq-server-generic-unix-3.13//:rabbitmq-run", + "@rabbitmq-server-generic-unix-4.0//:rabbitmq-run", ] + tools, deps = assumed_deps + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs ) diff --git a/release-notes/3.13.0.md b/release-notes/3.13.0.md index 784549200a41..2db013ade808 100644 --- a/release-notes/3.13.0.md +++ b/release-notes/3.13.0.md @@ -172,14 +172,15 @@ connect to the same node, or inject a pause, or await a certain condition that i is in place. -### TLS Defaults +### TLS Client (LDAP, Shovels, Federation) Defaults Starting with Erlang 26, client side [TLS peer certificate chain verification](https://www.rabbitmq.com/docs/ssl#peer-verification) settings are enabled by default in most contexts: from federation links to shovels to TLS-enabled LDAP client connections. If using TLS peer certificate chain verification is not practical or necessary, it can be disabled. Please refer to the docs of the feature in question, for example, -this one [on TLS-enabled LDAP client](http://rabbitmq.com/docs/ldap/#tls) connections. +this one [on TLS-enabled LDAP client](http://rabbitmq.com/docs/ldap/#tls) connections, +two others on [TLS-enabled dynamic shovels](https://www.rabbitmq.com/docs/shovel#tls) and [dynamic shovel URI query parameters](https://www.rabbitmq.com/docs/uri-query-parameters). ### Management Plugin and HTTP API @@ -232,7 +233,7 @@ Some of it's great features include: * A reworked table of contents and navigation * Search over both doc guides and blog content -**Note**: We hope you enjoy the new website, more improvements are coming soon, we are revising the documentation table of contents that you see now and also adding some navigational topics to help you move around and find the documentation you are looking for faster in the future. We will keep you posted! 
+**Note**: We hope you enjoy the new website, more improvements are coming soon, we are revising the documentation table of contents that you see now and also adding some navigational topics to help you move around and find the documentation you are looking for faster in the future. We will keep you posted! ### Core Server @@ -251,7 +252,7 @@ Some of it's great features include: that RabbitMQ clusters now **must have a majority of nodes online at all times**, or all client operations will be refused. Like quorum queues and streams, Khepri uses [RabbitMQ's Raft implementation](https://github.com/rabbitmq/ra) under the hood. With Khepri enabled, all key modern features - of RabbitMQ will use the same fundamental approach to recovery from failures, relying on a library that passes a [Jepsen test suite](https://github.com/rabbitmq/ra/#safety-verification). + of RabbitMQ will use the same fundamental approach to recovery from failures, relying on a library that passes a [Jepsen test suite](https://github.com/rabbitmq/ra/#safety-verification). Team RabbitMQ intends to make Khepri the default schema database starting with RabbitMQ 4.0. @@ -259,8 +260,8 @@ Some of it's great features include: * Messages are now internally stored using a new common heavily AMQP 1.0-influenced container format. This is a major step towards a protocol-agnostic core: a common format that encapsulates a sum of data types used by the protocols RabbitMQ supports, plus annotations for routng, dead-lettering state, - and other purposes. - + and other purposes. + AMQP 1.0, AMQP 0-9-1, MQTT and STOMP have or will adopt this internal representation in upcoming releases. RabbitMQ Stream protocol already uses the AMQP 1.0 message container structure internally. @@ -424,7 +425,7 @@ This release includes all bug fixes shipped in the `3.12.x` series. enormously large responses. A couple of relevant queue metrics or state fields were lifted to the top level. - + **This is a potentially breaking change**. 
Note that [Prometheus](https://www.rabbitmq.com/docs/prometheus) is the recommended option for monitoring, diff --git a/release-notes/3.13.7.md b/release-notes/3.13.7.md new file mode 100644 index 000000000000..93c23fcfdc05 --- /dev/null +++ b/release-notes/3.13.7.md @@ -0,0 +1,162 @@ +## RabbitMQ 3.13.7 + +RabbitMQ `3.13.7` is a maintenance release in the `3.13.x` [release series](https://www.rabbitmq.com/release-information). + +This upgrade is **highly recommended** to all users currently on earlier `3.13.x` series and +in particular between `3.13.3` and `3.13.5`, inclusive. + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) +if upgrading from a version prior to 3.13.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 3.13.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. + +Users upgrading from 3.12.x (or older releases) on Erlang 25 to 3.13.x on Erlang 26 +(both RabbitMQ *and* Erlang are upgraded at the same time) **must** consult +the [v3.12.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.12.0) and [v3.13.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v3.13.0) first. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v3.13.x/release-notes). 
+ + +### Core Broker + +#### Bug Fixes + + * Streams recover better from certain node process failures that may leave behind orphaned segment files + (that is, segment files that do not have a corresponding index file) or index files without a corresponding + segment file. + + Kudos to @sysupbda for providing detailed reproduction steps and verifying the fix in the affected environment. + + GitHub issue: [#12073](https://github.com/rabbitmq/rabbitmq-server/pull/12073) + + * Config file [peer discovery](https://www.rabbitmq.com/docs/cluster-formation) now logs warnings for certain common user mistakes. + + GitHub issues: [#11586](https://github.com/rabbitmq/rabbitmq-server/issues/11586), [#11898](https://github.com/rabbitmq/rabbitmq-server/pull/11898) + + * Queue declaration operations now return more useful errors when Khepri is enabled and there's only a minority + of nodes online. + + GitHub issues: [#12020](https://github.com/rabbitmq/rabbitmq-server/pull/12020), [#11991](https://github.com/rabbitmq/rabbitmq-server/pull/11991) + + * Logging is now more defensive around exception handling. Previously a (very rare) logger exception could + lead to the `amq.rabbitmq.log` handler and exchange being removed. + + Contributed by @gomoripeti. + + GitHub issue: [#12107](https://github.com/rabbitmq/rabbitmq-server/pull/12107) + + * `rabbitmq-upgrade revive` unintentionally tried to perform operations on replicas that are not local to the node. + This could result in exceptions, some of which were not handled, and the command failed. + Re-running the command usually helped. + + GitHub issue: [#12038](https://github.com/rabbitmq/rabbitmq-server/pull/12038) + + +#### Enhancements + + * Enabling an experimental feature flag now involves an explicit confirmation. + + GitHub issue: [#12059](https://github.com/rabbitmq/rabbitmq-server/pull/12059) + + * Khepri projections are registered in a safer manner during node boot. 
+ + GitHub issue: [#11837](https://github.com/rabbitmq/rabbitmq-server/pull/11837) + + +### MQTT + +#### Bug Fixes + + * Clients that use JWT tokens are now disconnected when their token expires. Previously all newly attempted + operations with an expired token would be rejected but a completely passive connection was not closed. + + GitHub issue: [#11869](https://github.com/rabbitmq/rabbitmq-server/pull/11869) + +#### Enhancements + + * Connections that provide incorrect credentials are now closed with a delay, just like for several + other protocols supported by RabbitMQ, as a throttling mechanism. + + GitHub issue: [#11906](https://github.com/rabbitmq/rabbitmq-server/pull/11906) + + +### CLI Tools + +#### Bug Fixes + + * When the Khepri feature flag is not enabled, `rabbitmq-diagnostics metadata_store_status` will not try to retrieve + and display its status. + + GitHub issue: [#12103](https://github.com/rabbitmq/rabbitmq-server/pull/12103) + +#### Enhancements + + * `rabbitmq-upgrade await_quorum_plus_one` now produces more log messages when the operation times out. + When Khepri is enabled, it now also treats Khepri as a critical Raft-based component that may depend on replica quorum + just like queues and streams do. + + GitHub issue: [#12117](https://github.com/rabbitmq/rabbitmq-server/pull/12117) + + +### Management Plugin + +#### Bug Fixes + + * When no virtual host limits are set, the limits collection was returned as a JSON array (and not a JSON object) + by `GET /api/vhost-limits`. + + GitHub issue: [#12084](https://github.com/rabbitmq/rabbitmq-server/pull/12084) + +#### Enhancements + + * `GET /api/queues/quorum/{vhost}/{name}/status` is a new endpoint that allows clients to retrieve several key quorum queue + replica and Raft metrics. + + Contributed by @SimonUnge. 
+ + GitHub issue: [#12072](https://github.com/rabbitmq/rabbitmq-server/pull/12072) + + +### Shovel Plugin + +#### Bug Fixes + + * `GET /api/shovels/{vhost}/{name}` now correctly returns a single shovel instead of all shovels in the target + virtual host. + + GitHub issue: [#12040](https://github.com/rabbitmq/rabbitmq-server/issues/12040) + + +### Consistent Hashing Exchange Plugin + +#### Bug Fixes + + * For an exchange declared with a `hash-header`, publishing failed with an exception when the client (usually unintentionally) + did not set that header. + + GitHub issue: [#11808](https://github.com/rabbitmq/rabbitmq-server/pull/11808) + + +### Dependency Changes + + * Osiris was [upgraded to `1.8.3`](https://github.com/rabbitmq/osiris/releases) + * Cuttlefish was [upgraded to `3.4.0`](https://github.com/Kyorai/cuttlefish/releases) + * `observer_cli` was [upgraded to `1.7.5`](https://github.com/zhongwencool/observer_cli/releases) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-3.13.7.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/4.0.0.md b/release-notes/4.0.0.md deleted file mode 100644 index 03322c7ac825..000000000000 --- a/release-notes/4.0.0.md +++ /dev/null @@ -1,92 +0,0 @@ -## RabbitMQ 4.0.0-beta.3 - -RabbitMQ `4.0.0-beta.3` is a preview of a new major release. - -Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). - -## Highlights - -Some key improvements in this release are listed below. 
- - * [Khepri](https://www.youtube.com/watch?v=whVqpgvep90), an [alternative schema data store](https://github.com/rabbitmq/rabbitmq-server/pull/7206) developed to replace Mnesia, - has matured - * AMQP 1.0 is now a core protocol that is always enabled. Its plugin is now a no-op that only exists to simplify upgrades. - * The AMQP 1.0 implementation is now significantly more efficient: its peak throughput is [more than double than that of 3.13.x](https://github.com/rabbitmq/rabbitmq-server/pull/9022) - on some workloads - * [AMQP 1.0 clients now can manage topologies](https://github.com/rabbitmq/rabbitmq-server/pull/10559) similarly to how AMQP 0-9-1 clients do it - * The AMQP 1.0 convention (address format) used for interacting with with AMQP 0-9-1 entities [is now easier to reason about](https://github.com/rabbitmq/rabbitmq-server/pull/11618) - * Mirroring (replication) of classic queues [was removed](https://github.com/rabbitmq/rabbitmq-server/pull/9815) after several years of deprecation. For replicated messaging data types, - use quorum queues and/or streams. Non-replicated classic queues remain and their development continues - * Classic queue [storage efficiency improvements](https://github.com/rabbitmq/rabbitmq-server/pull/11112), in particular recovery time and storage of multi-MiB messages - * Nodes with multiple enabled plugins and little on disk data to recover now [start up to 20-30% faster](https://github.com/rabbitmq/rabbitmq-server/pull/10989) - * CQv1, [the original classic queue storage layer, was removed](https://github.com/rabbitmq/rabbitmq-server/pull/10656) except for the part that's necessary for upgrades - * Several I/O-related metrics are dropped, they should be [monitored at the infrastructure and kernel layers](https://www.rabbitmq.com/docs/monitoring#system-metrics) - -See Compatibility Notes below to learn about **breaking or potentially breaking changes** in this release. 
- -## Release Artifacts - -RabbitMQ releases are distributed via [GitHub](https://github.com/rabbitmq/rabbitmq-server/releases). -[Debian](https://rabbitmq.com/install-debian.html) and [RPM packages](https://rabbitmq.com/install-rpm.html) are available via Cloudsmith mirrors. - -[Community Docker image](https://hub.docker.com/_/rabbitmq/), [Chocolatey package](https://community.chocolatey.org/packages/rabbitmq), and the [Homebrew formula](https://www.rabbitmq.com/docs/install-homebrew) -are other installation options. They are updated with a delay. - - -## Erlang/OTP Compatibility Notes - -This release [requires Erlang 26.2](https://www.rabbitmq.com/docs/which-erlang). - -[Provisioning Latest Erlang Releases](https://www.rabbitmq.com/docs/which-erlang#erlang-repositories) explains -what package repositories and tools can be used to provision latest patch versions of Erlang 26.x. - - -## Upgrading to 4.0 - -### Documentation guides on upgrades - -See the [Upgrading guide](https://www.rabbitmq.com/docs/upgrade) for documentation on upgrades and [GitHub releases](https://github.com/rabbitmq/rabbitmq-server/releases) -for release notes of individual releases. - -This release series only supports upgrades from `3.13.x`. - -This release requires **all feature flags** in the 3.x series (specifically `3.13.x`) to be enabled before upgrading, -there is no upgrade path from 3.12.14 (or a later patch release) straight to `4.0.0`. - -### Required Feature Flags - -This release does not [graduate](https://www.rabbitmq.com/docs/feature-flags#graduation) any feature flags. - -However, all users are highly encouraged to enable all feature flags before upgrading to this release from -3.13.x. - -### Mixed version cluster compatibility - -RabbitMQ 4.0.0 nodes can run alongside `3.13.x` nodes. `4.0.x`-specific features can only be made available when all nodes in the cluster -upgrade to 4.0.0 or a later patch release in the new series. 
- -While operating in mixed version mode, some aspects of the system may not behave as expected. The list of known behavior changes will be covered in future updates. -Once all nodes are upgraded to 4.0.0, these irregularities will go away. - -Mixed version clusters are a mechanism that allows rolling upgrade and are not meant to be run for extended -periods of time (no more than a few hours). - -### Recommended Post-upgrade Procedures - -TBD - - -## Changes Worth Mentioning - -TBD - - -### Dependency Changes - - * Ra was [upgraded to `2.13.5`](https://github.com/rabbitmq/ra/releases) - * Khepri was [upgraded to `0.14.0`](https://github.com/rabbitmq/khepri/releases) - -## Source Code Archives - -To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-4.0.0-beta.3.tar.xz` -instead of the source tarball produced by GitHub. diff --git a/release-notes/4.0.1.md b/release-notes/4.0.1.md new file mode 100644 index 000000000000..a9b17b375c1a --- /dev/null +++ b/release-notes/4.0.1.md @@ -0,0 +1,386 @@ +## RabbitMQ 4.0.1 + +RabbitMQ `4.0` is a new major release. + +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) +and those who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +## Highlights + +Some key improvements in this release are listed below. + + * [Khepri](https://www.youtube.com/watch?v=whVqpgvep90), an [alternative schema data store](https://github.com/rabbitmq/rabbitmq-server/pull/7206) developed to replace Mnesia, + has matured and is now fully supported (it previously was an experimental feature) + * [AMQP 1.0 is now a core protocol](https://www.rabbitmq.com/blog/2024/08/05/native-amqp) that is always enabled. Its plugin is now a no-op that only exists to simplify upgrades. 
+ * The AMQP 1.0 implementation is now significantly more efficient: its peak throughput is [more than double that of 3.13.x](https://www.rabbitmq.com/blog/2024/08/21/amqp-benchmarks) + on some workloads + * Efficient sub-linear [quorum queue recovery on node startup using checkpoints](https://www.rabbitmq.com/blog/2024/08/28/quorum-queues-in-4.0#faster-recovery-of-long-queues) + * Quorum queues now [support priorities](https://www.rabbitmq.com/blog/2024/08/28/quorum-queues-in-4.0#message-priorities) (but not exactly the same way as classic queues) + * [AMQP 1.0 clients now can manage topologies](https://github.com/rabbitmq/rabbitmq-server/pull/10559) similarly to how AMQP 0-9-1 clients do it + * The AMQP 1.0 convention (address format) used for interacting with AMQP 0-9-1 entities [is now easier to reason about](https://www.rabbitmq.com/docs/next/amqp#addresses) + * Mirroring (replication) of classic queues [was removed](https://github.com/rabbitmq/rabbitmq-server/pull/9815) after several years of deprecation. For replicated messaging data types, + use quorum queues and/or streams. Non-replicated classic queues remain and their development continues + * Classic queue [storage efficiency improvements](https://github.com/rabbitmq/rabbitmq-server/pull/11112), in particular recovery time and storage of multi-MiB messages + * Nodes with multiple enabled plugins and little on disk data to recover now [start up to 20-30% faster](https://github.com/rabbitmq/rabbitmq-server/pull/10989) + * New exchange type: [Local Random Exchange](https://rabbitmq.com/docs/next/local-random-exchange) + +See Compatibility Notes below to learn about **breaking or potentially breaking changes** in this release. + +## Breaking Changes and Compatibility Notes + +### Classic Queues is Now a Non-Replicated Queue Type + +After three years of deprecation, classic queue mirroring was completely removed in this version. 
+[Quorum queues](https://www.rabbitmq.com/docs/quorum-queues) and [streams](https://www.rabbitmq.com/docs/streams) are two mature +replicated data types offered by RabbitMQ 4.x. Classic queues continue being supported without any breaking changes +for client libraries and applications but they are now a non-replicated queue type. + +After an upgrade to 4.0, all classic queue mirroring-related parts of policies will have no effect. +Classic queues will continue to work like before but with only one replica. + +Clients will be able to connect to any node to publish to and consume from any non-replicated classic queues. +Therefore applications will be able to use the same classic queues as before. + +See [Mirrored Classic Queues Migration to Quorum Queues](https://www.rabbitmq.com/docs/migrate-mcq-to-qq) for guidance +on how to migrate to quorum queues for the parts of the system that really need to use replication. + +### Quorum Queues Now Have a Default Redelivery Limit + +Quorum queues now have a default [redelivery limit](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) set to `20`. +Messages that are redelivered 20 times or more will be [dead-lettered](https://www.rabbitmq.com/docs/dlx) or dropped (removed). + +This limit is necessary to protect nodes from consumers that run into infinite fail-requeue-fail-requeue loops. Such +consumers can drive a node out of disk space by making a quorum queue Raft log grow forever without allowing compaction +of older entries to happen. + +If 20 deliveries per message is a common scenario for a queue, a dead-lettering target or a higher limit must be configured +for such queues. The recommended way of doing that is via a [policy](https://www.rabbitmq.com/docs/parameters#policies). +See the [Poison Message Handling](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) section +in the quorum queue documentation guide. 
+ +Note that increasing the limit is recommended against: usually the presence of messages that have been redelivered 20 times or more suggests +that a consumer has entered a fail-requeue-fail-requeue loop, in which case even a much higher limit +won't help avoid the dead-lettering. + +For specific cases where the RabbitMQ configuration cannot be updated to include a dead letter policy +the delivery limit can be disabled by setting a delivery limit configuration of `-1`. However, the RabbitMQ team +strongly recommends keeping the delivery limit in place to ensure cluster availability isn't +accidentally sacrificed. + +### CQv1 Storage Implementation was Removed + +CQv1, [the original classic queue storage layer, was removed](https://github.com/rabbitmq/rabbitmq-server/pull/10656) +except for the part that's necessary for upgrades to CQv2 (the 2nd generation). + +In case `rabbitmq.conf` explicitly sets `classic_queue.default_version` to `1` like so + +``` ini +# this configuration value is no longer supported, +# remove this line or set the version to 2 +classic_queue.default_version = 1 +``` + +nodes will now fail to start. Removing the line will make the node start and perform +the migration from CQv1 to CQv2. + +### Settings `cluster_formation.randomized_startup_delay_range.*` were Removed + +The following two deprecated `rabbitmq.conf` settings were [removed](https://github.com/rabbitmq/rabbitmq-server/pull/12050): +``` +cluster_formation.randomized_startup_delay_range.min +cluster_formation.randomized_startup_delay_range.max +``` +RabbitMQ 4.0 will fail to boot if these settings are configured in `rabbitmq.conf`. + +### Several Disk I/O-Related Metrics were Removed + +Several I/O-related metrics are dropped, they should be [monitored at the infrastructure and kernel layers](https://www.rabbitmq.com/docs/monitoring#system-metrics) + +### Default Maximum Message Size Reduced to 16 MiB + +Default maximum message size is reduced to 16 MiB (from 128 MiB). 
+ + The limit can be increased via a `rabbitmq.conf` setting: + +```ini +# 32 MiB +max_message_size = 33554432 +``` + + However, it is recommended that such large multi-MiB messages are put into a blob store, and their +IDs are passed around in messages instead of the entire payload. + +### AMQP 1.0 + +RabbitMQ 3.13 `rabbitmq.conf` setting `rabbitmq_amqp1_0.default_vhost` is unsupported in RabbitMQ 4.0. + +Instead `default_vhost` will be used to determine the default vhost an AMQP 1.0 client connects to (i.e. when the AMQP 1.0 client +does not define the vhost in the `hostname` field of the `open` frame). + +Starting with RabbitMQ 4.0, RabbitMQ strictly validates that +[delivery annotations](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-delivery-annotations), +[message annotations](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-message-annotations), and +[footer](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-footer) contain only +[non-reserved annotation keys](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-annotations). +As a result, clients can only send symbolic keys that begin with `x-`. + +### MQTT + +RabbitMQ 3.13 [rabbitmq.conf](https://www.rabbitmq.com/docs/configure#config-file) settings `mqtt.default_user`, `mqtt.default_password`, +and `amqp1_0.default_user` are unsupported in RabbitMQ 4.0. + +Instead, set the new RabbitMQ 4.0 settings `anonymous_login_user` and `anonymous_login_pass` (both values default to `guest`). +For production scenarios, [disallow anonymous logins](https://www.rabbitmq.com/docs/next/production-checklist#anonymous-login). 
+ +### TLS Client (LDAP, Shovels, Federation) Defaults + +Starting with Erlang 26, client side [TLS peer certificate chain verification](https://www.rabbitmq.com/docs/ssl#peer-verification) settings are enabled by default in most contexts: +from federation links to shovels to TLS-enabled LDAP client connections. + +If using TLS peer certificate chain verification is not practical or necessary, it can be disabled. +Please refer to the docs of the feature in question, for example, +this one [on TLS-enabled LDAP client](http://rabbitmq.com/docs/ldap/#tls) connections, +two others on [TLS-enabled dynamic shovels](https://www.rabbitmq.com/docs/shovel#tls) and [dynamic shovel URI query parameters](https://www.rabbitmq.com/docs/uri-query-parameters). + +### Shovels + +RabbitMQ Shovels will be able to connect to a RabbitMQ 4.0 node via AMQP 1.0 only when the Shovel runs on a RabbitMQ node >= `3.13.7`. + +TLS-enabled Shovels will be affected by the TLS client default changes in Erlang 26 (see above). + + +## Erlang/OTP Compatibility Notes + +This release [requires Erlang 26.2](https://www.rabbitmq.com/docs/which-erlang). + +[Provisioning Latest Erlang Releases](https://www.rabbitmq.com/docs/which-erlang#erlang-repositories) explains +what package repositories and tools can be used to provision latest patch versions of Erlang 26.x. + + +## Release Artifacts + +RabbitMQ releases are distributed via [GitHub](https://github.com/rabbitmq/rabbitmq-server/releases). +[Debian](https://rabbitmq.com/docs/install-debian/) and [RPM packages](https://rabbitmq.com/docs/install-rpm/) are available via +repositories maintained by the RabbitMQ Core Team. + +[Community Docker image](https://hub.docker.com/_/rabbitmq/), [Chocolatey package](https://community.chocolatey.org/packages/rabbitmq), and the [Homebrew formula](https://www.rabbitmq.com/docs/install-homebrew) +are other installation options. They are updated with a delay. 
+ +### Known Issue: Incorrect Version in Generic Binary Builds + +Generic binary builds of `4.0.1` incorrectly report their version as `4.0.0+2`. This also applies to plugin versions. This was [addressed in `4.0.2`](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v4.0.2). + +Other artifacts (Debian and RPM packages, the Windows installer) are not affected. + + +## Upgrading to 4.0 + +### Documentation guides on upgrades + +See the [Upgrading guide](https://www.rabbitmq.com/docs/upgrade) for documentation on upgrades and [GitHub releases](https://github.com/rabbitmq/rabbitmq-server/releases) +for release notes of individual releases. + +This release series only supports upgrades from `3.13.x`. + +This release requires **all feature flags** in the 3.x series (specifically `3.13.x`) to be enabled before upgrading; +there is no upgrade path from 3.12.14 (or a later patch release) straight to `4.0.0`. + +### Required Feature Flags + +This release [graduates](https://www.rabbitmq.com/docs/feature-flags#graduation) all feature flags introduced up to `3.13.0`. + +All users must enable all stable [feature flags](https://www.rabbitmq.com/docs/feature-flags) before upgrading to 4.0 from +the latest available 3.13.x patch release. + +### Mixed version cluster compatibility + +RabbitMQ 4.0.0 nodes can run alongside `3.13.x` nodes. `4.0.x`-specific features can only be made available when all nodes in the cluster +upgrade to 4.0.0 or a later patch release in the new series. + +While operating in mixed version mode, some aspects of the system may not behave as expected. The list of known behavior changes will be covered in future updates. +Once all nodes are upgraded to 4.0.0, these irregularities will go away. + +Mixed version clusters are a mechanism that allows rolling upgrade and are not meant to be run for extended +periods of time (no more than a few hours). 
+ +### Recommended Post-upgrade Procedures + +#### Configure Dead Lettering or Increase the Limit for Frequently Redelivered Messages + +In environments where messages can experience 20 redeliveries, the affected queues should have [dead lettering](https://www.rabbitmq.com/docs/dlx) +configured (usually via a [policy](https://www.rabbitmq.com/docs/parameters#policies)) to make sure +that messages that are redelivered 20 times are moved to a separate queue (or stream) instead of +being dropped (removed) by the [crash-requeue-redelivery loop protection mechanism](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling). + +Alternatively, the limit can be [increased](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) using a policy. +This option is recommended against: usually the presence of messages that have been redelivered 20 times or more suggests +that a consumer has entered a fail-requeue-fail-requeue loop, in which case even a much higher limit +won't help avoid the dead-lettering. + + +## Changes Worth Mentioning + +This section is incomplete and will be expanded as 4.0 approaches its release candidate stage. + +### Core Server + +#### Enhancements + + * Efficient sub-linear quorum queue recovery on node startup using checkpoints. + + GitHub issue: [#10637](https://github.com/rabbitmq/rabbitmq-server/pull/10637) + + * Classic queue storage v2 (CQv2) optimizations. For example, CQv2 recovery time on node boot + is now twice as fast for some data sets. + + GitHub issue: [#11112](https://github.com/rabbitmq/rabbitmq-server/pull/11112) + + * Node startup time improvements. For some environments, nodes with very small on disk data sets + now start about 25% quicker. + + GitHub issue: [#10989](https://github.com/rabbitmq/rabbitmq-server/pull/10989) + + * Quorum queues now support [priorities](https://www.rabbitmq.com/docs/next/quorum-queues#priorities). 
However, + there are difference with how priorities work in classic queues. + + GitHub issue: [#10637](https://github.com/rabbitmq/rabbitmq-server/pull/10637) + + * Per-message metadata stored in the quorum queue Raft log now uses less disk space. + + GitHub issue: [#8261](https://github.com/rabbitmq/rabbitmq-server/issues/8261) + + * Single Active Consumer (SAC) implementation of quorum queues now [respects](https://www.rabbitmq.com/blog/2024/08/28/quorum-queues-in-4.0#consumer-priorities-combined-with-single-active-consumer) consumer priorities. + + GitHub issue: [#8261](https://github.com/rabbitmq/rabbitmq-server/issues/8261) + + * `rabbitmq.conf` now supports [encrypted values](https://www.rabbitmq.com/docs/next/configure#configuration-encryption) + with a prefix: + + ``` ini + default_user = bunnies-444 + default_pass = encrypted:F/bjQkteQENB4rMUXFKdgsJEpYMXYLzBY/AmcYG83Tg8AOUwYP7Oa0Q33ooNEpK9 + ``` + + GitHub issue: [#11989](https://github.com/rabbitmq/rabbitmq-server/pull/11989) + + * All feature flags up to `3.13.0` have [graduated](https://www.rabbitmq.com/docs/feature-flags#graduation) and are now mandatory. + + GitHub issue: [#11659](https://github.com/rabbitmq/rabbitmq-server/pull/11659) + + * Quorum queues now use a default [redelivery limit](https://www.rabbitmq.com/docs/next/quorum-queues#poison-message-handling) of 20. + + GitHub issue: [#11937](https://github.com/rabbitmq/rabbitmq-server/pull/11937) + + * `queue_master_locator` queue setting has been deprecated in favor of `queue_leader_locator` used by quorum queues + and streams. + + GitHub issue: [#10702](https://github.com/rabbitmq/rabbitmq-server/issues/10702) + + +### AMQP 1.0 + +#### Bug Fixes + + * AMQP 0-9-1 to AMQP 1.0 string data type conversion improvements. + + GitHub issue: [#11715](https://github.com/rabbitmq/rabbitmq-server/pull/11715) + +#### Enhancements + + * [AMQP 1.0 is now a core protocol](https://www.rabbitmq.com/blog/2024/08/05/native-amqp) that is always enabled. 
+ Its plugin is now a no-op that only exists to simplify upgrades. + + GitHub issues: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022), [#10662](https://github.com/rabbitmq/rabbitmq-server/pull/10662) + + * The AMQP 1.0 implementation is now significantly more efficient: its peak throughput is [more than double that of 3.13.x](https://www.rabbitmq.com/blog/2024/08/21/amqp-benchmarks) + on some workloads. + + GitHub issue: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022) + + * For AMQP 1.0, [resource alarms](https://www.rabbitmq.com/docs/alarms) only block inbound `TRANSFER` frames instead of blocking all traffic. + + GitHub issue: [#9022](https://github.com/rabbitmq/rabbitmq-server/pull/9022) + + * AMQP 1.0 clients now can manage topologies (queues, exchanges, bindings). + + GitHub issue: [#10559](https://github.com/rabbitmq/rabbitmq-server/pull/10559) + + * AMQP 1.0 implementation now supports a new (v2) address format for referencing queues, exchanges, and so on. + + GitHub issues: [#11604](https://github.com/rabbitmq/rabbitmq-server/pull/11604), [#11618](https://github.com/rabbitmq/rabbitmq-server/pull/11618) + + * AMQP 1.0 implementation now supports consumer priorities. + + GitHub issue: [#11705](https://github.com/rabbitmq/rabbitmq-server/pull/11705) + + * Client-provided connection name will now be logged for AMQP 1.0 connections. + + GitHub issue: [#11958](https://github.com/rabbitmq/rabbitmq-server/issues/11958) + + +### Streams + +#### Enhancements + + * Stream filtering is now supported for AMQP 1.0 clients. + + GitHub issue: [#10098](https://github.com/rabbitmq/rabbitmq-server/pull/10098) + + +### Prometheus Plugin + +#### Enhancements + + * [Detailed memory breakdown](https://www.rabbitmq.com/docs/memory-use) metrics are now exposed via the Prometheus scraping endpoint. + + GitHub issue: [#11743](https://github.com/rabbitmq/rabbitmq-server/issues/11743) + + * New per-exchange and per-queue metrics. + + Contributed by @LoisSotoLopez. 
+ + GitHub issue: [#11559](https://github.com/rabbitmq/rabbitmq-server/pull/11559) + + * Shovel and Federation metrics are now available via two new plugins: `rabbitmq_shovel_prometheus` and `rabbitmq_federation_prometheus`. + + Contributed by @SimonUnge. + + GitHub issue: [#11942](https://github.com/rabbitmq/rabbitmq-server/pull/11942) + + +### Shovel Plugin + +#### Enhancements + + * Shovels now can be configured to use pre-declared topologies. This is primarily useful in environments where + schema definition comes from [definitions](https://www.rabbitmq.com/docs/definitions). + + GitHub issue: [#10501](https://github.com/rabbitmq/rabbitmq-server/issues/10501) + + +### Local Random Exchange Plugin + +This is an initial release that includes [Local Random Exchange](https://www.rabbitmq.com/docs/next/local-random-exchange). + +GitHub issues: [#8334](https://github.com/rabbitmq/rabbitmq-server/pull/8334), [#10091](https://github.com/rabbitmq/rabbitmq-server/pull/10091). + + +### STOMP Plugin + +#### Enhancements + + * STOMP now supports consumer priorities. + + GitHub issue: [#11947](https://github.com/rabbitmq/rabbitmq-server/pull/11947) + + +### Dependency Changes + + * Ra was [upgraded to `2.14.0`](https://github.com/rabbitmq/ra/releases) + * Khepri was [upgraded to `0.16.0`](https://github.com/rabbitmq/khepri/releases) + * Cuttlefish was [upgraded to `3.4.0`](https://github.com/Kyorai/cuttlefish/releases) + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-4.0.1.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/4.0.2.md b/release-notes/4.0.2.md new file mode 100644 index 000000000000..c2f7b67801c3 --- /dev/null +++ b/release-notes/4.0.2.md @@ -0,0 +1,42 @@ +## RabbitMQ 4.0.2 + +RabbitMQ `4.0.2` is a maintenance release in the `4.0.x` [release series](https://www.rabbitmq.com/release-information). 
+ +Starting June 1st, 2024, community support for this series will only be provided to [regularly contributing users](https://github.com/rabbitmq/rabbitmq-server/blob/main/COMMUNITY_SUPPORT.md) and those +who hold a valid [commercial support license](https://tanzu.vmware.com/rabbitmq/oss). + +Please refer to the upgrade section from the [4.0 release notes](https://github.com/rabbitmq/rabbitmq-server/releases/tag/v4.0.1) +if upgrading from a version prior to 4.0. + +This release requires Erlang 26 and supports Erlang versions up to `26.2.x`. +[RabbitMQ and Erlang/OTP Compatibility Matrix](https://www.rabbitmq.com/docs/which-erlang) has more details on +Erlang version requirements for RabbitMQ. + + +### Minimum Supported Erlang Version + +As of 4.0, RabbitMQ requires Erlang 26. Nodes **will fail to start** on older Erlang releases. + + +## Changes Worth Mentioning + +Release notes can be found on GitHub at [rabbitmq-server/release-notes](https://github.com/rabbitmq/rabbitmq-server/tree/v4.0.x/release-notes). + + +### Generic Binary Package + +#### Bug Fixes + + * Generic binary packages used an incorrect version (`4.0.0+2` instead of `4.0.1`) at build time + + GitHub issue: [#12339](https://github.com/rabbitmq/rabbitmq-server/issues/12339) + + +### Dependency Changes + +None in this release. + +## Source Code Archives + +To obtain source code of the entire distribution, please download the archive named `rabbitmq-server-4.0.2.tar.xz` +instead of the source tarball produced by GitHub. diff --git a/release-notes/4.1.0.md b/release-notes/4.1.0.md new file mode 100644 index 000000000000..432b4fd641f9 --- /dev/null +++ b/release-notes/4.1.0.md @@ -0,0 +1,5 @@ +## RabbitMQ 4.1.0 + +## Potential incompatibilities + +* The default MQTT [Maximum Packet Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901086) changed from 256 MiB to 16 MiB. 
This default can be overriden by [configuring](https://www.rabbitmq.com/docs/configure#config-file) `mqtt.max_packet_size_authenticated`. Note that this value must not be greater than `max_message_size` (which also defaults to 16 MiB). diff --git a/scripts/bazel/rabbitmq-run.bat b/scripts/bazel/rabbitmq-run.bat index 0970de67d4d9..8e1f08b65318 100644 --- a/scripts/bazel/rabbitmq-run.bat +++ b/scripts/bazel/rabbitmq-run.bat @@ -81,10 +81,6 @@ set RABBITMQ_PLUGINS_EXPAND_DIR=%NODE_TMPDIR%\plugins set RABBITMQ_FEATURE_FLAGS_FILE=%NODE_TMPDIR%\feature_flags set RABBITMQ_ENABLED_PLUGINS_FILE=%NODE_TMPDIR%\enabled_plugins -if not defined RABBITMQ_SERVER_START_ARGS ( - set RABBITMQ_SERVER_START_ARGS=-ra wal_sync_method sync -) - if not defined RABBITMQ_LOG ( set RABBITMQ_LOG=debug,+color ) @@ -115,8 +111,7 @@ if "%CMD%" == "run-broker" ( @echo {rabbitmq_mqtt, []}, @echo {rabbitmq_stomp, []}, @echo {ra, [ - @echo {data_dir, "!RABBITMQ_QUORUM_DIR:\=\\!"}, - @echo {wal_sync_method, sync} + @echo {data_dir, "!RABBITMQ_QUORUM_DIR:\=\\!"} @echo ]}, @echo {osiris, [ @echo {data_dir, "!RABBITMQ_STREAM_DIR:\=\\!"} diff --git a/scripts/bazel/rabbitmq-run.sh b/scripts/bazel/rabbitmq-run.sh index af45cf8a239a..5324a3d559d8 100755 --- a/scripts/bazel/rabbitmq-run.sh +++ b/scripts/bazel/rabbitmq-run.sh @@ -78,8 +78,7 @@ write_config_file() { ${rabbitmq_prometheus_fragment} ]}, {ra, [ - {data_dir, "${RABBITMQ_QUORUM_DIR}"}, - {wal_sync_method, sync} + {data_dir, "${RABBITMQ_QUORUM_DIR}"} ]}, {osiris, [ {data_dir, "${RABBITMQ_STREAM_DIR}"} @@ -195,8 +194,6 @@ fi RABBITMQ_PLUGINS_DIR=${RABBITMQ_PLUGINS_DIR:=${DEFAULT_PLUGINS_DIR}} export RABBITMQ_PLUGINS_DIR -RABBITMQ_SERVER_START_ARGS="${RABBITMQ_SERVER_START_ARGS:=-ra wal_sync_method sync}" -export RABBITMQ_SERVER_START_ARGS # Enable colourful debug logging by default # To change this, set RABBITMQ_LOG to info, notice, warning etc. 
diff --git a/selenium/.gitignore b/selenium/.gitignore new file mode 100644 index 000000000000..63c36b351eb4 --- /dev/null +++ b/selenium/.gitignore @@ -0,0 +1,9 @@ +node_modules +package-lock.json +screens/*/* +logs +suites/logs/* +suites/screens/* +test/oauth/*/h2/*.trace.db +test/oauth/*/h2/*.lock.db +*/target/* diff --git a/deps/rabbitmq_management/selenium/Dockerfile b/selenium/Dockerfile similarity index 85% rename from deps/rabbitmq_management/selenium/Dockerfile rename to selenium/Dockerfile index 0998b81138a8..8e34be523f28 100644 --- a/deps/rabbitmq_management/selenium/Dockerfile +++ b/selenium/Dockerfile @@ -4,7 +4,6 @@ FROM atools/jdk-maven-node:mvn3-jdk11-node16 as base WORKDIR /code COPY package.json package.json -COPY run-amqp10-roundtriptest run-amqp10-roundtriptest FROM base as test RUN npm install diff --git a/deps/rabbitmq_management/selenium/README.md b/selenium/README.md similarity index 78% rename from deps/rabbitmq_management/selenium/README.md rename to selenium/README.md index 0f9fcee379be..131c0a370648 100644 --- a/deps/rabbitmq_management/selenium/README.md +++ b/selenium/README.md @@ -1,7 +1,38 @@ -# Automated End-to-End testing of the management ui with Selenium - -Selenium webdriver is used to drive web browser's interactions on the management ui. -And Mocha is used as the testing framework for Javascript. +# Automated End-to-End testing with Mocha and Selenium + +## What is it? + +It is a solution that allows you to write end-to-end tests in Javascript. The solution +takes care of: + + - generating the required RabbitMQ configuration + - deploying RabbitMQ with the generated configuration in 3 ways: + - from source via `make run-broker`. + - with docker via a single docker instance. + - with docker compose via a 3-node cluster. 
+ - deploying any other dependencies required by the test case such as: + - keycloak + - uaa + - ldap + - http authentication backend + - http proxy + - http portal + - running the test cases + - capturing the logs from RabbitMQ and all the dependencies + - stopping RabbitMQ and all the dependencies + +## Integration with Github actions + +These are the three github workflows that run end-to-end tests: +- [test-management-ui.yaml](.github/workflows/test-management-ui.yaml) Runs all the test suites +listed on the file [short-suite-management-ui](selenium/short-suite-management-ui). It tests the management ui deployed on a 3-node cluster. It is invoked on every push to a branch. +- [test-management-ui-for-prs.yaml](.github/workflows/test-management-ui.yaml) Runs all the test suites +listed on the file [full-suite-management-ui](selenium/full-suite-management-ui). It tests the management ui deployed on a single docker instance. It is invoked on every push to a PR. +- [test-authnz.yaml](.github/workflows/test-authnz.yaml) Runs all the test suites +listed on the file [full-suite-authnz-messaging](selenium/full-suite-authnz-messaging). It is invoked on every push to a PR and/or branch. + + +## Prerequisites The following must be installed to run the tests: - make @@ -10,9 +41,9 @@ The following must be installed to run the tests: # Organization of test cases -`test` folder contains the test cases written in Javascript using Selenium webdriver. Test cases are grouped into folders based on the area of functionality. -For instance, `test/basic-auth` contains test cases that validates basic authentication. Another example, a bit -more complex, is `test/oauth` where the test cases are stored in subfolders. For instance, `test/oauth/with-sp-initiated` which validate OAuth 2 authorization where users come to RabbitMQ without any token and RabbitMQ initiates the authorization process. +`test` folder contains the test cases written in Javascript using Mocha framework. 
+Test cases are grouped into folders based on the area of functionality. +For instance, `test/basic-auth` contains test cases that validates basic authentication. Another example, a bit more complex, is `test/oauth` where the test cases are stored in subfolders. For instance, `test/oauth/with-sp-initiated` which validate OAuth 2 authorization where users come to RabbitMQ without any token and RabbitMQ initiates the authorization process. The `test` folder also contains the necessary configuration files. For instance, `test/basic-auth` contains `rabbitmq.conf` file which is also shared by other test cases such as `test/definitions` or `test/limits`. diff --git a/deps/rabbitmq_management/selenium/amqp10-roundtriptest/pom.xml b/selenium/amqp10-roundtriptest/pom.xml similarity index 98% rename from deps/rabbitmq_management/selenium/amqp10-roundtriptest/pom.xml rename to selenium/amqp10-roundtriptest/pom.xml index 01f3780d1142..f39425a50ee4 100644 --- a/deps/rabbitmq_management/selenium/amqp10-roundtriptest/pom.xml +++ b/selenium/amqp10-roundtriptest/pom.xml @@ -10,7 +10,7 @@ 5.9.3 2.3.0 - 1.2.11 + 1.2.13 2.24.0 1.17.0 3.11.0 diff --git a/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest b/selenium/amqp10-roundtriptest/run similarity index 67% rename from deps/rabbitmq_management/selenium/run-amqp10-roundtriptest rename to selenium/amqp10-roundtriptest/run index 4f76fbf41603..b91f0becf7a7 100755 --- a/deps/rabbitmq_management/selenium/run-amqp10-roundtriptest +++ b/selenium/amqp10-roundtriptest/run @@ -7,10 +7,10 @@ if [[ -f "/code/amqp10-roundtriptest" ]]; then echo "Running amqp10-roundtriptest inside mocha-test docker image ..." java -jar /code/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar $@ else - if [[ ! -f "amqp10-roundtriptest/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar" ]]; then + if [[ ! 
-f "${SCRIPT}/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar" ]]; then echo "Building amqp10-roundtriptest jar ..." mvn -f amqp10-roundtriptest package $@ fi echo "Running amqp10-roundtriptest jar ..." - java -jar amqp10-roundtriptest/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar $@ + java -jar ${SCRIPT}/target/amqp10-roundtriptest-1.0-SNAPSHOT-jar-with-dependencies.jar $@ fi diff --git a/deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java b/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java similarity index 100% rename from deps/rabbitmq_management/selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java rename to selenium/amqp10-roundtriptest/src/main/java/com/rabbitmq/amqp1_0/RoundTripTest.java diff --git a/deps/rabbitmq_management/selenium/bin/components/README.md b/selenium/bin/components/README.md similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/README.md rename to selenium/bin/components/README.md diff --git a/deps/rabbitmq_management/selenium/bin/components/devkeycloak b/selenium/bin/components/devkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/devkeycloak rename to selenium/bin/components/devkeycloak diff --git a/deps/rabbitmq_management/selenium/bin/components/fakeportal b/selenium/bin/components/fakeportal similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/fakeportal rename to selenium/bin/components/fakeportal diff --git a/deps/rabbitmq_management/selenium/bin/components/fakeproxy b/selenium/bin/components/fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/fakeproxy rename to selenium/bin/components/fakeproxy diff --git a/deps/rabbitmq_management/selenium/bin/components/keycloak b/selenium/bin/components/keycloak similarity index 100% rename from 
deps/rabbitmq_management/selenium/bin/components/keycloak rename to selenium/bin/components/keycloak diff --git a/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-http b/selenium/bin/components/mock-auth-backend-http similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-http rename to selenium/bin/components/mock-auth-backend-http diff --git a/deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-ldap b/selenium/bin/components/mock-auth-backend-ldap similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/mock-auth-backend-ldap rename to selenium/bin/components/mock-auth-backend-ldap diff --git a/deps/rabbitmq_management/selenium/bin/components/prodkeycloak b/selenium/bin/components/prodkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/prodkeycloak rename to selenium/bin/components/prodkeycloak diff --git a/deps/rabbitmq_management/selenium/bin/components/proxy b/selenium/bin/components/proxy similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/proxy rename to selenium/bin/components/proxy diff --git a/selenium/bin/components/rabbitmq b/selenium/bin/components/rabbitmq new file mode 100644 index 000000000000..9eea9e13c2a7 --- /dev/null +++ b/selenium/bin/components/rabbitmq @@ -0,0 +1,183 @@ +#!/usr/bin/env bash + +init_rabbitmq() { + RABBITMQ_CONFIG_DIR=${TEST_CONFIG_DIR} + RABBITMQ_DOCKER_IMAGE=${RABBITMQ_DOCKER_IMAGE:-rabbitmq} + + print "> RABBITMQ_CONFIG_DIR: ${RABBITMQ_CONFIG_DIR}" + print "> RABBITMQ_DOCKER_IMAGE: ${RABBITMQ_DOCKER_IMAGE}" + [[ -z "${OAUTH_SERVER_CONFIG_BASEDIR}" ]] || print "> OAUTH_SERVER_CONFIG_BASEDIR: ${OAUTH_SERVER_CONFIG_BASEDIR}" + [[ -z "${OAUTH_SERVER_CONFIG_DIR}" ]] || print "> OAUTH_SERVER_CONFIG_DIR: ${OAUTH_SERVER_CONFIG_DIR}" + +} + +start_rabbitmq() { + if [[ "$PROFILES" == *"docker"* ]]; then + if [[ "$PROFILES" == *"cluster"* ]]; then + 
start_docker_cluster_rabbitmq + else + start_docker_rabbitmq + fi + else + start_local_rabbitmq + fi +} +stop_rabbitmq() { + if [[ "$PROFILES" == *"docker"* ]]; then + if [[ "$PROFILES" == *"cluster"* ]]; then + docker compose -f $CONF_DIR/rabbitmq/compose.yml kill + else + kill_container_if_exist "$component" + fi + else + stop_local_rabbitmq + fi +} +stop_local_rabbitmq() { + RABBITMQ_SERVER_ROOT=$(realpath ../) + gmake --directory=${RABBITMQ_SERVER_ROOT} stop-node +} +save_logs_rabbitmq() { + if [[ "$PROFILES" == *"docker"* ]]; then + if [[ "$PROFILES" == *"cluster"* ]]; then + docker compose -f $CONF_DIR/rabbitmq/compose.yml logs > $LOGS/rabbitmq.log + else + save_container_logs "rabbitmq" + fi + fi +} +start_local_rabbitmq() { + begin "Starting rabbitmq ..." + + init_rabbitmq + + RABBITMQ_SERVER_ROOT=$(realpath ../) + MOUNT_RABBITMQ_CONF="/etc/rabbitmq/rabbitmq.conf" + MOUNT_ADVANCED_CONFIG="/etc/rabbitmq/advanced.config" + + RABBITMQ_TEST_DIR="${RABBITMQ_CONFIG_DIR}" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_RABBITMQ_CONF + + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_RABBITMQ_CONF" + ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE /tmp$MOUNT_ADVANCED_CONFIG + cp ${RABBITMQ_CONFIG_DIR}/enabled_plugins /tmp/etc/rabbitmq/ + RESULT=$? 
+ cp ${RABBITMQ_CONFIG_DIR}/enabled_plugins /tmp/etc/rabbitmq/ + if [ $RESULT -eq 0 ]; then + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: /tmp$MOUNT_ADVANCED_CONFIG" + gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ + RABBITMQ_ENABLED_PLUGINS_FILE=/tmp/etc/rabbitmq/enabled_plugins \ + RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF \ + RABBITMQ_ADVANCED_CONFIG_FILE=/tmp$MOUNT_ADVANCED_CONFIG + else + gmake --directory=${RABBITMQ_SERVER_ROOT} run-broker \ + RABBITMQ_ENABLED_PLUGINS_FILE=/tmp/etc/rabbitmq/enabled_plugins \ + RABBITMQ_CONFIG_FILE=/tmp$MOUNT_RABBITMQ_CONF + fi + print "> RABBITMQ_TEST_DIR: ${RABBITMQ_CONFIG_DIR}" + + +} +start_docker_cluster_rabbitmq() { + begin "Starting rabbitmq cluster in docker ..." + init_rabbitmq + kill_container_if_exist rabbitmq + kill_container_if_exist rabbitmq1 + kill_container_if_exist rabbitmq2 + + mkdir -p $CONF_DIR/rabbitmq + + RABBITMQ_TEST_DIR="/var/rabbitmq" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/rabbitmq.conf + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: $CONF_DIR/rabbitmq/rabbitmq.conf" + ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/advanced.config + RESULT=$? 
+ if [ $RESULT -eq 0 ]; then + if [ -s $RESULT ]; then + print "> EFFECTIVE ADVANCED_CONFIG_FILE: $CONF_DIR/rabbitmq/advanced.config" + else + rm $CONF_DIR/rabbitmq/advanced.config + fi + fi + if [ -f ${RABBITMQ_CONFIG_DIR}/logging.conf ]; then + mkdir -p $CONF_DIR/rabbitmq/conf.d/ + cp ${RABBITMQ_CONFIG_DIR}/logging.conf $CONF_DIR/rabbitmq/conf.d/ + fi + if [ -f ${RABBITMQ_CONFIG_DIR}/enabled_plugins ]; then + cp ${RABBITMQ_CONFIG_DIR}/enabled_plugins $CONF_DIR/rabbitmq + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/certs ]; then + cp -r ${RABBITMQ_CONFIG_DIR}/certs $CONF_DIR/rabbitmq + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/imports ]; then + cp -r ${RABBITMQ_CONFIG_DIR}/imports $CONF_DIR/rabbitmq + fi + + cat > $CONF_DIR/rabbitmq/image_compose.yml < $CONF_DIR/rabbitmq/compose.yml + cat ${BIN_DIR}/components/../rabbit-compose.yml >> $CONF_DIR/rabbitmq/compose.yml + + docker compose -f $CONF_DIR/rabbitmq/compose.yml up -d + + wait_for_message rabbitmq "Server startup complete" + wait_for_message rabbitmq1 "Server startup complete" + wait_for_message rabbitmq2 "Server startup complete" + end "RabbitMQ cluster ready" +} + +start_docker_rabbitmq() { + begin "Starting rabbitmq in docker ..." + + init_rabbitmq + kill_container_if_exist rabbitmq + + mkdir -p $CONF_DIR/rabbitmq + + RABBITMQ_TEST_DIR="/var/rabbitmq" ${BIN_DIR}/gen-rabbitmq-conf ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/rabbitmq.conf + print "> EFFECTIVE RABBITMQ_CONFIG_FILE: $CONF_DIR/rabbitmq/rabbitmq.conf" + ${BIN_DIR}/gen-advanced-config ${RABBITMQ_CONFIG_DIR} $ENV_FILE $CONF_DIR/rabbitmq/advanced.config + RESULT=$? 
+ if [ $RESULT -eq 0 ]; then + if [ -s $RESULT ]; then + print "> EFFECTIVE ADVANCED_CONFIG_FILE: $CONF_DIR/rabbitmq/advanced.config" + else + rm $CONF_DIR/rabbitmq/advanced.config + fi + fi + if [ -f ${RABBITMQ_CONFIG_DIR}/logging.conf ]; then + mkdir -p $CONF_DIR/rabbitmq/conf.d/ + cp ${RABBITMQ_CONFIG_DIR}/logging.conf $CONF_DIR/rabbitmq/conf.d/ + fi + if [ -f ${RABBITMQ_CONFIG_DIR}/enabled_plugins ]; then + cp ${RABBITMQ_CONFIG_DIR}/enabled_plugins $CONF_DIR/rabbitmq + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/certs ]; then + cp -r ${RABBITMQ_CONFIG_DIR}/certs $CONF_DIR/rabbitmq + fi + if [ -d ${RABBITMQ_CONFIG_DIR}/imports ]; then + cp -r ${RABBITMQ_CONFIG_DIR}/imports $CONF_DIR/rabbitmq + fi + + print "> RABBITMQ_TEST_DIR: /var/rabbitmq" + + docker run \ + --detach \ + --name rabbitmq \ + --net ${DOCKER_NETWORK} \ + -p 5672:5672 \ + -p 5671:5671 \ + -p 15672:15672 \ + -p 15671:15671 \ + -v $CONF_DIR/rabbitmq/:/etc/rabbitmq \ + -v $CONF_DIR/rabbitmq/:/var/rabbitmq \ + -v ${TEST_DIR}:/config \ + ${RABBITMQ_DOCKER_IMAGE} + + wait_for_message rabbitmq "Server startup complete" + end "RabbitMQ ready" +} diff --git a/deps/rabbitmq_management/selenium/bin/components/selenium b/selenium/bin/components/selenium similarity index 96% rename from deps/rabbitmq_management/selenium/bin/components/selenium rename to selenium/bin/components/selenium index 3ebf955053e1..2563927b4fb9 100644 --- a/deps/rabbitmq_management/selenium/bin/components/selenium +++ b/selenium/bin/components/selenium @@ -1,11 +1,11 @@ #!/usr/bin/env bash -arch=$(uname -i) +arch=$(uname -a) if [[ $arch == arm* ]]; then SELENIUM_DOCKER_IMAGE=selenium/standalone-chrome:123.0 else SELENIUM_DOCKER_IMAGE=seleniarm/standalone-chromium:123.0 -fi +fi start_selenium() { begin "Starting selenium ..." 
diff --git a/deps/rabbitmq_management/selenium/bin/components/uaa b/selenium/bin/components/uaa similarity index 100% rename from deps/rabbitmq_management/selenium/bin/components/uaa rename to selenium/bin/components/uaa diff --git a/deps/rabbitmq_management/selenium/bin/find-template-files b/selenium/bin/find-template-files similarity index 100% rename from deps/rabbitmq_management/selenium/bin/find-template-files rename to selenium/bin/find-template-files diff --git a/deps/rabbitmq_management/selenium/bin/gen-advanced-config b/selenium/bin/gen-advanced-config similarity index 97% rename from deps/rabbitmq_management/selenium/bin/gen-advanced-config rename to selenium/bin/gen-advanced-config index 6f186afef2be..a0fc7a27df73 100755 --- a/deps/rabbitmq_management/selenium/bin/gen-advanced-config +++ b/selenium/bin/gen-advanced-config @@ -28,5 +28,6 @@ then fi if [ "$FOUND_TEMPLATES_COUNT" -lt 1 ] then + rm $FINAL_CONFIG_FILE exit -1 fi diff --git a/deps/rabbitmq_management/selenium/bin/gen-env-file b/selenium/bin/gen-env-file similarity index 100% rename from deps/rabbitmq_management/selenium/bin/gen-env-file rename to selenium/bin/gen-env-file diff --git a/deps/rabbitmq_management/selenium/bin/gen-httpd-conf b/selenium/bin/gen-httpd-conf similarity index 100% rename from deps/rabbitmq_management/selenium/bin/gen-httpd-conf rename to selenium/bin/gen-httpd-conf diff --git a/deps/rabbitmq_management/selenium/bin/gen-keycloak-json b/selenium/bin/gen-keycloak-json similarity index 100% rename from deps/rabbitmq_management/selenium/bin/gen-keycloak-json rename to selenium/bin/gen-keycloak-json diff --git a/deps/rabbitmq_management/selenium/bin/gen-rabbitmq-conf b/selenium/bin/gen-rabbitmq-conf similarity index 100% rename from deps/rabbitmq_management/selenium/bin/gen-rabbitmq-conf rename to selenium/bin/gen-rabbitmq-conf diff --git a/deps/rabbitmq_management/selenium/bin/gen-uaa-yml b/selenium/bin/gen-uaa-yml similarity index 100% rename from 
deps/rabbitmq_management/selenium/bin/gen-uaa-yml rename to selenium/bin/gen-uaa-yml diff --git a/selenium/bin/rabbit-compose.yml b/selenium/bin/rabbit-compose.yml new file mode 100644 index 000000000000..81cf57e48df9 --- /dev/null +++ b/selenium/bin/rabbit-compose.yml @@ -0,0 +1,49 @@ + + +# https://docs.docker.com/compose/compose-file/#networks +networks: + rabbitmq_net: + name: rabbitmq_net + external: true + +services: + rmq0: &rabbitmq + # https://hub.docker.com/r/pivotalrabbitmq/rabbitmq-prometheus/tags + image: ${RABBITMQ_DOCKER_IMAGE} + networks: + - "rabbitmq_net" + ports: + - "5672:5672" + - "15672:15672" + - "15692:15692" + # https://unix.stackexchange.com/questions/71940/killing-tcp-connection-in-linux + # https://en.wikipedia.org/wiki/Tcpkill + # https://www.digitalocean.com/community/tutorials/iptables-essentials-common-firewall-rules-and-commands#block-an-ip-address + cap_add: + - ALL + hostname: rabbitmq + container_name: rabbitmq + environment: + RABBITMQ_ERLANG_COOKIE: rmq0 + + # we want to simulate hitting thresholds + ulimits: + nofile: + soft: "2000" + hard: "2000" + rmq1: + << : *rabbitmq + container_name: rabbitmq1 + hostname: rabbitmq1 + ports: + - "5677:5672" + - "15677:15672" + - "15697:15692" + rmq2: + << : *rabbitmq + hostname: rabbitmq2 + container_name: rabbitmq2 + ports: + - "5678:5672" + - "15678:15672" + - "15698:15692" diff --git a/deps/rabbitmq_management/selenium/bin/suite_template b/selenium/bin/suite_template similarity index 83% rename from deps/rabbitmq_management/selenium/bin/suite_template rename to selenium/bin/suite_template index 3c608016ade0..faad7cbb8031 100644 --- a/deps/rabbitmq_management/selenium/bin/suite_template +++ b/selenium/bin/suite_template @@ -1,6 +1,9 @@ #!/usr/bin/env bash -#set -x +if [[ !
-z "${DEBUG}" ]]; then + set -x +fi + SCRIPT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SUITE=$(caller) @@ -32,6 +35,8 @@ SCREENS=${SELENIUM_ROOT_FOLDER}/screens/${SUITE} CONF_DIR=/tmp/selenium/${SUITE} ENV_FILE=$CONF_DIR/.env +rm -rf $CONF_DIR + for f in $SCRIPT/components/*; do if [[ ! "$f" == *README.md ]] then @@ -54,6 +59,9 @@ parse_arguments() { elif [[ "$1" == "stop-others" ]] then echo "stop-others" + elif [[ "$1" == "stop-rabbitmq" ]] + then + echo "stop-rabbitmq" elif [[ "$1" == "test" ]] then echo "test $2" @@ -107,7 +115,10 @@ init_suite() { print "> TEST_CASES_DIR: ${TEST_CASES_DIR} " print "> TEST_CONFIG_DIR: ${TEST_CONFIG_DIR} " print "> DOCKER_NETWORK: ${DOCKER_NETWORK} " - print "> PROFILES: ${PROFILES} " + print "> initial PROFILES: ${PROFILES} " + print "> (+) ADDON_PROFILES: ${ADDON_PROFILES} " + PROFILES="${PROFILES} ${ADDON_PROFILES}" + print "> (=) final PROFILES: ${PROFILES} " print "> ENV_FILE: ${ENV_FILE} " print "> COMMAND: ${COMMAND}" end "Initialized suite" @@ -239,25 +250,68 @@ wait_for_url_docker() { done } - +test_on_cluster() { + IFS=', ' read -r -a array <<< "$RABBITMQ_CLUSTER_NODES" + begin "Running against all nodes in cluster $RABBITMQ_CLUSTER_NODES :" + for item in "${array[@]}" + do + RMQ_HOST_0=${RABBITMQ_HOST:-rabbitmq:15672} + RMQ_HOST=$(rewrite_rabbitmq_hostname ${item} $RMQ_HOST_0) + PUBLIC_RMQ_HOST_0=${PUBLIC_RABBITMQ_HOST:-$RMQ_HOST} + PUBLIC_RMQ_HOST=$(rewrite_rabbitmq_hostname ${item} $PUBLIC_RMQ_HOST_0) + RMQ_URL=$(calculate_rabbitmq_url $PUBLIC_RMQ_HOST) + RMQ_HOSTNAME=${item} + _test $RMQ_HOST \ + $PUBLIC_RMQ_HOST \ + $RMQ_URL \ + $RMQ_HOSTNAME + TEST_RESULT=$? + if [ $TEST_RESULT -ne 0 ]; then + break + fi + done + end "Finishing running test ($TEST_RESULT)" +} +rewrite_rabbitmq_hostname() { + IFS=':' read -r -a array <<< "$2" + if [ "${array[0]}" == "rabbitmq" ]; then + echo "${2//rabbitmq/$1}" + else + echo "$2" + fi +} test() { + if [[ "$PROFILES" == *"cluster"* && ! 
-z "$RABBITMQ_CLUSTER_NODES" ]]; then + test_on_cluster + else + RMQ_HOST=${RABBITMQ_HOST:-rabbitmq:15672} + PUBLIC_RMQ_HOST=${PUBLIC_RABBITMQ_HOST:-$RMQ_HOST} + _test $RABBITMQ_HOST \ + $PUBLIC_RMQ_HOST \ + $(calculate_rabbitmq_url $PUBLIC_RMQ_HOST) \ + ${RABBITMQ_HOSTNAME:-rabbitmq} + fi +} + +_test() { + RMQ_HOST=$1 + PUBLIC_RMQ_HOST=$2 + RMQ_URL=$3 + RMQ_HOSTNAME=$4 + kill_container_if_exist mocha - begin "Running tests with env variables:" + begin "Running tests against $RMQ_HOSTNAME with these env variable:" - RABBITMQ_HOST=${RABBITMQ_HOST:-rabbitmq:15672} - PUBLIC_RABBITMQ_HOST=${PUBLIC_RABBITMQ_HOST:-$RABBITMQ_HOST} - RABBITMQ_URL=$(calculate_rabbitmq_url $PUBLIC_RABBITMQ_HOST) - RABBITMQ_HOSTNAME=${RABBITMQ_HOSTNAME:-rabbitmq} SELENIUM_TIMEOUT=${SELENIUM_TIMEOUT:-20000} SELENIUM_POLLING=${SELENIUM_POLLING:-500} print "> SELENIUM_TIMEOUT: ${SELENIUM_TIMEOUT}" print "> SELENIUM_POLLING: ${SELENIUM_POLLING}" - print "> RABBITMQ_HOST: ${RABBITMQ_HOST}" - print "> RABBITMQ_HOSTNAME: ${RABBITMQ_HOSTNAME}" - print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RABBITMQ_HOST}" + print "> RABBITMQ_HOST: ${RMQ_HOST}" + print "> RABBITMQ_HOSTNAME: ${RMQ_HOSTNAME}" + print "> PUBLIC_RABBITMQ_HOST: ${PUBLIC_RMQ_HOST}" print "> RABBITMQ_PATH: ${RABBITMQ_PATH}" - print "> RABBITMQ_URL: ${RABBITMQ_URL}" + print "> RABBITMQ_URL: ${RMQ_URL}" print "> UAA_URL: ${UAA_URL}" print "> FAKEPORTAL_URL: ${FAKEPORTAL_URL}" mocha_test_tag=($(md5sum $SELENIUM_ROOT_FOLDER/package.json)) @@ -270,8 +324,8 @@ test() { --rm \ --name mocha \ --net ${DOCKER_NETWORK} \ - --env RABBITMQ_URL=${RABBITMQ_URL} \ - --env RABBITMQ_HOSTNAME=${RABBITMQ_HOSTNAME} \ + --env RABBITMQ_URL=${RMQ_URL} \ + --env RABBITMQ_HOSTNAME=${RMQ_HOSTNAME} \ --env UAA_URL=${UAA_URL} \ --env FAKE_PORTAL_URL=${FAKEPORTAL_URL} \ --env RUN_LOCAL=false \ @@ -345,6 +399,9 @@ run_local_with() { if [[ "$COMMAND" == "start-rabbitmq" ]] then start_local_rabbitmq +elif [[ "$COMMAND" == "stop-rabbitmq" ]] + then + stop_local_rabbitmq elif [[ 
"$COMMAND" == "start-others" ]] then start_local_others @@ -354,6 +411,9 @@ run_local_with() { elif [[ "$COMMAND" == "stop-others" ]] then teardown_local_others + elif [[ "$COMMAND" == "stop-rabbitmq" ]] + then + stop_local_rabbitmq elif [[ "$COMMAND" =~ test[[:space:]]*([^[:space:]]*) ]] then test_local ${BASH_REMATCH[1]} @@ -466,13 +526,15 @@ start_components() { $start done } + teardown_components() { begin "Tear down ..." for i in "${REQUIRED_COMPONENTS[@]}" do local component="$i" + stop="stop_$i" + type "$stop" &>/dev/null && $stop || kill_container_if_exist "$component" print "Tear down $component" - kill_container_if_exist "$component" done end "Finished teardown" } @@ -481,8 +543,9 @@ save_components_logs() { for i in "${REQUIRED_COMPONENTS[@]}" do local component="$i" + save="save_logs_$i" + type "$save" &>/dev/null && $save || save_container_logs "$component" print "Saving logs for component $component" - save_container_logs "$component" done end "Finished saving logs" } diff --git a/deps/rabbitmq_management/selenium/fakeportal/app.js b/selenium/fakeportal/app.js similarity index 100% rename from deps/rabbitmq_management/selenium/fakeportal/app.js rename to selenium/fakeportal/app.js diff --git a/deps/rabbitmq_management/selenium/fakeportal/proxy.js b/selenium/fakeportal/proxy.js similarity index 100% rename from deps/rabbitmq_management/selenium/fakeportal/proxy.js rename to selenium/fakeportal/proxy.js diff --git a/deps/rabbitmq_management/selenium/fakeportal/views/rabbitmq.html b/selenium/fakeportal/views/rabbitmq.html similarity index 100% rename from deps/rabbitmq_management/selenium/fakeportal/views/rabbitmq.html rename to selenium/fakeportal/views/rabbitmq.html diff --git a/deps/rabbitmq_management/selenium/full-suite-authnz-messaging b/selenium/full-suite-authnz-messaging similarity index 100% rename from deps/rabbitmq_management/selenium/full-suite-authnz-messaging rename to selenium/full-suite-authnz-messaging diff --git 
a/deps/rabbitmq_management/selenium/full-suite-management-ui b/selenium/full-suite-management-ui similarity index 100% rename from deps/rabbitmq_management/selenium/full-suite-management-ui rename to selenium/full-suite-management-ui diff --git a/deps/rabbitmq_management/selenium/package.json b/selenium/package.json similarity index 86% rename from deps/rabbitmq_management/selenium/package.json rename to selenium/package.json index 465febe009f7..5021dc3ef122 100644 --- a/deps/rabbitmq_management/selenium/package.json +++ b/selenium/package.json @@ -6,14 +6,14 @@ "scripts": { "fakeportal": "node fakeportal/app.js", "fakeproxy": "node fakeportal/proxy.js", - "amqp10_roundtriptest": "eval $(cat $ENV_FILE ) &&./run-amqp10-roundtriptest", + "amqp10_roundtriptest": "eval $(cat $ENV_FILE ) && amqp10-roundtriptest/run", "test": " eval $(cat $ENV_FILE ) && mocha --recursive --trace-warnings --timeout 40000" }, "keywords": [], "author": "", "license": "ISC", "dependencies": { - "chromedriver": "^125.0.0", + "chromedriver": "^128.0.0", "ejs": "^3.1.8", "express": "^4.18.2", "geckodriver": "^3.0.2", diff --git a/deps/rabbitmq_management/selenium/run-suites.sh b/selenium/run-suites.sh similarity index 100% rename from deps/rabbitmq_management/selenium/run-suites.sh rename to selenium/run-suites.sh diff --git a/deps/rabbitmq_management/selenium/short-suite-management-ui b/selenium/short-suite-management-ui similarity index 100% rename from deps/rabbitmq_management/selenium/short-suite-management-ui rename to selenium/short-suite-management-ui diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-http-backends.sh b/selenium/suites/authnz-messaging/auth-cache-http-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-http-backends.sh rename to selenium/suites/authnz-messaging/auth-cache-http-backends.sh diff --git 
a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh b/selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh rename to selenium/suites/authnz-messaging/auth-cache-ldap-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-backend.sh b/selenium/suites/authnz-messaging/auth-http-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-backend.sh rename to selenium/suites/authnz-messaging/auth-http-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh b/selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh rename to selenium/suites/authnz-messaging/auth-http-internal-backends-with-internal.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends.sh b/selenium/suites/authnz-messaging/auth-http-internal-backends.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-http-internal-backends.sh rename to selenium/suites/authnz-messaging/auth-http-internal-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-backend.sh b/selenium/suites/authnz-messaging/auth-internal-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-backend.sh rename to selenium/suites/authnz-messaging/auth-internal-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-http-backends.sh b/selenium/suites/authnz-messaging/auth-internal-http-backends.sh similarity index 100% rename from 
deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-internal-http-backends.sh rename to selenium/suites/authnz-messaging/auth-internal-http-backends.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-ldap-backend.sh b/selenium/suites/authnz-messaging/auth-ldap-backend.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-ldap-backend.sh rename to selenium/suites/authnz-messaging/auth-ldap-backend.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh b/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh rename to selenium/suites/authnz-messaging/auth-oauth-backend-with-devproducer.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh b/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh rename to selenium/suites/authnz-messaging/auth-oauth-backend-with-prodproducer.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-behind-proxy.sh b/selenium/suites/authnz-mgt/basic-auth-behind-proxy.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-behind-proxy.sh rename to selenium/suites/authnz-mgt/basic-auth-behind-proxy.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh b/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh rename to selenium/suites/authnz-mgt/basic-auth-with-mgt-prefix.sh diff --git 
a/deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth.sh b/selenium/suites/authnz-mgt/basic-auth.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/basic-auth.sh rename to selenium/suites/authnz-mgt/basic-auth.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh b/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh rename to selenium/suites/authnz-mgt/multi-oauth-with-basic-auth-when-idps-down.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh b/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh rename to selenium/suites/authnz-mgt/multi-oauth-with-basic-auth.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh b/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh rename to selenium/suites/authnz-mgt/multi-oauth-without-basic-auth-and-resource-label-and-scopes.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh b/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh rename to selenium/suites/authnz-mgt/multi-oauth-without-basic-auth.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh b/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh similarity index 100% 
rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-and-basic-auth.sh rename to selenium/suites/authnz-mgt/oauth-and-basic-auth.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh b/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh rename to selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix-via-proxy.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh b/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh rename to selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-and-prefix.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh b/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh rename to selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa-via-proxy.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh b/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh rename to selenium/suites/authnz-mgt/oauth-idp-initiated-with-uaa.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh b/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh 
rename to selenium/suites/authnz-mgt/oauth-with-keycloak-with-verify-none.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh b/selenium/suites/authnz-mgt/oauth-with-keycloak.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-keycloak.sh rename to selenium/suites/authnz-mgt/oauth-with-keycloak.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh b/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh rename to selenium/suites/authnz-mgt/oauth-with-uaa-and-mgt-prefix.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh b/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh rename to selenium/suites/authnz-mgt/oauth-with-uaa-down-but-with-basic-auth.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh b/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa-down.sh rename to selenium/suites/authnz-mgt/oauth-with-uaa-down.sh diff --git a/deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh b/selenium/suites/authnz-mgt/oauth-with-uaa.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/authnz-mgt/oauth-with-uaa.sh rename to selenium/suites/authnz-mgt/oauth-with-uaa.sh diff --git a/deps/rabbitmq_management/selenium/suites/mgt/definitions.sh b/selenium/suites/mgt/definitions.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/mgt/definitions.sh rename to selenium/suites/mgt/definitions.sh diff --git 
a/deps/rabbitmq_management/selenium/suites/mgt/exchanges.sh b/selenium/suites/mgt/exchanges.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/mgt/exchanges.sh rename to selenium/suites/mgt/exchanges.sh diff --git a/deps/rabbitmq_management/selenium/suites/mgt/limits.sh b/selenium/suites/mgt/limits.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/mgt/limits.sh rename to selenium/suites/mgt/limits.sh diff --git a/deps/rabbitmq_management/selenium/suites/mgt/mgt-only-exchanges.sh b/selenium/suites/mgt/mgt-only-exchanges.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/mgt/mgt-only-exchanges.sh rename to selenium/suites/mgt/mgt-only-exchanges.sh diff --git a/deps/rabbitmq_management/selenium/suites/mgt/vhosts.sh b/selenium/suites/mgt/vhosts.sh similarity index 100% rename from deps/rabbitmq_management/selenium/suites/mgt/vhosts.sh rename to selenium/suites/mgt/vhosts.sh diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/advanced.auth-ldap.config b/selenium/test/authnz-msg-protocols/advanced.auth-ldap.config similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/advanced.auth-ldap.config rename to selenium/test/authnz-msg-protocols/advanced.auth-ldap.config diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js b/selenium/test/authnz-msg-protocols/amqp10.js similarity index 96% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js rename to selenium/test/authnz-msg-protocols/amqp10.js index 3a679bb21587..0901ae039ce3 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/amqp10.js +++ b/selenium/test/authnz-msg-protocols/amqp10.js @@ -1,5 +1,5 @@ const assert = require('assert') -const { getURLForProtocol, tokenFor, openIdConfiguration } = require('../utils') +const { tokenFor, openIdConfiguration } = require('../utils') const { reset, expectUser, 
expectVhost, expectResource, allow, verifyAll } = require('../mock_http_backend') const {execSync} = require('child_process') diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins b/selenium/test/authnz-msg-protocols/enabled_plugins similarity index 90% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins rename to selenium/test/authnz-msg-protocols/enabled_plugins index 59b57cb3828f..37e5fdfce132 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/enabled_plugins +++ b/selenium/test/authnz-msg-protocols/enabled_plugins @@ -12,5 +12,4 @@ rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, - rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, - rabbitmq_web_stomp_examples]. + rabbitmq_web_mqtt,rabbitmq_web_stomp]. diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.docker b/selenium/test/authnz-msg-protocols/env.auth-http.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.docker rename to selenium/test/authnz-msg-protocols/env.auth-http.docker diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.local b/selenium/test/authnz-msg-protocols/env.auth-http.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-http.local rename to selenium/test/authnz-msg-protocols/env.auth-http.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.docker b/selenium/test/authnz-msg-protocols/env.auth-ldap.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.docker rename to selenium/test/authnz-msg-protocols/env.auth-ldap.docker diff --git 
a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.local b/selenium/test/authnz-msg-protocols/env.auth-ldap.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-ldap.local rename to selenium/test/authnz-msg-protocols/env.auth-ldap.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker b/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker rename to selenium/test/authnz-msg-protocols/env.auth-oauth-dev.docker diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local b/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local rename to selenium/test/authnz-msg-protocols/env.auth-oauth-dev.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker b/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker rename to selenium/test/authnz-msg-protocols/env.auth-oauth-prod.docker diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local b/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local rename to selenium/test/authnz-msg-protocols/env.auth-oauth-prod.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.devkeycloak b/selenium/test/authnz-msg-protocols/env.docker.devkeycloak similarity index 100% rename from 
deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.devkeycloak rename to selenium/test/authnz-msg-protocols/env.docker.devkeycloak diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak b/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.docker.prodkeycloak rename to selenium/test/authnz-msg-protocols/env.docker.prodkeycloak diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.http-user b/selenium/test/authnz-msg-protocols/env.http-user similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.http-user rename to selenium/test/authnz-msg-protocols/env.http-user diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.internal-user b/selenium/test/authnz-msg-protocols/env.internal-user similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.internal-user rename to selenium/test/authnz-msg-protocols/env.internal-user diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.ldap-user b/selenium/test/authnz-msg-protocols/env.ldap-user similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.ldap-user rename to selenium/test/authnz-msg-protocols/env.ldap-user diff --git a/selenium/test/authnz-msg-protocols/env.local b/selenium/test/authnz-msg-protocols/env.local new file mode 100644 index 000000000000..69f43736edd4 --- /dev/null +++ b/selenium/test/authnz-msg-protocols/env.local @@ -0,0 +1 @@ +export IMPORT_DIR=test/authnz-msg-protocols/imports diff --git a/selenium/test/authnz-msg-protocols/env.local.devkeycloak b/selenium/test/authnz-msg-protocols/env.local.devkeycloak new file mode 100644 index 000000000000..8e5a2f2e9285 --- /dev/null +++ b/selenium/test/authnz-msg-protocols/env.local.devkeycloak @@ -0,0 +1,2 @@ 
+export DEVKEYCLOAK_URL=https://localhost:8442/realms/dev +export DEVKEYCLOAK_CA_CERT=test/multi-oauth/devkeycloak/ca_certificate.pem diff --git a/selenium/test/authnz-msg-protocols/env.local.prodkeycloak b/selenium/test/authnz-msg-protocols/env.local.prodkeycloak new file mode 100644 index 000000000000..c636bf8fcd55 --- /dev/null +++ b/selenium/test/authnz-msg-protocols/env.local.prodkeycloak @@ -0,0 +1,2 @@ +export PRODKEYCLOAK_URL=https://localhost:8443/realms/prod +export PRODKEYCLOAK_CA_CERT=test/multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-devproducer b/selenium/test/authnz-msg-protocols/env.oauth-devproducer similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-devproducer rename to selenium/test/authnz-msg-protocols/env.oauth-devproducer diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-prodproducer b/selenium/test/authnz-msg-protocols/env.oauth-prodproducer similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.oauth-prodproducer rename to selenium/test/authnz-msg-protocols/env.oauth-prodproducer diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json b/selenium/test/authnz-msg-protocols/imports/users.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/imports/users.json rename to selenium/test/authnz-msg-protocols/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/logging.conf b/selenium/test/authnz-msg-protocols/logging.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/logging.conf rename to selenium/test/authnz-msg-protocols/logging.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json 
b/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json rename to selenium/test/authnz-msg-protocols/mock-auth-backend-http/defaultExpectations.json diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-ldap/import.ldif b/selenium/test/authnz-msg-protocols/mock-auth-backend-ldap/import.ldif similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mock-auth-backend-ldap/import.ldif rename to selenium/test/authnz-msg-protocols/mock-auth-backend-ldap/import.ldif diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js b/selenium/test/authnz-msg-protocols/mqtt.js similarity index 96% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js rename to selenium/test/authnz-msg-protocols/mqtt.js index e71916003ef9..8a665c871834 100644 --- a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/mqtt.js +++ b/selenium/test/authnz-msg-protocols/mqtt.js @@ -1,5 +1,5 @@ const assert = require('assert') -const { getURLForProtocol, tokenFor, openIdConfiguration } = require('../utils') +const { tokenFor, openIdConfiguration } = require('../utils') const { reset, expectUser, expectVhost, expectResource, allow, verifyAll } = require('../mock_http_backend') const mqtt = require('mqtt'); diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth-http.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth-http.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth-http.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth-http.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth-ldap.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth-ldap.conf similarity 
index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth-ldap.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth-ldap.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-http.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-http.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-http.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-http.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-ldap.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-ldap.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-ldap.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-cache-ldap.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http-internal.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http-internal.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http-internal.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http-internal.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-http.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal-http.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal-http.conf similarity index 100% 
rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal-http.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal-http.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-internal.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-ldap.conf b/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-ldap.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-ldap.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.auth_backends-ldap.conf diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf b/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.backends-oauth.conf diff --git a/selenium/test/authnz-msg-protocols/rabbitmq.cluster.conf b/selenium/test/authnz-msg-protocols/rabbitmq.cluster.conf new file mode 100644 index 000000000000..144cc7ab05ae --- /dev/null +++ b/selenium/test/authnz-msg-protocols/rabbitmq.cluster.conf @@ -0,0 +1,6 @@ +cluster_name = rabbitmq-selenium + +cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config +cluster_formation.classic_config.nodes.1 = rabbit@rabbitmq +cluster_formation.classic_config.nodes.2 = rabbit@rabbitmq1 +cluster_formation.classic_config.nodes.3 = rabbit@rabbitmq2 diff --git 
a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.conf b/selenium/test/authnz-msg-protocols/rabbitmq.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/rabbitmq.conf rename to selenium/test/authnz-msg-protocols/rabbitmq.conf diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/ac-administrator-without-vhost-permissions.js b/selenium/test/basic-auth/ac-administrator-without-vhost-permissions.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/ac-administrator-without-vhost-permissions.js rename to selenium/test/basic-auth/ac-administrator-without-vhost-permissions.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/ac-management-without-vhost-permissions.js b/selenium/test/basic-auth/ac-management-without-vhost-permissions.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/ac-management-without-vhost-permissions.js rename to selenium/test/basic-auth/ac-management-without-vhost-permissions.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js b/selenium/test/basic-auth/ac-management.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/ac-management.js rename to selenium/test/basic-auth/ac-management.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/ac-monitoring-without-vhost-permissions.js b/selenium/test/basic-auth/ac-monitoring-without-vhost-permissions.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/ac-monitoring-without-vhost-permissions.js rename to selenium/test/basic-auth/ac-monitoring-without-vhost-permissions.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins b/selenium/test/basic-auth/enabled_plugins similarity index 90% rename from deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins rename to selenium/test/basic-auth/enabled_plugins 
index c91f7ba880c3..ea686b9f2b51 100644 --- a/deps/rabbitmq_management/selenium/test/multi-oauth/enabled_plugins +++ b/selenium/test/basic-auth/enabled_plugins @@ -12,5 +12,4 @@ rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, - rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, - rabbitmq_web_stomp_examples]. + rabbitmq_web_mqtt,rabbitmq_web_stomp]. diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/env.docker.proxy b/selenium/test/basic-auth/env.docker.proxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/env.docker.proxy rename to selenium/test/basic-auth/env.docker.proxy diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/env.local b/selenium/test/basic-auth/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/env.local rename to selenium/test/basic-auth/env.local diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/env.local.proxy b/selenium/test/basic-auth/env.local.proxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/env.local.proxy rename to selenium/test/basic-auth/env.local.proxy diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/happy-login.js b/selenium/test/basic-auth/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/happy-login.js rename to selenium/test/basic-auth/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/httpd-proxy/.htpasswd b/selenium/test/basic-auth/httpd-proxy/.htpasswd similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/httpd-proxy/.htpasswd rename to selenium/test/basic-auth/httpd-proxy/.htpasswd diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/httpd-proxy/httpd.conf 
b/selenium/test/basic-auth/httpd-proxy/httpd.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/httpd-proxy/httpd.conf rename to selenium/test/basic-auth/httpd-proxy/httpd.conf diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json b/selenium/test/basic-auth/imports/users.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/imports/users.json rename to selenium/test/basic-auth/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/landing.js b/selenium/test/basic-auth/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/landing.js rename to selenium/test/basic-auth/landing.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/logging.conf b/selenium/test/basic-auth/logging.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/logging.conf rename to selenium/test/basic-auth/logging.conf diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/logout.js b/selenium/test/basic-auth/logout.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/logout.js rename to selenium/test/basic-auth/logout.js diff --git a/selenium/test/basic-auth/rabbitmq.cluster.conf b/selenium/test/basic-auth/rabbitmq.cluster.conf new file mode 100644 index 000000000000..144cc7ab05ae --- /dev/null +++ b/selenium/test/basic-auth/rabbitmq.cluster.conf @@ -0,0 +1,6 @@ +cluster_name = rabbitmq-selenium + +cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config +cluster_formation.classic_config.nodes.1 = rabbit@rabbitmq +cluster_formation.classic_config.nodes.2 = rabbit@rabbitmq1 +cluster_formation.classic_config.nodes.3 = rabbit@rabbitmq2 diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf b/selenium/test/basic-auth/rabbitmq.conf similarity index 84% rename from 
deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf rename to selenium/test/basic-auth/rabbitmq.conf index f5e2add9f1af..7bacc14af27a 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.conf +++ b/selenium/test/basic-auth/rabbitmq.conf @@ -2,3 +2,5 @@ auth_backends.1 = rabbit_auth_backend_internal management.login_session_timeout = 1 load_definitions = ${IMPORT_DIR}/users.json + +loopback_users = none diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf b/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/rabbitmq.mgt-prefix.conf rename to selenium/test/basic-auth/rabbitmq.mgt-prefix.conf diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/session-expired.js b/selenium/test/basic-auth/session-expired.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/session-expired.js rename to selenium/test/basic-auth/session-expired.js diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/unauthorized.js b/selenium/test/basic-auth/unauthorized.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/basic-auth/unauthorized.js rename to selenium/test/basic-auth/unauthorized.js diff --git a/deps/rabbitmq_management/selenium/test/definitions/export.js b/selenium/test/definitions/export.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/definitions/export.js rename to selenium/test/definitions/export.js diff --git a/deps/rabbitmq_management/selenium/test/definitions/import-newguest-user.json b/selenium/test/definitions/import-newguest-user.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/definitions/import-newguest-user.json rename to selenium/test/definitions/import-newguest-user.json diff --git a/deps/rabbitmq_management/selenium/test/definitions/import.js b/selenium/test/definitions/import.js similarity 
index 100% rename from deps/rabbitmq_management/selenium/test/definitions/import.js rename to selenium/test/definitions/import.js diff --git a/selenium/test/env.cluster b/selenium/test/env.cluster new file mode 100644 index 000000000000..75b4e52bc939 --- /dev/null +++ b/selenium/test/env.cluster @@ -0,0 +1 @@ +export RABBITMQ_CLUSTER_NODES=rabbitmq,rabbitmq1,rabbitmq2 diff --git a/deps/rabbitmq_management/selenium/test/env.docker b/selenium/test/env.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/env.docker rename to selenium/test/env.docker diff --git a/deps/rabbitmq_management/selenium/test/env.local b/selenium/test/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/env.local rename to selenium/test/env.local diff --git a/deps/rabbitmq_management/selenium/test/env.tls.docker b/selenium/test/env.tls.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/env.tls.docker rename to selenium/test/env.tls.docker diff --git a/deps/rabbitmq_management/selenium/test/env.tls.local b/selenium/test/env.tls.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/env.tls.local rename to selenium/test/env.tls.local diff --git a/deps/rabbitmq_management/selenium/test/exchanges/management.js b/selenium/test/exchanges/management.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/exchanges/management.js rename to selenium/test/exchanges/management.js diff --git a/deps/rabbitmq_management/selenium/test/limits/users.js b/selenium/test/limits/users.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/limits/users.js rename to selenium/test/limits/users.js diff --git a/deps/rabbitmq_management/selenium/test/limits/virtual-hosts.js b/selenium/test/limits/virtual-hosts.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/limits/virtual-hosts.js rename to selenium/test/limits/virtual-hosts.js diff --git 
a/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins b/selenium/test/mgt-only/enabled_plugins similarity index 90% rename from deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins rename to selenium/test/mgt-only/enabled_plugins index ea2a6a29ba53..12c30741f785 100644 --- a/deps/rabbitmq_management/selenium/test/mgt-only/enabled_plugins +++ b/selenium/test/mgt-only/enabled_plugins @@ -12,5 +12,4 @@ rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, - rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, - rabbitmq_web_stomp_examples]. + rabbitmq_web_mqtt,rabbitmq_web_stomp]. diff --git a/deps/rabbitmq_management/selenium/test/mgt-only/imports/users.json b/selenium/test/mgt-only/imports/users.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/mgt-only/imports/users.json rename to selenium/test/mgt-only/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/mgt-only/logging.conf b/selenium/test/mgt-only/logging.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/mgt-only/logging.conf rename to selenium/test/mgt-only/logging.conf diff --git a/deps/rabbitmq_management/selenium/test/mgt-only/rabbitmq.conf b/selenium/test/mgt-only/rabbitmq.conf similarity index 89% rename from deps/rabbitmq_management/selenium/test/mgt-only/rabbitmq.conf rename to selenium/test/mgt-only/rabbitmq.conf index d82fa0963fd1..b41e3430727e 100644 --- a/deps/rabbitmq_management/selenium/test/mgt-only/rabbitmq.conf +++ b/selenium/test/mgt-only/rabbitmq.conf @@ -3,3 +3,5 @@ auth_backends.1 = rabbit_auth_backend_internal management.login_session_timeout = 150 management_agent.disable_metrics_collector = true load_definitions = ${RABBITMQ_TEST_DIR}/imports/users.json + +loopback_users = none diff --git a/deps/rabbitmq_management/selenium/test/mock_http_backend.js 
b/selenium/test/mock_http_backend.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/mock_http_backend.js rename to selenium/test/mock_http_backend.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/ca_certificate.pem b/selenium/test/multi-oauth/certs/ca_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/certs/ca_certificate.pem rename to selenium/test/multi-oauth/certs/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem b/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem rename to selenium/test/multi-oauth/certs/server_rabbitmq_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem b/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/certs/server_rabbitmq_key.pem rename to selenium/test/multi-oauth/certs/server_rabbitmq_key.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem b/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/ca_certificate.pem rename to selenium/test/multi-oauth/devkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/dev-realm.json b/selenium/test/multi-oauth/devkeycloak/dev-realm.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/dev-realm.json rename to selenium/test/multi-oauth/devkeycloak/dev-realm.json diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 
b/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 rename to selenium/test/multi-oauth/devkeycloak/server_devkeycloak.p12 diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem b/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem rename to selenium/test/multi-oauth/devkeycloak/server_devkeycloak_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem b/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem rename to selenium/test/multi-oauth/devkeycloak/server_devkeycloak_key.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins b/selenium/test/multi-oauth/enabled_plugins similarity index 90% rename from deps/rabbitmq_management/selenium/test/oauth/enabled_plugins rename to selenium/test/multi-oauth/enabled_plugins index c91f7ba880c3..ea686b9f2b51 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/enabled_plugins +++ b/selenium/test/multi-oauth/enabled_plugins @@ -12,5 +12,4 @@ rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, - rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, - rabbitmq_web_stomp_examples]. + rabbitmq_web_mqtt,rabbitmq_web_stomp]. 
diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker b/selenium/test/multi-oauth/env.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/env.docker rename to selenium/test/multi-oauth/env.docker diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.devkeycloak b/selenium/test/multi-oauth/env.docker.devkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.devkeycloak rename to selenium/test/multi-oauth/env.docker.devkeycloak diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.prodkeycloak b/selenium/test/multi-oauth/env.docker.prodkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/env.docker.prodkeycloak rename to selenium/test/multi-oauth/env.docker.prodkeycloak diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/env.local b/selenium/test/multi-oauth/env.local similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/env.local rename to selenium/test/multi-oauth/env.local diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.devkeycloak b/selenium/test/multi-oauth/env.local.devkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.devkeycloak rename to selenium/test/multi-oauth/env.local.devkeycloak diff --git a/deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.prodkeycloak b/selenium/test/multi-oauth/env.local.prodkeycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/authnz-msg-protocols/env.local.prodkeycloak rename to selenium/test/multi-oauth/env.local.prodkeycloak diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/imports/users.json b/selenium/test/multi-oauth/imports/users.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/imports/users.json 
rename to selenium/test/multi-oauth/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem b/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem rename to selenium/test/multi-oauth/prodkeycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/prod-realm.json b/selenium/test/multi-oauth/prodkeycloak/prod-realm.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/prod-realm.json rename to selenium/test/multi-oauth/prodkeycloak/prod-realm.json diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 b/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 rename to selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak.p12 diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem b/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem rename to selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem b/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem rename to selenium/test/multi-oauth/prodkeycloak/server_prodkeycloak_key.pem diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.basic-management.conf 
b/selenium/test/multi-oauth/rabbitmq.basic-management.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.basic-management.conf rename to selenium/test/multi-oauth/rabbitmq.basic-management.conf diff --git a/selenium/test/multi-oauth/rabbitmq.cluster.conf b/selenium/test/multi-oauth/rabbitmq.cluster.conf new file mode 100644 index 000000000000..144cc7ab05ae --- /dev/null +++ b/selenium/test/multi-oauth/rabbitmq.cluster.conf @@ -0,0 +1,6 @@ +cluster_name = rabbitmq-selenium + +cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config +cluster_formation.classic_config.nodes.1 = rabbit@rabbitmq +cluster_formation.classic_config.nodes.2 = rabbit@rabbitmq1 +cluster_formation.classic_config.nodes.3 = rabbit@rabbitmq2 diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf b/selenium/test/multi-oauth/rabbitmq.conf similarity index 98% rename from deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf rename to selenium/test/multi-oauth/rabbitmq.conf index a53547c10edf..81a8c55a9161 100644 --- a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.conf +++ b/selenium/test/multi-oauth/rabbitmq.conf @@ -46,3 +46,5 @@ management.oauth_resource_servers.2.oauth_client_id = rabbit_dev_mgt_ui management.oauth_resource_servers.3.id = rabbit_internal management.oauth_resource_servers.3.disabled = true + +loopback_users = none diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf b/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf rename to selenium/test/multi-oauth/rabbitmq.enable-basic-auth.conf diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.tls.conf b/selenium/test/multi-oauth/rabbitmq.tls.conf similarity index 100% rename from 
deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.tls.conf rename to selenium/test/multi-oauth/rabbitmq.tls.conf diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf b/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-label.conf rename to selenium/test/multi-oauth/rabbitmq.with-resource-label.conf diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf b/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf rename to selenium/test/multi-oauth/rabbitmq.with-resource-scopes.conf diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js b/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js rename to selenium/test/multi-oauth/with-basic-auth-idps-down/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js b/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js rename to selenium/test/multi-oauth/with-basic-auth-idps-down/landing.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/happy-login.js b/selenium/test/multi-oauth/with-basic-auth/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/happy-login.js rename to selenium/test/multi-oauth/with-basic-auth/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/landing.js 
b/selenium/test/multi-oauth/with-basic-auth/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/with-basic-auth/landing.js rename to selenium/test/multi-oauth/with-basic-auth/landing.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/happy-login.js b/selenium/test/multi-oauth/without-basic-auth/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/happy-login.js rename to selenium/test/multi-oauth/without-basic-auth/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/landing.js b/selenium/test/multi-oauth/without-basic-auth/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/multi-oauth/without-basic-auth/landing.js rename to selenium/test/multi-oauth/without-basic-auth/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/ca_certificate.pem b/selenium/test/oauth/certs/ca_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/certs/ca_certificate.pem rename to selenium/test/oauth/certs/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_certificate.pem b/selenium/test/oauth/certs/server_rabbitmq_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_certificate.pem rename to selenium/test/oauth/certs/server_rabbitmq_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_key.pem b/selenium/test/oauth/certs/server_rabbitmq_key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/certs/server_rabbitmq_key.pem rename to selenium/test/oauth/certs/server_rabbitmq_key.pem diff --git a/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins b/selenium/test/oauth/enabled_plugins similarity index 57% rename 
from deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins rename to selenium/test/oauth/enabled_plugins index c91f7ba880c3..8dbd7d6cbf63 100644 --- a/deps/rabbitmq_management/selenium/test/basic-auth/enabled_plugins +++ b/selenium/test/oauth/enabled_plugins @@ -4,13 +4,13 @@ rabbitmq_auth_backend_oauth2,rabbitmq_auth_mechanism_ssl,rabbitmq_aws, rabbitmq_consistent_hash_exchange,rabbitmq_event_exchange, rabbitmq_federation,rabbitmq_federation_management, - rabbitmq_jms_topic_exchange,rabbitmq_management,rabbitmq_management_agent, - rabbitmq_mqtt,rabbitmq_peer_discovery_aws,rabbitmq_peer_discovery_common, + rabbitmq_federation_prometheus,rabbitmq_jms_topic_exchange, + rabbitmq_management,rabbitmq_management_agent,rabbitmq_mqtt, + rabbitmq_peer_discovery_aws,rabbitmq_peer_discovery_common, rabbitmq_peer_discovery_consul,rabbitmq_peer_discovery_etcd, rabbitmq_peer_discovery_k8s,rabbitmq_prometheus,rabbitmq_random_exchange, rabbitmq_recent_history_exchange,rabbitmq_sharding,rabbitmq_shovel, - rabbitmq_shovel_management,rabbitmq_stomp,rabbitmq_stream, - rabbitmq_stream_common,rabbitmq_stream_management,rabbitmq_top, - rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, - rabbitmq_web_mqtt,rabbitmq_web_mqtt_examples,rabbitmq_web_stomp, - rabbitmq_web_stomp_examples]. + rabbitmq_shovel_management,rabbitmq_shovel_prometheus,rabbitmq_stomp, + rabbitmq_stream,rabbitmq_stream_common,rabbitmq_stream_management, + rabbitmq_top,rabbitmq_tracing,rabbitmq_trust_store,rabbitmq_web_dispatch, + rabbitmq_web_mqtt,rabbitmq_web_stomp]. 
diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker b/selenium/test/oauth/env.docker similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.docker rename to selenium/test/oauth/env.docker diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeportal b/selenium/test/oauth/env.docker.fakeportal similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeportal rename to selenium/test/oauth/env.docker.fakeportal diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeproxy b/selenium/test/oauth/env.docker.fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.docker.fakeproxy rename to selenium/test/oauth/env.docker.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.keycloak b/selenium/test/oauth/env.docker.keycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.docker.keycloak rename to selenium/test/oauth/env.docker.keycloak diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.docker.uaa b/selenium/test/oauth/env.docker.uaa similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.docker.uaa rename to selenium/test/oauth/env.docker.uaa diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.enabled_basic_auth b/selenium/test/oauth/env.enabled_basic_auth similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.enabled_basic_auth rename to selenium/test/oauth/env.enabled_basic_auth diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.fakeportal-oauth-provider b/selenium/test/oauth/env.fakeportal-oauth-provider similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.fakeportal-oauth-provider rename to selenium/test/oauth/env.fakeportal-oauth-provider diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.fakeproxy b/selenium/test/oauth/env.fakeproxy 
similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.fakeproxy rename to selenium/test/oauth/env.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.keycloak b/selenium/test/oauth/env.keycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.keycloak rename to selenium/test/oauth/env.keycloak diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.keycloak-oauth-provider b/selenium/test/oauth/env.keycloak-oauth-provider similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.keycloak-oauth-provider rename to selenium/test/oauth/env.keycloak-oauth-provider diff --git a/selenium/test/oauth/env.local b/selenium/test/oauth/env.local new file mode 100644 index 000000000000..80cfe7430e52 --- /dev/null +++ b/selenium/test/oauth/env.local @@ -0,0 +1 @@ +export OAUTH_SERVER_CONFIG_BASEDIR=selenium/test diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.fakeportal b/selenium/test/oauth/env.local.fakeportal similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.local.fakeportal rename to selenium/test/oauth/env.local.fakeportal diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.fakeproxy b/selenium/test/oauth/env.local.fakeproxy similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.local.fakeproxy rename to selenium/test/oauth/env.local.fakeproxy diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.keycloak b/selenium/test/oauth/env.local.keycloak similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.local.keycloak rename to selenium/test/oauth/env.local.keycloak diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.local.uaa b/selenium/test/oauth/env.local.uaa similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.local.uaa rename to selenium/test/oauth/env.local.uaa diff --git 
a/deps/rabbitmq_management/selenium/test/oauth/env.mgt-prefix b/selenium/test/oauth/env.mgt-prefix similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.mgt-prefix rename to selenium/test/oauth/env.mgt-prefix diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.uaa b/selenium/test/oauth/env.uaa similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.uaa rename to selenium/test/oauth/env.uaa diff --git a/deps/rabbitmq_management/selenium/test/oauth/env.uaa-oauth-provider b/selenium/test/oauth/env.uaa-oauth-provider similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/env.uaa-oauth-provider rename to selenium/test/oauth/env.uaa-oauth-provider diff --git a/deps/rabbitmq_management/selenium/test/oauth/imports/users.json b/selenium/test/oauth/imports/users.json similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/imports/users.json rename to selenium/test/oauth/imports/users.json diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem b/selenium/test/oauth/keycloak/ca_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/keycloak/ca_certificate.pem rename to selenium/test/oauth/keycloak/ca_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_certificate.pem b/selenium/test/oauth/keycloak/server_keycloak_certificate.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_certificate.pem rename to selenium/test/oauth/keycloak/server_keycloak_certificate.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_key.pem b/selenium/test/oauth/keycloak/server_keycloak_key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/keycloak/server_keycloak_key.pem rename to selenium/test/oauth/keycloak/server_keycloak_key.pem diff --git 
a/deps/rabbitmq_management/selenium/test/oauth/keycloak/signing-key.pem b/selenium/test/oauth/keycloak/signing-key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/keycloak/signing-key.pem rename to selenium/test/oauth/keycloak/signing-key.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json b/selenium/test/oauth/keycloak/test-realm.json similarity index 99% rename from deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json rename to selenium/test/oauth/keycloak/test-realm.json index c287be00464f..7e812c257494 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/keycloak/test-realm.json +++ b/selenium/test/oauth/keycloak/test-realm.json @@ -1468,8 +1468,8 @@ "enabled" : true, "alwaysDisplayInConsole" : false, "clientAuthenticatorType" : "client-secret", - "redirectUris" : [ "${RABBITMQ_SCHEME}://${RABBITMQ_HOST}${RABBITMQ_PATH}/*" ], - "webOrigins" : [ "+" ], + "redirectUris" : [ "*" ], + "webOrigins" : [ "*" ], "notBefore" : 0, "bearerOnly" : false, "consentRequired" : false, diff --git a/deps/rabbitmq_management/selenium/test/oauth/logging.conf b/selenium/test/oauth/logging.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/logging.conf rename to selenium/test/oauth/logging.conf diff --git a/selenium/test/oauth/rabbitmq.cluster.conf b/selenium/test/oauth/rabbitmq.cluster.conf new file mode 100644 index 000000000000..144cc7ab05ae --- /dev/null +++ b/selenium/test/oauth/rabbitmq.cluster.conf @@ -0,0 +1,6 @@ +cluster_name = rabbitmq-selenium + +cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config +cluster_formation.classic_config.nodes.1 = rabbit@rabbitmq +cluster_formation.classic_config.nodes.2 = rabbit@rabbitmq1 +cluster_formation.classic_config.nodes.3 = rabbit@rabbitmq2 diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf b/selenium/test/oauth/rabbitmq.conf similarity index 94% rename from 
deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf rename to selenium/test/oauth/rabbitmq.conf index d8534a9a1fe0..02b0227d4bf8 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.conf +++ b/selenium/test/oauth/rabbitmq.conf @@ -10,3 +10,6 @@ auth_oauth2.resource_server_id = rabbitmq auth_oauth2.preferred_username_claims.1 = user_name auth_oauth2.preferred_username_claims.2 = preferred_username auth_oauth2.preferred_username_claims.3 = email + + +loopback_users = none diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.enable-basic-auth.conf b/selenium/test/oauth/rabbitmq.enable-basic-auth.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.enable-basic-auth.conf rename to selenium/test/oauth/rabbitmq.enable-basic-auth.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf b/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.fakeportal-mgt-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.idp-initiated.conf b/selenium/test/oauth/rabbitmq.idp-initiated.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.idp-initiated.conf rename to selenium/test/oauth/rabbitmq.idp-initiated.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf b/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.keycloak-mgt-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf b/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf 
similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.keycloak-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf b/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.keycloak-verify-none-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.load-user-definitions.conf b/selenium/test/oauth/rabbitmq.load-user-definitions.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.load-user-definitions.conf rename to selenium/test/oauth/rabbitmq.load-user-definitions.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.mgt-prefix.conf b/selenium/test/oauth/rabbitmq.mgt-prefix.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.mgt-prefix.conf rename to selenium/test/oauth/rabbitmq.mgt-prefix.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.tls.conf b/selenium/test/oauth/rabbitmq.tls.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.tls.conf rename to selenium/test/oauth/rabbitmq.tls.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf b/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.uaa-mgt-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf b/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf similarity index 100% rename from 
deps/rabbitmq_management/selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf rename to selenium/test/oauth/rabbitmq.uaa-oauth-provider.conf diff --git a/deps/rabbitmq_management/selenium/test/oauth/uaa/log4j2.properties b/selenium/test/oauth/uaa/log4j2.properties similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/uaa/log4j2.properties rename to selenium/test/oauth/uaa/log4j2.properties diff --git a/deps/rabbitmq_management/selenium/test/oauth/uaa/signing-key.pem b/selenium/test/oauth/uaa/signing-key.pem similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/uaa/signing-key.pem rename to selenium/test/oauth/uaa/signing-key.pem diff --git a/deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml b/selenium/test/oauth/uaa/uaa.yml similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/uaa/uaa.yml rename to selenium/test/oauth/uaa/uaa.yml diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js b/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/happy-login.js rename to selenium/test/oauth/with-basic-auth-idp-down/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/landing.js b/selenium/test/oauth/with-basic-auth-idp-down/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-basic-auth-idp-down/landing.js rename to selenium/test/oauth/with-basic-auth-idp-down/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/happy-login.js b/selenium/test/oauth/with-basic-auth/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/happy-login.js rename to selenium/test/oauth/with-basic-auth/happy-login.js diff --git 
a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/landing.js b/selenium/test/oauth/with-basic-auth/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/landing.js rename to selenium/test/oauth/with-basic-auth/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/unauthorized.js b/selenium/test/oauth/with-basic-auth/unauthorized.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-basic-auth/unauthorized.js rename to selenium/test/oauth/with-basic-auth/unauthorized.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js b/selenium/test/oauth/with-idp-down/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-down/landing.js rename to selenium/test/oauth/with-idp-down/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated-via-proxy/happy-login.js b/selenium/test/oauth/with-idp-initiated-via-proxy/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated-via-proxy/happy-login.js rename to selenium/test/oauth/with-idp-initiated-via-proxy/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/happy-login.js b/selenium/test/oauth/with-idp-initiated/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/happy-login.js rename to selenium/test/oauth/with-idp-initiated/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/landing.js b/selenium/test/oauth/with-idp-initiated/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/landing.js rename to selenium/test/oauth/with-idp-initiated/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/logout.js 
b/selenium/test/oauth/with-idp-initiated/logout.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/logout.js rename to selenium/test/oauth/with-idp-initiated/logout.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/token-expires.js b/selenium/test/oauth/with-idp-initiated/token-expires.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/token-expires.js rename to selenium/test/oauth/with-idp-initiated/token-expires.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/unauthorized.js b/selenium/test/oauth/with-idp-initiated/unauthorized.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-idp-initiated/unauthorized.js rename to selenium/test/oauth/with-idp-initiated/unauthorized.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/happy-login.js b/selenium/test/oauth/with-multi-resources/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/happy-login.js rename to selenium/test/oauth/with-multi-resources/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/landing.js b/selenium/test/oauth/with-multi-resources/landing.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-multi-resources/landing.js rename to selenium/test/oauth/with-multi-resources/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js b/selenium/test/oauth/with-sp-initiated/happy-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/happy-login.js rename to selenium/test/oauth/with-sp-initiated/happy-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/landing.js b/selenium/test/oauth/with-sp-initiated/landing.js similarity 
index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/landing.js rename to selenium/test/oauth/with-sp-initiated/landing.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js b/selenium/test/oauth/with-sp-initiated/logout.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/logout.js rename to selenium/test/oauth/with-sp-initiated/logout.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/redirection-after-login.js b/selenium/test/oauth/with-sp-initiated/redirection-after-login.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/redirection-after-login.js rename to selenium/test/oauth/with-sp-initiated/redirection-after-login.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js b/selenium/test/oauth/with-sp-initiated/token-refresh.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/token-refresh.js rename to selenium/test/oauth/with-sp-initiated/token-refresh.js diff --git a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js b/selenium/test/oauth/with-sp-initiated/unauthorized.js similarity index 97% rename from deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js rename to selenium/test/oauth/with-sp-initiated/unauthorized.js index 846f2f91f158..5a81f6e18a06 100644 --- a/deps/rabbitmq_management/selenium/test/oauth/with-sp-initiated/unauthorized.js +++ b/selenium/test/oauth/with-sp-initiated/unauthorized.js @@ -47,7 +47,7 @@ describe('An user without management tag', function () { }) it('should get redirected to home page again without error message', async function(){ - await homePage.isLoaded() + await driver.sleep(250) const visible = await homePage.isWarningVisible() assert.ok(!visible) }) diff --git 
a/deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js b/selenium/test/pageobjects/AdminTab.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/AdminTab.js rename to selenium/test/pageobjects/AdminTab.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js b/selenium/test/pageobjects/BasePage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/BasePage.js rename to selenium/test/pageobjects/BasePage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/ExchangePage.js b/selenium/test/pageobjects/ExchangePage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/ExchangePage.js rename to selenium/test/pageobjects/ExchangePage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/ExchangesPage.js b/selenium/test/pageobjects/ExchangesPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/ExchangesPage.js rename to selenium/test/pageobjects/ExchangesPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/FakePortalPage.js b/selenium/test/pageobjects/FakePortalPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/FakePortalPage.js rename to selenium/test/pageobjects/FakePortalPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/KeycloakLoginPage.js b/selenium/test/pageobjects/KeycloakLoginPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/KeycloakLoginPage.js rename to selenium/test/pageobjects/KeycloakLoginPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js b/selenium/test/pageobjects/LimitsAdminTab.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/LimitsAdminTab.js rename to selenium/test/pageobjects/LimitsAdminTab.js diff --git 
a/deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js b/selenium/test/pageobjects/LoginPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/LoginPage.js rename to selenium/test/pageobjects/LoginPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/OverviewPage.js b/selenium/test/pageobjects/OverviewPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/OverviewPage.js rename to selenium/test/pageobjects/OverviewPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js b/selenium/test/pageobjects/SSOHomePage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/SSOHomePage.js rename to selenium/test/pageobjects/SSOHomePage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js b/selenium/test/pageobjects/UAALoginPage.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/UAALoginPage.js rename to selenium/test/pageobjects/UAALoginPage.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/VhostAdminTab.js b/selenium/test/pageobjects/VhostAdminTab.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/VhostAdminTab.js rename to selenium/test/pageobjects/VhostAdminTab.js diff --git a/deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js b/selenium/test/pageobjects/VhostsAdminTab.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/pageobjects/VhostsAdminTab.js rename to selenium/test/pageobjects/VhostsAdminTab.js diff --git a/deps/rabbitmq_management/selenium/test/utils.js b/selenium/test/utils.js similarity index 91% rename from deps/rabbitmq_management/selenium/test/utils.js rename to selenium/test/utils.js index efa9a5196c95..c71ab1a13d7e 100644 --- a/deps/rabbitmq_management/selenium/test/utils.js +++ b/selenium/test/utils.js @@ -9,13 +9,20 @@ const 
KeycloakLoginPage = require('./pageobjects/KeycloakLoginPage') const assert = require('assert') const uaaUrl = process.env.UAA_URL || 'http://localhost:8080' -const baseUrl = process.env.RABBITMQ_URL || 'http://localhost:15672/' +const baseUrl = randomly_pick_baseurl(process.env.RABBITMQ_URL) || 'http://localhost:15672/' const hostname = process.env.RABBITMQ_HOSTNAME || 'localhost' const runLocal = String(process.env.RUN_LOCAL).toLowerCase() != 'false' const seleniumUrl = process.env.SELENIUM_URL || 'http://selenium:4444' const screenshotsDir = process.env.SCREENSHOTS_DIR || '/screens' const profiles = process.env.PROFILES || '' +function randomly_pick_baseurl(baseUrl) { + urls = baseUrl.split(",") + return urls[getRandomInt(urls.length)] +} +function getRandomInt(max) { + return Math.floor(Math.random() * max); +} class CaptureScreenshot { driver test @@ -49,8 +56,14 @@ module.exports = { if (!runLocal) { builder = builder.usingServer(seleniumUrl) } - var chromeCapabilities = Capabilities.chrome(); + let chromeCapabilities = Capabilities.chrome(); chromeCapabilities.setAcceptInsecureCerts(true); + chromeCapabilities.set('goog:chromeOptions', { + args: [ + "--lang=en", + "--disable-search-engine-choice-screen" + ] + }); driver = builder .forBrowser('chrome') .withCapabilities(chromeCapabilities) diff --git a/deps/rabbitmq_management/selenium/test/vhosts/admin-vhosts.js b/selenium/test/vhosts/admin-vhosts.js similarity index 100% rename from deps/rabbitmq_management/selenium/test/vhosts/admin-vhosts.js rename to selenium/test/vhosts/admin-vhosts.js