From b675ce29f022bc9d46f20ef32e065b0bb9684c8b Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Fri, 6 Sep 2024 16:49:54 +0100 Subject: [PATCH 01/33] Shutdown peer QQ FSMs on connected nodes on force-shrink execution for cluster wide consistency, ensuring only the leader is active/running --- deps/rabbit/src/rabbit_quorum_queue.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index f936891d056..e609d727d4f 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -1376,6 +1376,7 @@ delete_member(Q, Node) when ?amqqueue_is_quorum(Q) -> _ = rabbit_amqqueue:update(QName, Fun), case ra:force_delete_server(?RA_SYSTEM, ServerId) of ok -> + rabbit_log:info("Deleted a replica of quorum ~ts on node ~ts", [rabbit_misc:rs(QName), Node]), ok; {error, {badrpc, nodedown}} -> ok; @@ -1957,6 +1958,7 @@ force_shrink_member_to_current_member(VHost, Name) -> case rabbit_amqqueue:lookup(QName) of {ok, Q} when ?is_amqqueue(Q) -> {RaName, _} = amqqueue:get_pid(Q), + OtherNodes = lists:delete(Node, get_nodes(Q)), ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}), Fun = fun (Q0) -> TS0 = amqqueue:get_type_state(Q0), @@ -1964,6 +1966,7 @@ force_shrink_member_to_current_member(VHost, Name) -> amqqueue:set_type_state(Q, TS) end, _ = rabbit_amqqueue:update(QName, Fun), + _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes], rabbit_log:warning("Disaster recovery procedure: shrinking finished"); _ -> rabbit_log:warning("Disaster recovery procedure: shrinking failed, queue ~p not found at vhost ~p", [Name, VHost]), @@ -1976,6 +1979,7 @@ force_all_queues_shrink_member_to_current_member() -> _ = [begin QName = amqqueue:get_name(Q), {RaName, _} = amqqueue:get_pid(Q), + OtherNodes = lists:delete(Node, get_nodes(Q)), rabbit_log:warning("Disaster recovery procedure: shrinking queue ~p", [QName]), ok = 
ra_server_proc:force_shrink_members_to_current_member({RaName, Node}), Fun = fun (QQ) -> @@ -1983,7 +1987,8 @@ force_all_queues_shrink_member_to_current_member() -> TS = TS0#{nodes => [Node]}, amqqueue:set_type_state(QQ, TS) end, - _ = rabbit_amqqueue:update(QName, Fun) + _ = rabbit_amqqueue:update(QName, Fun), + _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes] end || Q <- rabbit_amqqueue:list(), amqqueue:get_type(Q) == ?MODULE], rabbit_log:warning("Disaster recovery procedure: shrinking finished"), ok. From 60ee35ea7e269dca3eecc84a68fd1a5feaa64ec2 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Tue, 1 Oct 2024 15:16:16 +0100 Subject: [PATCH 02/33] QQ tests for force-shrink to current member operations --- deps/rabbit/test/quorum_queue_SUITE.erl | 83 ++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 0643842bf51..5c625fa26eb 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -92,7 +92,9 @@ groups() -> format, add_member_2, single_active_consumer_priority_take_over, - single_active_consumer_priority + single_active_consumer_priority, + force_shrink_member_to_current_member, + force_all_queues_shrink_member_to_current_member ] ++ all_tests()}, {cluster_size_5, [], [start_queue, @@ -1151,6 +1153,85 @@ single_active_consumer_priority(Config) -> rpc:call(Server0, ra, local_query, [RaNameQ3, QueryFun])), ok. 
+force_shrink_member_to_current_member(Config) -> + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + QQ = ?config(queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + RaName = ra_name(QQ), + rabbit_ct_client_helpers:publish(Ch, QQ, 3), + wait_for_messages_ready([Server0], RaName, 3), + + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [QQ, <<"/">>]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(3, length(Nodes0)), + + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue, + force_shrink_member_to_current_member, [<<"/">>, QQ]), + + wait_for_messages_ready([Server0], RaName, 3), + + {ok, Q1} = rpc:call(Server0, rabbit_amqqueue, lookup, [QQ, <<"/">>]), + #{nodes := Nodes1} = amqqueue:get_type_state(Q1), + ?assertEqual(1, length(Nodes1)), + + %% grow queues back to all nodes + [rpc:call(Server0, rabbit_quorum_queue, grow, [S, <<"/">>, <<".*">>, all]) || S <- [Server1, Server2]], + + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q2} = rpc:call(Server0, rabbit_amqqueue, lookup, [QQ, <<"/">>]), + #{nodes := Nodes2} = amqqueue:get_type_state(Q2), + ?assertEqual(3, length(Nodes2)). 
+ +force_all_queues_shrink_member_to_current_member(Config) -> + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch = rabbit_ct_client_helpers:open_channel(Config, Server0), + QQ = ?config(queue_name, Config), + AQ = ?config(alt_queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?assertEqual({'queue.declare_ok', AQ, 0, 0}, + declare(Ch, AQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + QQs = [QQ, AQ], + + [begin + RaName = ra_name(Q), + rabbit_ct_client_helpers:publish(Ch, Q, 3), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [Q, <<"/">>]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(3, length(Nodes0)) + end || Q <- QQs], + + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue, + force_all_queues_shrink_member_to_current_member, []), + + [begin + RaName = ra_name(Q), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [Q, <<"/">>]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(1, length(Nodes0)) + end || Q <- QQs], + + %% grow queues back to all nodes + [rpc:call(Server0, rabbit_quorum_queue, grow, [S, <<"/">>, <<".*">>, all]) || S <- [Server1, Server2]], + + [begin + RaName = ra_name(Q), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [Q, <<"/">>]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(3, length(Nodes0)) + end || Q <- QQs]. 
+ priority_queue_fifo(Config) -> %% testing: if hi priority messages are published before lo priority %% messages they are always consumed first (fifo) From c26aa3b1c76dd82fb9e67852b6a2f030a92bf7cc Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Wed, 2 Oct 2024 11:28:41 +0100 Subject: [PATCH 03/33] Implement force_vhost_queues_shrink_member_to_current_member/1 --- deps/rabbit/src/rabbit_quorum_queue.erl | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index e609d727d4f..7902bd1258a 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -74,6 +74,7 @@ -export([validate_policy/1, merge_policy_value/3]). -export([force_shrink_member_to_current_member/2, + force_vhost_queues_shrink_member_to_current_member/1, force_all_queues_shrink_member_to_current_member/0]). %% for backwards compatibility @@ -1973,8 +1974,17 @@ force_shrink_member_to_current_member(VHost, Name) -> {error, not_found} end. +force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) -> + rabbit_log:warning("Disaster recovery procedure: shrinking all quorum queues in vhost ~tp to a single node cluster", [VHost]), + ListQQs = fun() -> rabbit_amqqueue:list(VHost) end, + force_all_queues_shrink_member_to_current_member(ListQQs). + force_all_queues_shrink_member_to_current_member() -> rabbit_log:warning("Disaster recovery procedure: shrinking all quorum queues to a single node cluster"), + ListQQs = fun() -> rabbit_amqqueue:list() end, + force_all_queues_shrink_member_to_current_member(ListQQs). 
+ +force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(ListQQFun) -> Node = node(), _ = [begin QName = amqqueue:get_name(Q), @@ -1989,7 +1999,7 @@ force_all_queues_shrink_member_to_current_member() -> end, _ = rabbit_amqqueue:update(QName, Fun), _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes] - end || Q <- rabbit_amqqueue:list(), amqqueue:get_type(Q) == ?MODULE], + end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE], rabbit_log:warning("Disaster recovery procedure: shrinking finished"), ok. From de0c0dbd89b7c278a1145833cbeda6b7d3de34eb Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Wed, 2 Oct 2024 12:28:18 +0100 Subject: [PATCH 04/33] Add test for QQ force_vhost_queues_shrink_member_to_current_member/1 --- deps/rabbit/test/quorum_queue_SUITE.erl | 69 ++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index 5c625fa26eb..ff2ebac93de 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -94,7 +94,8 @@ groups() -> single_active_consumer_priority_take_over, single_active_consumer_priority, force_shrink_member_to_current_member, - force_all_queues_shrink_member_to_current_member + force_all_queues_shrink_member_to_current_member, + force_vhost_queues_shrink_member_to_current_member ] ++ all_tests()}, {cluster_size_5, [], [start_queue, @@ -1232,6 +1233,72 @@ force_all_queues_shrink_member_to_current_member(Config) -> ?assertEqual(3, length(Nodes0)) end || Q <- QQs]. 
+force_vhost_queues_shrink_member_to_current_member(Config) -> + [Server0, Server1, Server2] = + rabbit_ct_broker_helpers:get_node_configs(Config, nodename), + + Ch0 = rabbit_ct_client_helpers:open_channel(Config, Server0), + QQ = ?config(queue_name, Config), + AQ = ?config(alt_queue_name, Config), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch0, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?assertEqual({'queue.declare_ok', AQ, 0, 0}, + declare(Ch0, AQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + QQs = [QQ, AQ], + + VHost1 = <<"/">>, + VHost2 = <<"another-vhost">>, + VHosts = [VHost1, VHost2], + + User = ?config(rmq_username, Config), + ok = rabbit_ct_broker_helpers:add_vhost(Config, Server0, VHost2, User), + ok = rabbit_ct_broker_helpers:set_full_permissions(Config, User, VHost2), + Conn1 = rabbit_ct_client_helpers:open_unmanaged_connection(Config, Server0, VHost2), + {ok, Ch1} = amqp_connection:open_channel(Conn1), + ?assertEqual({'queue.declare_ok', QQ, 0, 0}, + declare(Ch1, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + ?assertEqual({'queue.declare_ok', AQ, 0, 0}, + declare(Ch1, AQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}])), + + [rabbit_ct_client_helpers:publish(Ch, Q, 3) || Q <- QQs, Ch <- [Ch0, Ch1]], + + [begin + QQRes = rabbit_misc:r(VHost, queue, Q), + {ok, RaName} = rpc:call(Server0, rabbit_queue_type_util, qname_to_internal_name, [QQRes]), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [Q, VHost]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(3, length(Nodes0)) + end || Q <- QQs, VHost <- VHosts], + + rabbit_ct_broker_helpers:rpc(Config, 0, rabbit_quorum_queue, + force_vhost_queues_shrink_member_to_current_member, [VHost2]), + + [begin + QQRes = rabbit_misc:r(VHost, queue, Q), + {ok, RaName} = rpc:call(Server0, rabbit_queue_type_util, qname_to_internal_name, [QQRes]), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = 
rpc:call(Server0, rabbit_amqqueue, lookup, [Q, VHost]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + case VHost of + VHost1 -> ?assertEqual(3, length(Nodes0)); + VHost2 -> ?assertEqual(1, length(Nodes0)) + end + end || Q <- QQs, VHost <- VHosts], + + %% grow queues back to all nodes in VHost2 only + [rpc:call(Server0, rabbit_quorum_queue, grow, [S, VHost2, <<".*">>, all]) || S <- [Server1, Server2]], + + [begin + QQRes = rabbit_misc:r(VHost, queue, Q), + {ok, RaName} = rpc:call(Server0, rabbit_queue_type_util, qname_to_internal_name, [QQRes]), + wait_for_messages_ready([Server0], RaName, 3), + {ok, Q0} = rpc:call(Server0, rabbit_amqqueue, lookup, [Q, VHost]), + #{nodes := Nodes0} = amqqueue:get_type_state(Q0), + ?assertEqual(3, length(Nodes0)) + end || Q <- QQs, VHost <- VHosts]. + priority_queue_fifo(Config) -> %% testing: if hi priority messages are published before lo priority %% messages they are always consumed first (fifo) From dd5ec3ccc0715ac47c2dc2f82191263bfc860204 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Thu, 3 Oct 2024 10:57:11 +0100 Subject: [PATCH 05/33] Update QQ force-shrink logging --- deps/rabbit/src/rabbit_quorum_queue.erl | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index 7902bd1258a..94e2148aec3 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -1953,9 +1953,10 @@ notify_decorators(QName, F, A) -> is_stateful() -> true. 
force_shrink_member_to_current_member(VHost, Name) -> - rabbit_log:warning("Disaster recovery procedure: shrinking ~p queue at vhost ~p to a single node cluster", [Name, VHost]), Node = node(), QName = rabbit_misc:r(VHost, queue, Name), + QNameFmt = rabbit_misc:rs(QName), + rabbit_log:warning("Shrinking ~ts to a single node: ~ts", [QNameFmt, Node]), case rabbit_amqqueue:lookup(QName) of {ok, Q} when ?is_amqqueue(Q) -> {RaName, _} = amqqueue:get_pid(Q), @@ -1968,19 +1969,19 @@ force_shrink_member_to_current_member(VHost, Name) -> end, _ = rabbit_amqqueue:update(QName, Fun), _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes], - rabbit_log:warning("Disaster recovery procedure: shrinking finished"); + rabbit_log:warning("Shrinking ~ts finished", [QNameFmt]); _ -> - rabbit_log:warning("Disaster recovery procedure: shrinking failed, queue ~p not found at vhost ~p", [Name, VHost]), + rabbit_log:warning("Shrinking failed, ~ts not found", [QNameFmt]), {error, not_found} end. force_vhost_queues_shrink_member_to_current_member(VHost) when is_binary(VHost) -> - rabbit_log:warning("Disaster recovery procedure: shrinking all quorum queues in vhost ~tp to a single node cluster", [VHost]), + rabbit_log:warning("Shrinking all quorum queues in vhost '~ts' to a single node: ~ts", [VHost, node()]), ListQQs = fun() -> rabbit_amqqueue:list(VHost) end, force_all_queues_shrink_member_to_current_member(ListQQs). force_all_queues_shrink_member_to_current_member() -> - rabbit_log:warning("Disaster recovery procedure: shrinking all quorum queues to a single node cluster"), + rabbit_log:warning("Shrinking all quorum queues to a single node: ~ts", [node()]), ListQQs = fun() -> rabbit_amqqueue:list() end, force_all_queues_shrink_member_to_current_member(ListQQs). 
@@ -1990,7 +1991,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis QName = amqqueue:get_name(Q), {RaName, _} = amqqueue:get_pid(Q), OtherNodes = lists:delete(Node, get_nodes(Q)), - rabbit_log:warning("Disaster recovery procedure: shrinking queue ~p", [QName]), + rabbit_log:warning("Shrinking queue ~ts to a single node: ~ts", [rabbit_misc:rs(QName), Node]), ok = ra_server_proc:force_shrink_members_to_current_member({RaName, Node}), Fun = fun (QQ) -> TS0 = amqqueue:get_type_state(QQ), @@ -2000,7 +2001,7 @@ force_all_queues_shrink_member_to_current_member(ListQQFun) when is_function(Lis _ = rabbit_amqqueue:update(QName, Fun), _ = [ra:force_delete_server(?RA_SYSTEM, {RaName, N}) || N <- OtherNodes] end || Q <- ListQQFun(), amqqueue:get_type(Q) == ?MODULE], - rabbit_log:warning("Disaster recovery procedure: shrinking finished"), + rabbit_log:warning("Shrinking finished"), ok. is_minority(All, Up) -> From 94943bbe5e6e790cd64888e4b801a23f433310f7 Mon Sep 17 00:00:00 2001 From: Luke Bakken Date: Wed, 2 Oct 2024 08:22:44 -0700 Subject: [PATCH 06/33] Use `
` tag when collecting text --- .github/DISCUSSION_TEMPLATE/questions.yml | 65 ++++++++++++++++++++--- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/.github/DISCUSSION_TEMPLATE/questions.yml b/.github/DISCUSSION_TEMPLATE/questions.yml index 79edea07647..b15d2f4a737 100644 --- a/.github/DISCUSSION_TEMPLATE/questions.yml +++ b/.github/DISCUSSION_TEMPLATE/questions.yml @@ -72,7 +72,14 @@ body: id: diagnostics_status attributes: label: rabbitmq-diagnostics status output - value: See https://www.rabbitmq.com/docs/cli to learn how to use rabbitmq-diagnostics + value: | + See https://www.rabbitmq.com/docs/cli to learn how to use rabbitmq-diagnostics +
+ + ``` + # PASTE OUTPUT HERE, BETWEEN BACKTICKS + ``` +
validations: required: true - type: textarea @@ -80,7 +87,14 @@ body: attributes: label: Logs from node 1 (with sensitive values edited out) description: Relevant RabbitMQ logs with sensitive values edited out - value: See https://www.rabbitmq.com/docs/logging to learn how to collect logs + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
validations: required: true - type: textarea @@ -88,7 +102,14 @@ body: attributes: label: Logs from node 2 (if applicable, with sensitive values edited out) description: Relevant RabbitMQ logs with sensitive values edited out - value: See https://www.rabbitmq.com/docs/logging to learn how to collect logs + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
validations: required: false - type: textarea @@ -96,7 +117,14 @@ body: attributes: label: Logs from node 3 (if applicable, with sensitive values edited out) description: Relevant RabbitMQ logs with sensitive values edited out - value: See https://www.rabbitmq.com/docs/logging to learn how to collect logs + value: | + See https://www.rabbitmq.com/docs/logging to learn how to collect logs +
+ + ``` + # PASTE LOG HERE, BETWEEN BACKTICKS + ``` +
validations: required: false - type: textarea @@ -104,7 +132,14 @@ body: attributes: label: rabbitmq.conf description: rabbitmq.conf contents - value: See https://www.rabbitmq.com/docs/configure#config-location to learn how to find rabbitmq.conf file location + value: | + See https://www.rabbitmq.com/docs/configure#config-location to learn how to find rabbitmq.conf file location +
+ + ``` + # PASTE rabbitmq.conf HERE, BETWEEN BACKTICKS + ``` +
validations: required: true - type: textarea @@ -126,7 +161,14 @@ body: attributes: label: advanced.config description: advanced.config contents (if applicable) - value: See https://www.rabbitmq.com/docs/configure#config-location to learn how to find advanced.config file location + value: | + See https://www.rabbitmq.com/docs/configure#config-location to learn how to find advanced.config file location +
+ + ``` + # PASTE advanced.config HERE, BETWEEN BACKTICKS + ``` +
validations: required: false - type: textarea @@ -135,9 +177,12 @@ body: label: Application code description: Relevant messaging-related parts of application code value: | +
+ ```python - # relevant messaging-related parts of your code go here + # PASTE CODE HERE, BETWEEN BACKTICKS ``` +
validations: required: false - type: textarea @@ -146,8 +191,12 @@ body: label: Kubernetes deployment file description: Kubernetes deployment YAML that demonstrates how RabbitMQ is deployed (if applicable) value: | +
+ ```yaml # Relevant parts of K8S deployment that demonstrate how RabbitMQ is deployed + # PASTE YAML HERE, BETWEEN BACKTICKS ``` +
validations: - required: false \ No newline at end of file + required: false From ff53cd8623e19c23b1e8c1c16bdc0bd3f0c45e8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Oct 2024 18:28:46 +0000 Subject: [PATCH 07/33] build(deps): bump google-github-actions/auth from 2.1.5 to 2.1.6 Bumps [google-github-actions/auth](https://github.com/google-github-actions/auth) from 2.1.5 to 2.1.6. - [Release notes](https://github.com/google-github-actions/auth/releases) - [Changelog](https://github.com/google-github-actions/auth/blob/main/CHANGELOG.md) - [Commits](https://github.com/google-github-actions/auth/compare/v2.1.5...v2.1.6) --- updated-dependencies: - dependency-name: google-github-actions/auth dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/rabbitmq_peer_discovery_aws.yaml | 2 +- .github/workflows/test-authnz.yaml | 2 +- .github/workflows/test-management-ui-for-pr.yaml | 2 +- .github/workflows/test-management-ui.yaml | 2 +- .github/workflows/test-mixed-versions.yaml | 2 +- .github/workflows/test-plugin-mixed.yaml | 2 +- .github/workflows/test-plugin.yaml | 2 +- .github/workflows/test.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/rabbitmq_peer_discovery_aws.yaml b/.github/workflows/rabbitmq_peer_discovery_aws.yaml index e6199e3a2ac..4550510131f 100644 --- a/.github/workflows/rabbitmq_peer_discovery_aws.yaml +++ b/.github/workflows/rabbitmq_peer_discovery_aws.yaml @@ -66,7 +66,7 @@ jobs: ecs-cli --version - name: AUTHENTICATE TO GOOGLE CLOUD if: steps.authorized.outputs.authorized == 'true' - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL diff --git a/.github/workflows/test-authnz.yaml b/.github/workflows/test-authnz.yaml index 37ee55edc02..2b0342b0382 
100644 --- a/.github/workflows/test-authnz.yaml +++ b/.github/workflows/test-authnz.yaml @@ -58,7 +58,7 @@ jobs: https://cdn.jsdelivr.net/hex - name: Authenticate To Google Cloud - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} diff --git a/.github/workflows/test-management-ui-for-pr.yaml b/.github/workflows/test-management-ui-for-pr.yaml index bff96254f4e..98ec573b739 100644 --- a/.github/workflows/test-management-ui-for-pr.yaml +++ b/.github/workflows/test-management-ui-for-pr.yaml @@ -38,7 +38,7 @@ jobs: https://cdn.jsdelivr.net/hex - name: Authenticate To Google Cloud - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} diff --git a/.github/workflows/test-management-ui.yaml b/.github/workflows/test-management-ui.yaml index 4ca3bcd0194..b05a80cb4e9 100644 --- a/.github/workflows/test-management-ui.yaml +++ b/.github/workflows/test-management-ui.yaml @@ -52,7 +52,7 @@ jobs: https://cdn.jsdelivr.net/hex - name: Authenticate To Google Cloud - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} diff --git a/.github/workflows/test-mixed-versions.yaml b/.github/workflows/test-mixed-versions.yaml index d287d8e437e..f79c4bce883 100644 --- a/.github/workflows/test-mixed-versions.yaml +++ b/.github/workflows/test-mixed-versions.yaml @@ -74,7 +74,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: BUILD SECONDARY UMBRELLA ARCHIVE diff --git a/.github/workflows/test-plugin-mixed.yaml b/.github/workflows/test-plugin-mixed.yaml index 7d3b1dba628..edffefaaeea 100644 --- 
a/.github/workflows/test-plugin-mixed.yaml +++ b/.github/workflows/test-plugin-mixed.yaml @@ -51,7 +51,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL diff --git a/.github/workflows/test-plugin.yaml b/.github/workflows/test-plugin.yaml index dc0a1f327db..3998013c03e 100644 --- a/.github/workflows/test-plugin.yaml +++ b/.github/workflows/test-plugin.yaml @@ -51,7 +51,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: CONFIGURE BAZEL diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index b71b77fa2e5..d4b0802441c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -51,7 +51,7 @@ jobs: run: | echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: REPO CACHE From a0681a4c5343514c106af0bbf2f7e94bcb5826bb Mon Sep 17 00:00:00 2001 From: Michael Davis Date: Mon, 30 Sep 2024 18:08:01 -0400 Subject: [PATCH 08/33] Represent `rabbit_binding:deletions()` with a map instead of dict The `dict:dict()` typing of `rabbit_binding` appears to be a historical artifact. `dict` has been superseded by `maps`. Switching to a map makes deletions easier to inspect manually and faster. 
Though if deletions grow so large that the map representation is important, manipulation of the deletions is unlikely to be expensive compared to any other operations that produced them, so performance is probably irrelevant. This commit refactors the bottom section of the `rabbit_binding` module to switch to a map, switch the `deletions()` type to an opaque, eliminating a TODO created when using Erlang/OTP 17.1, and the deletion value to a record. We eliminate some historical artifacts and "cruft": * Deletions taking multiple forms needlessly, specifically the shape `{X, deleted | not_deleted, Bindings, none}` no longer being handled. `process_deletions/2` was responsible for creating this shape. Instead we now use a record to clearly define the fields. * Clauses to catch `{error, not_found}` are unnecessary after minor refactors of the callers. Removing them makes the type specs cleaner. * `rabbit_binding:process_deletions/1` has no need to update or change the deletions. This function uses `maps:foreach/2` instead and returns `ok` instead of mapped deletions. * Remove `undefined` from the typespec of deletions. This value is no longer possible with a refactor to `maybe_auto_delete_exchange_in_*` functions for Mnesia and Khepri. The value was nonsensical since you cannot delete bindings for an exchange that does not exist. 
--- deps/rabbit/src/rabbit_amqqueue.erl | 16 +- deps/rabbit/src/rabbit_binding.erl | 235 +++++++++++++------ deps/rabbit/src/rabbit_db_binding.erl | 50 ++-- deps/rabbit/src/rabbit_db_exchange.erl | 8 +- deps/rabbit/src/rabbit_exchange.erl | 27 +-- deps/rabbit/test/rabbit_db_binding_SUITE.erl | 8 +- deps/rabbit/test/rabbit_db_queue_SUITE.erl | 4 +- 7 files changed, 217 insertions(+), 131 deletions(-) diff --git a/deps/rabbit/src/rabbit_amqqueue.erl b/deps/rabbit/src/rabbit_amqqueue.erl index 5f73f81c500..2ef86b0203d 100644 --- a/deps/rabbit/src/rabbit_amqqueue.erl +++ b/deps/rabbit/src/rabbit_amqqueue.erl @@ -1818,8 +1818,8 @@ internal_delete(Queue, ActingUser, Reason) -> {error, timeout} = Err -> Err; Deletions -> - _ = rabbit_binding:process_deletions(Deletions), - rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER), + ok = rabbit_binding:process_deletions(Deletions), + ok = rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER), rabbit_core_metrics:queue_deleted(QueueName), ok = rabbit_event:notify(queue_deleted, [{name, QueueName}, @@ -1942,14 +1942,14 @@ filter_transient_queues_to_delete(Node) -> end. notify_queue_binding_deletions(QueueDeletions) when is_list(QueueDeletions) -> - Deletions = rabbit_binding:process_deletions( - lists:foldl(fun rabbit_binding:combine_deletions/2, - rabbit_binding:new_deletions(), - QueueDeletions)), + Deletions = lists:foldl( + fun rabbit_binding:combine_deletions/2, + rabbit_binding:new_deletions(), QueueDeletions), + ok = rabbit_binding:process_deletions(Deletions), rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER); notify_queue_binding_deletions(QueueDeletions) -> - Deletions = rabbit_binding:process_deletions(QueueDeletions), - rabbit_binding:notify_deletions(Deletions, ?INTERNAL_USER). + ok = rabbit_binding:process_deletions(QueueDeletions), + rabbit_binding:notify_deletions(QueueDeletions, ?INTERNAL_USER). 
notify_transient_queues_deleted(QueueDeletions) -> lists:foreach( diff --git a/deps/rabbit/src/rabbit_binding.erl b/deps/rabbit/src/rabbit_binding.erl index cf7f79b51e6..bde550e2d0a 100644 --- a/deps/rabbit/src/rabbit_binding.erl +++ b/deps/rabbit/src/rabbit_binding.erl @@ -13,7 +13,7 @@ -export([list/1, list_for_source/1, list_for_destination/1, list_for_source_and_destination/2, list_for_source_and_destination/3, list_explicit/0]). --export([new_deletions/0, combine_deletions/2, add_deletion/3, +-export([new_deletions/0, combine_deletions/2, add_deletion/5, process_deletions/1, notify_deletions/2, group_bindings_fold/3]). -export([info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4]). @@ -22,6 +22,9 @@ -export([reverse_route/1, index_route/1]). -export([binding_type/2]). +%% For testing only +-export([fetch_deletion/2]). + -define(DEFAULT_EXCHANGE(VHostPath), #resource{virtual_host = VHostPath, kind = exchange, name = <<>>}). @@ -50,9 +53,12 @@ rabbit_types:ok_or_error(rabbit_types:amqp_error())). -type bindings() :: [rabbit_types:binding()]. -%% TODO this should really be opaque but that seems to confuse 17.1's -%% dialyzer into objecting to everything that uses it. --type deletions() :: dict:dict(). +-record(deletion, {exchange :: rabbit_types:exchange(), + %% Whether the exchange was deleted. + deleted :: boolean(), + bindings :: sets:set(rabbit_types:binding())}). + +-opaque deletions() :: #{XName :: rabbit_exchange:name() => #deletion{}}. %%---------------------------------------------------------------------------- @@ -159,6 +165,19 @@ binding_type0(false, true) -> binding_type0(_, _) -> transient. +binding_checks(Binding, InnerFun) -> + fun(Src, Dst) -> + case rabbit_exchange:validate_binding(Src, Binding) of + ok -> + %% this argument is used to check queue exclusivity; + %% in general, we want to fail on that in preference to + %% anything else + InnerFun(Src, Dst); + Err -> + Err + end + end. 
+ -spec remove(rabbit_types:binding(), rabbit_types:username()) -> bind_res(). remove(Binding, ActingUser) -> remove(Binding, fun (_Src, _Dst) -> ok end, ActingUser). @@ -360,57 +379,96 @@ index_route(#route{binding = #binding{source = Source, %% ---------------------------------------------------------------------------- %% Binding / exchange deletion abstraction API %% ---------------------------------------------------------------------------- - -anything_but( NotThis, NotThis, NotThis) -> NotThis; -anything_but( NotThis, NotThis, This) -> This; -anything_but( NotThis, This, NotThis) -> This; -anything_but(_NotThis, This, This) -> This. +%% +%% `deletions()' describe a set of removals of bindings and/or exchanges from +%% the metadata store. +%% +%% This deletion collection is used for two purposes: +%% +%%
    +%%
  • "Processing" of deletions. Processing here means that the +%% exchanges and bindings are passed into the {@link rabbit_exchange} +%% callbacks. When an exchange is deleted the `rabbit_exchange:delete/1' +%% callback is invoked and when the exchange is not deleted but some bindings +%% are deleted the `rabbit_exchange:remove_bindings/2' is invoked.
  • +%%
  • Notification of metadata deletion. Like other internal +%% notifications, {@link rabbit_binding:notify_deletions()} uses {@link +%% rabbit_event} to notify any interested consumers of a resource deletion. +%% An example consumer of {@link rabbit_event} is the `rabbitmq_event_exchange' +%% plugin which publishes these notifications as messages.
  • +%%
+%% +%% The point of collecting deletions into this opaque type is to be able to +%% collect all bindings deleted for a given exchange into a list. This allows +%% us to invoke the `rabbit_exchange:remove_bindings/2' callback with all +%% deleted bindings at once rather than passing each deleted binding +%% individually. -spec new_deletions() -> deletions(). -new_deletions() -> dict:new(). - --spec add_deletion - (rabbit_exchange:name(), - {'undefined' | rabbit_types:exchange(), - 'deleted' | 'not_deleted', - bindings()}, - deletions()) -> - deletions(). - -add_deletion(XName, Entry, Deletions) -> - dict:update(XName, fun (Entry1) -> merge_entry(Entry1, Entry) end, - Entry, Deletions). +new_deletions() -> #{}. + +-spec add_deletion(XName, X, XDeleted, Bindings, Deletions) -> Deletions1 + when + XName :: rabbit_exchange:name(), + X :: rabbit_types:exchange(), + XDeleted :: deleted | not_deleted, + Bindings :: bindings(), + Deletions :: deletions(), + Deletions1 :: deletions(). + +add_deletion(XName, X, WasDeleted, Bindings, Deletions) + when (WasDeleted =:= deleted orelse WasDeleted =:= not_deleted) andalso + is_list(Bindings) andalso is_map(Deletions) -> + WasDeleted1 = case WasDeleted of + deleted -> true; + not_deleted -> false + end, + Bindings1 = sets:from_list(Bindings, [{version, 2}]), + Deletion = #deletion{exchange = X, + deleted = WasDeleted1, + bindings = Bindings1}, + maps:update_with( + XName, + fun(Deletion1) -> + merge_deletion(Deletion1, Deletion) + end, Deletion, Deletions). -spec combine_deletions(deletions(), deletions()) -> deletions(). -combine_deletions(Deletions1, Deletions2) -> - dict:merge(fun (_XName, Entry1, Entry2) -> merge_entry(Entry1, Entry2) end, - Deletions1, Deletions2). 
- -merge_entry({X1, Deleted1, Bindings1}, {X2, Deleted2, Bindings2}) -> - {anything_but(undefined, X1, X2), - anything_but(not_deleted, Deleted1, Deleted2), - Bindings1 ++ Bindings2}; -merge_entry({X1, Deleted1, Bindings1, none}, {X2, Deleted2, Bindings2, none}) -> - {anything_but(undefined, X1, X2), - anything_but(not_deleted, Deleted1, Deleted2), - Bindings1 ++ Bindings2, none}. - -notify_deletions({error, not_found}, _) -> - ok; -notify_deletions(Deletions, ActingUser) -> - dict:fold(fun (XName, {_X, deleted, Bs, _}, ok) -> - notify_exchange_deletion(XName, ActingUser), - notify_bindings_deletion(Bs, ActingUser); - (_XName, {_X, not_deleted, Bs, _}, ok) -> - notify_bindings_deletion(Bs, ActingUser); - (XName, {_X, deleted, Bs}, ok) -> +combine_deletions(Deletions1, Deletions2) + when is_map(Deletions1) andalso is_map(Deletions2) -> + maps:merge_with( + fun (_XName, Deletion1, Deletion2) -> + merge_deletion(Deletion1, Deletion2) + end, Deletions1, Deletions2). + +merge_deletion( + #deletion{deleted = Deleted1, bindings = Bindings1}, + #deletion{exchange = X2, deleted = Deleted2, bindings = Bindings2}) -> + %% Assume that X2 is more up to date than X1. + X = X2, + Deleted = Deleted1 orelse Deleted2, + Bindings = sets:union(Bindings1, Bindings2), + #deletion{exchange = X, + deleted = Deleted, + bindings = Bindings}. + +-spec notify_deletions(Deletions, ActingUser) -> ok when + Deletions :: rabbit_binding:deletions(), + ActingUser :: rabbit_types:username(). + +notify_deletions(Deletions, ActingUser) when is_map(Deletions) -> + maps:foreach( + fun (XName, #deletion{deleted = XDeleted, bindings = Bindings}) -> + case XDeleted of + true -> notify_exchange_deletion(XName, ActingUser), - notify_bindings_deletion(Bs, ActingUser); - (_XName, {_X, not_deleted, Bs}, ok) -> - notify_bindings_deletion(Bs, ActingUser) - end, ok, Deletions). + notify_bindings_deletion(Bindings, ActingUser); + false -> + notify_bindings_deletion(Bindings, ActingUser) + end + end, Deletions). 
notify_exchange_deletion(XName, ActingUser) -> ok = rabbit_event:notify( @@ -418,35 +476,58 @@ notify_exchange_deletion(XName, ActingUser) -> [{name, XName}, {user_who_performed_action, ActingUser}]). -notify_bindings_deletion(Bs, ActingUser) -> - [rabbit_event:notify(binding_deleted, - info(B) ++ [{user_who_performed_action, ActingUser}]) - || B <- Bs], - ok. +notify_bindings_deletion(Bindings, ActingUser) -> + sets:fold( + fun(Binding, ok) -> + rabbit_event:notify( + binding_deleted, + info(Binding) ++ [{user_who_performed_action, ActingUser}]), + ok + end, ok, Bindings). --spec process_deletions(deletions()) -> deletions(). +-spec process_deletions(deletions()) -> ok. process_deletions(Deletions) -> - dict:map(fun (_XName, {X, deleted, Bindings}) -> - Bs = lists:flatten(Bindings), - Serial = rabbit_exchange:serial(X), - rabbit_exchange:callback(X, delete, Serial, [X]), - {X, deleted, Bs, none}; - (_XName, {X, not_deleted, Bindings}) -> - Bs = lists:flatten(Bindings), - Serial = rabbit_exchange:serial(X), - rabbit_exchange:callback(X, remove_bindings, Serial, [X, Bs]), - {X, not_deleted, Bs, none} - end, Deletions). - -binding_checks(Binding, InnerFun) -> - fun(Src, Dst) -> - case rabbit_exchange:validate_binding(Src, Binding) of - ok -> - %% this argument is used to check queue exclusivity; - %% in general, we want to fail on that in preference to - %% anything else - InnerFun(Src, Dst); - Err -> - Err - end + maps:foreach( + fun (_XName, #deletion{exchange = X, + deleted = XDeleted, + bindings = Bindings}) -> + Serial = rabbit_exchange:serial(X), + case XDeleted of + true -> + rabbit_exchange:callback(X, delete, Serial, [X]); + false -> + Bindings1 = sets:to_list(Bindings), + rabbit_exchange:callback( + X, remove_bindings, Serial, [X, Bindings1]) + end + end, Deletions). 
+ +-spec fetch_deletion(XName, Deletions) -> Ret when + XName :: rabbit_exchange:name(), + Deletions :: deletions(), + Ret :: {X, WasDeleted, Bindings}, + X :: rabbit_types:exchange(), + WasDeleted :: deleted | not_deleted, + Bindings :: bindings(). +%% @doc Fetches the deletions for the given exchange name. +%% +%% This function is only intended for use in tests. +%% +%% @private + +fetch_deletion(XName, Deletions) -> + case maps:find(XName, Deletions) of + {ok, #deletion{exchange = X, + deleted = Deleted, + bindings = Bindings}} -> + WasDeleted = case Deleted of + true -> + deleted; + false -> + not_deleted + end, + Bindings1 = sets:to_list(Bindings), + {X, WasDeleted, Bindings1}; + error -> + error end. diff --git a/deps/rabbit/src/rabbit_db_binding.erl b/deps/rabbit/src/rabbit_db_binding.erl index 942b3a64811..9bb02277ca5 100644 --- a/deps/rabbit/src/rabbit_db_binding.erl +++ b/deps/rabbit/src/rabbit_db_binding.erl @@ -302,7 +302,10 @@ delete_in_mnesia(Src, Dst, B) -> should_index_table(Src), fun delete/3), Deletions0 = maybe_auto_delete_exchange_in_mnesia( B#binding.source, [B], rabbit_binding:new_deletions(), false), - fun() -> {ok, rabbit_binding:process_deletions(Deletions0)} end. + fun() -> + ok = rabbit_binding:process_deletions(Deletions0), + {ok, Deletions0} + end. absent_errs_only_in_mnesia(Names) -> Errs = [E || Name <- Names, @@ -352,7 +355,8 @@ delete_in_khepri(#binding{source = SrcName, {error, _} = Err -> Err; Deletions -> - {ok, rabbit_binding:process_deletions(Deletions)} + ok = rabbit_binding:process_deletions(Deletions), + {ok, Deletions} end. exists_in_khepri(Path, Binding) -> @@ -379,15 +383,18 @@ delete_in_khepri(Binding) -> end. 
maybe_auto_delete_exchange_in_khepri(XName, Bindings, Deletions, OnlyDurable) -> - {Entry, Deletions1} = - case rabbit_db_exchange:maybe_auto_delete_in_khepri(XName, OnlyDurable) of - {not_deleted, X} -> - {{X, not_deleted, Bindings}, Deletions}; - {deleted, X, Deletions2} -> - {{X, deleted, Bindings}, - rabbit_binding:combine_deletions(Deletions, Deletions2)} - end, - rabbit_binding:add_deletion(XName, Entry, Deletions1). + case rabbit_db_exchange:maybe_auto_delete_in_khepri(XName, OnlyDurable) of + {not_deleted, undefined} -> + Deletions; + {not_deleted, X} -> + rabbit_binding:add_deletion( + XName, X, not_deleted, Bindings, Deletions); + {deleted, X, Deletions1} -> + Deletions2 = rabbit_binding:combine_deletions( + Deletions, Deletions1), + rabbit_binding:add_deletion( + XName, X, deleted, Bindings, Deletions2) + end. %% ------------------------------------------------------------------- %% get_all(). @@ -1152,15 +1159,18 @@ sync_index_route(_, _, _) -> OnlyDurable :: boolean(), Ret :: rabbit_binding:deletions(). maybe_auto_delete_exchange_in_mnesia(XName, Bindings, Deletions, OnlyDurable) -> - {Entry, Deletions1} = - case rabbit_db_exchange:maybe_auto_delete_in_mnesia(XName, OnlyDurable) of - {not_deleted, X} -> - {{X, not_deleted, Bindings}, Deletions}; - {deleted, X, Deletions2} -> - {{X, deleted, Bindings}, - rabbit_binding:combine_deletions(Deletions, Deletions2)} - end, - rabbit_binding:add_deletion(XName, Entry, Deletions1). + case rabbit_db_exchange:maybe_auto_delete_in_mnesia(XName, OnlyDurable) of + {not_deleted, undefined} -> + Deletions; + {not_deleted, X} -> + rabbit_binding:add_deletion( + XName, X, not_deleted, Bindings, Deletions); + {deleted, X, Deletions1} -> + Deletions2 = rabbit_binding:combine_deletions( + Deletions, Deletions1), + rabbit_binding:add_deletion( + XName, X, deleted, Bindings, Deletions2) + end. %% Instead of locking entire table on remove operations we can lock the %% affected resource only. 
diff --git a/deps/rabbit/src/rabbit_db_exchange.erl b/deps/rabbit/src/rabbit_db_exchange.erl index f8c37a22428..ef6b9f3c61a 100644 --- a/deps/rabbit/src/rabbit_db_exchange.erl +++ b/deps/rabbit/src/rabbit_db_exchange.erl @@ -573,7 +573,7 @@ next_serial_in_khepri_tx(#exchange{name = XName}) -> IfUnused :: boolean(), Exchange :: rabbit_types:exchange(), Binding :: rabbit_types:binding(), - Deletions :: dict:dict(), + Deletions :: rabbit_binding:deletions(), Ret :: {deleted, Exchange, [Binding], Deletions} | {error, not_found} | {error, in_use} | @@ -624,7 +624,7 @@ unconditional_delete_in_mnesia(X, OnlyDurable) -> RemoveBindingsForSource :: boolean(), Exchange :: rabbit_types:exchange(), Binding :: rabbit_types:binding(), - Deletions :: dict:dict(), + Deletions :: rabbit_binding:deletions(), Ret :: {error, not_found} | {error, in_use} | {deleted, Exchange, [Binding], Deletions}. delete_in_mnesia(X = #exchange{name = XName}, OnlyDurable, RemoveBindingsForSource) -> ok = mnesia:delete({?MNESIA_TABLE, XName}), @@ -695,7 +695,7 @@ delete_all_in_mnesia_tx(VHostName) -> {deleted, #exchange{name = XName}, Bindings, XDeletions} = unconditional_delete_in_mnesia( X, false), XDeletions1 = rabbit_binding:add_deletion( - XName, {X, deleted, Bindings}, XDeletions), + XName, X, deleted, Bindings, XDeletions), rabbit_binding:combine_deletions(Acc, XDeletions1) end, rabbit_binding:new_deletions(), Xs), {ok, Deletions}. @@ -716,7 +716,7 @@ delete_all_in_khepri_tx(VHostName) -> rabbit_db_binding:delete_all_for_exchange_in_khepri( X, false, true), Deletions1 = rabbit_binding:add_deletion( - XName, {X, deleted, Bindings}, XDeletions), + XName, X, deleted, Bindings, XDeletions), rabbit_binding:combine_deletions(Deletions, Deletions1) end, rabbit_binding:new_deletions(), NodeProps), {ok, Deletions}. 
diff --git a/deps/rabbit/src/rabbit_exchange.erl b/deps/rabbit/src/rabbit_exchange.erl index b4037f9a807..391b6b8934e 100644 --- a/deps/rabbit/src/rabbit_exchange.erl +++ b/deps/rabbit/src/rabbit_exchange.erl @@ -470,13 +470,15 @@ delete(XName, IfUnused, Username) -> _ = rabbit_runtime_parameters:set(XName#resource.virtual_host, ?EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, XName#resource.name, true, Username), - Deletions = process_deletions(rabbit_db_exchange:delete(XName, IfUnused)), - case Deletions of - {error, _} -> - Deletions; - _ -> - rabbit_binding:notify_deletions(Deletions, Username), - ok + case rabbit_db_exchange:delete(XName, IfUnused) of + {deleted, #exchange{name = XName} = X, Bs, Deletions} -> + Deletions1 = rabbit_binding:add_deletion( + XName, X, deleted, Bs, Deletions), + ok = rabbit_binding:process_deletions(Deletions1), + ok = rabbit_binding:notify_deletions(Deletions1, Username), + ok; + {error, _} = Err -> + Err end after rabbit_runtime_parameters:clear(XName#resource.virtual_host, @@ -491,17 +493,10 @@ delete(XName, IfUnused, Username) -> delete_all(VHostName, ActingUser) -> {ok, Deletions} = rabbit_db_exchange:delete_all(VHostName), - Deletions1 = rabbit_binding:process_deletions(Deletions), - rabbit_binding:notify_deletions(Deletions1, ActingUser), + ok = rabbit_binding:process_deletions(Deletions), + ok = rabbit_binding:notify_deletions(Deletions, ActingUser), ok. -process_deletions({error, _} = E) -> - E; -process_deletions({deleted, #exchange{name = XName} = X, Bs, Deletions}) -> - rabbit_binding:process_deletions( - rabbit_binding:add_deletion( - XName, {X, deleted, Bs}, Deletions)). 
- -spec ensure_deleted(ExchangeName, IfUnused, Username) -> Ret when ExchangeName :: name(), IfUnused :: boolean(), diff --git a/deps/rabbit/test/rabbit_db_binding_SUITE.erl b/deps/rabbit/test/rabbit_db_binding_SUITE.erl index 9055e4ff1dd..07eb0aea09d 100644 --- a/deps/rabbit/test/rabbit_db_binding_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_binding_SUITE.erl @@ -131,8 +131,8 @@ delete1(_Config) -> Ret = rabbit_db_binding:delete(Binding, fun(_, _) -> ok end), ?assertMatch({ok, _}, Ret), {ok, Deletions} = Ret, - ?assertMatch({#exchange{}, not_deleted, [#binding{}], none}, - dict:fetch(XName1, Deletions)), + ?assertMatch({#exchange{}, not_deleted, [#binding{}]}, + rabbit_binding:fetch_deletion(XName1, Deletions)), ?assertEqual(false, rabbit_db_binding:exists(Binding)), passed. @@ -152,8 +152,8 @@ auto_delete1(_Config) -> Ret = rabbit_db_binding:delete(Binding, fun(_, _) -> ok end), ?assertMatch({ok, _}, Ret), {ok, Deletions} = Ret, - ?assertMatch({#exchange{}, deleted, [#binding{}], none}, - dict:fetch(XName1, Deletions)), + ?assertMatch({#exchange{}, not_deleted, [#binding{}]}, + rabbit_binding:fetch_deletion(XName1, Deletions)), ?assertEqual(false, rabbit_db_binding:exists(Binding)), passed. diff --git a/deps/rabbit/test/rabbit_db_queue_SUITE.erl b/deps/rabbit/test/rabbit_db_queue_SUITE.erl index f66e8fd236c..06ff1a4889d 100644 --- a/deps/rabbit/test/rabbit_db_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_db_queue_SUITE.erl @@ -292,8 +292,8 @@ delete1(_Config) -> ?assertEqual({ok, Q}, rabbit_db_queue:get(QName)), %% TODO Can we handle the deletions outside of rabbit_db_queue? Probably not because %% they should be done in a single transaction, but what a horrid API to have! 
- Dict = rabbit_db_queue:delete(QName, normal), - ?assertEqual(0, dict:size(Dict)), + Deletions = rabbit_db_queue:delete(QName, normal), + ?assertEqual(rabbit_binding:new_deletions(), Deletions), ?assertEqual(ok, rabbit_db_queue:delete(QName, normal)), ?assertEqual({error, not_found}, rabbit_db_queue:get(QName)), passed. From 21f1bdd7457594469dc95f73e665b5942993dfd1 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Tue, 1 Oct 2024 14:32:37 +0100 Subject: [PATCH 09/33] add support for the leader info item in classic queues --- deps/rabbit/src/rabbit_amqqueue_process.erl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl index f1daf31f0a9..5e3e966ddcd 100644 --- a/deps/rabbit/src/rabbit_amqqueue_process.erl +++ b/deps/rabbit/src/rabbit_amqqueue_process.erl @@ -119,7 +119,8 @@ arguments, owner_pid, exclusive, - user_who_performed_action + user_who_performed_action, + leader ]). -define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name, type]]). 
@@ -1083,6 +1084,7 @@ i(auto_delete, #q{q = Q}) -> amqqueue:is_auto_delete(Q); i(arguments, #q{q = Q}) -> amqqueue:get_arguments(Q); i(pid, _) -> self(); +i(leader, State) -> node(i(pid, State)); i(owner_pid, #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, none) -> ''; i(owner_pid, #q{q = Q}) -> From 5ec83ade2780f6e91cdff06b408a6622ec343388 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Tue, 1 Oct 2024 14:39:36 +0100 Subject: [PATCH 10/33] test and assert new classic queue leader info item --- deps/rabbit/test/classic_queue_SUITE.erl | 1 + 1 file changed, 1 insertion(+) diff --git a/deps/rabbit/test/classic_queue_SUITE.erl b/deps/rabbit/test/classic_queue_SUITE.erl index 5b54d7150fb..e1e828124ff 100644 --- a/deps/rabbit/test/classic_queue_SUITE.erl +++ b/deps/rabbit/test/classic_queue_SUITE.erl @@ -83,6 +83,7 @@ leader_locator_client_local(Config) -> {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), {ok, Leader0} = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]), Leader = amqqueue:qnode(Leader0), + ?assertEqual([{leader, Leader}], rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, info, [Leader0, [leader]])), ?assertEqual(Server, Leader), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = Q})) From d91bb4313f5091911ca1dbec9e4d021334089b46 Mon Sep 17 00:00:00 2001 From: Ayanda Dube Date: Wed, 2 Oct 2024 13:34:48 +0100 Subject: [PATCH 11/33] support members info item in classic queues, which will always be the leader --- deps/rabbit/src/rabbit_amqqueue_process.erl | 4 +++- deps/rabbit/test/classic_queue_SUITE.erl | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/deps/rabbit/src/rabbit_amqqueue_process.erl b/deps/rabbit/src/rabbit_amqqueue_process.erl index 5e3e966ddcd..63f886bd376 100644 --- a/deps/rabbit/src/rabbit_amqqueue_process.erl +++ b/deps/rabbit/src/rabbit_amqqueue_process.erl @@ -120,7 +120,8 @@ owner_pid, exclusive, 
user_who_performed_action, - leader + leader, + members ]). -define(INFO_KEYS, [pid | ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [name, type]]). @@ -1085,6 +1086,7 @@ i(arguments, #q{q = Q}) -> amqqueue:get_arguments(Q); i(pid, _) -> self(); i(leader, State) -> node(i(pid, State)); +i(members, State) -> [i(leader, State)]; i(owner_pid, #q{q = Q}) when ?amqqueue_exclusive_owner_is(Q, none) -> ''; i(owner_pid, #q{q = Q}) -> diff --git a/deps/rabbit/test/classic_queue_SUITE.erl b/deps/rabbit/test/classic_queue_SUITE.erl index e1e828124ff..1336c6bdbcd 100644 --- a/deps/rabbit/test/classic_queue_SUITE.erl +++ b/deps/rabbit/test/classic_queue_SUITE.erl @@ -83,7 +83,8 @@ leader_locator_client_local(Config) -> {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), {ok, Leader0} = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]), Leader = amqqueue:qnode(Leader0), - ?assertEqual([{leader, Leader}], rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, info, [Leader0, [leader]])), + ?assertEqual([{leader, Leader}, {members, [Leader]}], + rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, info, [Leader0, [leader, members]])), ?assertEqual(Server, Leader), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, #'queue.delete'{queue = Q})) From 6eb2e7958f17209c7f83a51a7a98001ded3250a4 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Wed, 2 Oct 2024 16:06:51 -0400 Subject: [PATCH 12/33] Actions: align google-github-actions/auth versions --- .github/workflows/templates/test-mixed-versions.template.yaml | 2 +- .github/workflows/templates/test.template.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/templates/test-mixed-versions.template.yaml b/.github/workflows/templates/test-mixed-versions.template.yaml index 94747911f97..02135223e45 100644 --- a/.github/workflows/templates/test-mixed-versions.template.yaml +++ 
b/.github/workflows/templates/test-mixed-versions.template.yaml @@ -96,7 +96,7 @@ jobs: https://builds.hex.pm https://cdn.jsdelivr.net/hex - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: BUILD SECONDARY UMBRELLA ARCHIVE diff --git a/.github/workflows/templates/test.template.yaml b/.github/workflows/templates/test.template.yaml index 4999f4ccc22..4f7234af328 100644 --- a/.github/workflows/templates/test.template.yaml +++ b/.github/workflows/templates/test.template.yaml @@ -73,7 +73,7 @@ jobs: run: | echo "value=bazel-repo-cache-${{ hashFiles('MODULE.bazel') }}" | tee -a $GITHUB_OUTPUT - name: AUTHENTICATE TO GOOGLE CLOUD - uses: google-github-actions/auth@v2.1.5 + uses: google-github-actions/auth@v2.1.6 with: credentials_json: ${{ secrets.REMOTE_CACHE_CREDENTIALS_JSON }} - name: REPO CACHE From afbb47272adc1d869bc6cc1d721885170b146781 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Thu, 3 Oct 2024 01:12:39 -0400 Subject: [PATCH 13/33] This assertion does not belong to this leader-locator test --- deps/rabbit/test/classic_queue_SUITE.erl | 2 -- 1 file changed, 2 deletions(-) diff --git a/deps/rabbit/test/classic_queue_SUITE.erl b/deps/rabbit/test/classic_queue_SUITE.erl index 1336c6bdbcd..5b54d7150fb 100644 --- a/deps/rabbit/test/classic_queue_SUITE.erl +++ b/deps/rabbit/test/classic_queue_SUITE.erl @@ -83,8 +83,6 @@ leader_locator_client_local(Config) -> {<<"x-queue-leader-locator">>, longstr, <<"client-local">>}])), {ok, Leader0} = rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, lookup, [rabbit_misc:r(<<"/">>, queue, Q)]), Leader = amqqueue:qnode(Leader0), - ?assertEqual([{leader, Leader}, {members, [Leader]}], - rabbit_ct_broker_helpers:rpc(Config, Server, rabbit_amqqueue, info, [Leader0, [leader, members]])), ?assertEqual(Server, Leader), ?assertMatch(#'queue.delete_ok'{}, amqp_channel:call(Ch, 
#'queue.delete'{queue = Q})) From da883b2048a2810e41264a390a8b22d205fa0946 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 3 Oct 2024 12:49:04 +0200 Subject: [PATCH 14/33] rabbit_feature_flags: Lock registry once and enable many feature flags [Why] Before this change, the controller was looping on all feature flags to enable, then for each: 1. it checked if it was supported 2. it acquired the registry lock 3. it enabled the feature flag 4. it released the registry lock It was done this way to not acquire the log if the feature flag was unsupported in the first place. However, this put more load on the lock mechanism. [How] This commit changes the order. The controller acquires the registry lock once, then loops on feature flags to enable. The support check is now under the registry lock. --- deps/rabbit/src/rabbit_ff_controller.erl | 42 ++++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index 13a2b4f5153..0fef2c57502 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -823,12 +823,29 @@ refresh_after_app_load_task() -> Ret :: ok | {error, Reason}, Reason :: term(). -enable_many(#{states_per_node := _} = Inventory, [FeatureName | Rest]) -> +enable_many(#{states_per_node := _} = Inventory, FeatureNames) -> + %% We acquire a lock before making any change to the registry. This is not + %% used by the controller (because it is already using a globally + %% registered name to prevent concurrent runs). But this is used in + %% `rabbit_feature_flags:is_enabled()' to block while the state is + %% `state_changing'. + rabbit_ff_registry_factory:acquire_state_change_lock(), + Ret = enable_many_locked(Inventory, FeatureNames), + rabbit_ff_registry_factory:release_state_change_lock(), + Ret. 
+ +-spec enable_many_locked(Inventory, FeatureNames) -> Ret when + Inventory :: rabbit_feature_flags:cluster_inventory(), + FeatureNames :: [rabbit_feature_flags:feature_name()], + Ret :: ok | {error, Reason}, + Reason :: term(). + +enable_many_locked(#{states_per_node := _} = Inventory, [FeatureName | Rest]) -> case enable_if_supported(Inventory, FeatureName) of - {ok, Inventory1} -> enable_many(Inventory1, Rest); + {ok, Inventory1} -> enable_many_locked(Inventory1, Rest); Error -> Error end; -enable_many(_Inventory, []) -> +enable_many_locked(_Inventory, []) -> ok. -spec enable_if_supported(Inventory, FeatureName) -> Ret when @@ -845,7 +862,7 @@ enable_if_supported(#{states_per_node := _} = Inventory, FeatureName) -> "Feature flags: `~ts`: supported; continuing", [FeatureName], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), - lock_registry_and_enable(Inventory, FeatureName); + enable_with_registry_locked(Inventory, FeatureName); false -> ?LOG_DEBUG( "Feature flags: `~ts`: unsupported; aborting", @@ -854,23 +871,6 @@ enable_if_supported(#{states_per_node := _} = Inventory, FeatureName) -> {error, unsupported} end. --spec lock_registry_and_enable(Inventory, FeatureName) -> Ret when - Inventory :: rabbit_feature_flags:cluster_inventory(), - FeatureName :: rabbit_feature_flags:feature_name(), - Ret :: {ok, Inventory} | {error, Reason}, - Reason :: term(). - -lock_registry_and_enable(#{states_per_node := _} = Inventory, FeatureName) -> - %% We acquire a lock before making any change to the registry. This is not - %% used by the controller (because it is already using a globally - %% registered name to prevent concurrent runs). But this is used in - %% `rabbit_feature_flags:is_enabled()' to block while the state is - %% `state_changing'. - rabbit_ff_registry_factory:acquire_state_change_lock(), - Ret = enable_with_registry_locked(Inventory, FeatureName), - rabbit_ff_registry_factory:release_state_change_lock(), - Ret. 
- -spec enable_with_registry_locked(Inventory, FeatureName) -> Ret when Inventory :: rabbit_feature_flags:cluster_inventory(), FeatureName :: rabbit_feature_flags:feature_name(), From c7d7fccbe59df528fff14914fdfcfa3c333d2f40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 3 Oct 2024 12:54:24 +0200 Subject: [PATCH 15/33] rabbit_feature_flags: Fix copyright year The subsystem didn't exist before 2019. The deprecated features support was added in 2023. --- deps/rabbit/src/rabbit_depr_ff_extra.erl | 2 +- deps/rabbit/src/rabbit_deprecated_features.erl | 6 ++++-- deps/rabbit/src/rabbit_feature_flags.erl | 6 ++++-- deps/rabbit/src/rabbit_ff_controller.erl | 6 ++++-- deps/rabbit/src/rabbit_ff_extra.erl | 3 ++- deps/rabbit/src/rabbit_ff_registry.erl | 6 ++++-- deps/rabbit/src/rabbit_ff_registry_factory.erl | 3 ++- deps/rabbit/src/rabbit_ff_registry_wrapper.erl | 6 ++++-- 8 files changed, 25 insertions(+), 13 deletions(-) diff --git a/deps/rabbit/src/rabbit_depr_ff_extra.erl b/deps/rabbit/src/rabbit_depr_ff_extra.erl index 5267c3efbfb..2b499843316 100644 --- a/deps/rabbit/src/rabbit_depr_ff_extra.erl +++ b/deps/rabbit/src/rabbit_depr_ff_extra.erl @@ -2,7 +2,7 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2023 Broadcom. All Rights Reserved. The term “Broadcom” +%% Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term “Broadcom” %% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @doc diff --git a/deps/rabbit/src/rabbit_deprecated_features.erl b/deps/rabbit/src/rabbit_deprecated_features.erl index 93289be033e..ffafec5757b 100644 --- a/deps/rabbit/src/rabbit_deprecated_features.erl +++ b/deps/rabbit/src/rabbit_deprecated_features.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. 
%% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2023-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides an API to manage deprecated features in RabbitMQ. It diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl index 07883d080ff..3d2b19f8c7c 100644 --- a/deps/rabbit/src/rabbit_feature_flags.erl +++ b/deps/rabbit/src/rabbit_feature_flags.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module offers a framework to declare capabilities a RabbitMQ node diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index 0fef2c57502..c522e1cd6c1 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -2,11 +2,13 @@ %% License, v. 
2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% The feature flag controller is responsible for synchronization and managing diff --git a/deps/rabbit/src/rabbit_ff_extra.erl b/deps/rabbit/src/rabbit_ff_extra.erl index 9eba7218593..0171c420085 100644 --- a/deps/rabbit/src/rabbit_ff_extra.erl +++ b/deps/rabbit/src/rabbit_ff_extra.erl @@ -2,7 +2,8 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module provides extra functions unused by the feature flags diff --git a/deps/rabbit/src/rabbit_ff_registry.erl b/deps/rabbit/src/rabbit_ff_registry.erl index 864ff564dc6..eca99ebd9ec 100644 --- a/deps/rabbit/src/rabbit_ff_registry.erl +++ b/deps/rabbit/src/rabbit_ff_registry.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module exposes the API of the {@link rabbit_feature_flags} diff --git a/deps/rabbit/src/rabbit_ff_registry_factory.erl b/deps/rabbit/src/rabbit_ff_registry_factory.erl index 0d91a7b6495..68d81be6cf4 100644 --- a/deps/rabbit/src/rabbit_ff_registry_factory.erl +++ b/deps/rabbit/src/rabbit_ff_registry_factory.erl @@ -2,7 +2,8 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% -module(rabbit_ff_registry_factory). diff --git a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl index beef32f657c..a5f63eb64de 100644 --- a/deps/rabbit/src/rabbit_ff_registry_wrapper.erl +++ b/deps/rabbit/src/rabbit_ff_registry_wrapper.erl @@ -2,11 +2,13 @@ %% License, v. 2.0. If a copy of the MPL was not distributed with this %% file, You can obtain one at https://mozilla.org/MPL/2.0/. %% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% Copyright (c) 2019-2024 Broadcom. All Rights Reserved. The term “Broadcom” +%% refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. 
%% %% @author The RabbitMQ team -%% @copyright 2007-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% @copyright 2019-2024 Broadcom. The term “Broadcom” refers to Broadcom Inc. +%% and/or its subsidiaries. All rights reserved. %% %% @doc %% This module sits in front of {@link rabbit_ff_registry}. From 4f9dcf84193c7d9d42afda2fe84f8f2f7ba73fc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 3 Oct 2024 16:54:55 +0200 Subject: [PATCH 16/33] rabbit_feature_flags: Hide required feature flags from the registry init logged list [Why] Showing that required feature flags are enabled over and over is not useful and only adds noise to the logs. [How] Required feature flags and removed deprecated features are not lists explicitly. We just log their respective numbers to still be clear that they exist. Before: list of feature flags found: [x] classic_mirrored_queue_version [x] classic_queue_type_delivery_support [x] direct_exchange_routing_v2 [x] feature_flags_v2 [x] implicit_default_bindings [ ] khepri_db [x] message_containers_deaths_v2 [x] quorum_queue_non_voters [~] rabbit_exchange_type_local_random [x] rabbitmq_4.0.0 ... 
list of deprecated features found: [ ] amqp_address_v1 [x] classic_queue_mirroring [ ] global_qos [ ] queue_master_locator [ ] ram_node_type [ ] transient_nonexcl_queues After: list of feature flags found: [ ] khepri_db [x] message_containers_deaths_v2 [x] quorum_queue_non_voters [~] rabbit_exchange_type_local_random [x] rabbitmq_4.0.0 list of deprecated features found: [ ] amqp_address_v1 [ ] global_qos [ ] queue_master_locator [ ] ram_node_type [ ] transient_nonexcl_queues required feature flags not listed above: 18 removed deprecated features not listed above: 1 --- .../rabbit/src/rabbit_ff_registry_factory.erl | 91 ++++++++++++------- 1 file changed, 60 insertions(+), 31 deletions(-) diff --git a/deps/rabbit/src/rabbit_ff_registry_factory.erl b/deps/rabbit/src/rabbit_ff_registry_factory.erl index 68d81be6cf4..a0197171efa 100644 --- a/deps/rabbit/src/rabbit_ff_registry_factory.erl +++ b/deps/rabbit/src/rabbit_ff_registry_factory.erl @@ -443,37 +443,66 @@ do_initialize_registry(#{feature_flags := AllFeatureFlags, written_to_disk := WrittenToDisk} = Inventory) -> %% We log the state of those feature flags. 
?LOG_DEBUG( - lists:flatten( - "Feature flags: list of feature flags found:\n" ++ - [io_lib:format( - "Feature flags: [~ts] ~ts~n", - [case maps:get(FeatureName, FeatureStates, false) of - true -> "x"; - state_changing -> "~"; - false -> " " - end, - FeatureName]) - || FeatureName <- lists:sort(maps:keys(AllFeatureFlags)), - ?IS_FEATURE_FLAG(maps:get(FeatureName, AllFeatureFlags))] ++ - "Feature flags: list of deprecated features found:\n" ++ - [io_lib:format( - "Feature flags: [~ts] ~ts~n", - [case maps:get(FeatureName, FeatureStates, false) of - true -> "x"; - state_changing -> "~"; - false -> " " - end, - FeatureName]) - || FeatureName <- lists:sort(maps:keys(AllFeatureFlags)), - ?IS_DEPRECATION(maps:get(FeatureName, AllFeatureFlags))] ++ - [io_lib:format( - "Feature flags: scanned applications: ~tp~n" - "Feature flags: feature flag states written to disk: ~ts", - [ScannedApps, - case WrittenToDisk of - true -> "yes"; - false -> "no" - end])]), + begin + AllFeatureNames = lists:sort(maps:keys(AllFeatureFlags)), + {FeatureNames, + DeprFeatureNames} = lists:partition( + fun(FeatureName) -> + FeatureProps = maps:get( + FeatureName, + AllFeatureFlags), + ?IS_FEATURE_FLAG(FeatureProps) + end, AllFeatureNames), + + IsRequired = fun(FeatureName) -> + FeatureProps = maps:get( + FeatureName, + AllFeatureFlags), + required =:= + rabbit_feature_flags:get_stability( + FeatureProps) + end, + {ReqFeatureNames, + NonReqFeatureNames} = lists:partition(IsRequired, FeatureNames), + {ReqDeprFeatureNames, + NonReqDeprFeatureNames} = lists:partition( + IsRequired, DeprFeatureNames), + + lists:flatten( + "Feature flags: list of feature flags found:\n" ++ + [io_lib:format( + "Feature flags: [~ts] ~ts~n", + [case maps:get(FeatureName, FeatureStates, false) of + true -> "x"; + state_changing -> "~"; + false -> " " + end, + FeatureName]) + || FeatureName <- NonReqFeatureNames] ++ + "Feature flags: list of deprecated features found:\n" ++ + [io_lib:format( + "Feature flags: [~ts] ~ts~n", 
+ [case maps:get(FeatureName, FeatureStates, false) of + true -> "x"; + state_changing -> "~"; + false -> " " + end, + FeatureName]) + || FeatureName <- NonReqDeprFeatureNames] ++ + [io_lib:format( + "Feature flags: required feature flags not listed above: ~b~n" + "Feature flags: removed deprecated features not listed " + "above: ~b~n" + "Feature flags: scanned applications: ~0tp~n" + "Feature flags: feature flag states written to disk: ~ts", + [length(ReqFeatureNames), + length(ReqDeprFeatureNames), + ScannedApps, + case WrittenToDisk of + true -> "yes"; + false -> "no" + end])]) + end, #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS} ), From ff210aa0c2599744826aceea684c5f97c24014d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 3 Oct 2024 16:58:36 +0200 Subject: [PATCH 17/33] rabbit_feature_flags: Log a inventory matrix instead of dumping the map [Why] The inventory map is huge and difficult to read when it is logged as is. [How] Logging a matrix is much more compact and to the point. Before: Feature flags: inventory of node `rabbit-1@giotto`: #{feature_flags => #{rabbit_exchange_type_local_random => #{name => rabbit_exchange_type_local_random, desc => "Local random exchange",stability => stable, provided_by => rabbit}, message_containers_deaths_v2 => #{name => message_containers_deaths_v2, desc => "Bug fix for dead letter cycle detection", ... After: Feature flags: inventory queried from node `rabbit-2@giotto`: ,-- rabbit-2@giotto | amqp_address_v1: classic_mirrored_queue_version: x classic_queue_mirroring: x classic_queue_type_delivery_support: x ... 
--- deps/rabbit/src/rabbit_ff_controller.erl | 64 ++++++++++++++++++++---- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index c522e1cd6c1..f18d30cbddc 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -440,17 +440,10 @@ check_node_compatibility_task1(NodeA, NodesA, NodeB, NodesB, NodeAAsVirigin) {ok, InventoryA0} -> InventoryA = virtually_reset_inventory( InventoryA0, NodeAAsVirigin), - ?LOG_DEBUG( - "Feature flags: inventory of node `~ts`:~n~tp", - [NodeA, InventoryA], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + log_inventory(NodeA, InventoryA), case collect_inventory_on_nodes(NodesB) of {ok, InventoryB} -> - ?LOG_DEBUG( - "Feature flags: inventory of node " - "`~ts`:~n~tp", - [NodeB, InventoryB], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + log_inventory(NodeB, InventoryB), case are_compatible(InventoryA, InventoryB) of true -> ?LOG_NOTICE( @@ -485,6 +478,59 @@ check_node_compatibility_task1(NodeA, NodesA, NodeB, NodesB, NodeAAsVirigin) {error, {aborted_feature_flags_compat_check, Error}} end. 
+log_inventory( + FromNode, + #{feature_flags := FeatureFlags, states_per_node := StatesPerNode}) -> + ?LOG_DEBUG( + begin + AllFeatureNames = lists:sort(maps:keys(FeatureFlags)), + Nodes = lists:sort(maps:keys(StatesPerNode)), + LongestFeatureName = lists:foldl( + fun(FeatureName, MaxLength) -> + Length = length( + atom_to_list( + FeatureName)), + if + Length > MaxLength -> Length; + true -> MaxLength + end + end, 0, AllFeatureNames), + NodeInitialPrefix = lists:duplicate(LongestFeatureName + 1, $\s), + {Header, + HeaderTail} = lists:foldl( + fun(Node, {String, Prefix}) -> + String1 = io_lib:format( + "~ts~ts ,-- ~ts~n", + [String, Prefix, Node]), + NextPrefix = Prefix ++ " |", + {String1, NextPrefix} + end, {"", NodeInitialPrefix}, Nodes), + lists:flatten( + io_lib:format( + "Feature flags: inventory queried from node `~ts`:~n", + [FromNode]) ++ + Header ++ + HeaderTail ++ + [io_lib:format("~n~*ts:", [LongestFeatureName, FeatureName]) ++ + [io_lib:format( + " ~s", + [begin + State = maps:get( + FeatureName, + maps:get(Node, StatesPerNode), + false), + case State of + true -> "x"; + state_changing -> "~"; + false -> " " + end + end]) + || Node <- Nodes] + || FeatureName <- AllFeatureNames] ++ + []) + end, + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}).
+ -spec list_nodes_clustered_with(Node) -> Ret when Node :: node(), Ret :: Members | Error, From 588c62cd7e87c5b69fba2b00032b6d2f6c872e39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Thu, 3 Oct 2024 17:03:04 +0200 Subject: [PATCH 18/33] rabbit_feature_flags: Fix style --- deps/rabbit/src/rabbit_feature_flags.erl | 2 +- deps/rabbit/src/rabbit_ff_controller.erl | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/deps/rabbit/src/rabbit_feature_flags.erl b/deps/rabbit/src/rabbit_feature_flags.erl index 3d2b19f8c7c..12fc1b7b939 100644 --- a/deps/rabbit/src/rabbit_feature_flags.erl +++ b/deps/rabbit/src/rabbit_feature_flags.erl @@ -744,7 +744,7 @@ get_stability(FeatureName) when is_atom(FeatureName) -> undefined -> undefined; FeatureProps -> get_stability(FeatureProps) end; -get_stability(FeatureProps) when ?IS_FEATURE_FLAG(FeatureProps) -> +get_stability(FeatureProps) when ?IS_FEATURE_FLAG(FeatureProps) -> maps:get(stability, FeatureProps, stable); get_stability(FeatureProps) when ?IS_DEPRECATION(FeatureProps) -> Phase = rabbit_deprecated_features:get_phase(FeatureProps), diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index f18d30cbddc..822f38b01e9 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -888,12 +888,14 @@ enable_many(#{states_per_node := _} = Inventory, FeatureNames) -> Ret :: ok | {error, Reason}, Reason :: term(). -enable_many_locked(#{states_per_node := _} = Inventory, [FeatureName | Rest]) -> +enable_many_locked( + #{states_per_node := _} = Inventory, [FeatureName | Rest]) -> case enable_if_supported(Inventory, FeatureName) of {ok, Inventory1} -> enable_many_locked(Inventory1, Rest); Error -> Error end; -enable_many_locked(_Inventory, []) -> +enable_many_locked( + _Inventory, []) -> ok. 
-spec enable_if_supported(Inventory, FeatureName) -> Ret when From 282b68cf2881bfc6a17770b9900959052dc925ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 4 Oct 2024 15:46:00 +0200 Subject: [PATCH 19/33] rabbit_feature_flags: Hide required feature flags in management UI [Why] They just add noise to the UI and there is nothing the user can do about them at that point. Given their number will only increase, let's hide them to let the user focus on the feature flags they can act on. --- .../priv/www/js/tmpl/feature-flags.ejs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs index a2ed48ad457..070acdb3942 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/feature-flags.ejs @@ -30,9 +30,14 @@ <% for (var i = 0; i < feature_flags.length; i++) { var feature_flag = feature_flags[i]; - if (feature_flag.stability == "experimental") { - continue; - } + if (feature_flag.stability == "required") { + /* Hide required feature flags. There is nothing the user can do + * about them and they just add noise to the UI. 
*/ + continue; + } + if (feature_flag.stability == "experimental") { + continue; + } var state_color = "grey"; if (feature_flag.state == "enabled") { state_color = "green"; @@ -103,9 +108,9 @@ These flags can be enabled in production deployments after an appropriate amount <% for (var i = 0; i < feature_flags.length; i++) { var feature_flag = feature_flags[i]; - if (feature_flag.stability != "experimental") { - continue; - } + if (feature_flag.stability != "experimental") { + continue; + } var state_color = "grey"; if (feature_flag.state == "enabled") { state_color = "green"; @@ -119,14 +124,14 @@ These flags can be enabled in production deployments after an appropriate amount <%= fmt_string(feature_flag.name) %> <% if (feature_flag.state == "disabled") { %> -
+

-
+
-
+
<% } else { %> Date: Fri, 4 Oct 2024 18:08:08 +0000 Subject: [PATCH 20/33] build(deps-dev): bump junit.jupiter.version Bumps `junit.jupiter.version` from 5.11.1 to 5.11.2. Updates `org.junit.jupiter:junit-jupiter-engine` from 5.11.1 to 5.11.2 - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) Updates `org.junit.jupiter:junit-jupiter-params` from 5.11.1 to 5.11.2 - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) --- updated-dependencies: - dependency-name: org.junit.jupiter:junit-jupiter-engine dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.junit.jupiter:junit-jupiter-params dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml index b67c0041933..ae3801d4706 100644 --- a/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream_management/test/http_SUITE_data/pom.xml @@ -27,7 +27,7 @@ [0.12.0-SNAPSHOT,) - 5.11.1 + 5.11.2 3.26.3 1.2.13 3.12.1 From fd21435e9ef5f6ddd367e86654c5ba72401e6fa5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:10:26 +0000 Subject: [PATCH 21/33] build(deps-dev): bump junit.jupiter.version Bumps `junit.jupiter.version` from 5.11.1 to 5.11.2. 
Updates `org.junit.jupiter:junit-jupiter-engine` from 5.11.1 to 5.11.2 - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) Updates `org.junit.jupiter:junit-jupiter-params` from 5.11.1 to 5.11.2 - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) --- updated-dependencies: - dependency-name: org.junit.jupiter:junit-jupiter-engine dependency-type: direct:development update-type: version-update:semver-patch - dependency-name: org.junit.jupiter:junit-jupiter-params dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml index 8b2eb333c78..05a4277063b 100644 --- a/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml +++ b/deps/rabbitmq_stream/test/rabbit_stream_SUITE_data/pom.xml @@ -27,7 +27,7 @@ [0.12.0-SNAPSHOT,) - 5.11.1 + 5.11.2 3.26.3 1.2.13 3.12.1 From b9f3378712a31da16c47212fa1111673a01dadf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:13:14 +0000 Subject: [PATCH 22/33] build(deps-dev): bump org.junit.jupiter:junit-jupiter-params Bumps [org.junit.jupiter:junit-jupiter-params](https://github.com/junit-team/junit5) from 5.11.1 to 5.11.2. - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) --- updated-dependencies: - dependency-name: org.junit.jupiter:junit-jupiter-params dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .../examples/rabbitmq_auth_backend_spring_boot/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml index fd64bfacc31..82d8f5801d9 100644 --- a/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml +++ b/deps/rabbitmq_auth_backend_http/examples/rabbitmq_auth_backend_spring_boot/pom.xml @@ -35,7 +35,7 @@ 17 17 - 5.11.1 + 5.11.2 com.rabbitmq.examples From 178addbc162f88d69c02e2715b928afae6ad48bf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 18:32:14 +0000 Subject: [PATCH 23/33] build(deps-dev): bump org.junit.jupiter:junit-jupiter Bumps [org.junit.jupiter:junit-jupiter](https://github.com/junit-team/junit5) from 5.11.1 to 5.11.2. - [Release notes](https://github.com/junit-team/junit5/releases) - [Commits](https://github.com/junit-team/junit5/compare/r5.11.1...r5.11.2) --- updated-dependencies: - dependency-name: org.junit.jupiter:junit-jupiter dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml index 450edf13d40..cf1f1bac5dd 100644 --- a/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml +++ b/deps/rabbitmq_mqtt/test/java_SUITE_data/pom.xml @@ -16,7 +16,7 @@ [1.2.5,) [1.2.5,) 5.22.0 - 5.11.1 + 5.11.2 3.26.3 1.2.13 3.5.0 From f8ef332158a0704b46e4e095e3ad77a2467b9b8f Mon Sep 17 00:00:00 2001 From: Johan Rhodin Date: Fri, 4 Oct 2024 14:38:26 -0500 Subject: [PATCH 24/33] Remove mention of global prefetch --- deps/rabbitmq_management/priv/www/js/global.js | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/deps/rabbitmq_management/priv/www/js/global.js b/deps/rabbitmq_management/priv/www/js/global.js index 295e36454ff..42d7a8f34e2 100644 --- a/deps/rabbitmq_management/priv/www/js/global.js +++ b/deps/rabbitmq_management/priv/www/js/global.js @@ -305,15 +305,12 @@ var HELP = { ', 'channel-prefetch': - 'Channel prefetch counts. \ + 'Channel prefetch count.\

\ - Each channel can have two prefetch counts: A per-consumer count, which \ - will limit each new consumer created on the channel, and a global \ - count, which is shared between all consumers on the channel.\ + Each channel can have a prefetch count. The prefetch is the number of messages that will be held \ + by the client. Setting a value of 0 will result in an unlimited prefetch. \

\ -

\ - This column shows one, the other, or both limits if they are set. \ -

', + ', 'file-descriptors': '

File descriptor count and limit, as reported by the operating \ From 5c4814a09f93492e03cb6b0e36ce0fa9bf8e1c47 Mon Sep 17 00:00:00 2001 From: Michael Klishin Date: Fri, 4 Oct 2024 20:47:37 -0400 Subject: [PATCH 25/33] Remove multiple mentions of global prefetch As suggested by @johanrhodin in #12454. This keeps the Prometheus plugin part but marks it as deprecated. We can remove it in 4.1. --- deps/rabbit/docs/rabbitmqctl.8 | 2 -- deps/rabbit/src/rabbit_channel.erl | 3 ++- .../lib/rabbitmq/cli/ctl/commands/list_channels_command.ex | 2 +- deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs | 4 ---- deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs | 3 --- .../collectors/prometheus_rabbitmq_core_metrics_collector.erl | 2 +- 6 files changed, 4 insertions(+), 12 deletions(-) diff --git a/deps/rabbit/docs/rabbitmqctl.8 b/deps/rabbit/docs/rabbitmqctl.8 index 063f92c1690..fd7b5f31ef6 100644 --- a/deps/rabbit/docs/rabbitmqctl.8 +++ b/deps/rabbit/docs/rabbitmqctl.8 @@ -942,8 +942,6 @@ The number of not yet confirmed published messages. On channels not in confirm mode, this remains 0. .It Cm prefetch_count QoS prefetch limit for new consumers, 0 if unlimited. -.It Cm global_prefetch_count -QoS prefetch limit for the entire channel, 0 if unlimited. .El .Pp If no diff --git a/deps/rabbit/src/rabbit_channel.erl b/deps/rabbit/src/rabbit_channel.erl index 4be86370c39..7eee4f0c81d 100644 --- a/deps/rabbit/src/rabbit_channel.erl +++ b/deps/rabbit/src/rabbit_channel.erl @@ -184,7 +184,6 @@ acks_uncommitted, pending_raft_commands, prefetch_count, - global_prefetch_count, state, garbage_collection]). @@ -2272,6 +2271,8 @@ i(pending_raft_commands, #ch{queue_states = QS}) -> i(state, #ch{cfg = #conf{state = running}}) -> credit_flow:state(); i(state, #ch{cfg = #conf{state = State}}) -> State; i(prefetch_count, #ch{cfg = #conf{consumer_prefetch = C}}) -> C; +%% Retained for backwards compatibility e.g. in mixed version clusters, +%% can be removed starting with 4.2. MK. 
i(global_prefetch_count, #ch{limiter = Limiter}) -> rabbit_limiter:get_prefetch_limit(Limiter); i(interceptors, #ch{interceptor_state = IState}) -> diff --git a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex index 37c38955e07..a20496b5c1c 100644 --- a/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex +++ b/deps/rabbitmq_cli/lib/rabbitmq/cli/ctl/commands/list_channels_command.ex @@ -16,7 +16,7 @@ defmodule RabbitMQ.CLI.Ctl.Commands.ListChannelsCommand do @info_keys ~w(pid connection name number user vhost transactional confirm consumer_count messages_unacknowledged messages_uncommitted acks_uncommitted messages_unconfirmed - prefetch_count global_prefetch_count)a + prefetch_count)a def info_keys(), do: @info_keys diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs index 0eae88802fe..cf37d225a67 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/channel.ejs @@ -38,10 +38,6 @@ Prefetch count <%= channel.prefetch_count %> - - Global prefetch count - <%= channel.global_prefetch_count %> - diff --git a/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs b/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs index 8acf57ab4de..ef6c543bbaf 100644 --- a/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs +++ b/deps/rabbitmq_management/priv/www/js/tmpl/channels-list.ejs @@ -157,9 +157,6 @@ <% if (channel.prefetch_count != 0) { %> <%= channel.prefetch_count %>
<% } %> - <% if (channel.global_prefetch_count != 0) { %> - <%= channel.global_prefetch_count %> (global) - <% } %> <% } %> <% if (show_column('channels', 'msgs-unacked')) { %> diff --git a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl index ac2a6438398..c6dfd43e2ff 100644 --- a/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl +++ b/deps/rabbitmq_prometheus/src/collectors/prometheus_rabbitmq_core_metrics_collector.erl @@ -170,7 +170,7 @@ {2, undefined, channel_messages_uncommitted, gauge, "Messages received in a transaction but not yet committed", messages_uncommitted}, {2, undefined, channel_acks_uncommitted, gauge, "Message acknowledgements in a transaction not yet committed", acks_uncommitted}, {2, undefined, consumer_prefetch, gauge, "Limit of unacknowledged messages for each consumer", prefetch_count}, - {2, undefined, channel_prefetch, gauge, "Total limit of unacknowledged messages for all consumers on a channel", global_prefetch_count} + {2, undefined, channel_prefetch, gauge, "Deprecated and will be removed in a future version", global_prefetch_count} ]}, {channel_exchange_metrics, [ From 9c905668eccc92b60079179bcfcce5a2fd7ad280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20G=C3=B6m=C3=B6ri?= Date: Mon, 30 Sep 2024 01:01:52 +0200 Subject: [PATCH 26/33] Don't start invalid but enabled plugins at startup For example during the startup after RabbitMQ was upgraded but an enabled community plugin wasn't, and the plugin's broker version requirement isn't met any more, RabbitMQ still started the plugin after logging an error. 
--- deps/rabbit/src/rabbit_plugins.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/rabbit/src/rabbit_plugins.erl b/deps/rabbit/src/rabbit_plugins.erl index 959a1a6de7c..228c04bc208 100644 --- a/deps/rabbit/src/rabbit_plugins.erl +++ b/deps/rabbit/src/rabbit_plugins.erl @@ -265,7 +265,7 @@ prepare_plugins(Enabled) -> [ExpandDir, E2]}}) end, [prepare_plugin(Plugin, ExpandDir) || Plugin <- ValidPlugins], - Wanted. + [P#plugin.name || P <- ValidPlugins]. maybe_warn_about_invalid_plugins([]) -> ok; From 06968c1b156c8c5c584fc2ac13f1f3cf31e32c0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFc=20Hoguin?= Date: Wed, 2 Oct 2024 15:32:34 +0200 Subject: [PATCH 27/33] Make parallel-ct properly detect test failures The problem comes from `ct_master` which doesn't tell us in the return value whether the tests succeeded. In order to get that information a CT hook was created. But then we run into another problem: despite its documentation claiming otherwise, `ct_master` does not handle `ct_hooks` instructions in the test spec. So for the time being we fork `ct_master` into a new `ct_master_fork` module and insert our hook directly in the code. Later on we will submit patches to OTP. 
--- deps/rabbit/Makefile | 14 +- .../src/ct_master_fork.erl | 964 ++++++++++++++++++ .../src/cth_parallel_ct_detect_failure.erl | 23 + deps/rabbitmq_mqtt/Makefile | 14 +- 4 files changed, 1011 insertions(+), 4 deletions(-) create mode 100644 deps/rabbitmq_ct_helpers/src/ct_master_fork.erl create mode 100644 deps/rabbitmq_ct_helpers/src/cth_parallel_ct_detect_failure.erl diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index f47d655be09..24110ce28db 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -239,12 +239,22 @@ define ct_master.erl peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), - ct_master:run("$1"), + ct_master_fork:run("$1"), + Fail1 = peer:call(Pid1, cth_parallel_ct_detect_failure, has_failures, []), + Fail2 = peer:call(Pid2, cth_parallel_ct_detect_failure, has_failures, []), + Fail3 = peer:call(Pid3, cth_parallel_ct_detect_failure, has_failures, []), + Fail4 = peer:call(Pid4, cth_parallel_ct_detect_failure, has_failures, []), peer:stop(Pid4), peer:stop(Pid3), peer:stop(Pid2), peer:stop(Pid1), - halt() + if + Fail1 -> halt(1); + Fail2 -> halt(2); + Fail3 -> halt(3); + Fail4 -> halt(4); + true -> halt(0) + end endef PARALLEL_CT_SET_1_A = amqp_client unit_cluster_formation_locking_mocks unit_cluster_formation_sort_nodes unit_collections unit_config_value_encryption unit_connection_tracking diff --git a/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl b/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl new file mode 100644 index 00000000000..443635fe912 --- /dev/null +++ b/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl @@ -0,0 +1,964 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-module(ct_master_fork). +%-moduledoc """ +%Distributed test execution control for `Common Test`. +% +%This module exports functions for running `Common Test` nodes on multiple hosts +%in parallel. +%""". + +-export([run/1,run/3,run/4]). +-export([run_on_node/2,run_on_node/3]). +-export([run_test/1,run_test/2]). +-export([get_event_mgr_ref/0]). +-export([basic_html/1,esc_chars/1]). + +-export([abort/0,abort/1,progress/0]). + +-export([init_master/7, init_node_ctrl/3]). + +-export([status/2]). + +-include_lib("common_test/include/ct_event.hrl"). +-include_lib("common_test/src/ct_util.hrl"). + +%-doc "Filename of test spec to be executed.". +-type test_spec() :: file:name_all(). + +-record(state, {node_ctrl_pids=[], + logdirs=[], + results=[], + locks=[], + blocked=[] + }). + +-export_type([test_spec/0]). + +%-doc """ +%Tests are spawned on `Node` using `ct:run_test/1` +%""". 
+-spec run_test(Node, Opts) -> 'ok' + when Node :: node(), + Opts :: [OptTuples], + OptTuples :: {'dir', TestDirs} + | {'suite', Suites} + | {'group', Groups} + | {'testcase', Cases} + | {'spec', TestSpecs} + | {'join_specs', boolean()} + | {'label', Label} + | {'config', CfgFiles} + | {'userconfig', UserConfig} + | {'allow_user_terms', boolean()} + | {'logdir', LogDir} + | {'silent_connections', Conns} + | {'stylesheet', CSSFile} + | {'cover', CoverSpecFile} + | {'cover_stop', boolean()} + | {'step', StepOpts} + | {'event_handler', EventHandlers} + | {'include', InclDirs} + | {'auto_compile', boolean()} + | {'abort_if_missing_suites', boolean()} + | {'create_priv_dir', CreatePrivDir} + | {'multiply_timetraps', M} + | {'scale_timetraps', boolean()} + | {'repeat', N} + | {'duration', DurTime} + | {'until', StopTime} + | {'force_stop', ForceStop} + | {'decrypt', DecryptKeyOrFile} + | {'refresh_logs', LogDir} + | {'logopts', LogOpts} + | {'verbosity', VLevels} + | {'basic_html', boolean()} + | {'esc_chars', boolean()} + | {'keep_logs',KeepSpec} + | {'ct_hooks', CTHs} + | {'ct_hooks_order', CTHsOrder} + | {'enable_builtin_hooks', boolean()} + | {'release_shell', boolean()}, + TestDirs :: [string()] | string(), + Suites :: [string()] | [atom()] | string() | atom(), + Cases :: [atom()] | atom(), + Groups :: GroupNameOrPath | [GroupNameOrPath], + GroupNameOrPath :: [atom()] | atom() | 'all', + TestSpecs :: [string()] | string(), + Label :: string() | atom(), + CfgFiles :: [string()] | string(), + UserConfig :: [{CallbackMod, CfgStrings}] | {CallbackMod, CfgStrings}, + CallbackMod :: atom(), + CfgStrings :: [string()] | string(), + LogDir :: string(), + Conns :: 'all' | [atom()], + CSSFile :: string(), + CoverSpecFile :: string(), + StepOpts :: [StepOpt], + StepOpt :: 'config' | 'keep_inactive', + EventHandlers :: EH | [EH], + EH :: atom() | {atom(), InitArgs} | {[atom()], InitArgs}, + InitArgs :: [term()], + InclDirs :: [string()] | string(), + CreatePrivDir :: 
'auto_per_run' | 'auto_per_tc' | 'manual_per_tc', + M :: integer(), + N :: integer(), + DurTime :: HHMMSS, + HHMMSS :: string(), + StopTime :: YYMoMoDDHHMMSS | HHMMSS, + YYMoMoDDHHMMSS :: string(), + ForceStop :: 'skip_rest' | boolean(), + DecryptKeyOrFile :: {'key', DecryptKey} | {'file', DecryptFile}, + DecryptKey :: string(), + DecryptFile :: string(), + LogOpts :: [LogOpt], + LogOpt :: 'no_nl' | 'no_src', + VLevels :: VLevel | [{Category, VLevel}], + VLevel :: integer(), + Category :: atom(), + KeepSpec :: 'all' | pos_integer(), + CTHs :: [CTHModule | {CTHModule, CTHInitArgs}], + CTHsOrder :: atom(), + CTHModule :: atom(), + CTHInitArgs :: term(). +run_test(Node,Opts) -> + run_test([{Node,Opts}]). + +%-doc false. +run_test({Node,Opts}) -> + run_test([{Node,Opts}]); +run_test(NodeOptsList) when is_list(NodeOptsList) -> + start_master(NodeOptsList). + +%-doc """ +%Tests are spawned on the nodes as specified in `TestSpecs`. Each specification +%in `TestSpec` is handled separately. However, it is also possible to specify a +%list of specifications to be merged into one specification before the tests are +%executed. Any test without a particular node specification is also executed on +%the nodes in `InclNodes`. Nodes in the `ExclNodes` list are excluded from the +%test. +%""". +-spec run(TestSpecs, AllowUserTerms, InclNodes, ExclNodes) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + AllowUserTerms :: boolean(), + InclNodes :: [node()], + ExclNodes :: [node()], + Specs :: [file:filename_all()], + Reason :: term(). 
+run([TS|TestSpecs],AllowUserTerms,InclNodes,ExclNodes) when is_list(TS), + is_list(InclNodes), + is_list(ExclNodes) -> + %% Note: [Spec] means run one test with Spec + %% [Spec1,Spec2] means run two tests separately + %% [[Spec1,Spec2]] means run one test, with the two specs merged + case catch ct_testspec:collect_tests_from_file([TS],InclNodes, + AllowUserTerms) of + {error,Reason} -> + [{error,Reason} | run(TestSpecs,AllowUserTerms,InclNodes,ExclNodes)]; + Tests -> + RunResult = + lists:map( + fun({Specs,TSRec=#testspec{logdir=AllLogDirs, + config=StdCfgFiles, + userconfig=UserCfgFiles, + include=AllIncludes, + init=AllInitOpts, + event_handler=AllEvHs}}) -> + AllCfgFiles = + {StdCfgFiles,UserCfgFiles}, + RunSkipPerNode = + ct_testspec:prepare_tests(TSRec), + RunSkipPerNode2 = + exclude_nodes(ExclNodes,RunSkipPerNode), + TSList = if is_integer(hd(TS)) -> [TS]; + true -> TS end, + {Specs,run_all(RunSkipPerNode2,AllLogDirs, + AllCfgFiles,AllEvHs, + AllIncludes,[],[],AllInitOpts,TSList)} + end, Tests), + RunResult ++ run(TestSpecs,AllowUserTerms,InclNodes,ExclNodes) + end; +run([],_,_,_) -> + []; +run(TS,AllowUserTerms,InclNodes,ExclNodes) when is_list(InclNodes), + is_list(ExclNodes) -> + run([TS],AllowUserTerms,InclNodes,ExclNodes). + +%-doc(#{equiv => run(TestSpecs, false, InclNodes, ExclNodes)}). +-spec run(TestSpecs, InclNodes, ExclNodes) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + InclNodes :: [node()], + ExclNodes :: [node()], + Specs :: [file:filename_all()], + Reason :: term(). +run(TestSpecs,InclNodes,ExclNodes) -> + run(TestSpecs,false,InclNodes,ExclNodes). + +%-doc """ +%Run tests on spawned nodes as specified in `TestSpecs` (see `run/4`). +% +%Equivalent to [`run(TestSpecs, false, [], [])`](`run/4`) if +%called with TestSpecs being list of strings; +% +%Equivalent to [`run([TS], false, [], [])`](`run/4`) if +%called with TS being string. +%""". 
+-spec run(TestSpecs) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + Specs :: [file:filename_all()], + Reason :: term(). +run(TestSpecs=[TS|_]) when is_list(TS) -> + run(TestSpecs,false,[],[]); +run(TS) -> + run([TS],false,[],[]). + + +exclude_nodes([ExclNode|ExNs],RunSkipPerNode) -> + exclude_nodes(ExNs,lists:keydelete(ExclNode,1,RunSkipPerNode)); +exclude_nodes([],RunSkipPerNode) -> + RunSkipPerNode. + + +%-doc """ +%Tests are spawned on `Node` according to `TestSpecs`. +%""". +-spec run_on_node(TestSpecs, AllowUserTerms, Node) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + AllowUserTerms :: boolean(), + Node :: node(), + Specs :: [file:filename_all()], + Reason :: term(). +run_on_node([TS|TestSpecs],AllowUserTerms,Node) when is_list(TS),is_atom(Node) -> + case catch ct_testspec:collect_tests_from_file([TS],[Node], + AllowUserTerms) of + {error,Reason} -> + [{error,Reason} | run_on_node(TestSpecs,AllowUserTerms,Node)]; + Tests -> + RunResult = + lists:map( + fun({Specs,TSRec=#testspec{logdir=AllLogDirs, + config=StdCfgFiles, + init=AllInitOpts, + include=AllIncludes, + userconfig=UserCfgFiles, + event_handler=AllEvHs}}) -> + AllCfgFiles = {StdCfgFiles,UserCfgFiles}, + {Run,Skip} = ct_testspec:prepare_tests(TSRec,Node), + TSList = if is_integer(hd(TS)) -> [TS]; + true -> TS end, + {Specs,run_all([{Node,Run,Skip}],AllLogDirs, + AllCfgFiles,AllEvHs, + AllIncludes, [],[],AllInitOpts,TSList)} + end, Tests), + RunResult ++ run_on_node(TestSpecs,AllowUserTerms,Node) + end; +run_on_node([],_,_) -> + []; +run_on_node(TS,AllowUserTerms,Node) when is_atom(Node) -> + run_on_node([TS],AllowUserTerms,Node). + +%-doc(#{equiv => run_on_node(TestSpecs, false, Node)}). 
+-spec run_on_node(TestSpecs, Node) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + Node :: node(), + Specs :: [file:filename_all()], + Reason :: term(). +run_on_node(TestSpecs,Node) -> + run_on_node(TestSpecs,false,Node). + + + +run_all([{Node,Run,Skip}|Rest],AllLogDirs, + {AllStdCfgFiles, AllUserCfgFiles}=AllCfgFiles, + AllEvHs,AllIncludes,NodeOpts,LogDirs,InitOptions,Specs) -> + LogDir = + lists:foldl(fun({N,Dir},_Found) when N == Node -> + Dir; + ({_N,_Dir},Found) -> + Found; + (Dir,".") -> + Dir; + (_Dir,Found) -> + Found + end,".",AllLogDirs), + + StdCfgFiles = + lists:foldr(fun({N,F},Fs) when N == Node -> [F|Fs]; + ({_N,_F},Fs) -> Fs; + (F,Fs) -> [F|Fs] + end,[],AllStdCfgFiles), + UserCfgFiles = + lists:foldr(fun({N,F},Fs) when N == Node -> [{userconfig, F}|Fs]; + ({_N,_F},Fs) -> Fs; + (F,Fs) -> [{userconfig, F}|Fs] + end,[],AllUserCfgFiles), + + Includes = lists:foldr(fun({N,I},Acc) when N =:= Node -> + [I|Acc]; + ({_,_},Acc) -> + Acc; + (I,Acc) -> + [I | Acc] + end, [], AllIncludes), + EvHs = + lists:foldr(fun({N,H,A},Hs) when N == Node -> [{H,A}|Hs]; + ({_N,_H,_A},Hs) -> Hs; + ({H,A},Hs) -> [{H,A}|Hs] + end,[],AllEvHs), + + NO = {Node,[{prepared_tests,{Run,Skip},Specs}, + {ct_hooks, [cth_parallel_ct_detect_failure]}, + {logdir,LogDir}, + {include, Includes}, + {config,StdCfgFiles}, + {event_handler,EvHs}] ++ UserCfgFiles}, + run_all(Rest,AllLogDirs,AllCfgFiles,AllEvHs,AllIncludes, + [NO|NodeOpts],[LogDir|LogDirs],InitOptions,Specs); +run_all([],AllLogDirs,_,AllEvHs,_AllIncludes, + NodeOpts,LogDirs,InitOptions,Specs) -> + Handlers = [{H,A} || {Master,H,A} <- AllEvHs, Master == master], + MasterLogDir = case lists:keysearch(master,1,AllLogDirs) of + {value,{_,Dir}} -> Dir; + false -> "." + end, + log(tty,"Master Logdir","~ts",[MasterLogDir]), + start_master(lists:reverse(NodeOpts),Handlers,MasterLogDir, + LogDirs,InitOptions,Specs), + ok. + + +%-doc """ +%Stops all running tests. 
+%""". +-spec abort() -> 'ok'. +abort() -> + call(abort). + +%-doc """ +%Stops tests on specified nodes. +%""". +-spec abort(Nodes) -> 'ok' + when Nodes :: Node | [Node], + Node :: node(). +abort(Nodes) when is_list(Nodes) -> + call({abort,Nodes}); + +abort(Node) when is_atom(Node) -> + abort([Node]). + +%-doc """ +%Returns test progress. If `Status` is `ongoing`, tests are running on the node +%and are not yet finished. +%""". +-spec progress() -> [{Node, Status}] + when Node :: node(), + Status :: atom(). +progress() -> + call(progress). + +%-doc """ +%Gets a reference to the `Common Test` master event manager. The reference can be +%used to, for example, add a user-specific event handler while tests are running. +% +%_Example:_ +% +%```erlang +%gen_event:add_handler(ct_master:get_event_mgr_ref(), my_ev_h, []) +%``` +%""". +%-doc(#{since => <<"OTP 17.5">>}). +-spec get_event_mgr_ref() -> atom(). +get_event_mgr_ref() -> + ?CT_MEVMGR_REF. + +%-doc """ +%If set to `true`, the `ct_master logs` are written on a primitive HTML format, +%not using the `Common Test` CSS style sheet. +%""". +%-doc(#{since => <<"OTP R15B01">>}). +-spec basic_html(Bool) -> 'ok' + when Bool :: boolean(). +basic_html(Bool) -> + application:set_env(common_test_master, basic_html, Bool), + ok. + +%-doc false. +esc_chars(Bool) -> + application:set_env(common_test_master, esc_chars, Bool), + ok. + +%%%----------------------------------------------------------------- +%%% MASTER, runs on central controlling node. +%%%----------------------------------------------------------------- +start_master(NodeOptsList) -> + start_master(NodeOptsList,[],".",[],[],[]). + +start_master(NodeOptsList,EvHandlers,MasterLogDir,LogDirs,InitOptions,Specs) -> + Master = spawn_link(?MODULE,init_master,[self(),NodeOptsList,EvHandlers, + MasterLogDir,LogDirs, + InitOptions,Specs]), + receive + {Master,Result} -> Result + end. + +%-doc false. 
+init_master(Parent,NodeOptsList,EvHandlers,MasterLogDir,LogDirs, + InitOptions,Specs) -> + case whereis(ct_master) of + undefined -> + register(ct_master,self()), + ct_util:mark_process(), + ok; + _Pid -> + io:format("~nWarning: ct_master already running!~n"), + exit(aborted) +% case io:get_line('[y/n]>') of +% "y\n" -> +% ok; +% "n\n" -> +% exit(aborted); +% _ -> +% init_master(NodeOptsList,LogDirs) +% end + end, + + %% start master logger + {MLPid,_} = ct_master_logs:start(MasterLogDir, + [N || {N,_} <- NodeOptsList]), + log(all,"Master Logger process started","~w",[MLPid]), + + case Specs of + [] -> ok; + _ -> + SpecsStr = lists:map(fun(Name) -> + Name ++ " " + end,Specs), + ct_master_logs:log("Test Specification file(s)","~ts", + [lists:flatten(SpecsStr)]) + end, + + %% start master event manager and add default handler + {ok, _} = start_ct_master_event(), + ct_master_event:add_handler(), + %% add user handlers for master event manager + Add = fun({H,Args}) -> + log(all,"Adding Event Handler","~w",[H]), + case gen_event:add_handler(?CT_MEVMGR_REF,H,Args) of + ok -> ok; + {'EXIT',Why} -> exit(Why); + Other -> exit({event_handler,Other}) + end + end, + lists:foreach(Add,EvHandlers), + + %% double check event manager is started and registered + case whereis(?CT_MEVMGR) of + undefined -> + exit({?CT_MEVMGR,undefined}); + Pid when is_pid(Pid) -> + ok + end, + init_master1(Parent,NodeOptsList,InitOptions,LogDirs). + +start_ct_master_event() -> + case ct_master_event:start_link() of + {error, {already_started, Pid}} -> + {ok, Pid}; + Else -> + Else + end. + +init_master1(Parent,NodeOptsList,InitOptions,LogDirs) -> + {Inaccessible,NodeOptsList1,InitOptions1} = init_nodes(NodeOptsList, + InitOptions), + case Inaccessible of + [] -> + init_master2(Parent,NodeOptsList,LogDirs); + _ -> + io:format("~nThe following nodes are inaccessible: ~p~n~n", + [Inaccessible]), + io:format("Proceed(p), Rescan(r) or Abort(a)? 
"), + case io:get_line('[p/r/a]>') of + "p\n" -> + log(html,"Inaccessible Nodes", + "Proceeding without: ~p",[Inaccessible]), + init_master2(Parent,NodeOptsList1,LogDirs); + "r\n" -> + init_master1(Parent,NodeOptsList,InitOptions1,LogDirs); + _ -> + log(html,"Aborting Tests","",[]), + ct_master_event:stop(), + ct_master_logs:stop(), + exit(aborted) + end + end. + +init_master2(Parent,NodeOptsList,LogDirs) -> + process_flag(trap_exit,true), + Cookie = erlang:get_cookie(), + log(all,"Cookie","~tw",[Cookie]), + log(all,"Starting Tests", + "Tests starting on: ~p",[[N || {N,_} <- NodeOptsList]]), + SpawnAndMon = + fun({Node,Opts}) -> + monitor_node(Node,true), + log(all,"Test Info","Starting test(s) on ~w...",[Node]), + {spawn_link(Node,?MODULE,init_node_ctrl,[self(),Cookie,Opts]), + Node} + end, + NodeCtrlPids = lists:map(SpawnAndMon,NodeOptsList), + Result = master_loop(#state{node_ctrl_pids=NodeCtrlPids, + logdirs=LogDirs}), + Parent ! {self(),Result}. + +master_loop(#state{node_ctrl_pids=[], + logdirs=LogDirs, + results=Finished}) -> + Str = + lists:map(fun({Node,Result}) -> + io_lib:format("~-40.40.*ts~tp\n", + [$_,atom_to_list(Node),Result]) + end,lists:reverse(Finished)), + log(all,"TEST RESULTS","~ts", [Str]), + log(all,"Info","Updating log files",[]), + refresh_logs(LogDirs,[]), + + ct_master_event:stop(), + ct_master_logs:stop(), + ok; + +master_loop(State=#state{node_ctrl_pids=NodeCtrlPids, + results=Results, + locks=Locks, + blocked=Blocked}) -> + receive + {'EXIT',Pid,Reason} -> + case get_node(Pid,NodeCtrlPids) of + {Node,NodeCtrlPids1} -> + monitor_node(Node,false), + case Reason of + normal -> + log(all,"Test Info", + "Test(s) on node ~w finished.",[Node]), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1}); + Bad -> + Error = + case Bad of + What when What=/=killed,is_atom(What) -> + {error,Bad}; + _ -> + Bad + end, + log(all,"Test Info", + "Test on node ~w failed! 
Reason: ~tp", + [Node,Error]), + {Locks1,Blocked1} = + update_queue(exit,Node,Locks,Blocked), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1, + results=[{Node, + Error}|Results], + locks=Locks1, + blocked=Blocked1}) + end; + undefined -> + %% ignore (but report) exit from master_logger etc + log(all,"Test Info", + "Warning! Process ~w has terminated. Reason: ~tp", + [Pid,Reason]), + master_loop(State) + end; + + {nodedown,Node} -> + case get_pid(Node,NodeCtrlPids) of + {_Pid,NodeCtrlPids1} -> + monitor_node(Node,false), + log(all,"Test Info","No connection to testnode ~w!",[Node]), + {Locks1,Blocked1} = + update_queue(exit,Node,Locks,Blocked), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1, + results=[{Node,nodedown}|Results], + locks=Locks1, + blocked=Blocked1}); + undefined -> + master_loop(State) + end; + + {Pid,{result,Result}} -> + {Node,_} = get_node(Pid,NodeCtrlPids), + master_loop(State#state{results=[{Node,Result}|Results]}); + + {call,progress,From} -> + reply(master_progress(NodeCtrlPids,Results),From), + master_loop(State); + + {call,abort,From} -> + lists:foreach(fun({Pid,Node}) -> + log(all,"Test Info", + "Aborting tests on ~w",[Node]), + exit(Pid,kill) + end,NodeCtrlPids), + reply(ok,From), + master_loop(State); + + {call,{abort,Nodes},From} -> + lists:foreach(fun(Node) -> + case lists:keysearch(Node,2,NodeCtrlPids) of + {value,{Pid,Node}} -> + log(all,"Test Info", + "Aborting tests on ~w",[Node]), + exit(Pid,kill); + false -> + ok + end + end,Nodes), + reply(ok,From), + master_loop(State); + + {call,#event{name=Name,node=Node,data=Data},From} -> + {Op,Lock} = + case Name of + start_make -> + {take,{make,Data}}; + finished_make -> + {release,{make,Data}}; + start_write_file -> + {take,{write_file,Data}}; + finished_write_file -> + {release,{write_file,Data}} + end, + {Locks1,Blocked1} = + update_queue(Op,Node,From,Lock,Locks,Blocked), + if Op == release -> reply(ok,From); + true -> ok + end, + master_loop(State#state{locks=Locks1, + 
blocked=Blocked1}); + + {cast,Event} when is_record(Event,event) -> + ct_master_event:notify(Event), + master_loop(State) + + end. + + +update_queue(take,Node,From,Lock={Op,Resource},Locks,Blocked) -> + %% Locks: [{{Operation,Resource},Node},...] + %% Blocked: [{{Operation,Resource},Node,WaitingPid},...] + case lists:keysearch(Lock,1,Locks) of + {value,{_Lock,Owner}} -> % other node has lock + log(html,"Lock Info","Node ~w blocked on ~w by ~w. Resource: ~tp", + [Node,Op,Owner,Resource]), + Blocked1 = Blocked ++ [{Lock,Node,From}], + {Locks,Blocked1}; + false -> % go ahead + Locks1 = [{Lock,Node}|Locks], + reply(ok,From), + {Locks1,Blocked} + end; + +update_queue(release,Node,_From,Lock={Op,Resource},Locks,Blocked) -> + Locks1 = lists:delete({Lock,Node},Locks), + case lists:keysearch(Lock,1,Blocked) of + {value,E={Lock,SomeNode,WaitingPid}} -> + Blocked1 = lists:delete(E,Blocked), + log(html,"Lock Info","Node ~w proceeds with ~w. Resource: ~tp", + [SomeNode,Op,Resource]), + reply(ok,WaitingPid), % waiting process may start + {Locks1,Blocked1}; + false -> + {Locks1,Blocked} + end. + +update_queue(exit,Node,Locks,Blocked) -> + NodeLocks = lists:foldl(fun({L,N},Ls) when N == Node -> + [L|Ls]; + (_,Ls) -> + Ls + end,[],Locks), + release_locks(Node,NodeLocks,Locks,Blocked). + +release_locks(Node,[Lock|Ls],Locks,Blocked) -> + {Locks1,Blocked1} = update_queue(release,Node,undefined,Lock,Locks,Blocked), + release_locks(Node,Ls,Locks1,Blocked1); +release_locks(_,[],Locks,Blocked) -> + {Locks,Blocked}. + +get_node(Pid,NodeCtrlPids) -> + case lists:keysearch(Pid,1,NodeCtrlPids) of + {value,{Pid,Node}} -> + {Node,lists:keydelete(Pid,1,NodeCtrlPids)}; + false -> + undefined + end. + +get_pid(Node,NodeCtrlPids) -> + case lists:keysearch(Node,2,NodeCtrlPids) of + {value,{Pid,Node}} -> + {Pid,lists:keydelete(Node,2,NodeCtrlPids)}; + false -> + undefined + end. + +ping_nodes(NodeOptions)-> + ping_nodes(NodeOptions, [], []). 
+ +ping_nodes([NO={Node,_Opts}|NOs],Inaccessible,NodeOpts) -> + case net_adm:ping(Node) of + pong -> + ping_nodes(NOs,Inaccessible,[NO|NodeOpts]); + _ -> + ping_nodes(NOs,[Node|Inaccessible],NodeOpts) + end; +ping_nodes([],Inaccessible,NodeOpts) -> + {lists:reverse(Inaccessible),lists:reverse(NodeOpts)}. + +master_progress(NodeCtrlPids,Results) -> + Results ++ lists:map(fun({_Pid,Node}) -> + {Node,ongoing} + end,NodeCtrlPids). + +%% refresh those dirs where more than one node has written logs +refresh_logs([D|Dirs],Refreshed) -> + case lists:member(D,Dirs) of + true -> + case lists:keymember(D,1,Refreshed) of + true -> + refresh_logs(Dirs,Refreshed); + false -> + {ok,Cwd} = file:get_cwd(), + case catch ct_run:refresh_logs(D, unknown) of + {'EXIT',Reason} -> + ok = file:set_cwd(Cwd), + refresh_logs(Dirs,[{D,{error,Reason}}|Refreshed]); + Result -> + refresh_logs(Dirs,[{D,Result}|Refreshed]) + end + end; + false -> + refresh_logs(Dirs,Refreshed) + end; +refresh_logs([],Refreshed) -> + Str = + lists:map(fun({D,Result}) -> + io_lib:format("Refreshing logs in ~tp... ~tp", + [D,Result]) + end,Refreshed), + log(all,"Info","~ts", [Str]). + +%%%----------------------------------------------------------------- +%%% NODE CONTROLLER, runs and controls tests on a test node. +%%%----------------------------------------------------------------- +%-doc false. 
+init_node_ctrl(MasterPid,Cookie,Opts) -> + %% make sure tests proceed even if connection to master is lost + process_flag(trap_exit, true), + ct_util:mark_process(), + MasterNode = node(MasterPid), + group_leader(whereis(user),self()), + io:format("~n********** node_ctrl process ~w started on ~w **********~n", + [self(),node()]), + %% initially this node must have the same cookie as the master node + %% but now we set it explicitly for the connection so that test suites + %% can change the cookie for the node if they wish + case erlang:get_cookie() of + Cookie -> % first time or cookie not changed + erlang:set_cookie(node(MasterPid),Cookie); + _ -> + ok + end, + case whereis(ct_util_server) of + undefined -> ok; + Pid -> exit(Pid,kill) + end, + + %% start a local event manager + {ok, _} = start_ct_event(), + ct_event:add_handler([{master,MasterPid}]), + + %% log("Running test with options: ~tp~n", [Opts]), + Result = case (catch ct:run_test(Opts)) of + ok -> finished_ok; + Other -> Other + end, + + %% stop local event manager + ct_event:stop(), + + case net_adm:ping(MasterNode) of + pong -> + MasterPid ! {self(),{result,Result}}; + pang -> + io:format("Warning! Connection to master node ~w is lost. " + "Can't report result!~n~n", [MasterNode]) + end. + +start_ct_event() -> + case ct_event:start_link() of + {error, {already_started, Pid}} -> + {ok, Pid}; + Else -> + Else + end. + +%%%----------------------------------------------------------------- +%%% Event handling +%%%----------------------------------------------------------------- +%-doc false. +status(MasterPid,Event=#event{name=start_make}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=finished_make}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=start_write_file}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=finished_write_file}) -> + call(MasterPid,Event); +status(MasterPid,Event) -> + cast(MasterPid,Event). 
+ +%%%----------------------------------------------------------------- +%%% Internal +%%%----------------------------------------------------------------- + +log(To,Heading,Str,Args) -> + if To == all ; To == tty -> + Chars = ["=== ",Heading," ===\n", + io_lib:format(Str,Args),"\n"], + io:put_chars(Chars); + true -> + ok + end, + if To == all ; To == html -> + ct_master_logs:log(Heading,Str,Args); + true -> + ok + end. + + +call(Msg) -> + call(whereis(ct_master),Msg). + +call(undefined,_Msg) -> + {error,not_running}; + +call(Pid,Msg) -> + Ref = erlang:monitor(process,Pid), + Pid ! {call,Msg,self()}, + Return = receive + {Pid,Result} -> + Result; + {'DOWN', Ref, _, _, _} -> + {error,master_died} + end, + erlang:demonitor(Ref, [flush]), + Return. + +reply(Result,To) -> + To ! {self(),Result}, + ok. + +init_nodes(NodeOptions, InitOptions)-> + _ = ping_nodes(NodeOptions), + start_nodes(InitOptions), + eval_on_nodes(InitOptions), + {Inaccessible, NodeOptions1}=ping_nodes(NodeOptions), + InitOptions1 = filter_accessible(InitOptions, Inaccessible), + {Inaccessible, NodeOptions1, InitOptions1}. + +% only nodes which are inaccessible now, should be initiated later +filter_accessible(InitOptions, Inaccessible)-> + [{Node,Option}||{Node,Option}<-InitOptions, lists:member(Node, Inaccessible)]. 
+ +start_nodes(InitOptions)-> + lists:foreach(fun({NodeName, Options})-> + [NodeS,HostS]=string:lexemes(atom_to_list(NodeName), "@"), + Node=list_to_atom(NodeS), + Host=list_to_atom(HostS), + HasNodeStart = lists:keymember(node_start, 1, Options), + IsAlive = lists:member(NodeName, nodes()), + case {HasNodeStart, IsAlive} of + {false, false}-> + io:format("WARNING: Node ~w is not alive but has no " + "node_start option~n", [NodeName]); + {false, true}-> + io:format("Node ~w is alive~n", [NodeName]); + {true, false}-> + {node_start, NodeStart} = lists:keyfind(node_start, 1, Options), + {value, {callback_module, Callback}, NodeStart2}= + lists:keytake(callback_module, 1, NodeStart), + case Callback:start(Host, Node, NodeStart2) of + {ok, NodeName} -> + io:format("Node ~w started successfully " + "with callback ~w~n", [NodeName,Callback]); + {error, Reason, _NodeName} -> + io:format("Failed to start node ~w with callback ~w! " + "Reason: ~tp~n", [NodeName, Callback, Reason]) + end; + {true, true}-> + io:format("WARNING: Node ~w is alive but has node_start " + "option~n", [NodeName]) + end + end, + InitOptions). + +eval_on_nodes(InitOptions)-> + lists:foreach(fun({NodeName, Options})-> + HasEval = lists:keymember(eval, 1, Options), + IsAlive = lists:member(NodeName, nodes()), + case {HasEval, IsAlive} of + {false,_}-> + ok; + {true,false}-> + io:format("WARNING: Node ~w is not alive but has eval " + "option~n", [NodeName]); + {true,true}-> + {eval, MFAs} = lists:keyfind(eval, 1, Options), + evaluate(NodeName, MFAs) + end + end, + InitOptions). + +evaluate(Node, [{M,F,A}|MFAs])-> + case rpc:call(Node, M, F, A) of + {badrpc,Reason}-> + io:format("WARNING: Failed to call ~w:~tw/~w on node ~w " + "due to ~tp~n", [M,F,length(A),Node,Reason]); + Result-> + io:format("Called ~w:~tw/~w on node ~w, result: ~tp~n", + [M,F,length(A),Node,Result]) + end, + evaluate(Node, MFAs); +evaluate(_Node, [])-> + ok. + +%cast(Msg) -> +% cast(whereis(ct_master),Msg). 
+ +cast(undefined,_Msg) -> + {error,not_running}; + +cast(Pid,Msg) -> + Pid ! {cast,Msg}, + ok. diff --git a/deps/rabbitmq_ct_helpers/src/cth_parallel_ct_detect_failure.erl b/deps/rabbitmq_ct_helpers/src/cth_parallel_ct_detect_failure.erl new file mode 100644 index 00000000000..428e37468bf --- /dev/null +++ b/deps/rabbitmq_ct_helpers/src/cth_parallel_ct_detect_failure.erl @@ -0,0 +1,23 @@ +-module(cth_parallel_ct_detect_failure). + +-export([init/2]). +-export([on_tc_fail/4]). +-export([has_failures/0]). + +init(_Id, _Opts) -> + {ok, undefined}. + +%% We silence failures in end_per_suite/end_per_group +%% to mirror the default behavior. It should be modified +%% so that they are configured failures as well, but can +%% be done at a later time. +on_tc_fail(_SuiteName, end_per_suite, _Reason, CTHState) -> + CTHState; +on_tc_fail(_SuiteName, {end_per_group, _GroupName}, _Reason, CTHState) -> + CTHState; +on_tc_fail(_SuiteName, _TestName, _Reason, CTHState) -> + persistent_term:put(?MODULE, true), + CTHState. + +has_failures() -> + persistent_term:get(?MODULE, false). 
diff --git a/deps/rabbitmq_mqtt/Makefile b/deps/rabbitmq_mqtt/Makefile index 63427c94932..48dcca6c934 100644 --- a/deps/rabbitmq_mqtt/Makefile +++ b/deps/rabbitmq_mqtt/Makefile @@ -82,12 +82,22 @@ define ct_master.erl peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), - ct_master:run("$1"), + ct_master_fork:run("$1"), + Fail1 = peer:call(Pid1, cth_parallel_ct_detect_failure, has_failures, []), + Fail2 = peer:call(Pid2, cth_parallel_ct_detect_failure, has_failures, []), + Fail3 = peer:call(Pid3, cth_parallel_ct_detect_failure, has_failures, []), + Fail4 = peer:call(Pid4, cth_parallel_ct_detect_failure, has_failures, []), peer:stop(Pid4), peer:stop(Pid3), peer:stop(Pid2), peer:stop(Pid1), - halt() + if + Fail1 -> halt(1); + Fail2 -> halt(2); + Fail3 -> halt(3); + Fail4 -> halt(4); + true -> halt(0) + end endef PARALLEL_CT_SET_1_A = auth retainer From c93c699b619a518577189675760c6526ecbc7e57 Mon Sep 17 00:00:00 2001 From: Karl Nilsson Date: Thu, 3 Oct 2024 12:15:29 +0100 Subject: [PATCH 28/33] QQ: fix bug with discards using a consumer_id() Fixes a pattern matching bug for discards that come in after a consumer has been cancelled. Because the rabbit_fifo_client does not keep the integer consumer key after cancellation, late acks, returns, and discards use the full {CTag, Pid} consumer id version. As this is a state machine change the machine version has been increased to 5. The same bug is present for the `modify` command also however as AMQP does not allow late settlements we don't have to make this fix conditional on the machine version as it cannot happen. 
--- deps/rabbit/src/rabbit_fifo.erl | 39 +++++++++++++----- deps/rabbit/test/quorum_queue_SUITE.erl | 32 +++++++++++++++ deps/rabbit/test/rabbit_fifo_SUITE.erl | 53 +++++++------------------ 3 files changed, 75 insertions(+), 49 deletions(-) diff --git a/deps/rabbit/src/rabbit_fifo.erl b/deps/rabbit/src/rabbit_fifo.erl index 1960eaf03a6..b0f0a43967f 100644 --- a/deps/rabbit/src/rabbit_fifo.erl +++ b/deps/rabbit/src/rabbit_fifo.erl @@ -265,15 +265,27 @@ apply(Meta, #settle{msg_ids = MsgIds, _ -> {State, ok} end; -apply(Meta, #discard{consumer_key = ConsumerKey, - msg_ids = MsgIds}, +apply(#{machine_version := 4} = Meta, + #discard{consumer_key = ConsumerKey, + msg_ids = MsgIds}, #?STATE{consumers = Consumers } = State0) -> + %% buggy version that would have not found the consumer if the ConsumerKey + %% was a consumer_id() case find_consumer(ConsumerKey, Consumers) of {ConsumerKey, #consumer{} = Con} -> discard(Meta, MsgIds, ConsumerKey, Con, true, #{}, State0); _ -> {State0, ok} end; +apply(Meta, #discard{consumer_key = ConsumerKey, + msg_ids = MsgIds}, + #?STATE{consumers = Consumers } = State0) -> + case find_consumer(ConsumerKey, Consumers) of + {ActualConsumerKey, #consumer{} = Con} -> + discard(Meta, MsgIds, ActualConsumerKey, Con, true, #{}, State0); + _ -> + {State0, ok} + end; apply(Meta, #return{consumer_key = ConsumerKey, msg_ids = MsgIds}, #?STATE{consumers = Cons} = State) -> @@ -291,13 +303,14 @@ apply(Meta, #modify{consumer_key = ConsumerKey, msg_ids = MsgIds}, #?STATE{consumers = Cons} = State) -> case find_consumer(ConsumerKey, Cons) of - {ConsumerKey, #consumer{checked_out = Checked}} + {ActualConsumerKey, #consumer{checked_out = Checked}} when Undel == false -> - return(Meta, ConsumerKey, MsgIds, DelFailed, + return(Meta, ActualConsumerKey, MsgIds, DelFailed, Anns, Checked, [], State); - {ConsumerKey, #consumer{} = Con} + {ActualConsumerKey, #consumer{} = Con} when Undel == true -> - discard(Meta, MsgIds, ConsumerKey, Con, DelFailed, Anns, State); 
+ discard(Meta, MsgIds, ActualConsumerKey, + Con, DelFailed, Anns, State); _ -> {State, ok} end; @@ -898,13 +911,14 @@ get_checked_out(CKey, From, To, #?STATE{consumers = Consumers}) -> end. -spec version() -> pos_integer(). -version() -> 4. +version() -> 5. which_module(0) -> rabbit_fifo_v0; which_module(1) -> rabbit_fifo_v1; which_module(2) -> rabbit_fifo_v3; which_module(3) -> rabbit_fifo_v3; -which_module(4) -> ?MODULE. +which_module(4) -> ?MODULE; +which_module(5) -> ?MODULE. -define(AUX, aux_v3). @@ -2520,7 +2534,7 @@ make_checkout({_, _} = ConsumerId, Spec0, Meta) -> make_settle(ConsumerKey, MsgIds) when is_list(MsgIds) -> #settle{consumer_key = ConsumerKey, msg_ids = MsgIds}. --spec make_return(consumer_id(), [msg_id()]) -> protocol(). +-spec make_return(consumer_key(), [msg_id()]) -> protocol(). make_return(ConsumerKey, MsgIds) -> #return{consumer_key = ConsumerKey, msg_ids = MsgIds}. @@ -2528,7 +2542,7 @@ make_return(ConsumerKey, MsgIds) -> is_return(Command) -> is_record(Command, return). --spec make_discard(consumer_id(), [msg_id()]) -> protocol(). +-spec make_discard(consumer_key(), [msg_id()]) -> protocol(). make_discard(ConsumerKey, MsgIds) -> #discard{consumer_key = ConsumerKey, msg_ids = MsgIds}. @@ -2701,7 +2715,10 @@ convert(Meta, 1, To, State) -> convert(Meta, 2, To, State) -> convert(Meta, 3, To, rabbit_fifo_v3:convert_v2_to_v3(State)); convert(Meta, 3, To, State) -> - convert(Meta, 4, To, convert_v3_to_v4(Meta, State)). + convert(Meta, 4, To, convert_v3_to_v4(Meta, State)); +convert(Meta, 4, To, State) -> + %% no conversion needed, this version only includes a logic change + convert(Meta, 5, To, State). 
smallest_raft_index(#?STATE{messages = Messages, ra_indexes = Indexes, diff --git a/deps/rabbit/test/quorum_queue_SUITE.erl b/deps/rabbit/test/quorum_queue_SUITE.erl index ff2ebac93de..deaf095409d 100644 --- a/deps/rabbit/test/quorum_queue_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_SUITE.erl @@ -177,6 +177,7 @@ all_tests() -> per_message_ttl_expiration_too_high, consumer_priorities, cancel_consumer_gh_3729, + cancel_consumer_gh_12424, cancel_and_consume_with_same_tag, validate_messages_on_queue, amqpl_headers, @@ -3748,6 +3749,37 @@ cancel_consumer_gh_3729(Config) -> ok = rabbit_ct_client_helpers:close_channel(Ch). +cancel_consumer_gh_12424(Config) -> + QQ = ?config(queue_name, Config), + + Server = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + Ch = rabbit_ct_client_helpers:open_channel(Config, Server), + + ExpectedDeclareRslt0 = #'queue.declare_ok'{queue = QQ, message_count = 0, consumer_count = 0}, + DeclareRslt0 = declare(Ch, QQ, [{<<"x-queue-type">>, longstr, <<"quorum">>}]), + ?assertMatch(ExpectedDeclareRslt0, DeclareRslt0), + + ok = publish(Ch, QQ), + + ok = subscribe(Ch, QQ, false), + + DeliveryTag = receive + {#'basic.deliver'{delivery_tag = DT}, _} -> + DT + after 5000 -> + flush(100), + ct:fail("basic.deliver timeout") + end, + + ok = cancel(Ch), + + R = #'basic.reject'{delivery_tag = DeliveryTag, requeue = false}, + ok = amqp_channel:cast(Ch, R), + wait_for_messages(Config, [[QQ, <<"0">>, <<"0">>, <<"0">>]]), + + ok. + + %% Test the scenario where a message is published to a quorum queue cancel_and_consume_with_same_tag(Config) -> %% https://github.com/rabbitmq/rabbitmq-server/issues/5927 QQ = ?config(queue_name, Config), diff --git a/deps/rabbit/test/rabbit_fifo_SUITE.erl b/deps/rabbit/test/rabbit_fifo_SUITE.erl index 8d45aecca10..e14b9406eee 100644 --- a/deps/rabbit/test/rabbit_fifo_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_SUITE.erl @@ -42,12 +42,12 @@ groups() -> ]. 
init_per_group(tests, Config) -> - [{machine_version, 4} | Config]; + [{machine_version, 5} | Config]; init_per_group(machine_version_conversion, Config) -> Config. init_per_testcase(_Testcase, Config) -> - FF = ?config(machine_version, Config) == 4, + FF = ?config(machine_version, Config) == 5, ok = meck:new(rabbit_feature_flags, [passthrough]), meck:expect(rabbit_feature_flags, is_enabled, fun (_) -> FF end), Config. @@ -804,6 +804,19 @@ discarded_message_with_dead_letter_handler_emits_log_effect_test(Config) -> ok. +discard_after_cancel_test(Config) -> + Cid = {?FUNCTION_NAME_B, self()}, + {State0, _} = enq(Config, 1, 1, first, test_init(test)), + {State1, #{key := _CKey, + next_msg_id := MsgId}, _Effects1} = + checkout(Config, ?LINE, Cid, 10, State0), + {State2, _, _} = apply(meta(Config, ?LINE), + rabbit_fifo:make_checkout(Cid, cancel, #{}), State1), + {State, _, _} = apply(meta(Config, ?LINE), + rabbit_fifo:make_discard(Cid, [MsgId]), State2), + ct:pal("State ~p", [State]), + ok. + enqueued_msg_with_delivery_count_test(Config) -> State00 = init(#{name => test, queue_resource => rabbit_misc:r(<<"/">>, queue, <<"test">>), @@ -2786,45 +2799,9 @@ modify_test(Config) -> ok. -ttb_test(Config) -> - S0 = init(#{name => ?FUNCTION_NAME, - queue_resource => - rabbit_misc:r("/", queue, ?FUNCTION_NAME_B)}), - - - S1 = do_n(5_000_000, - fun (N, Acc) -> - I = (5_000_000 - N), - element(1, enq(Config, I, I, ?FUNCTION_NAME_B, Acc)) - end, S0), - - - - {T1, _Res} = timer:tc(fun () -> - do_n(100, fun (_, S) -> - term_to_binary(S), - S1 end, S1) - end), - ct:pal("T1 took ~bus", [T1]), - - - {T2, _} = timer:tc(fun () -> - do_n(100, fun (_, S) -> term_to_iovec(S), S1 end, S1) - end), - ct:pal("T2 took ~bus", [T2]), - - ok. - %% Utility %% -do_n(0, _, A) -> - A; -do_n(N, Fun, A0) -> - A = Fun(N, A0), - do_n(N-1, Fun, A). - - init(Conf) -> rabbit_fifo:init(Conf). make_register_enqueuer(Pid) -> rabbit_fifo:make_register_enqueuer(Pid). 
apply(Meta, Entry, State) -> rabbit_fifo:apply(Meta, Entry, State). From 4c991a194f15bd9731c68c0d43ea6f5a69433cc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arnaud=20Cogolu=C3=A8gnes?= Date: Mon, 7 Oct 2024 16:43:06 +0200 Subject: [PATCH 29/33] Set broker version to 4.1.0-alpha.1 in dev Docker image --- .github/workflows/oci-arm64-make.yaml | 2 +- .github/workflows/oci-make.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/oci-arm64-make.yaml b/.github/workflows/oci-arm64-make.yaml index 8af0a78ed11..884da30fba9 100644 --- a/.github/workflows/oci-arm64-make.yaml +++ b/.github/workflows/oci-arm64-make.yaml @@ -45,7 +45,7 @@ jobs: - name: make package-generic-unix if: steps.authorized.outputs.authorized == 'true' run: | - make package-generic-unix PROJECT_VERSION=4.0.0 + make package-generic-unix PROJECT_VERSION=4.1.0-alpha.1 - name: Upload package-generic-unix if: steps.authorized.outputs.authorized == 'true' uses: actions/upload-artifact@v4.3.1 diff --git a/.github/workflows/oci-make.yaml b/.github/workflows/oci-make.yaml index 3d631d7f7ae..f8b9611c1c2 100644 --- a/.github/workflows/oci-make.yaml +++ b/.github/workflows/oci-make.yaml @@ -38,7 +38,7 @@ jobs: - name: make package-generic-unix if: steps.authorized.outputs.authorized == 'true' run: | - make package-generic-unix PROJECT_VERSION=4.0.0 + make package-generic-unix PROJECT_VERSION=4.1.0-alpha.1 - name: Upload package-generic-unix if: steps.authorized.outputs.authorized == 'true' uses: actions/upload-artifact@v4.3.1 From f556f11c62647fcf1633e40f49901bbce7170191 Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 7 Oct 2024 17:12:26 +0200 Subject: [PATCH 30/33] Support AMQP filter expressions (#12415) * Support AMQP filter expressions ## What? 
This PR implements the following property filter expressions for AMQP clients consuming from streams as defined in [AMQP Filter Expressions Version 1.0 Working Draft 09](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=66227): * properties filters [section 4.2.4] * application-properties filters [section 4.2.5] String prefix and suffix matching is also supported. This PR also fixes a bug where RabbitMQ would accept wrong filters. Specifically, prior to this PR the values of the filter-set's map were allowed to be symbols. However, "every value MUST be either null or of a described type which provides the archetype filter." ## Why? This feature adds the ability to RabbitMQ to have multiple concurrent clients each consuming only a subset of messages while maintaining message order. This feature also reduces network traffic between RabbitMQ and clients by only dispatching those messages that the clients are actually interested in. Note that AMQP filter expressions are more fine grained than the [bloom filter based stream filtering](https://www.rabbitmq.com/blog/2023/10/16/stream-filtering) because * they do not suffer false positives * the unit of filtering is per-message instead of per-chunk * matching can be performed on **multiple** values in the properties and application-properties sections * prefix and suffix matching on the actual values is supported. Both, AMQP filter expressions and bloom filters can be used together. ## How? If a filter isn't valid, RabbitMQ ignores the filter. RabbitMQ only replies with filters it actually supports and validated successfully to comply with: "The receiving endpoint sets its desired filter, the sending endpoint [RabbitMQ] sets the filter actually in place (including any filters defaulted at the node)." * Delete streams test case The test suite constructed a wrong filter-set. Specifically the value of the filter-set didn't use a described type as mandated by the spec. 
Using https://azure.github.io/amqpnetlite/api/Amqp.Types.DescribedValue.html throws errors that the descriptor can't be encoded. Given that this code path is already tests via the amqp_filtex_SUITE, this F# test gets therefore deleted. * Re-introduce the AMQP filter-set bug Since clients might rely on the wrong filter-set value type, we support the bug behind a deprecated feature flag and gradually remove support this bug. * Revert "Delete streams test case" This reverts commit c95cfeaef74160894050ae51a563bf839384d2d7. --- .../src/amqp10_client_session.erl | 8 +- deps/amqp10_client/src/amqp10_msg.erl | 5 +- deps/amqp10_common/app.bzl | 2 +- deps/amqp10_common/include/amqp10_filtex.hrl | 15 + deps/rabbit/BUILD.bazel | 16 + deps/rabbit/Makefile | 2 +- deps/rabbit/app.bzl | 20 + deps/rabbit/ct.test.spec | 1 + deps/rabbit/src/mc.erl | 2 +- deps/rabbit/src/mc_amqp.erl | 42 +- deps/rabbit/src/rabbit_amqp_filtex.erl | 196 ++++++ deps/rabbit/src/rabbit_amqp_reader.erl | 6 +- deps/rabbit/src/rabbit_amqp_session.erl | 188 +++--- deps/rabbit/src/rabbit_amqp_util.erl | 11 +- deps/rabbit/src/rabbit_queue_type.erl | 8 + deps/rabbit/src/rabbit_quorum_queue.erl | 2 +- deps/rabbit/src/rabbit_stream_queue.erl | 62 +- deps/rabbit/test/amqp_address_SUITE.erl | 22 +- deps/rabbit/test/amqp_auth_SUITE.erl | 31 +- deps/rabbit/test/amqp_client_SUITE.erl | 137 +--- deps/rabbit/test/amqp_filtex_SUITE.erl | 591 ++++++++++++++++++ deps/rabbit/test/amqp_utils.erl | 139 ++++ .../test/protocol_interop_SUITE.erl | 71 ++- moduleindex.yaml | 1 + 24 files changed, 1275 insertions(+), 303 deletions(-) create mode 100644 deps/amqp10_common/include/amqp10_filtex.hrl create mode 100644 deps/rabbit/src/rabbit_amqp_filtex.erl create mode 100644 deps/rabbit/test/amqp_filtex_SUITE.erl create mode 100644 deps/rabbit/test/amqp_utils.erl diff --git a/deps/amqp10_client/src/amqp10_client_session.erl b/deps/amqp10_client/src/amqp10_client_session.erl index c1e5eb46214..981e291a385 100644 --- 
a/deps/amqp10_client/src/amqp10_client_session.erl +++ b/deps/amqp10_client/src/amqp10_client_session.erl @@ -737,15 +737,13 @@ translate_terminus_durability(configuration) -> 1; translate_terminus_durability(unsettled_state) -> 2. translate_filters(Filters) - when is_map(Filters) andalso - map_size(Filters) == 0 -> + when map_size(Filters) =:= 0 -> undefined; -translate_filters(Filters) - when is_map(Filters) -> +translate_filters(Filters) -> {map, maps:fold( fun - (<<"apache.org:legacy-amqp-headers-binding:map">> = K, V, Acc) when is_map(V) -> + (<<"apache.org:legacy-amqp-headers-binding:map">> = K, V, Acc) when is_map(V) -> %% special case conversion Key = sym(K), [{Key, {described, Key, translate_legacy_amqp_headers_binding(V)}} | Acc]; diff --git a/deps/amqp10_client/src/amqp10_msg.erl b/deps/amqp10_client/src/amqp10_msg.erl index fa046cc6065..0f60c9bb8c2 100644 --- a/deps/amqp10_client/src/amqp10_msg.erl +++ b/deps/amqp10_client/src/amqp10_msg.erl @@ -433,7 +433,10 @@ wrap_ap_value(V) when is_integer(V) -> case V < 0 of true -> {int, V}; false -> {uint, V} - end. + end; +wrap_ap_value(V) when is_number(V) -> + %% AMQP double and Erlang float are both 64-bit. + {double, V}. 
%% LOCAL header_value(durable, undefined) -> false; diff --git a/deps/amqp10_common/app.bzl b/deps/amqp10_common/app.bzl index a233c945ceb..5e41032a8eb 100644 --- a/deps/amqp10_common/app.bzl +++ b/deps/amqp10_common/app.bzl @@ -72,7 +72,7 @@ def all_srcs(name = "all_srcs"): ) filegroup( name = "public_hdrs", - srcs = ["include/amqp10_framing.hrl", "include/amqp10_types.hrl"], + srcs = ["include/amqp10_filtex.hrl", "include/amqp10_framing.hrl", "include/amqp10_types.hrl"], ) filegroup( name = "private_hdrs", diff --git a/deps/amqp10_common/include/amqp10_filtex.hrl b/deps/amqp10_common/include/amqp10_filtex.hrl new file mode 100644 index 00000000000..a1743ea9669 --- /dev/null +++ b/deps/amqp10_common/include/amqp10_filtex.hrl @@ -0,0 +1,15 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + + +%% AMQP Filter Expressions Version 1.0 Working Draft 09 +%% https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=66227 + +-define(DESCRIPTOR_NAME_PROPERTIES_FILTER, <<"amqp:properties-filter">>). +-define(DESCRIPTOR_CODE_PROPERTIES_FILTER, 16#173). + +-define(DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER, <<"amqp:application-properties-filter">>). +-define(DESCRIPTOR_CODE_APPLICATION_PROPERTIES_FILTER, 16#174). 
diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index 9bebe9be3ed..d9910dc90e1 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -1207,6 +1207,7 @@ rabbitmq_integration_suite( name = "amqp_client_SUITE", size = "large", additional_beam = [ + ":test_amqp_utils_beam", ":test_event_recorder_beam", ], shard_count = 3, @@ -1215,6 +1216,16 @@ rabbitmq_integration_suite( ], ) +rabbitmq_integration_suite( + name = "amqp_filtex_SUITE", + additional_beam = [ + ":test_amqp_utils_beam", + ], + runtime_deps = [ + "//deps/rabbitmq_amqp_client:erlang_app", + ], +) + rabbitmq_integration_suite( name = "amqp_proxy_protocol_SUITE", size = "medium", @@ -1235,6 +1246,7 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "amqp_auth_SUITE", additional_beam = [ + ":test_amqp_utils_beam", ":test_event_recorder_beam", ], shard_count = 2, @@ -1246,6 +1258,9 @@ rabbitmq_integration_suite( rabbitmq_integration_suite( name = "amqp_address_SUITE", shard_count = 2, + additional_beam = [ + ":test_amqp_utils_beam", + ], runtime_deps = [ "//deps/rabbitmq_amqp_client:erlang_app", ], @@ -1358,6 +1373,7 @@ eunit( ":test_clustering_utils_beam", ":test_event_recorder_beam", ":test_rabbit_ct_hook_beam", + ":test_amqp_utils_beam", ], target = ":test_erlang_app", test_env = { diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index 24110ce28db..aad618f4211 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -258,7 +258,7 @@ define ct_master.erl endef PARALLEL_CT_SET_1_A = amqp_client unit_cluster_formation_locking_mocks unit_cluster_formation_sort_nodes unit_collections unit_config_value_encryption unit_connection_tracking -PARALLEL_CT_SET_1_B = amqp_address amqp_auth amqp_credit_api_v2 amqp_system signal_handling single_active_consumer unit_access_control_authn_authz_context_propagation unit_access_control_credential_validation unit_amqp091_content_framing unit_amqp091_server_properties unit_app_management +PARALLEL_CT_SET_1_B = 
amqp_address amqp_auth amqp_credit_api_v2 amqp_filtex amqp_system signal_handling single_active_consumer unit_access_control_authn_authz_context_propagation unit_access_control_credential_validation unit_amqp091_content_framing unit_amqp091_server_properties unit_app_management PARALLEL_CT_SET_1_C = amqp_proxy_protocol amqpl_consumer_ack amqpl_direct_reply_to backing_queue bindings rabbit_db_maintenance rabbit_db_msup rabbit_db_policy rabbit_db_queue rabbit_db_topic_exchange rabbit_direct_reply_to_prop cluster_limit cluster_minority term_to_binary_compat_prop topic_permission transactions unicode unit_access_control PARALLEL_CT_SET_1_D = amqqueue_backward_compatibility channel_interceptor channel_operation_timeout classic_queue classic_queue_prop config_schema peer_discovery_dns peer_discovery_tmp_hidden_node per_node_limit per_user_connection_channel_limit diff --git a/deps/rabbit/app.bzl b/deps/rabbit/app.bzl index 4832861d978..cf5a2d1769b 100644 --- a/deps/rabbit/app.bzl +++ b/deps/rabbit/app.bzl @@ -45,6 +45,7 @@ def all_beam_files(name = "all_beam_files"): "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_filtex.erl", "src/rabbit_amqp_management.erl", "src/rabbit_amqp_reader.erl", "src/rabbit_amqp_session.erl", @@ -302,6 +303,7 @@ def all_test_beam_files(name = "all_test_beam_files"): "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_filtex.erl", "src/rabbit_amqp_management.erl", "src/rabbit_amqp_reader.erl", "src/rabbit_amqp_session.erl", @@ -578,6 +580,7 @@ def all_srcs(name = "all_srcs"): "src/rabbit_access_control.erl", "src/rabbit_alarm.erl", "src/rabbit_amqp1_0.erl", + "src/rabbit_amqp_filtex.erl", "src/rabbit_amqp_management.erl", "src/rabbit_amqp_reader.erl", "src/rabbit_amqp_session.erl", @@ -2195,3 +2198,20 @@ def test_suite_beam_files(name = "test_suite_beam_files"): erlc_opts = "//:test_erlc_opts", deps = ["//deps/amqp_client:erlang_app"], ) + 
erlang_bytecode( + name = "amqp_filtex_SUITE_beam_files", + testonly = True, + srcs = ["test/amqp_filtex_SUITE.erl"], + outs = ["test/amqp_filtex_SUITE.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + deps = ["//deps/amqp10_common:erlang_app"], + ) + erlang_bytecode( + name = "test_amqp_utils_beam", + testonly = True, + srcs = ["test/amqp_utils.erl"], + outs = ["test/amqp_utils.beam"], + app_name = "rabbit", + erlc_opts = "//:test_erlc_opts", + ) diff --git a/deps/rabbit/ct.test.spec b/deps/rabbit/ct.test.spec index e1027d06105..60d65d2d563 100644 --- a/deps/rabbit/ct.test.spec +++ b/deps/rabbit/ct.test.spec @@ -16,6 +16,7 @@ , amqp_auth_SUITE , amqp_client_SUITE , amqp_credit_api_v2_SUITE +, amqp_filtex_SUITE , amqp_proxy_protocol_SUITE , amqp_system_SUITE , amqpl_consumer_ack_SUITE diff --git a/deps/rabbit/src/mc.erl b/deps/rabbit/src/mc.erl index 465c7054f08..9c23ac13daf 100644 --- a/deps/rabbit/src/mc.erl +++ b/deps/rabbit/src/mc.erl @@ -301,7 +301,7 @@ message_id(BasicMsg) -> mc_compat:message_id(BasicMsg). -spec property(atom(), state()) -> - {utf8, binary()} | undefined. + tagged_value(). property(Property, #?MODULE{protocol = Proto, data = Data}) -> Proto:property(Property, Data); diff --git a/deps/rabbit/src/mc_amqp.erl b/deps/rabbit/src/mc_amqp.erl index be63597c3f9..ed6c4b4145d 100644 --- a/deps/rabbit/src/mc_amqp.erl +++ b/deps/rabbit/src/mc_amqp.erl @@ -21,7 +21,7 @@ -define(MESSAGE_ANNOTATIONS_GUESS_SIZE, 100). --define(SIMPLE_VALUE(V), +-define(IS_SIMPLE_VALUE(V), is_binary(V) orelse is_number(V) orelse is_boolean(V)). @@ -145,16 +145,32 @@ property(Prop, #v1{bare_and_footer = Bin, Props = amqp10_framing:decode(PropsDescribed), property0(Prop, Props). 
-property0(correlation_id, #'v1_0.properties'{correlation_id = Corr}) -> - Corr; -property0(message_id, #'v1_0.properties'{message_id = MsgId}) -> - MsgId; -property0(user_id, #'v1_0.properties'{user_id = UserId}) -> - UserId; -property0(subject, #'v1_0.properties'{subject = Subject}) -> - Subject; -property0(to, #'v1_0.properties'{to = To}) -> - To; +property0(message_id, #'v1_0.properties'{message_id = Val}) -> + Val; +property0(user_id, #'v1_0.properties'{user_id = Val}) -> + Val; +property0(to, #'v1_0.properties'{to = Val}) -> + Val; +property0(subject, #'v1_0.properties'{subject = Val}) -> + Val; +property0(reply_to, #'v1_0.properties'{reply_to = Val}) -> + Val; +property0(correlation_id, #'v1_0.properties'{correlation_id = Val}) -> + Val; +property0(content_type, #'v1_0.properties'{content_type = Val}) -> + Val; +property0(content_encoding, #'v1_0.properties'{content_encoding = Val}) -> + Val; +property0(absolute_expiry_time, #'v1_0.properties'{absolute_expiry_time = Val}) -> + Val; +property0(creation_time, #'v1_0.properties'{creation_time = Val}) -> + Val; +property0(group_id, #'v1_0.properties'{group_id = Val}) -> + Val; +property0(group_sequence, #'v1_0.properties'{group_sequence = Val}) -> + Val; +property0(reply_to_group_id, #'v1_0.properties'{reply_to_group_id = Val}) -> + Val; property0(_Prop, #'v1_0.properties'{}) -> undefined. 
@@ -454,7 +470,7 @@ message_annotations_as_simple_map(#v1{message_annotations = Content}) -> message_annotations_as_simple_map0(Content) -> %% the section record format really is terrible lists:filtermap(fun({{symbol, K}, {_T, V}}) - when ?SIMPLE_VALUE(V) -> + when ?IS_SIMPLE_VALUE(V) -> {true, {K, V}}; (_) -> false @@ -480,7 +496,7 @@ application_properties_as_simple_map( application_properties_as_simple_map0(Content, L) -> %% the section record format really is terrible lists:foldl(fun({{utf8, K}, {_T, V}}, Acc) - when ?SIMPLE_VALUE(V) -> + when ?IS_SIMPLE_VALUE(V) -> [{K, V} | Acc]; ({{utf8, K}, V}, Acc) when V =:= undefined orelse is_boolean(V) -> diff --git a/deps/rabbit/src/rabbit_amqp_filtex.erl b/deps/rabbit/src/rabbit_amqp_filtex.erl new file mode 100644 index 00000000000..bcdd289e472 --- /dev/null +++ b/deps/rabbit/src/rabbit_amqp_filtex.erl @@ -0,0 +1,196 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% AMQP Filter Expressions Version 1.0 Working Draft 09 +%% https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=66227 +-module(rabbit_amqp_filtex). + +-include_lib("amqp10_common/include/amqp10_filtex.hrl"). + +-export([validate/1, + filter/2]). + +-type simple_type() :: number() | binary() | atom(). +-type affix() :: {suffix, non_neg_integer(), binary()} | + {prefix, non_neg_integer(), binary()}. +-type filter_expression_value() :: simple_type() | affix(). +-type filter_expression() :: {properties, [{FieldName :: atom(), filter_expression_value()}]} | + {application_properties, [{binary(), filter_expression_value()}]}. +-type filter_expressions() :: [filter_expression()]. +-export_type([filter_expressions/0]). 
+ +-spec validate(tuple()) -> + {ok, filter_expression()} | error. +validate({described, Descriptor, {map, KVList}}) -> + try validate0(Descriptor, KVList) + catch throw:{?MODULE, _, _} -> + error + end; +validate(_) -> + error. + +-spec filter(filter_expressions(), mc:state()) -> + boolean(). +filter(Filters, Mc) -> + %% "A message will pass through a filter-set if and only if + %% it passes through each of the named filters." [3.5.8] + lists:all(fun(Filter) -> + filter0(Filter, Mc) + end, Filters). + +%%%%%%%%%%%%%%%% +%%% Internal %%% +%%%%%%%%%%%%%%%% + +filter0({properties, KVList}, Mc) -> + %% "The filter evaluates to true if all properties enclosed in the filter expression + %% match the respective properties in the message." + %% [filtex-v1.0-wd09 4.2.4] + lists:all(fun({FieldName, RefVal}) -> + TaggedVal = mc:property(FieldName, Mc), + Val = unwrap(TaggedVal), + match_simple_type(RefVal, Val) + end, KVList); +filter0({application_properties, KVList}, Mc) -> + AppProps = mc:routing_headers(Mc, []), + %% "The filter evaluates to true if all properties enclosed in the filter expression + %% match the respective entries in the application-properties section in the message." + %% [filtex-v1.0-wd09 4.2.5] + lists:all(fun({Key, RefVal}) -> + case AppProps of + #{Key := Val} -> + match_simple_type(RefVal, Val); + _ -> + false + end + end, KVList). + +%% [filtex-v1.0-wd09 4.1.1] +%% "A reference field value in a property filter expression matches +%% its corresponding message metadata field value if: +%% [...] +match_simple_type(null, _Val) -> + %% * The reference field value is NULL + true; +match_simple_type({suffix, SuffixSize, Suffix}, Val) -> + %% * Suffix. The message metadata field matches the expression if the ordinal values of the + %% characters of the suffix expression equal the ordinal values of the same number of + %% characters trailing the message metadata field value. 
+ case is_binary(Val) of + true -> + case Val of + <<_:(size(Val) - SuffixSize)/binary, Suffix:SuffixSize/binary>> -> + true; + _ -> + false + end; + false -> + false + end; +match_simple_type({prefix, PrefixSize, Prefix}, Val) -> + %% * Prefix. The message metadata field matches the expression if the ordinal values of the + %% characters of the prefix expression equal the ordinal values of the same number of + %% characters leading the message metadata field value. + case Val of + <> -> + true; + _ -> + false + end; +match_simple_type(RefVal, Val) -> + %% * the reference field value is of a floating-point or integer number type + %% and the message metadata field is of a different floating-point or integer number type, + %% the reference value and the metadata field value are within the value range of both types, + %% and the values are equal when treated as a floating-point" + RefVal == Val. + +validate0(Descriptor, KVList) when + (Descriptor =:= {symbol, ?DESCRIPTOR_NAME_PROPERTIES_FILTER} orelse + Descriptor =:= {ulong, ?DESCRIPTOR_CODE_PROPERTIES_FILTER}) andalso + KVList =/= [] -> + validate_props(KVList, []); +validate0(Descriptor, KVList0) when + (Descriptor =:= {symbol, ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER} orelse + Descriptor =:= {ulong, ?DESCRIPTOR_CODE_APPLICATION_PROPERTIES_FILTER}) andalso + KVList0 =/= [] -> + KVList = lists:map(fun({{utf8, Key}, {utf8, String}}) -> + {Key, parse_string_modifier_prefix(String)}; + ({{utf8, Key}, TaggedVal}) -> + {Key, unwrap(TaggedVal)} + end, KVList0), + {ok, {application_properties, KVList}}; +validate0(_, _) -> + error. 
+ +validate_props([], Acc) -> + {ok, {properties, lists:reverse(Acc)}}; +validate_props([{{symbol, <<"message-id">>}, TaggedVal} | Rest], Acc) -> + case parse_message_id(TaggedVal) of + {ok, Val} -> + validate_props(Rest, [{message_id, Val} | Acc]); + error -> + error + end; +validate_props([{{symbol, <<"user-id">>}, {binary, Val}} | Rest], Acc) -> + validate_props(Rest, [{user_id, Val} | Acc]); +validate_props([{{symbol, <<"to">>}, {utf8, Val}} | Rest], Acc) -> + validate_props(Rest, [{to, parse_string_modifier_prefix(Val)} | Acc]); +validate_props([{{symbol, <<"subject">>}, {utf8, Val}} | Rest], Acc) -> + validate_props(Rest, [{subject, parse_string_modifier_prefix(Val)} | Acc]); +validate_props([{{symbol, <<"reply-to">>}, {utf8, Val}} | Rest], Acc) -> + validate_props(Rest, [{reply_to, parse_string_modifier_prefix(Val)} | Acc]); +validate_props([{{symbol, <<"correlation-id">>}, TaggedVal} | Rest], Acc) -> + case parse_message_id(TaggedVal) of + {ok, Val} -> + validate_props(Rest, [{correlation_id, Val} | Acc]); + error -> + error + end; +validate_props([{{symbol, <<"content-type">>}, {symbol, Val}} | Rest], Acc) -> + validate_props(Rest, [{content_type, Val} | Acc]); +validate_props([{{symbol, <<"content-encoding">>}, {symbol, Val}} | Rest], Acc) -> + validate_props(Rest, [{content_encoding, Val} | Acc]); +validate_props([{{symbol, <<"absolute-expiry-time">>}, {timestamp, Val}} | Rest], Acc) -> + validate_props(Rest, [{absolute_expiry_time, Val} | Acc]); +validate_props([{{symbol, <<"creation-time">>}, {timestamp, Val}} | Rest], Acc) -> + validate_props(Rest, [{creation_time, Val} | Acc]); +validate_props([{{symbol, <<"group-id">>}, {utf8, Val}} | Rest], Acc) -> + validate_props(Rest, [{group_id, parse_string_modifier_prefix(Val)} | Acc]); +validate_props([{{symbol, <<"group-sequence">>}, {uint, Val}} | Rest], Acc) -> + validate_props(Rest, [{group_sequence, Val} | Acc]); +validate_props([{{symbol, <<"reply-to-group-id">>}, {utf8, Val}} | Rest], Acc) -> + 
validate_props(Rest, [{reply_to_group_id, parse_string_modifier_prefix(Val)} | Acc]); +validate_props(_, _) -> + error. + +parse_message_id({ulong, Val}) -> + {ok, Val}; +parse_message_id({uuid, Val}) -> + {ok, Val}; +parse_message_id({binary, Val}) -> + {ok, Val}; +parse_message_id({utf8, Val}) -> + {ok, parse_string_modifier_prefix(Val)}; +parse_message_id(_) -> + error. + +%% [filtex-v1.0-wd09 4.1.1] +parse_string_modifier_prefix(<<"$s:", Suffix/binary>>) -> + {suffix, size(Suffix), Suffix}; +parse_string_modifier_prefix(<<"$p:", Prefix/binary>>) -> + {prefix, size(Prefix), Prefix}; +parse_string_modifier_prefix(<<"$$", _/binary>> = String) -> + %% "Escape prefix for case-sensitive matching of a string starting with ‘&’" + string:slice(String, 1); +parse_string_modifier_prefix(<<"$", _/binary>> = String) -> + throw({?MODULE, invalid_reference_field_value, String}); +parse_string_modifier_prefix(String) -> + String. + +unwrap({_Tag, V}) -> + V; +unwrap(V) -> + V. diff --git a/deps/rabbit/src/rabbit_amqp_reader.erl b/deps/rabbit/src/rabbit_amqp_reader.erl index 0ad228a4e65..bcfa6a1dcc8 100644 --- a/deps/rabbit/src/rabbit_amqp_reader.erl +++ b/deps/rabbit/src/rabbit_amqp_reader.erl @@ -518,16 +518,16 @@ handle_connection_frame( ok = rabbit_event:notify(connection_created, Infos), ok = rabbit_amqp1_0:register_connection(self()), Caps = [%% https://docs.oasis-open.org/amqp/linkpair/v1.0/cs01/linkpair-v1.0-cs01.html#_Toc51331306 - {symbol, <<"LINK_PAIR_V1_0">>}, + <<"LINK_PAIR_V1_0">>, %% https://docs.oasis-open.org/amqp/anonterm/v1.0/cs01/anonterm-v1.0-cs01.html#doc-anonymous-relay - {symbol, <<"ANONYMOUS-RELAY">>}], + <<"ANONYMOUS-RELAY">>], Open = #'v1_0.open'{ channel_max = {ushort, EffectiveChannelMax}, max_frame_size = {uint, IncomingMaxFrameSize}, %% "the value in idle-time-out SHOULD be half the peer's actual timeout threshold" [2.4.5] idle_time_out = {uint, ReceiveTimeoutMillis div 2}, container_id = {utf8, rabbit_nodes:cluster_name()}, - offered_capabilities 
= {array, symbol, Caps}, + offered_capabilities = rabbit_amqp_util:capabilities(Caps), properties = server_properties()}, ok = send_on_channel0(Sock, Open), State; diff --git a/deps/rabbit/src/rabbit_amqp_session.erl b/deps/rabbit/src/rabbit_amqp_session.erl index 31d5348b56b..3be9ea2b00f 100644 --- a/deps/rabbit/src/rabbit_amqp_session.erl +++ b/deps/rabbit/src/rabbit_amqp_session.erl @@ -30,6 +30,12 @@ }} }). +-rabbit_deprecated_feature( + {amqp_filter_set_bug, + #{deprecation_phase => permitted_by_default, + doc_url => "https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-filter-set" + }}). + %% This is the link credit that we grant to sending clients. %% We are free to choose whatever we want, sending clients must obey. %% Default soft limits / credits in deps/rabbit/Makefile are: @@ -1284,12 +1290,13 @@ handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_SENDER, end; handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, - name = LinkName, - handle = Handle = ?UINT(HandleInt), - source = Source, - snd_settle_mode = SndSettleMode, - rcv_settle_mode = RcvSettleMode, - max_message_size = MaybeMaxMessageSize} = Attach, + name = LinkName, + handle = Handle = ?UINT(HandleInt), + source = Source = #'v1_0.source'{filter = DesiredFilter}, + snd_settle_mode = SndSettleMode, + rcv_settle_mode = RcvSettleMode, + max_message_size = MaybeMaxMessageSize, + properties = Properties}, State0 = #state{queue_states = QStates0, outgoing_links = OutgoingLinks0, permission_cache = PermCache0, @@ -1359,6 +1366,10 @@ handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, credit_api_v1, credit_api_v1} end, + ConsumerArgs0 = parse_attach_properties(Properties), + {EffectiveFilter, ConsumerFilter, ConsumerArgs1} = + parse_filter(DesiredFilter), + ConsumerArgs = ConsumerArgs0 ++ ConsumerArgs1, Spec = #{no_ack => SndSettled, channel_pid => self(), limiter_pid => none, @@ -1366,11 +1377,14 @@ handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, mode => 
Mode, consumer_tag => handle_to_ctag(HandleInt), exclusive_consume => false, - args => consumer_arguments(Attach), + args => ConsumerArgs, + filter => ConsumerFilter, ok_msg => undefined, acting_user => Username}, case rabbit_queue_type:consume(Q, Spec, QStates0) of {ok, QStates} -> + OfferedCaps0 = rabbit_queue_type:amqp_capabilities(QType), + OfferedCaps = rabbit_amqp_util:capabilities(OfferedCaps0), A = #'v1_0.attach'{ name = LinkName, handle = Handle, @@ -1382,10 +1396,13 @@ handle_attach(#'v1_0.attach'{role = ?AMQP_ROLE_RECEIVER, %% will be requeued. That's why the we only support RELEASED as the default outcome. source = Source#'v1_0.source'{ default_outcome = #'v1_0.released'{}, - outcomes = outcomes(Source)}, + outcomes = outcomes(Source), + %% "the sending endpoint sets the filter actually in place" [3.5.3] + filter = EffectiveFilter}, role = ?AMQP_ROLE_SENDER, %% Echo back that we will respect the client's requested max-message-size. - max_message_size = MaybeMaxMessageSize}, + max_message_size = MaybeMaxMessageSize, + offered_capabilities = OfferedCaps}, MaxMessageSize = max_message_size(MaybeMaxMessageSize), Link = #outgoing_link{ queue_name = queue_resource(Vhost, QNameBin), @@ -2705,11 +2722,10 @@ parse_target_v2_string(String) -> end. parse_target_v2_string0(<<"/exchanges/", Rest/binary>>) -> - Key = cp_slash, - Pattern = try persistent_term:get(Key) + Pattern = try persistent_term:get(cp_slash) catch error:badarg -> Cp = binary:compile_pattern(<<"/">>), - ok = persistent_term:put(Key, Cp), + ok = persistent_term:put(cp_slash, Cp), Cp end, case binary:split(Rest, Pattern, [global]) of @@ -2980,87 +2996,89 @@ encode_frames(T, Msg, MaxPayloadSize, Transfers) -> lists:reverse([[T, Msg] | Transfers]) end. -consumer_arguments(#'v1_0.attach'{ - source = #'v1_0.source'{filter = Filter}, - properties = Properties}) -> - properties_to_consumer_args(Properties) ++ - filter_to_consumer_args(Filter). 
- -properties_to_consumer_args({map, KVList}) -> +parse_attach_properties(undefined) -> + []; +parse_attach_properties({map, KVList}) -> Key = {symbol, <<"rabbitmq:priority">>}, case proplists:lookup(Key, KVList) of {Key, Val = {int, _Prio}} -> [mc_amqpl:to_091(<<"x-priority">>, Val)]; _ -> [] - end; -properties_to_consumer_args(_) -> - []. - -filter_to_consumer_args({map, KVList}) -> - filter_to_consumer_args( - [<<"rabbitmq:stream-offset-spec">>, - <<"rabbitmq:stream-filter">>, - <<"rabbitmq:stream-match-unfiltered">>], - KVList, - []); -filter_to_consumer_args(_) -> - []. + end. -filter_to_consumer_args([], _KVList, Acc) -> - Acc; -filter_to_consumer_args([<<"rabbitmq:stream-offset-spec">> = H | T], KVList, Acc) -> - Key = {symbol, H}, - Arg = case keyfind_unpack_described(Key, KVList) of - {_, {timestamp, Ts}} -> - [{<<"x-stream-offset">>, timestamp, Ts div 1000}]; %% 0.9.1 uses second based timestamps - {_, {utf8, Spec}} -> - [{<<"x-stream-offset">>, longstr, Spec}]; %% next, last, first and "10m" etc - {_, {_, Offset}} when is_integer(Offset) -> - [{<<"x-stream-offset">>, long, Offset}]; %% integer offset - _ -> - [] - end, - filter_to_consumer_args(T, KVList, Arg ++ Acc); -filter_to_consumer_args([<<"rabbitmq:stream-filter">> = H | T], KVList, Acc) -> - Key = {symbol, H}, - Arg = case keyfind_unpack_described(Key, KVList) of - {_, {list, Filters0}} when is_list(Filters0) -> - Filters = lists:foldl(fun({utf8, Filter}, L) -> - [{longstr, Filter} | L]; - (_, L) -> - L - end, [], Filters0), - [{<<"x-stream-filter">>, array, Filters}]; - {_, {utf8, Filter}} -> - [{<<"x-stream-filter">>, longstr, Filter}]; - _ -> - [] - end, - filter_to_consumer_args(T, KVList, Arg ++ Acc); -filter_to_consumer_args([<<"rabbitmq:stream-match-unfiltered">> = H | T], KVList, Acc) -> - Key = {symbol, H}, - Arg = case keyfind_unpack_described(Key, KVList) of - {_, MU} when is_boolean(MU) -> - [{<<"x-stream-match-unfiltered">>, bool, MU}]; - _ -> - [] - end, - filter_to_consumer_args(T, 
KVList, Arg ++ Acc); -filter_to_consumer_args([_ | T], KVList, Acc) -> - filter_to_consumer_args(T, KVList, Acc). - -keyfind_unpack_described(Key, KvList) -> - %% filterset values _should_ be described values - %% they aren't always however for historical reasons so we need this bit of - %% code to return a plain value for the given filter key - case lists:keyfind(Key, 1, KvList) of - {Key, {described, Key, Value}} -> - {Key, Value}; - {Key, _} = Kv -> - Kv; +parse_filter(undefined) -> + {undefined, [], []}; +parse_filter({map, DesiredKVList}) -> + {EffectiveKVList, ConsusumerFilter, ConsumerArgs} = + lists:foldr(fun parse_filters/2, {[], [], []}, DesiredKVList), + {{map, EffectiveKVList}, ConsusumerFilter, ConsumerArgs}. + +parse_filters(Filter = {{symbol, _Key}, {described, {symbol, <<"rabbitmq:stream-offset-spec">>}, Value}}, + Acc = {EffectiveFilters, ConsumerFilter, ConsumerArgs}) -> + case Value of + {timestamp, Ts} -> + %% 0.9.1 uses second based timestamps + Arg = {<<"x-stream-offset">>, timestamp, Ts div 1000}, + {[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; + {utf8, Spec} -> + %% next, last, first and "10m" etc + Arg = {<<"x-stream-offset">>, longstr, Spec}, + {[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; + {_Type, Offset} + when is_integer(Offset) andalso Offset >= 0 -> + Arg = {<<"x-stream-offset">>, long, Offset}, + {[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; + _ -> + Acc + end; +parse_filters(Filter = {{symbol, _Key}, {described, {symbol, <<"rabbitmq:stream-filter">>}, Value}}, + Acc = {EffectiveFilters, ConsumerFilter, ConsumerArgs}) -> + case Value of + {list, Filters0} -> + Filters = lists:filtermap(fun({utf8, Filter0}) -> + {true, {longstr, Filter0}}; + (_) -> + false + end, Filters0), + Arg = {<<"x-stream-filter">>, array, Filters}, + {[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; + + {utf8, Filter0} -> + Arg = {<<"x-stream-filter">>, longstr, Filter0}, + 
{[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; + _ -> + Acc + end; +parse_filters(Filter = {{symbol, _Key}, {described, {symbol, <<"rabbitmq:stream-match-unfiltered">>}, Match}}, + {EffectiveFilters, ConsumerFilter, ConsumerArgs}) + when is_boolean(Match) -> + Arg = {<<"x-stream-match-unfiltered">>, bool, Match}, + {[Filter | EffectiveFilters], ConsumerFilter, [Arg | ConsumerArgs]}; +parse_filters({Symbol = {symbol, <<"rabbitmq:stream-", _/binary>>}, Value}, Acc) + when element(1, Value) =/= described -> + case rabbit_deprecated_features:is_permitted(amqp_filter_set_bug) of + true -> + parse_filters({Symbol, {described, Symbol, Value}}, Acc); false -> - false + Acc + end; +parse_filters(Filter = {{symbol, _Key}, Value}, + Acc = {EffectiveFilters, ConsumerFilter, ConsumerArgs}) -> + case rabbit_amqp_filtex:validate(Value) of + {ok, FilterExpression = {FilterType, _}} -> + case proplists:is_defined(FilterType, ConsumerFilter) of + true -> + %% For now, let's prohibit multiple top level filters of the same type + %% (properties or application-properties). There should be no use case. + %% In future, we can allow multiple times the same top level grouping + %% filter expression type (all/any/not). + Acc; + false -> + {[Filter | EffectiveFilters], [FilterExpression | ConsumerFilter], ConsumerArgs} + end; + error -> + Acc end. validate_attach(#'v1_0.attach'{target = #'v1_0.coordinator'{}}) -> diff --git a/deps/rabbit/src/rabbit_amqp_util.erl b/deps/rabbit/src/rabbit_amqp_util.erl index 3257cef9370..e1ef95d77fa 100644 --- a/deps/rabbit/src/rabbit_amqp_util.erl +++ b/deps/rabbit/src/rabbit_amqp_util.erl @@ -8,7 +8,8 @@ -module(rabbit_amqp_util). -include("rabbit_amqp.hrl"). --export([protocol_error/3]). +-export([protocol_error/3, + capabilities/1]). -spec protocol_error(term(), io:format(), [term()]) -> no_return(). 
@@ -17,3 +18,11 @@ protocol_error(Condition, Msg, Args) -> Reason = #'v1_0.error'{condition = Condition, description = {utf8, Description}}, exit(Reason). + +-spec capabilities([binary()]) -> + undefined | {array, symbol, [{symbol, binary()}]}. +capabilities([]) -> + undefined; +capabilities(Capabilities) -> + Caps = [{symbol, C} || C <- Capabilities], + {array, symbol, Caps}. diff --git a/deps/rabbit/src/rabbit_queue_type.erl b/deps/rabbit/src/rabbit_queue_type.erl index 938588da666..219fccdf2f2 100644 --- a/deps/rabbit/src/rabbit_queue_type.erl +++ b/deps/rabbit/src/rabbit_queue_type.erl @@ -54,6 +54,7 @@ fold_state/3, is_policy_applicable/2, is_server_named_allowed/1, + amqp_capabilities/1, arguments/1, arguments/2, notify_decorators/1, @@ -125,6 +126,7 @@ consumer_tag := rabbit_types:ctag(), exclusive_consume => boolean(), args => rabbit_framing:amqp_table(), + filter => rabbit_amqp_filtex:filter_expressions(), ok_msg := term(), acting_user := rabbit_types:username()}. -type cancel_reason() :: cancel | remove. @@ -476,6 +478,12 @@ is_server_named_allowed(Type) -> Capabilities = Type:capabilities(), maps:get(server_named, Capabilities, false). +-spec amqp_capabilities(queue_type()) -> + [binary()]. +amqp_capabilities(Type) -> + Capabilities = Type:capabilities(), + maps:get(?FUNCTION_NAME, Capabilities, []). + -spec arguments(arguments()) -> [binary()]. 
arguments(ArgumentType) -> Args0 = lists:map(fun(T) -> diff --git a/deps/rabbit/src/rabbit_quorum_queue.erl b/deps/rabbit/src/rabbit_quorum_queue.erl index 94e2148aec3..6d4eb2cae82 100644 --- a/deps/rabbit/src/rabbit_quorum_queue.erl +++ b/deps/rabbit/src/rabbit_quorum_queue.erl @@ -926,7 +926,7 @@ consume(Q, Spec, QState0) when ?amqqueue_is_quorum(Q) -> exclusive_consume := ExclusiveConsume, args := Args, ok_msg := OkMsg, - acting_user := ActingUser} = Spec, + acting_user := ActingUser} = Spec, %% TODO: validate consumer arguments %% currently quorum queues do not support any arguments QName = amqqueue:get_name(Q), diff --git a/deps/rabbit/src/rabbit_stream_queue.erl b/deps/rabbit/src/rabbit_stream_queue.erl index a7aa3a5a18c..a011dc09a65 100644 --- a/deps/rabbit/src/rabbit_stream_queue.erl +++ b/deps/rabbit/src/rabbit_stream_queue.erl @@ -78,13 +78,14 @@ ack :: boolean(), start_offset = 0 :: non_neg_integer(), listening_offset = 0 :: non_neg_integer(), - last_consumed_offset = 0 :: non_neg_integer(), + last_consumed_offset :: non_neg_integer(), log :: undefined | osiris_log:state(), chunk_iterator :: undefined | osiris_log:chunk_iterator(), %% These messages were already read ahead from the Osiris log, %% were part of an uncompressed sub batch, and are buffered in %% reversed order until the consumer has more credits to consume them. buffer_msgs_rev = [] :: [rabbit_amqqueue:qmsg()], + filter :: rabbit_amqp_filtex:filter_expressions(), reader_options :: map()}). 
-record(stream_client, {stream_id :: string(), @@ -333,7 +334,8 @@ consume(Q, Spec, #stream_client{} = QState0) %% begins sending maybe_send_reply(ChPid, OkMsg), _ = rabbit_stream_coordinator:register_local_member_listener(Q), - begin_stream(QState, ConsumerTag, OffsetSpec, Mode, AckRequired, filter_spec(Args)) + Filter = maps:get(filter, Spec, []), + begin_stream(QState, ConsumerTag, OffsetSpec, Mode, AckRequired, Filter, filter_spec(Args)) end; {undefined, _} -> {protocol_error, precondition_failed, @@ -424,7 +426,7 @@ query_local_pid(#stream_client{stream_id = StreamId} = State) -> begin_stream(#stream_client{name = QName, readers = Readers0, local_pid = LocalPid} = State, - Tag, Offset, Mode, AckRequired, Options) + Tag, Offset, Mode, AckRequired, Filter, Options) when is_pid(LocalPid) -> CounterSpec = {{?MODULE, QName, Tag, self()}, []}, {ok, Seg0} = osiris:init_reader(LocalPid, Offset, CounterSpec, Options), @@ -451,6 +453,7 @@ begin_stream(#stream_client{name = QName, listening_offset = NextOffset, last_consumed_offset = StartOffset, log = Seg0, + filter = Filter, reader_options = Options}, {ok, State#stream_client{readers = Readers0#{Tag => Str0}}}. 
@@ -1158,7 +1161,8 @@ stream_entries(QName, Name, LocalPid, #stream{chunk_iterator = Iter0, delivery_count = DC, credit = Credit, - start_offset = StartOffset} = Str0, Acc0) -> + start_offset = StartOffset, + filter = Filter} = Str0, Acc0) -> case osiris_log:iterator_next(Iter0) of end_of_chunk -> case chunk_iterator(Str0, LocalPid) of @@ -1172,7 +1176,7 @@ stream_entries(QName, Name, LocalPid, {batch, _NumRecords, 0, _Len, BatchedEntries} -> {MsgsRev, NumMsgs} = parse_uncompressed_subbatch( BatchedEntries, Offset, StartOffset, - QName, Name, LocalPid, {[], 0}), + QName, Name, LocalPid, Filter, {[], 0}), case Credit >= NumMsgs of true -> {Str0#stream{chunk_iterator = Iter, @@ -1199,12 +1203,19 @@ stream_entries(QName, Name, LocalPid, _SimpleEntry -> case Offset >= StartOffset of true -> - Msg = entry_to_msg(Entry, Offset, QName, Name, LocalPid), - {Str0#stream{chunk_iterator = Iter, - delivery_count = delivery_count_add(DC, 1), - credit = Credit - 1, - last_consumed_offset = Offset}, - [Msg | Acc0]}; + case entry_to_msg(Entry, Offset, QName, + Name, LocalPid, Filter) of + none -> + {Str0#stream{chunk_iterator = Iter, + last_consumed_offset = Offset}, + Acc0}; + Msg -> + {Str0#stream{chunk_iterator = Iter, + delivery_count = delivery_count_add(DC, 1), + credit = Credit - 1, + last_consumed_offset = Offset}, + [Msg | Acc0]} + end; false -> {Str0#stream{chunk_iterator = Iter}, Acc0} end @@ -1236,25 +1247,30 @@ chunk_iterator(#stream{credit = Credit, end. %% Deliver each record of an uncompressed sub batch individually. 
-parse_uncompressed_subbatch(<<>>, _Offset, _StartOffset, _QName, _Name, _LocalPid, Acc) -> +parse_uncompressed_subbatch( + <<>>, _Offset, _StartOffset, _QName, _Name, _LocalPid, _Filter, Acc) -> Acc; parse_uncompressed_subbatch( <<0:1, %% simple entry Len:31/unsigned, Entry:Len/binary, Rem/binary>>, - Offset, StartOffset, QName, Name, LocalPid, Acc0 = {AccList, AccCount}) -> + Offset, StartOffset, QName, Name, LocalPid, Filter, Acc0 = {AccList, AccCount}) -> Acc = case Offset >= StartOffset of true -> - Msg = entry_to_msg(Entry, Offset, QName, Name, LocalPid), - {[Msg | AccList], AccCount + 1}; + case entry_to_msg(Entry, Offset, QName, Name, LocalPid, Filter) of + none -> + Acc0; + Msg -> + {[Msg | AccList], AccCount + 1} + end; false -> Acc0 end, - parse_uncompressed_subbatch(Rem, Offset + 1, StartOffset, QName, Name, LocalPid, Acc). + parse_uncompressed_subbatch(Rem, Offset + 1, StartOffset, QName, + Name, LocalPid, Filter, Acc). -entry_to_msg(Entry, Offset, #resource{kind = queue, - name = QName}, Name, LocalPid) -> +entry_to_msg(Entry, Offset, #resource{kind = queue, name = QName}, Name, LocalPid, Filter) -> Mc0 = mc:init(mc_amqp, Entry, #{}), %% If exchange or routing_keys annotation isn't present the entry most likely came %% from the rabbitmq-stream plugin so we'll choose defaults that simulate use @@ -1268,7 +1284,12 @@ entry_to_msg(Entry, Offset, #resource{kind = queue, _ -> Mc1 end, Mc = mc:set_annotation(<<"x-stream-offset">>, Offset, Mc2), - {Name, LocalPid, Offset, false, Mc}. + case rabbit_amqp_filtex:filter(Filter, Mc) of + true -> + {Name, LocalPid, Offset, false, Mc}; + false -> + none + end. 
capabilities() -> #{unsupported_policies => [%% Classic policies @@ -1288,6 +1309,9 @@ capabilities() -> consumer_arguments => [<<"x-stream-offset">>, <<"x-stream-filter">>, <<"x-stream-match-unfiltered">>], + %% AMQP property filter expressions + %% https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=66227 + amqp_capabilities => [<<"AMQP_FILTEX_PROP_V1_0">>], server_named => false}. notify_decorators(Q) when ?is_amqqueue(Q) -> diff --git a/deps/rabbit/test/amqp_address_SUITE.erl b/deps/rabbit/test/amqp_address_SUITE.erl index 910e1068eee..f5a0f74b893 100644 --- a/deps/rabbit/test/amqp_address_SUITE.erl +++ b/deps/rabbit/test/amqp_address_SUITE.erl @@ -18,6 +18,9 @@ [rpc/4]). -import(rabbit_ct_helpers, [eventually/1]). +-import(amqp_utils, + [flush/1, + wait_for_credit/1]). all() -> [ @@ -651,17 +654,6 @@ connection_config(Config) -> container_id => <<"my container">>, sasl => {plain, <<"guest">>, <<"guest">>}}. -% before we can send messages we have to wait for credit from the server -wait_for_credit(Sender) -> - receive - {amqp10_event, {link, Sender, credited}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush(?FUNCTION_NAME), - ct:fail(?FUNCTION_NAME) - end. - wait_for_settled(State, Tag) -> receive {amqp10_disposition, {State, Tag}} -> @@ -671,11 +663,3 @@ wait_for_settled(State, Tag) -> flush(Reason), ct:fail(Reason) end. - -flush(Prefix) -> - receive Msg -> - ct:pal("~tp flushed: ~p~n", [Prefix, Msg]), - flush(Prefix) - after 1 -> - ok - end. diff --git a/deps/rabbit/test/amqp_auth_SUITE.erl b/deps/rabbit/test/amqp_auth_SUITE.erl index 920f779172d..6bd905a9242 100644 --- a/deps/rabbit/test/amqp_auth_SUITE.erl +++ b/deps/rabbit/test/amqp_auth_SUITE.erl @@ -21,6 +21,10 @@ -import(event_recorder, [assert_event_type/2, assert_event_prop/2]). +-import(amqp_utils, + [flush/1, + wait_for_credit/1, + close_connection_sync/1]). 
all() -> [ @@ -1077,34 +1081,7 @@ amqp_error(Condition, Description) condition = Condition, description = {utf8, Description}}. -% before we can send messages we have to wait for credit from the server -wait_for_credit(Sender) -> - receive - {amqp10_event, {link, Sender, credited}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_credit timed out"), - ct:fail(credited_timeout) - end. - -flush(Prefix) -> - receive Msg -> - ct:pal("~ts flushed: ~p~n", [Prefix, Msg]), - flush(Prefix) - after 1 -> - ok - end. - delete_all_queues(Config) -> Qs = rpc(Config, rabbit_amqqueue, list, []), [{ok, _QLen} = rpc(Config, rabbit_amqqueue, delete, [Q, false, false, <<"fake-user">>]) || Q <- Qs]. - -close_connection_sync(Connection) - when is_pid(Connection) -> - ok = amqp10_client:close_connection(Connection), - receive {amqp10_event, {connection, Connection, {closed, normal}}} -> ok - after 5000 -> flush(missing_closed), - ct:fail("missing CLOSE from server") - end. diff --git a/deps/rabbit/test/amqp_client_SUITE.erl b/deps/rabbit/test/amqp_client_SUITE.erl index acc4dd004cd..e8c64690a01 100644 --- a/deps/rabbit/test/amqp_client_SUITE.erl +++ b/deps/rabbit/test/amqp_client_SUITE.erl @@ -27,6 +27,17 @@ -import(event_recorder, [assert_event_type/2, assert_event_prop/2]). +-import(amqp_utils, + [init/1, init/2, + connection_config/1, connection_config/2, + flush/1, + wait_for_credit/1, + wait_for_accepts/1, + send_messages/3, send_messages/4, + detach_link_sync/1, + end_session_sync/1, + wait_for_session_end/1, + close_connection_sync/1]). 
all() -> [ @@ -100,7 +111,7 @@ groups() -> max_message_size_client_to_server, max_message_size_server_to_client, global_counters, - stream_filtering, + stream_bloom_filter, available_messages_classic_queue, available_messages_quorum_queue, available_messages_stream, @@ -3255,7 +3266,7 @@ target_queue_deleted(Config) -> after 5000 -> ct:fail({missing_accepted, DTag1}) end, - N0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename), + N0 = get_node_config(Config, 0, nodename), RaName = ra_name(QuorumQ), ServerId0 = {RaName, N0}, {ok, Members, _Leader} = ra:members(ServerId0), @@ -3937,7 +3948,7 @@ global_counters(Config) -> ok = end_session_sync(Session), ok = amqp10_client:close_connection(Connection). -stream_filtering(Config) -> +stream_bloom_filter(Config) -> Stream = atom_to_binary(?FUNCTION_NAME), Address = rabbitmq_amqp_address:queue(Stream), Ch = rabbit_ct_client_helpers:open_channel(Config), @@ -4476,7 +4487,7 @@ handshake_timeout(Config) -> Par = ?FUNCTION_NAME, {ok, DefaultVal} = rpc(Config, application, get_env, [App, Par]), ok = rpc(Config, application, set_env, [App, Par, 200]), - Port = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp), + Port = get_node_config(Config, 0, tcp_port_amqp), {ok, Socket} = gen_tcp:connect("localhost", Port, [{active, false}]), ?assertEqual({error, closed}, gen_tcp:recv(Socket, 0, 400)), ok = rpc(Config, application, set_env, [App, Par, DefaultVal]). @@ -5762,16 +5773,6 @@ link_max_per_session(Config) -> %% internal %% -init(Config) -> - init(0, Config). - -init(Node, Config) -> - OpnConf = connection_config(Node, Config), - {ok, Connection} = amqp10_client:open_connection(OpnConf), - {ok, Session} = amqp10_client:begin_session_sync(Connection), - {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), - {Connection, Session, LinkPair}. - receive_all_messages(Receiver, Accept) -> receive_all_messages0(Receiver, Accept, []). 
@@ -5786,26 +5787,6 @@ receive_all_messages0(Receiver, Accept, Acc) -> lists:reverse(Acc) end. -connection_config(Config) -> - connection_config(0, Config). - -connection_config(Node, Config) -> - Host = ?config(rmq_hostname, Config), - Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), - #{address => Host, - port => Port, - container_id => <<"my container">>, - sasl => {plain, <<"guest">>, <<"guest">>}}. - -flush(Prefix) -> - receive - Msg -> - ct:pal("~p flushed: ~p~n", [Prefix, Msg]), - flush(Prefix) - after 1 -> - ok - end. - open_and_close_connection(Config) -> OpnConf = connection_config(Config), {ok, Connection} = amqp10_client:open_connection(OpnConf), @@ -5814,58 +5795,6 @@ open_and_close_connection(Config) -> end, ok = close_connection_sync(Connection). -% before we can send messages we have to wait for credit from the server -wait_for_credit(Sender) -> - receive - {amqp10_event, {link, Sender, credited}} -> - ok - after 5000 -> - flush("wait_for_credit timed out"), - ct:fail(credited_timeout) - end. - -detach_link_sync(Link) -> - ok = amqp10_client:detach_link(Link), - ok = wait_for_link_detach(Link). - -wait_for_link_detach(Link) -> - receive - {amqp10_event, {link, Link, {detached, normal}}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_link_detach timed out"), - ct:fail({link_detach_timeout, Link}) - end. - -end_session_sync(Session) -> - ok = amqp10_client:end_session(Session), - ok = wait_for_session_end(Session). - -wait_for_session_end(Session) -> - receive - {amqp10_event, {session, Session, {ended, _}}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_session_end timed out"), - ct:fail({session_end_timeout, Session}) - end. - -close_connection_sync(Connection) -> - ok = amqp10_client:close_connection(Connection), - ok = wait_for_connection_close(Connection). 
- -wait_for_connection_close(Connection) -> - receive - {amqp10_event, {connection, Connection, {closed, normal}}} -> - flush(?FUNCTION_NAME), - ok - after 5000 -> - flush("wait_for_connection_close timed out"), - ct:fail({connection_close_timeout, Connection}) - end. - wait_for_accepted(Tag) -> wait_for_settlement(Tag, accepted). @@ -5878,16 +5807,6 @@ wait_for_settlement(Tag, State) -> ct:fail({settled_timeout, Tag}) end. -wait_for_accepts(0) -> - ok; -wait_for_accepts(N) -> - receive - {amqp10_disposition,{accepted,_}} -> - wait_for_accepts(N - 1) - after 5000 -> - ct:fail({missing_accepted, N}) - end. - delete_queue(Session, QName) -> {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync( Session, <<"delete queue">>), @@ -5938,32 +5857,6 @@ count_received_messages0(Receiver, Count) -> Count end. -send_messages(Sender, Left, Settled) -> - send_messages(Sender, Left, Settled, <<>>). - -send_messages(_, 0, _, _) -> - ok; -send_messages(Sender, Left, Settled, BodySuffix) -> - Bin = integer_to_binary(Left), - Body = <>, - Msg = amqp10_msg:new(Bin, Body, Settled), - case amqp10_client:send_msg(Sender, Msg) of - ok -> - send_messages(Sender, Left - 1, Settled, BodySuffix); - {error, insufficient_credit} -> - ok = wait_for_credit(Sender), - %% The credited event we just processed could have been received some time ago, - %% i.e. we might have 0 credits right now. This happens in the following scenario: - %% 1. We (test case proc) send a message successfully, the client session proc decrements remaining link credit from 1 to 0. - %% 2. The server grants our client session proc new credits. - %% 3. The client session proc sends us (test case proc) a credited event. - %% 4. We didn't even notice that we ran out of credits temporarily. We send the next message, it succeeds, - %% but do not process the credited event in our mailbox. 
- %% So, we must be defensive here and assume that the next amqp10_client:send/2 call might return {error, insufficient_credit} - %% again causing us then to really wait to receive a credited event (instead of just processing an old credited event). - send_messages(Sender, Left, Settled, BodySuffix) - end. - assert_link_credit_runs_out(_Sender, 0) -> ct:fail(sufficient_link_credit); assert_link_credit_runs_out(Sender, Left) -> diff --git a/deps/rabbit/test/amqp_filtex_SUITE.erl b/deps/rabbit/test/amqp_filtex_SUITE.erl new file mode 100644 index 00000000000..51469821a83 --- /dev/null +++ b/deps/rabbit/test/amqp_filtex_SUITE.erl @@ -0,0 +1,591 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +%% Test suite for +%% AMQP Filter Expressions Version 1.0 Working Draft 09 +-module(amqp_filtex_SUITE). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("amqp10_common/include/amqp10_filtex.hrl"). + +-compile([nowarn_export_all, + export_all]). + +-import(rabbit_ct_broker_helpers, + [rpc/4]). +-import(rabbit_ct_helpers, + [eventually/1]). +-import(amqp_utils, + [init/1, + flush/1, + wait_for_credit/1, + wait_for_accepts/1, + send_messages/3, + detach_link_sync/1, + end_session_sync/1, + wait_for_session_end/1, + close_connection_sync/1]). + +all() -> + [ + {group, cluster_size_1} + ]. + +groups() -> + [ + {cluster_size_1, [shuffle], + [ + properties_section, + application_properties_section, + multiple_sections, + filter_few_messages_from_many, + string_modifier + ]} + ]. 
+ +init_per_suite(Config) -> + {ok, _} = application:ensure_all_started(amqp10_client), + rabbit_ct_helpers:log_environment(), + rabbit_ct_helpers:merge_app_env( + Config, {rabbit, [{quorum_tick_interval, 1000}, + {stream_tick_interval, 1000} + ]}). + +end_per_suite(Config) -> + Config. + +init_per_group(_Group, Config) -> + Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"), + Config1 = rabbit_ct_helpers:set_config( + Config, [{rmq_nodename_suffix, Suffix}]), + rabbit_ct_helpers:run_setup_steps( + Config1, + rabbit_ct_broker_helpers:setup_steps() ++ + rabbit_ct_client_helpers:setup_steps()). + +end_per_group(_, Config) -> + rabbit_ct_helpers:run_teardown_steps(Config, + rabbit_ct_client_helpers:teardown_steps() ++ + rabbit_ct_broker_helpers:teardown_steps()). + +init_per_testcase(Testcase, Config) -> + rabbit_ct_helpers:testcase_started(Config, Testcase). + +end_per_testcase(Testcase, Config) -> + %% Assert that every testcase cleaned up. + eventually(?_assertEqual([], rpc(Config, rabbit_amqqueue, list, []))), + %% Wait for sessions to terminate before starting the next test case. + eventually(?_assertEqual([], rpc(Config, rabbit_amqp_session, list_local, []))), + rabbit_ct_helpers:testcase_finished(Config, Testcase). 
+ +properties_section(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(Stream), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + Now = erlang:system_time(millisecond), + To = rabbitmq_amqp_address:exchange(<<"some exchange">>, <<"routing key">>), + ReplyTo = rabbitmq_amqp_address:queue(<<"some queue">>), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{message_id => {ulong, 999}, + user_id => <<"guest">>, + to => To, + subject => <<"🐇"/utf8>>, + reply_to => ReplyTo, + correlation_id => <<"corr-123">>, + content_type => <<"text/plain">>, + content_encoding => <<"some encoding">>, + absolute_expiry_time => Now + 100_000, + creation_time => Now, + group_id => <<"my group ID">>, + group_sequence => 16#ff_ff_ff_ff, + reply_to_group_id => <<"other group ID">>}, + amqp10_msg:new(<<"t1">>, <<"m1">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:new(<<"t2">>, <<"m2">>)), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{group_id => <<"my group ID">>}, + amqp10_msg:new(<<"t3">>, <<"m3">>))), + + ok = wait_for_accepts(3), + ok = detach_link_sync(Sender), + flush(sent), + + PropsFilter1 = [ + {{symbol, <<"message-id">>}, {ulong, 999}}, + {{symbol, <<"user-id">>}, {binary, <<"guest">>}}, + {{symbol, <<"subject">>}, {utf8, <<"🐇"/utf8>>}}, + {{symbol, <<"to">>}, {utf8, To}}, + {{symbol, <<"reply-to">>}, {utf8, ReplyTo}}, + {{symbol, <<"correlation-id">>}, {utf8, <<"corr-123">>}}, + {{symbol, <<"content-type">>}, {symbol, <<"text/plain">>}}, + {{symbol, <<"content-encoding">>}, {symbol, <<"some encoding">>}}, + {{symbol, <<"absolute-expiry-time">>}, {timestamp, Now + 100_000}}, + {{symbol, <<"creation-time">>}, {timestamp, 
Now}}, + {{symbol, <<"group-id">>}, {utf8, <<"my group ID">>}}, + {{symbol, <<"group-sequence">>}, {uint, 16#ff_ff_ff_ff}}, + {{symbol, <<"reply-to-group-id">>}, {utf8, <<"other group ID">>}} + ], + Filter1 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter1}}, + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, Address, + settled, configuration, Filter1), + ok = amqp10_client:flow_link_credit(Receiver1, 10, never), + receive {amqp10_msg, Receiver1, R1M1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(R1M1)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver1), + + PropsFilter2 = [{{symbol, <<"group-id">>}, {utf8, <<"my group ID">>}}], + Filter2 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter2}}, + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, Address, + unsettled, configuration, Filter2), + {ok, R2M1} = amqp10_client:get_msg(Receiver2), + {ok, R2M2} = amqp10_client:get_msg(Receiver2), + ok = amqp10_client:accept_msg(Receiver2, R2M1), + ok = amqp10_client:accept_msg(Receiver2, R2M2), + ?assertEqual([<<"m1">>], amqp10_msg:body(R2M1)), + ?assertEqual([<<"m3">>], amqp10_msg:body(R2M2)), + ok = detach_link_sync(Receiver2), + + %% Filter is in place, but no message matches. + PropsFilter3 = [{{symbol, <<"group-id">>}, {utf8, <<"no match">>}}], + Filter3 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter3}}, + {ok, Receiver3} = amqp10_client:attach_receiver_link( + Session, <<"receiver 3">>, Address, + unsettled, configuration, Filter3), + ok = amqp10_client:flow_link_credit(Receiver3, 10, never), + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver3), + + %% Wrong type should fail validation in the server. 
+ %% RabbitMQ should exclude this filter in its reply attach frame because + %% "the sending endpoint [RabbitMQ] sets the filter actually in place". + %% Hence, no filter expression is actually in place and we should receive all messages. + PropsFilter4 = [{{symbol, <<"group-id">>}, {uint, 3}}], + Filter4 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter4}}, + {ok, Receiver4} = amqp10_client:attach_receiver_link( + Session, <<"receiver 4">>, Address, + unsettled, configuration, Filter4), + {ok, R4M1} = amqp10_client:get_msg(Receiver4), + {ok, R4M2} = amqp10_client:get_msg(Receiver4), + {ok, R4M3} = amqp10_client:get_msg(Receiver4), + ok = amqp10_client:accept_msg(Receiver4, R4M1), + ok = amqp10_client:accept_msg(Receiver4, R4M2), + ok = amqp10_client:accept_msg(Receiver4, R4M3), + ?assertEqual([<<"m1">>], amqp10_msg:body(R4M1)), + ?assertEqual([<<"m2">>], amqp10_msg:body(R4M2)), + ?assertEqual([<<"m3">>], amqp10_msg:body(R4M3)), + ok = detach_link_sync(Receiver4), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = close_connection_sync(Connection). 
+ +application_properties_section(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(Stream), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{<<"k1">> => -2, + <<"k2">> => 10, + <<"k3">> => false, + <<"k4">> => true, + <<"k5">> => <<"hey">>}, + amqp10_msg:new(<<"t1">>, <<"m1">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{<<"k2">> => 10.1}, + amqp10_msg:new(<<"t2">>, <<"m2">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:new(<<"t3">>, <<"m3">>)), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{<<"k2">> => 10.0}, + amqp10_msg:new(<<"t4">>, <<"m4">>))), + + ok = wait_for_accepts(4), + ok = detach_link_sync(Sender), + flush(sent), + + AppPropsFilter0 = [{{utf8, <<"k5">>}, {symbol, <<"no match">>}}], + Filter0 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter0}}, + {ok, Receiver0} = amqp10_client:attach_receiver_link( + Session, <<"receiver 0">>, Address, + unsettled, configuration, Filter0), + ok = amqp10_client:flow_link_credit(Receiver0, 10, never), + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver0), + + AppPropsFilter1 = [ + {{utf8, <<"k1">>}, {int, -2}}, + {{utf8, <<"k5">>}, {symbol, <<"hey">>}}, + {{utf8, <<"k4">>}, {boolean, true}}, + {{utf8, <<"k3">>}, false} + ], + Filter1 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter1}}, + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, 
Address, + settled, configuration, Filter1), + ok = amqp10_client:flow_link_credit(Receiver1, 10, never), + receive {amqp10_msg, Receiver1, R1M1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(R1M1)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver1), + + %% Due to simple type matching [filtex-v1.0-wd09 §4.1.1] + %% we expect integer 10 to also match number 10.0. + AppPropsFilter2 = [{{utf8, <<"k2">>}, {uint, 10}}], + Filter2 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter2}}, + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, Address, + unsettled, configuration, Filter2), + {ok, R2M1} = amqp10_client:get_msg(Receiver2), + {ok, R2M2} = amqp10_client:get_msg(Receiver2), + ok = amqp10_client:accept_msg(Receiver2, R2M1), + ok = amqp10_client:accept_msg(Receiver2, R2M2), + ?assertEqual([<<"m1">>], amqp10_msg:body(R2M1)), + ?assertEqual([<<"m4">>], amqp10_msg:body(R2M2)), + ok = detach_link_sync(Receiver2), + + %% A reference field value of NULL should always match. 
[filtex-v1.0-wd09 §4.1.1] + AppPropsFilter3 = [{{utf8, <<"k2">>}, null}], + Filter3 = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter3}}, + {ok, Receiver3} = amqp10_client:attach_receiver_link( + Session, <<"receiver 3">>, Address, + unsettled, configuration, Filter3), + {ok, R3M1} = amqp10_client:get_msg(Receiver3), + {ok, R3M2} = amqp10_client:get_msg(Receiver3), + {ok, R3M3} = amqp10_client:get_msg(Receiver3), + ok = amqp10_client:accept_msg(Receiver3, R3M1), + ok = amqp10_client:accept_msg(Receiver3, R3M2), + ok = amqp10_client:accept_msg(Receiver3, R3M3), + ?assertEqual([<<"m1">>], amqp10_msg:body(R3M1)), + ?assertEqual([<<"m2">>], amqp10_msg:body(R3M2)), + ?assertEqual([<<"m4">>], amqp10_msg:body(R3M3)), + ok = detach_link_sync(Receiver3), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = close_connection_sync(Connection). + +%% Test filter expressions matching multiple message sections. 
+multiple_sections(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(Stream), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{subject => <<"The Subject">>}, + amqp10_msg:new(<<"t1">>, <<"m1">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{<<"The Key">> => -123}, + amqp10_msg:new(<<"t2">>, <<"m2">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{subject => <<"The Subject">>}, + amqp10_msg:set_application_properties( + #{<<"The Key">> => -123}, + amqp10_msg:new(<<"t3">>, <<"m3">>)))), + + ok = wait_for_accepts(3), + ok = detach_link_sync(Sender), + flush(sent), + + PropsFilter = [{{symbol, <<"subject">>}, {utf8, <<"The Subject">>}}], + Filter1 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, Address, + unsettled, configuration, Filter1), + {ok, R1M1} = amqp10_client:get_msg(Receiver1), + {ok, R1M3} = amqp10_client:get_msg(Receiver1), + ok = amqp10_client:accept_msg(Receiver1, R1M1), + ok = amqp10_client:accept_msg(Receiver1, R1M3), + ?assertEqual([<<"m1">>], amqp10_msg:body(R1M1)), + ?assertEqual([<<"m3">>], amqp10_msg:body(R1M3)), + ok = detach_link_sync(Receiver1), + + AppPropsFilter = [{{utf8, <<"The Key">>}, {byte, -123}}], + Filter2 = #{?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, Address, + unsettled, 
configuration, Filter2), + {ok, R2M2} = amqp10_client:get_msg(Receiver2), + {ok, R2M3} = amqp10_client:get_msg(Receiver2), + ok = amqp10_client:accept_msg(Receiver2, R2M2), + ok = amqp10_client:accept_msg(Receiver2, R2M3), + ?assertEqual([<<"m2">>], amqp10_msg:body(R2M2)), + ?assertEqual([<<"m3">>], amqp10_msg:body(R2M3)), + ok = detach_link_sync(Receiver2), + + Filter3 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter}, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver3} = amqp10_client:attach_receiver_link( + Session, <<"receiver 3">>, Address, + unsettled, configuration, Filter3), + {ok, R3M3} = amqp10_client:get_msg(Receiver3), + ok = amqp10_client:accept_msg(Receiver3, R3M3), + ?assertEqual([<<"m3">>], amqp10_msg:body(R3M3)), + ok = detach_link_sync(Receiver3), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = close_connection_sync(Connection). + +%% Filter a small subset from many messages. +%% We test here that flow control still works correctly. 
+filter_few_messages_from_many(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(Stream), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{group_id => <<"my group ID">>}, + amqp10_msg:new(<<"t1">>, <<"first msg">>))), + ok = send_messages(Sender, 1000, false), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{group_id => <<"my group ID">>}, + amqp10_msg:new(<<"t2">>, <<"last msg">>))), + ok = wait_for_accepts(1002), + ok = detach_link_sync(Sender), + flush(sent), + + %% Our filter should cause us to receive only the first and + %% last message out of the 1002 messages in the stream. + PropsFilter = [{{symbol, <<"group-id">>}, {utf8, <<"my group ID">>}}], + Filter = #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter}}, + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"receiver">>, Address, + unsettled, configuration, Filter), + + ok = amqp10_client:flow_link_credit(Receiver, 2, never), + receive {amqp10_msg, Receiver, M1} -> + ?assertEqual([<<"first msg">>], amqp10_msg:body(M1)), + ok = amqp10_client:accept_msg(Receiver, M1) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver, M2} -> + ?assertEqual([<<"last msg">>], amqp10_msg:body(M2)), + ok = amqp10_client:accept_msg(Receiver, M2) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + ok = detach_link_sync(Receiver), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = 
close_connection_sync(Connection). + +string_modifier(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + Address = rabbitmq_amqp_address:queue(Stream), + {Connection, Session, LinkPair} = init(Config), + {ok, #{}} = rabbitmq_amqp_client:declare_queue( + LinkPair, + Stream, + #{arguments => #{<<"x-queue-type">> => {utf8, <<"stream">>}}}), + {ok, Sender} = amqp10_client:attach_sender_link(Session, <<"sender">>, Address), + ok = wait_for_credit(Sender), + + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{to => <<"abc 1">>, + reply_to => <<"abc 2">>, + subject => <<"abc 3">>, + group_id => <<"abc 4">>, + reply_to_group_id => <<"abc 5">>, + message_id => {utf8, <<"abc 6">>}, + correlation_id => <<"abc 7">>, + group_sequence => 16#ff_ff_ff_ff}, + amqp10_msg:set_application_properties( + #{<<"k1">> => <<"abc 8">>, + <<"k2">> => <<"abc 9">>}, + amqp10_msg:new(<<"t1">>, <<"m1">>)))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_application_properties( + #{<<"k1">> => <<"abc">>}, + amqp10_msg:new(<<"t2">>, <<"m2">>))), + ok = amqp10_client:send_msg( + Sender, + amqp10_msg:set_properties( + #{subject => <<"$Hello">>, + reply_to_group_id => <<"xyz 5">>}, + amqp10_msg:new(<<"t3">>, <<"m3">>))), + + ok = wait_for_accepts(3), + ok = detach_link_sync(Sender), + flush(sent), + + PropsFilter1 = [ + {{symbol, <<"to">>}, {utf8, <<"$p:abc ">>}}, + {{symbol, <<"reply-to">>}, {utf8, <<"$p:abc">>}}, + {{symbol, <<"subject">>}, {utf8, <<"$p:ab">>}}, + {{symbol, <<"group-id">>}, {utf8, <<"$p:a">>}}, + {{symbol, <<"reply-to-group-id">>}, {utf8, <<"$s:5">>}}, + {{symbol, <<"correlation-id">>}, {utf8, <<"$s:abc 7">>}}, + {{symbol, <<"message-id">>}, {utf8, <<"$p:abc 6">>}} + ], + AppPropsFilter1 = [ + {{utf8, <<"k1">>}, {utf8, <<"$s: 8">>}}, + {{utf8, <<"k2">>}, {utf8, <<"$p:abc ">>}} + ], + Filter1 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter1}, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter1}, + 
<<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver1} = amqp10_client:attach_receiver_link( + Session, <<"receiver 1">>, Address, + settled, configuration, Filter1), + ok = amqp10_client:flow_link_credit(Receiver1, 10, never), + receive {amqp10_msg, Receiver1, R1M1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(R1M1)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver1), + + %% Same filters as before except for subject which shouldn't match anymore. + PropsFilter2 = lists:keyreplace( + {symbol, <<"subject">>}, 1, PropsFilter1, + {{symbol, <<"subject">>}, {utf8, <<"$s:xxxxxxxxxxxxxx">>}}), + Filter2 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter2}, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter1}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver2} = amqp10_client:attach_receiver_link( + Session, <<"receiver 2">>, Address, + settled, configuration, Filter2), + ok = amqp10_client:flow_link_credit(Receiver2, 10, never), + ok = assert_no_msg_received(?LINE), + ok = detach_link_sync(Receiver2), + + PropsFilter3 = [{{symbol, <<"reply-to-group-id">>}, {utf8, <<"$s: 5">>}}], + Filter3 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter3}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver3} = amqp10_client:attach_receiver_link( + Session, <<"receiver 3">>, Address, + settled, configuration, Filter3), + ok = amqp10_client:flow_link_credit(Receiver3, 10, never), + receive {amqp10_msg, Receiver3, R3M1} -> + ?assertEqual([<<"m1">>], amqp10_msg:body(R3M1)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver3, R3M3} -> + ?assertEqual([<<"m3">>], amqp10_msg:body(R3M3)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + ok = detach_link_sync(Receiver3), + + %% '$$' is the escape prefix for case-sensitive matching of a string starting with '$' + PropsFilter4 = [{{symbol, 
<<"subject">>}, {utf8, <<"$$Hello">>}}], + Filter4 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter4}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver4} = amqp10_client:attach_receiver_link( + Session, <<"receiver 4">>, Address, + settled, configuration, Filter4), + {ok, R4M3} = amqp10_client:get_msg(Receiver4), + ?assertEqual([<<"m3">>], amqp10_msg:body(R4M3)), + ok = detach_link_sync(Receiver4), + + %% Starting the reference field value with $ without using a valid modifier + %% prefix is invalid. + %% RabbitMQ should exclude this filter in its reply attach frame because + %% "the sending endpoint [RabbitMQ] sets the filter actually in place". + %% Hence, no filter expression is actually in place and we should receive all messages. + PropsFilter5 = [{{symbol, <<"subject">>}, {utf8, <<"$Hello">>}}], + Filter5 = #{?DESCRIPTOR_NAME_PROPERTIES_FILTER => {map, PropsFilter5}, + <<"rabbitmq:stream-offset-spec">> => <<"first">>}, + {ok, Receiver5} = amqp10_client:attach_receiver_link( + Session, <<"receiver 5">>, Address, + settled, configuration, Filter5), + {ok, R5M1} = amqp10_client:get_msg(Receiver5), + ?assertEqual([<<"m1">>], amqp10_msg:body(R5M1)), + {ok, R5M2} = amqp10_client:get_msg(Receiver5), + ?assertEqual([<<"m2">>], amqp10_msg:body(R5M2)), + {ok, R5M3} = amqp10_client:get_msg(Receiver5), + ?assertEqual([<<"m3">>], amqp10_msg:body(R5M3)), + ok = detach_link_sync(Receiver5), + + {ok, _} = rabbitmq_amqp_client:delete_queue(LinkPair, Stream), + ok = rabbitmq_amqp_client:detach_management_link_pair_sync(LinkPair), + ok = end_session_sync(Session), + ok = close_connection_sync(Connection). + +%% ------------------------------------------------------------------- +%% Helpers +%% ------------------------------------------------------------------- + +assert_no_msg_received(Line) -> + receive {amqp10_msg, _, _} = Msg -> + ct:fail({received_unexpected_msg, Line, Msg}) + after 10 -> + ok + end. 
diff --git a/deps/rabbit/test/amqp_utils.erl b/deps/rabbit/test/amqp_utils.erl new file mode 100644 index 00000000000..f1816a07c22 --- /dev/null +++ b/deps/rabbit/test/amqp_utils.erl @@ -0,0 +1,139 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2023 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. +%% + +-module(amqp_utils). + +-export([init/1, init/2, + connection_config/1, connection_config/2, + flush/1, + wait_for_credit/1, + wait_for_accepts/1, + send_messages/3, send_messages/4, + detach_link_sync/1, + end_session_sync/1, + wait_for_session_end/1, + close_connection_sync/1]). + +init(Config) -> + init(0, Config). + +init(Node, Config) -> + OpnConf = connection_config(Node, Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + {ok, LinkPair} = rabbitmq_amqp_client:attach_management_link_pair_sync(Session, <<"my link pair">>), + {Connection, Session, LinkPair}. + +connection_config(Config) -> + connection_config(0, Config). + +connection_config(Node, Config) -> + Host = proplists:get_value(rmq_hostname, Config), + Port = rabbit_ct_broker_helpers:get_node_config(Config, Node, tcp_port_amqp), + #{address => Host, + port => Port, + container_id => <<"my container">>, + sasl => {plain, <<"guest">>, <<"guest">>}}. + +flush(Prefix) -> + receive + Msg -> + ct:pal("~p flushed: ~p~n", [Prefix, Msg]), + flush(Prefix) + after 1 -> + ok + end. + +% Before we can send messages we have to wait for credit from the server. +wait_for_credit(Sender) -> + receive + {amqp10_event, {link, Sender, credited}} -> + ok + after 5000 -> + flush("wait_for_credit timed out"), + ct:fail(credited_timeout) + end. 
+ +wait_for_accepts(0) -> + ok; +wait_for_accepts(N) -> + receive + {amqp10_disposition, {accepted, _}} -> + wait_for_accepts(N - 1) + after 5000 -> + ct:fail({missing_accepted, N}) + end. + +send_messages(Sender, Left, Settled) -> + send_messages(Sender, Left, Settled, <<>>). + +send_messages(_, 0, _, _) -> + ok; +send_messages(Sender, Left, Settled, BodySuffix) -> + Bin = integer_to_binary(Left), + Body = <>, + Msg = amqp10_msg:new(Bin, Body, Settled), + case amqp10_client:send_msg(Sender, Msg) of + ok -> + send_messages(Sender, Left - 1, Settled, BodySuffix); + {error, insufficient_credit} -> + ok = wait_for_credit(Sender), + %% The credited event we just processed could have been received some time ago, + %% i.e. we might have 0 credits right now. This happens in the following scenario: + %% 1. We (test case proc) send a message successfully, the client session proc decrements remaining link credit from 1 to 0. + %% 2. The server grants our client session proc new credits. + %% 3. The client session proc sends us (test case proc) a credited event. + %% 4. We didn't even notice that we ran out of credits temporarily. We send the next message, it succeeds, + %% but do not process the credited event in our mailbox. + %% So, we must be defensive here and assume that the next amqp10_client:send/2 call might return {error, insufficient_credit} + %% again causing us then to really wait to receive a credited event (instead of just processing an old credited event). + send_messages(Sender, Left, Settled, BodySuffix) + end. + +detach_link_sync(Link) -> + ok = amqp10_client:detach_link(Link), + ok = wait_for_link_detach(Link). + +wait_for_link_detach(Link) -> + receive + {amqp10_event, {link, Link, {detached, normal}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_link_detach timed out"), + ct:fail({link_detach_timeout, Link}) + end. 
+ +end_session_sync(Session) + when is_pid(Session) -> + ok = amqp10_client:end_session(Session), + ok = wait_for_session_end(Session). + +wait_for_session_end(Session) -> + receive + {amqp10_event, {session, Session, {ended, _}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_session_end timed out"), + ct:fail({session_end_timeout, Session}) + end. + +close_connection_sync(Connection) + when is_pid(Connection) -> + ok = amqp10_client:close_connection(Connection), + ok = wait_for_connection_close(Connection). + +wait_for_connection_close(Connection) -> + receive + {amqp10_event, {connection, Connection, {closed, normal}}} -> + flush(?FUNCTION_NAME), + ok + after 5000 -> + flush("wait_for_connection_close timed out"), + ct:fail({connection_close_timeout, Connection}) + end. diff --git a/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl b/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl index 872424f5322..3b0b7914361 100644 --- a/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl +++ b/deps/rabbitmq_stream/test/protocol_interop_SUITE.erl @@ -14,6 +14,7 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). -include_lib("amqp10_common/include/amqp10_framing.hrl"). +-include_lib("amqp10_common/include/amqp10_filtex.hrl"). all() -> [{group, tests}]. @@ -24,7 +25,8 @@ groups() -> amqpl, amqp_credit_multiple_grants, amqp_credit_single_grant, - amqp_attach_sub_batch + amqp_attach_sub_batch, + amqp_filter_expression ] }]. @@ -270,6 +272,51 @@ amqp_attach_sub_batch(Config) -> ok = amqp10_client:detach_link(Receiver), ok = amqp10_client:close_connection(Connection). +%% Test that AMQP filter expressions work when messages +%% are published via the stream protocol and consumed via AMQP. +amqp_filter_expression(Config) -> + Stream = atom_to_binary(?FUNCTION_NAME), + publish_via_stream_protocol(Stream, Config), + + %% Consume from the stream via AMQP 1.0. 
+ OpnConf = connection_config(Config), + {ok, Connection} = amqp10_client:open_connection(OpnConf), + {ok, Session} = amqp10_client:begin_session_sync(Connection), + Address = <<"/queue/", Stream/binary>>, + + AppPropsFilter = [{{utf8, <<"my key">>}, + {utf8, <<"my value">>}}], + {ok, Receiver} = amqp10_client:attach_receiver_link( + Session, <<"test-receiver">>, Address, settled, configuration, + #{<<"rabbitmq:stream-offset-spec">> => <<"first">>, + ?DESCRIPTOR_NAME_APPLICATION_PROPERTIES_FILTER => {map, AppPropsFilter} + }), + + ok = amqp10_client:flow_link_credit(Receiver, 100, never), + receive {amqp10_msg, Receiver, M2} -> + ?assertEqual([<<"m2">>], amqp10_msg:body(M2)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver, M4} -> + ?assertEqual([<<"m4">>], amqp10_msg:body(M4)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver, M5} -> + ?assertEqual([<<"m5">>], amqp10_msg:body(M5)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, Receiver, M6} -> + ?assertEqual([<<"m6">>], amqp10_msg:body(M6)) + after 5000 -> ct:fail({missing_msg, ?LINE}) + end, + receive {amqp10_msg, _, _} = Msg -> + ct:fail({received_unexpected_msg, Msg}) + after 10 -> ok + end, + + ok = amqp10_client:detach_link(Receiver), + ok = amqp10_client:close_connection(Connection). 
+ %% ------------------------------------------------------------------- %% Helpers %% ------------------------------------------------------------------- @@ -310,7 +357,9 @@ publish_via_stream_protocol(Stream, Config) -> {{response, 1, {declare_publisher, _}}, C7} = receive_stream_commands(S, C6), M1 = simple_entry(1, <<"m1">>), - M2 = simple_entry(2, <<"m2">>), + M2 = simple_entry(2, <<"m2">>, #'v1_0.application_properties'{ + content = [{{utf8, <<"my key">>}, + {utf8, <<"my value">>}}]}), M3 = simple_entry(3, <<"m3">>), Messages1 = [M1, M2, M3], PublishFrame1 = rabbit_stream_core:frame({publish, PublisherId, length(Messages1), Messages1}), @@ -342,11 +391,25 @@ simple_entry(Sequence, Body) DataSectSize = byte_size(DataSect), <>. -%% Here, each AMQP 1.0 encoded message contains a single data section. +%% Streams contain AMQP 1.0 encoded messages. +%% In this case, the AMQP 1.0 encoded message consists of an application-properties section and a data section. +simple_entry(Sequence, Body, AppProps) + when is_binary(Body) -> + AppPropsSect = iolist_to_binary(amqp10_framing:encode_bin(AppProps)), + DataSect = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + Sects = <>, + SectSize = byte_size(Sects), + <>. + +%% Here, each AMQP 1.0 encoded message consists of an application-properties section and a data section. %% All data sections are delivered uncompressed in 1 batch. 
sub_batch_entry_uncompressed(Sequence, Bodies) -> Batch = lists:foldl(fun(Body, Acc) -> - Sect = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + AppProps = #'v1_0.application_properties'{ + content = [{{utf8, <<"my key">>}, {utf8, <<"my value">>}}]}, + Sect0 = iolist_to_binary(amqp10_framing:encode_bin(AppProps)), + Sect1 = iolist_to_binary(amqp10_framing:encode_bin(#'v1_0.data'{content = Body})), + Sect = <>, <> end, <<>>, Bodies), Size = byte_size(Batch), diff --git a/moduleindex.yaml b/moduleindex.yaml index ebadcd41d64..1ce6bae902c 100755 --- a/moduleindex.yaml +++ b/moduleindex.yaml @@ -543,6 +543,7 @@ rabbit: - rabbit_access_control - rabbit_alarm - rabbit_amqp1_0 +- rabbit_amqp_filtex - rabbit_amqp_management - rabbit_amqp_reader - rabbit_amqp_session From ad1726cf3a2bd98e8598276aeecce04d8ceb0bb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 4 Oct 2024 10:59:09 +0200 Subject: [PATCH 31/33] rabbit_feature_flags: Accept "+feature1,-feature2" in $RABBITMQ_FEATURE_FLAGS [Why] Before this patch, the $RABBITMQ_FEATURE_FLAGS environment variable took an exhaustive list of feature flags to enable. This list overrode the default of enabling all stable feature flags. It made it inconvenient when a user wanted to enable an experimental feature flag like `khepri_db` while still leaving the default behavior. [How] $RABBITMQ_FEATURE_FLAGS now acceps the following syntax: RABBITMQ_FEATURE_FLAGS=+feature1,-feature2 This will start RabbitMQ with all stable feature flags, plus `feature1`, but without `feature2`. 
For users setting `forced_feature_flags_on_init` in the config, the corresponding syntax is: {forced_feature_flags_on_init, {rel, [feature1], [feature2]}} --- deps/rabbit/src/rabbit_ff_controller.erl | 167 +++++++++++++++-------- deps/rabbit_common/src/rabbit_env.erl | 14 +- 2 files changed, 116 insertions(+), 65 deletions(-) diff --git a/deps/rabbit/src/rabbit_ff_controller.erl b/deps/rabbit/src/rabbit_ff_controller.erl index 822f38b01e9..d6f11a73c9a 100644 --- a/deps/rabbit/src/rabbit_ff_controller.erl +++ b/deps/rabbit/src/rabbit_ff_controller.erl @@ -675,48 +675,83 @@ enable_task(FeatureNames) -> end. enable_default_task() -> - FeatureNames = get_forced_feature_flag_names(), - case FeatureNames of - undefined -> + case get_forced_feature_flag_names() of + {ok, undefined} -> ?LOG_DEBUG( "Feature flags: starting an unclustered node for the first " "time: all stable feature flags will be enabled by default", #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), {ok, Inventory} = collect_inventory_on_nodes([node()]), - #{feature_flags := FeatureFlags} = Inventory, - StableFeatureNames = - maps:fold( - fun(FeatureName, FeatureProps, Acc) -> - Stability = rabbit_feature_flags:get_stability( - FeatureProps), - case Stability of - stable -> [FeatureName | Acc]; - _ -> Acc - end - end, [], FeatureFlags), + StableFeatureNames = get_stable_feature_flags(Inventory), enable_many(Inventory, StableFeatureNames); - [] -> + {ok, []} -> ?LOG_DEBUG( "Feature flags: starting an unclustered node for the first " "time: all feature flags are forcibly left disabled from " "the $RABBITMQ_FEATURE_FLAGS environment variable", #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), ok; - _ -> + {ok, FeatureNames} when is_list(FeatureNames) -> ?LOG_DEBUG( "Feature flags: starting an unclustered node for the first " "time: only the following feature flags specified in the " "$RABBITMQ_FEATURE_FLAGS environment variable will be enabled: " - "~tp", + "~0tp", [FeatureNames], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), {ok, 
Inventory} = collect_inventory_on_nodes([node()]), - enable_many(Inventory, FeatureNames) + enable_many(Inventory, FeatureNames); + {ok, {rel, Plus, Minus}} -> + ?LOG_DEBUG( + "Feature flags: starting an unclustered node for the first " + "time: all stable feature flags will be enabled, after " + "applying changes from $RABBITMQ_FEATURE_FLAGS: adding ~0tp, " + "skipping ~0tp", + [Plus, Minus], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + {ok, Inventory} = collect_inventory_on_nodes([node()]), + StableFeatureNames = get_stable_feature_flags(Inventory), + Unsupported = lists:filter( + fun(FeatureName) -> + not is_known_and_supported( + Inventory, FeatureName) + end, Minus), + case Unsupported of + [] -> + FeatureNames = (StableFeatureNames -- Minus) ++ Plus, + enable_many(Inventory, FeatureNames); + _ -> + ?LOG_ERROR( + "Feature flags: unsupported feature flags to skip in " + "$RABBITMQ_FEATURE_FLAGS: ~0tp", + [Unsupported], + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + {error, unsupported} + end; + {error, syntax_error_in_envvar} = Error -> + ?LOG_DEBUG( + "Feature flags: invalid mix of `feature_flag` and " + "`+/-feature_flag` in $RABBITMQ_FEATURE_FLAGS", + #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), + Error end. +get_stable_feature_flags(#{feature_flags := FeatureFlags}) -> + maps:fold( + fun(FeatureName, FeatureProps, Acc) -> + Stability = rabbit_feature_flags:get_stability(FeatureProps), + case Stability of + stable -> [FeatureName | Acc]; + _ -> Acc + end + end, [], FeatureFlags). + -spec get_forced_feature_flag_names() -> Ret when - Ret :: FeatureNames | undefined, - FeatureNames :: [rabbit_feature_flags:feature_name()]. + Ret :: {ok, Abs | Rel | undefined} | {error, syntax_error_in_envvar}, + Abs :: [rabbit_feature_flags:feature_name()], + Rel :: {rel, + [rabbit_feature_flags:feature_name()], + [rabbit_feature_flags:feature_name()]}. 
%% @doc Returns the (possibly empty) list of feature flags the user wants to %% enable out-of-the-box when starting a node for the first time. %% @@ -737,43 +772,64 @@ enable_default_task() -> %% @private get_forced_feature_flag_names() -> - Ret = case get_forced_feature_flag_names_from_env() of - undefined -> get_forced_feature_flag_names_from_config(); - List -> List - end, - case Ret of - undefined -> - ok; - [] -> - ?LOG_INFO( - "Feature flags: automatic enablement of feature flags " - "disabled (i.e. none will be enabled automatically)", - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}); - _ -> - ?LOG_INFO( - "Feature flags: automatic enablement of feature flags " - "limited to the following list: ~tp", - [Ret], - #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}) - end, - Ret. + case get_forced_feature_flag_names_from_env() of + {ok, undefined} -> get_forced_feature_flag_names_from_config(); + {ok, _} = Ret -> Ret; + {error, _} = Error -> Error + end. -spec get_forced_feature_flag_names_from_env() -> Ret when - Ret :: FeatureNames | undefined, - FeatureNames :: [rabbit_feature_flags:feature_name()]. + Ret :: {ok, Abs | Rel | undefined} | {error, syntax_error_in_envvar}, + Abs :: [rabbit_feature_flags:feature_name()], + Rel :: {rel, + [rabbit_feature_flags:feature_name()], + [rabbit_feature_flags:feature_name()]}. 
%% @private get_forced_feature_flag_names_from_env() -> - case rabbit_prelaunch:get_context() of - #{forced_feature_flags_on_init := ForcedFFs} - when is_list(ForcedFFs) -> - ForcedFFs; - _ -> - undefined + Value = case rabbit_prelaunch:get_context() of + #{forced_feature_flags_on_init := ForcedFFs} -> ForcedFFs; + _ -> undefined + end, + case Value of + undefined -> + {ok, Value}; + [] -> + {ok, Value}; + [[Op | _] | _] when Op =:= $+ orelse Op =:= $- -> + lists:foldr( + fun + ([$+ | NameS], {ok, {rel, Plus, Minus}}) -> + Name = list_to_atom(NameS), + Plus1 = [Name | Plus], + {ok, {rel, Plus1, Minus}}; + ([$- | NameS], {ok, {rel, Plus, Minus}}) -> + Name = list_to_atom(NameS), + Minus1 = [Name | Minus], + {ok, {rel, Plus, Minus1}}; + (_, {error, _} = Error) -> + Error; + (_, _) -> + {error, syntax_error_in_envvar} + end, {ok, {rel, [], []}}, Value); + _ when is_list(Value) -> + lists:foldr( + fun + (Name, {ok, Abs}) when is_atom(Name) -> + {ok, [Name | Abs]}; + ([C | _] = NameS, {ok, Abs}) + when C =/= $+ andalso C =/= $- -> + Name = list_to_atom(NameS), + {ok, [Name | Abs]}; + (_, {error, _} = Error) -> + Error; + (_, _) -> + {error, syntax_error_in_envvar} + end, {ok, []}, Value) end. -spec get_forced_feature_flag_names_from_config() -> Ret when - Ret :: FeatureNames | undefined, + Ret :: {ok, FeatureNames | undefined}, FeatureNames :: [rabbit_feature_flags:feature_name()]. %% @private @@ -781,15 +837,8 @@ get_forced_feature_flag_names_from_config() -> Value = application:get_env( rabbit, forced_feature_flags_on_init, undefined), case Value of - undefined -> - Value; - _ when is_list(Value) -> - case lists:all(fun is_atom/1, Value) of - true -> Value; - false -> undefined - end; - _ -> - undefined + undefined -> {ok, Value}; + _ when is_list(Value) -> {ok, Value} end. 
-spec sync_cluster_task() -> Ret when @@ -914,7 +963,7 @@ enable_if_supported(#{states_per_node := _} = Inventory, FeatureName) -> #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), enable_with_registry_locked(Inventory, FeatureName); false -> - ?LOG_DEBUG( + ?LOG_ERROR( "Feature flags: `~ts`: unsupported; aborting", [FeatureName], #{domain => ?RMQLOG_DOMAIN_FEAT_FLAGS}), diff --git a/deps/rabbit_common/src/rabbit_env.erl b/deps/rabbit_common/src/rabbit_env.erl index 4f222ab707f..237a9668962 100644 --- a/deps/rabbit_common/src/rabbit_env.erl +++ b/deps/rabbit_common/src/rabbit_env.erl @@ -999,13 +999,15 @@ forced_feature_flags_on_init(Context) -> case Value of false -> %% get_prefixed_env_var() considers an empty string - %% is the same as an undefined environment variable. - update_context(Context, - forced_feature_flags_on_init, undefined, default); + %% as an undefined environment variable. + update_context( + Context, + forced_feature_flags_on_init, undefined, default); _ -> - Flags = [list_to_atom(V) || V <- string:lexemes(Value, ",")], - update_context(Context, - forced_feature_flags_on_init, Flags, environment) + FeatureNames = string:lexemes(Value, ","), + update_context( + Context, + forced_feature_flags_on_init, FeatureNames, environment) end. log_feature_flags_registry(Context) -> From 1dbe038de99cf72beb674cc90bfac806ac5bd243 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20P=C3=A9dron?= Date: Fri, 4 Oct 2024 11:09:00 +0200 Subject: [PATCH 32/33] rabbit_env: Drop $RABBITMQ_LOG_FF_REGISTRY [Why] Its use was removed when the registry was converted from a compiled module to a persistent_term. 
--- deps/rabbit_common/src/rabbit_env.erl | 13 ------------- deps/rabbit_common/test/rabbit_env_SUITE.erl | 7 ------- 2 files changed, 20 deletions(-) diff --git a/deps/rabbit_common/src/rabbit_env.erl b/deps/rabbit_common/src/rabbit_env.erl index 237a9668962..16a9bbf22a8 100644 --- a/deps/rabbit_common/src/rabbit_env.erl +++ b/deps/rabbit_common/src/rabbit_env.erl @@ -65,7 +65,6 @@ "RABBITMQ_KEEP_PID_FILE_ON_EXIT", "RABBITMQ_LOG", "RABBITMQ_LOG_BASE", - "RABBITMQ_LOG_FF_REGISTRY", "RABBITMQ_LOGS", "RABBITMQ_MNESIA_BASE", "RABBITMQ_MNESIA_DIR", @@ -150,7 +149,6 @@ get_context_after_reloading_env(Context) -> fun keep_pid_file_on_exit/1, fun feature_flags_file/1, fun forced_feature_flags_on_init/1, - fun log_feature_flags_registry/1, fun plugins_path/1, fun plugins_expand_dir/1, fun enabled_plugins_file/1, @@ -1010,17 +1008,6 @@ forced_feature_flags_on_init(Context) -> forced_feature_flags_on_init, FeatureNames, environment) end. -log_feature_flags_registry(Context) -> - case get_prefixed_env_var("RABBITMQ_LOG_FF_REGISTRY") of - false -> - update_context(Context, - log_feature_flags_registry, false, default); - Value -> - Log = value_is_yes(Value), - update_context(Context, - log_feature_flags_registry, Log, environment) - end. 
- %% ------------------------------------------------------------------- %% %% RABBITMQ_PLUGINS_DIR diff --git a/deps/rabbit_common/test/rabbit_env_SUITE.erl b/deps/rabbit_common/test/rabbit_env_SUITE.erl index 0961a37a185..e10ad2f6b42 100644 --- a/deps/rabbit_common/test/rabbit_env_SUITE.erl +++ b/deps/rabbit_common/test/rabbit_env_SUITE.erl @@ -187,7 +187,6 @@ check_default_values(_) -> interactive_shell => default, keep_pid_file_on_exit => default, log_base_dir => default, - log_feature_flags_registry => default, log_levels => default, main_config_file => default, main_log_file => default, @@ -231,7 +230,6 @@ check_default_values(_) -> interactive_shell => false, keep_pid_file_on_exit => false, log_base_dir => "/var/log/rabbitmq", - log_feature_flags_registry => false, log_levels => undefined, main_config_file => "/etc/rabbitmq/rabbitmq", main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log", @@ -282,7 +280,6 @@ check_default_values(_) -> interactive_shell => false, keep_pid_file_on_exit => false, log_base_dir => "%APPDATA%/RabbitMQ/log", - log_feature_flags_registry => false, log_levels => undefined, main_config_file => "%APPDATA%/RabbitMQ/rabbitmq", main_log_file => "%APPDATA%/RabbitMQ/log/" ++ NodeS ++ ".log", @@ -408,7 +405,6 @@ check_values_from_reachable_remote_node(Config) -> interactive_shell => default, keep_pid_file_on_exit => default, log_base_dir => default, - log_feature_flags_registry => default, log_levels => default, main_config_file => default, main_log_file => default, @@ -452,7 +448,6 @@ check_values_from_reachable_remote_node(Config) -> interactive_shell => false, keep_pid_file_on_exit => false, log_base_dir => "/var/log/rabbitmq", - log_feature_flags_registry => false, log_levels => undefined, main_config_file => "/etc/rabbitmq/rabbitmq", main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log", @@ -540,7 +535,6 @@ check_values_from_offline_remote_node(_) -> interactive_shell => default, keep_pid_file_on_exit => default, log_base_dir => 
default, - log_feature_flags_registry => default, log_levels => default, main_config_file => default, main_log_file => default, @@ -584,7 +578,6 @@ check_values_from_offline_remote_node(_) -> interactive_shell => false, keep_pid_file_on_exit => false, log_base_dir => "/var/log/rabbitmq", - log_feature_flags_registry => false, log_levels => undefined, main_config_file => "/etc/rabbitmq/rabbitmq", main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log", From a3fcda30309df0958571ad01047245882843ec4f Mon Sep 17 00:00:00 2001 From: David Ansari Date: Mon, 7 Oct 2024 17:55:58 +0200 Subject: [PATCH 33/33] Update 4.1.0 release notes --- release-notes/4.1.0.md | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/release-notes/4.1.0.md b/release-notes/4.1.0.md index 432b4fd641f..ca80cfa5963 100644 --- a/release-notes/4.1.0.md +++ b/release-notes/4.1.0.md @@ -1,5 +1,23 @@ ## RabbitMQ 4.1.0 +## Highlights + +### AMQP 1.0 Filter Expressions + +[PR #12415](https://github.com/rabbitmq/rabbitmq-server/pull/12415) implements `properties` and `application-properties` filters of [AMQP Filter Expressions Version 1.0 Working Draft 09](https://groups.oasis-open.org/higherlogic/ws/public/document?document_id=66227) when consuming from a stream via AMQP 1.0. +String prefix and suffix matching is also supported. + +This feature: +* adds the ability to RabbitMQ to have multiple concurrent clients each consuming only a subset of messages while maintaining message order, and +* reduces network traffic between RabbitMQ and clients by only dispatching those messages that the clients are actually interested in. + +### Prometheus histogram for message sizes + +[PR #12342](https://github.com/rabbitmq/rabbitmq-server/pull/12342) exposes a Prometheus histogram for message sizes received by RabbitMQ. 
+ +This feature allows operators to gain insights into the message sizes being published to RabbitMQ, such as average message size, number of messages per pre-defined bucket (which can both be computed accurately), and percentiles (which will be approximated). +Each metric is labelled by protocol (AMQP 1.0, AMQP 0.9.1, MQTT 5.0, MQTT 3.1.1, and MQTT 3.1). + ## Potential incompatibilities -* The default MQTT [Maximum Packet Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901086) changed from 256 MiB to 16 MiB. This default can be overriden by [configuring](https://www.rabbitmq.com/docs/configure#config-file) `mqtt.max_packet_size_authenticated`. Note that this value must not be greater than `max_message_size` (which also defaults to 16 MiB). +* The default MQTT [Maximum Packet Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901086) changed from 256 MiB to 16 MiB. This default can be overridden by [configuring](https://www.rabbitmq.com/docs/configure#config-file) `mqtt.max_packet_size_authenticated`. Note that this value must not be greater than `max_message_size` (which also defaults to 16 MiB).