From 399d3b4a0ddf9852be459b1862fad7e9ad1cc2fb Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Wed, 7 Jan 2026 09:11:35 -0800
Subject: [PATCH 1/9] update links
---
modules/ROOT/nav.adoc | 11 ++++-----
modules/ROOT/pages/index.adoc | 4 ++--
.../pages/starlight/index.adoc | 23 -------------------
3 files changed, 7 insertions(+), 31 deletions(-)
delete mode 100644 modules/use-cases-architectures/pages/starlight/index.adoc
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index e218c39..8f8c741 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -1,4 +1,4 @@
-.Processing data
+.Process data
* Change Data Capture (CDC)
** xref:use-cases-architectures:change-data-capture/index.adoc[]
** xref:use-cases-architectures:change-data-capture/table-schema-evolution.adoc[]
@@ -6,11 +6,10 @@
** xref:use-cases-architectures:change-data-capture/questions-and-patterns.adoc[]
* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}]
-.Migrating to {pulsar}
-* xref:use-cases-architectures:starlight/index.adoc[]
-* xref:use-cases-architectures:starlight/kafka/index.adoc[]
-* xref:use-cases-architectures:starlight/rabbitmq/index.adoc[]
-* xref:use-cases-architectures:starlight/jms/index.adoc[]
+.Migrate to {pulsar}
+* xref:starlight-for-kafka:ROOT:index.adoc[]
+* xref:starlight-for-rabbitmq:ROOT:index.adoc[]
+* xref:starlight-for-jms:ROOT:index.adoc[]
.APIs and References
* Connectors
diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc
index 8806857..cc795d8 100644
--- a/modules/ROOT/pages/index.adoc
+++ b/modules/ROOT/pages/index.adoc
@@ -106,8 +106,8 @@
- xref:starlight-for-jms::index.adoc[Starlight for JMS]
- - xref:starlight-for-kafka::index.adoc[]
- - xref:starlight-for-rabbitmq::index.adoc[]
+ - xref:starlight-for-kafka::index.adoc[Starlight for Kafka]
+ - xref:starlight-for-rabbitmq::index.adoc[Starlight for RabbitMQ]
diff --git a/modules/use-cases-architectures/pages/starlight/index.adoc b/modules/use-cases-architectures/pages/starlight/index.adoc
deleted file mode 100644
index 2ca23cd..0000000
--- a/modules/use-cases-architectures/pages/starlight/index.adoc
+++ /dev/null
@@ -1,23 +0,0 @@
-= {company} Starlight Suite of {pulsar-reg} Extensions
-:navtitle: Starlight Extensions
-
-The Starlight suite of extensions is a collection of {pulsar-reg} protocol handlers that extend an existing {pulsar-short} cluster.
-The goal of all the extensions is to create a native, seamless interaction with a {pulsar-short} cluster using existing tooling and clients.
-
-== {kafka-for-astra}
-
-{kafka-for-astra} brings native Apache Kafka(R) protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers.
-
-xref:use-cases-architectures:starlight/kafka/index.adoc[Get started now] | xref:starlight-for-kafka:ROOT:index.adoc[Configuring] | https://github.com/datastax/starlight-for-kafka[Source Code]
-
-== {starlight-rabbitmq}
-
-{starlight-rabbitmq} combines the industry-standard AMQP 0.9.1 (RabbitMQ) API with the cloud-native and horizontally scalable {pulsar-short} streaming platform, providing a powerful way to modernize your RabbitMQ infrastructure, improve performance, and reduce costs.
-
-xref:use-cases-architectures:starlight/rabbitmq/index.adoc[Get started now] | xref:starlight-for-rabbitmq:ROOT:index.adoc[Configuring] | https://github.com/datastax/starlight-for-rabbitmq[Source Code]
-
-== Starlight for JMS
-
-Starlight for JMS allows enterprises to take advantage of the scalability and resiliency of a modern streaming platform to run their existing JMS applications. Because {pulsar-short} is open-source and cloud-native, Starlight for JMS enables enterprises to move their JMS applications to run on-premises and in any cloud environment.
-
-xref:use-cases-architectures:starlight/jms/index.adoc[Get started now] | xref:starlight-for-jms:ROOT:index.adoc[Configuring] | https://github.com/datastax/pulsar-jms[Source Code]
\ No newline at end of file
From eac2dba52267d17a5e63fb7fd0484737581f2df0 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Wed, 7 Jan 2026 09:37:01 -0800
Subject: [PATCH 2/9] combine Luna Streaming starlight content
---
antora.yml | 2 -
example-guide.adoc | 2 +-
modules/ROOT/nav.adoc | 2 +-
modules/ROOT/pages/index.adoc | 2 +-
.../functions/pages/astream-functions.adoc | 36 ++--
modules/functions/pages/deploy-in-sink.adoc | 4 +-
modules/functions/pages/index.adoc | 2 +-
modules/pulsar-io/pages/connectors/index.adoc | 12 +-
.../pages/connectors/sinks/astra-db.adoc | 2 +-
.../pages/connectors/sinks/cloud-storage.adoc | 4 +-
.../connectors/sinks/elastic-search.adoc | 6 +-
.../connectors/sinks/google-bigquery.adoc | 4 +-
.../connectors/sinks/jdbc-clickhouse.adoc | 6 +-
.../pages/connectors/sinks/jdbc-mariadb.adoc | 6 +-
.../pages/connectors/sinks/jdbc-postgres.adoc | 6 +-
.../pages/connectors/sinks/jdbc-sqllite.adoc | 6 +-
.../pages/connectors/sinks/kafka.adoc | 6 +-
.../pages/connectors/sinks/kinesis.adoc | 6 +-
.../pages/connectors/sinks/snowflake.adoc | 4 +-
.../connectors/sources/data-generator.adoc | 4 +-
.../connectors/sources/debezium-mongodb.adoc | 4 +-
.../connectors/sources/debezium-mysql.adoc | 6 +-
.../connectors/sources/debezium-oracle.adoc | 6 +-
.../connectors/sources/debezium-postgres.adoc | 6 +-
.../sources/debezium-sqlserver.adoc | 6 +-
.../pages/connectors/sources/kafka.adoc | 4 +-
.../pages/connectors/sources/kinesis.adoc | 4 +-
.../partials/connectors/sinks/monitoring.adoc | 2 +-
.../connectors/sources/monitoring.adoc | 2 +-
.../partials/subscription-prereq.adoc | 4 +-
.../consuming-change-data.adoc | 4 +-
.../pages/starlight/jms/index.adoc | 14 +-
.../pages/starlight/kafka/index.adoc | 178 +++++++++++++++++-
.../pages/starlight/rabbitmq/index.adoc | 143 +++++++++++++-
34 files changed, 393 insertions(+), 112 deletions(-)
diff --git a/antora.yml b/antora.yml
index 07823f3..54b8ec9 100644
--- a/antora.yml
+++ b/antora.yml
@@ -9,8 +9,6 @@ nav:
asciidoc:
attributes:
company: 'DataStax'
- product: 'Astra Streaming'
- product-short: 'Astra'
astra-db: 'Astra DB'
astra-ui: 'Astra Portal'
astra-url: 'https://astra.datastax.com'
diff --git a/example-guide.adoc b/example-guide.adoc
index 507185c..8969c71 100644
--- a/example-guide.adoc
+++ b/example-guide.adoc
@@ -9,7 +9,7 @@
A guide is a document offering step by step instruction to reach some goal. Guides are technical in nature and tend to make assumptions about the consumer's environment. To help create guides that will work in most environments, please follow these ideas.
* *Keep links to a minimum* - when someone is learning a new concept for the first time and xref:README.adoc[every] other xref:README.adoc[word] is linked it xref:README.adoc[makes] things xref:README.adoc[confusing] and hard to get a good flow going. Instead, annotate a word or phrase and provide a "Resources" area at the bottom of the guide.
-* *Separate products and runtimes in tabs* - it is common to reach the same result through multiple ways. An example is creating a tenant/namespace/topic in {product} and Luna Streaming. Both have the same result but get there in very different ways. Offer each as a tab and let the consumer choose their path. The step after the tabbed step can assume the consumer has complete the previious step and is in a known state. Runtimes follow the same pattern. Weather one is using Java or C#, they are still creating a {pulsar-short} client to interact with the cluster. Create a single step in the guide with multiple tabs for each runtime.
+* *Separate products and runtimes in tabs* - it is common to reach the same result through multiple ways. An example is creating a tenant/namespace/topic in Astra Streaming and Luna Streaming. Both have the same result but get there in very different ways. Offer each as a tab and let the consumer choose their path. The step after the tabbed step can assume the consumer has completed the previous step and is in a known state. Runtimes follow the same pattern. Whether one is using Java or C#, they are still creating a {pulsar-short} client to interact with the cluster. Create a single step in the guide with multiple tabs for each runtime.
* *Be thoughtful about the names you use* - if you are leaning a new concept or feature with no background on the product, words matter. Labeling a tab as "Luna Helm" and then referring to it as "{pulsar-short} Helm Chart" are two distinct things to that reader. The author of the document has such deep understanding that they consider those things the same - and technically they are at {company}. But the read isn't from {company}, so be mindful of their context.
* *Talk in first person* - humans create the guides and humans consume the guides. Write as if you are paired with your consumer in doing what ever the guide does. Use "we", "us", "you".
====
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 8f8c741..28c1349 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -4,7 +4,7 @@
** xref:use-cases-architectures:change-data-capture/table-schema-evolution.adoc[]
** xref:use-cases-architectures:change-data-capture/consuming-change-data.adoc[]
** xref:use-cases-architectures:change-data-capture/questions-and-patterns.adoc[]
-* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}]
+* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming]
.Migrate to {pulsar}
* xref:starlight-for-kafka:ROOT:index.adoc[]
diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc
index cc795d8..6a3f2d6 100644
--- a/modules/ROOT/pages/index.adoc
+++ b/modules/ROOT/pages/index.adoc
@@ -19,7 +19,7 @@
We've included best practices for Apache Pulsar, a full connector reference,
and examples for getting the most out of Astra's CDC feature.
- xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}]
+ xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming]
diff --git a/modules/functions/pages/astream-functions.adoc b/modules/functions/pages/astream-functions.adoc
index 0caf19b..817c164 100644
--- a/modules/functions/pages/astream-functions.adoc
+++ b/modules/functions/pages/astream-functions.adoc
@@ -4,7 +4,7 @@
Functions are lightweight compute processes that enable you to process each message received on a topic.
You can apply custom logic to that message, transforming or enriching it, and then output it to a different topic.
-Functions run inside {product} and are therefore serverless.
+Functions run inside Astra Streaming and are therefore serverless.
You write the code for your function in Java, Python, or Go, then upload the code.
It is automatically run for each message published to the specified input topic.
@@ -12,13 +12,13 @@ Functions are implemented using https://pulsar.apache.org/docs/en/functions-over
[IMPORTANT]
====
-Custom functions require a xref:astra-streaming:operations:astream-pricing.adoc[paid {product} plan].
+Custom functions require a xref:astra-streaming:operations:astream-pricing.adoc[paid Astra Streaming plan].
====
== Deploy Python functions in a zip file
-{product} supports Python-based {pulsar-short} functions.
-These functions can be packaged in a zip file and deployed to {product} or {pulsar-short}.
+Astra Streaming supports Python-based {pulsar-short} functions.
+These functions can be packaged in a zip file and deployed to Astra Streaming or {pulsar-short}.
The same zip file can be deployed to either environment.
To demonstrate this, the following steps create function configuration YAML file, package all necessary function files as a zip archive, and then use the `pulsar-admin` CLI to deploy the zip.
@@ -152,8 +152,8 @@ Replace the following:
* `**INPUT_TOPIC_NAME**`: The input topic for the function
* `**OUTPUT_TOPIC_NAME**`: The output topic for the function
-. Use `pulsar-admin` to deploy the Python zip to {product} or {pulsar-short}.
-The command below assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster. If you are using {product}, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information.
+. Use `pulsar-admin` to deploy the Python zip to Astra Streaming or {pulsar-short}.
+The command below assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster. If you are using Astra Streaming, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information.
+
[source,console]
----
@@ -163,7 +163,7 @@ bin/pulsar-admin functions create --function-config-file /absolute/path/to/func-
. Verify that the function was deployed:
+
* Go to the {astra-ui} to see your newly deployed function listed under the **Functions** tab for your tenant.
-See <> for more information on testing and monitoring your function in {product}.
+See <> for more information on testing and monitoring your function in Astra Streaming.
* Use the `pulsar-admin` CLI to list functions for a specific tenant and namespace:
+
[source,bash,subs="+quotes"]
@@ -173,8 +173,8 @@ bin/pulsar-admin functions list --tenant **TENANT_NAME** --namespace **NAMESPACE
== Deploy Java functions in a JAR file
-{product} supports Java-based {pulsar-short} functions which are packaged in a JAR file.
-The JAR can be deployed to {product} or {pulsar-short}.
+Astra Streaming supports Java-based {pulsar-short} functions which are packaged in a JAR file.
+The JAR can be deployed to Astra Streaming or {pulsar-short}.
The same JAR file can be deployed to either environment.
In this example, you'll create a function JAR file using Maven, then use the `pulsar-admin` CLI to deploy the JAR.
@@ -289,15 +289,15 @@ userConfig:
+
[IMPORTANT]
====
-{product} requires the `inputs` topic to have a message schema defined before deploying the function.
+Astra Streaming requires the `inputs` topic to have a message schema defined before deploying the function.
Otherwise, deployment errors may occur.
Use the {astra-ui} to define the message schema for a topic.
====
-. Use the `pulsar-admin` CLI to deploy your function JAR to {product} or {pulsar-short}.
+. Use the `pulsar-admin` CLI to deploy your function JAR to Astra Streaming or {pulsar-short}.
+
The following command assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster.
-If you are using {product}, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information.
+If you are using Astra Streaming, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information.
+
[source,bash]
----
@@ -307,7 +307,7 @@ bin/pulsar-admin functions create --function-config-file /absolute/path/to/func
. Verify that the function was deployed:
+
* Go to the {astra-ui} to see your newly deployed function listed under the **Functions** tab for your tenant.
-See <> for more information on testing and monitoring your function in {product}.
+See <> for more information on testing and monitoring your function in Astra Streaming.
* Use the `pulsar-admin` CLI to list functions for a specific tenant and namespace:
+
[source,bash,subs="+quotes"]
@@ -315,9 +315,9 @@ See <> for more information
bin/pulsar-admin functions list --tenant **TENANT_NAME** --namespace **NAMESPACE_NAME**
----
-== Add functions in {product} dashboard
+== Add functions in Astra Streaming dashboard
-Add functions in the **Functions** tab of the {product} dashboard.
+Add functions in the **Functions** tab of the Astra Streaming dashboard.
. Select *Create Function* to get started.
@@ -326,7 +326,7 @@ Add functions in the **Functions** tab of the {product} dashboard.
image::astream-name-function.png[Function and Namespace]
. Select the file you want to pull the function from and which function you want to use within that file.
-{product} generates a list of acceptable classes.
+Astra Streaming generates a list of acceptable classes.
+
image::astream-exclamation-function.png[Exclamation Function]
+
@@ -416,7 +416,7 @@ If you want to use different topics, change `in`, `out`, and `log` accordingly.
. Verify that the response is `Created Successfully!`.
This indicates that the function was deployed and ready to run when triggered by incoming messages.
+
-If the response is `402 Payment Required` with `Reason: only qualified organizations can create functions`, then you must upgrade to a xref:astra-streaming:operations:astream-pricing.adoc[paid {product} plan].
+If the response is `402 Payment Required` with `Reason: only qualified organizations can create functions`, then you must upgrade to a xref:astra-streaming:operations:astream-pricing.adoc[paid Astra Streaming plan].
+
You can also verify that a function was created by checking the **Functions** tab or by running `./pulsar-admin functions list --tenant **TENANT_NAME**`.
@@ -512,4 +512,4 @@ A *Function-name Deleted Successfully!* message confirms the function was perman
== Next steps
-Learn more about developing functions for {product} and {pulsar-short} https://pulsar.apache.org/docs/en/functions-develop/[here].
\ No newline at end of file
+Learn more about developing functions for Astra Streaming and {pulsar-short} https://pulsar.apache.org/docs/en/functions-develop/[here].
\ No newline at end of file
diff --git a/modules/functions/pages/deploy-in-sink.adoc b/modules/functions/pages/deploy-in-sink.adoc
index a1c5516..5e6f718 100644
--- a/modules/functions/pages/deploy-in-sink.adoc
+++ b/modules/functions/pages/deploy-in-sink.adoc
@@ -5,11 +5,11 @@ Before this update, functions transformed data either after it was written to a
This required either an intermediate topic, with additional storage, IO, and latency, or a custom connector. +
Now, functions can be deployed at sink creation and apply preprocessing to sink topic writes. +
-== Create sink function in {product}
+== Create sink function in Astra Streaming
Creating a sink function is similar to creating a sink in the {astra-ui}, but with a few additional steps.
-. xref:pulsar-io:connectors/index.adoc[Create a sink] as described in the {product} documentation.
+. xref:pulsar-io:connectors/index.adoc[Create a sink] as described in the Astra Streaming documentation.
. During sink creation, select the transform function you want to run inside the sink.
+
diff --git a/modules/functions/pages/index.adoc b/modules/functions/pages/index.adoc
index d9d14c2..44485b9 100644
--- a/modules/functions/pages/index.adoc
+++ b/modules/functions/pages/index.adoc
@@ -158,7 +158,7 @@ transform-function-2
======
[#deploy-as]
-== Deploy with {product}
+== Deploy with Astra Streaming
Deploy transform functions in the *Functions* tab of the {astra-ui}.
diff --git a/modules/pulsar-io/pages/connectors/index.adoc b/modules/pulsar-io/pages/connectors/index.adoc
index 119d70f..e553573 100644
--- a/modules/pulsar-io/pages/connectors/index.adoc
+++ b/modules/pulsar-io/pages/connectors/index.adoc
@@ -1,16 +1,16 @@
= Connectors
:navtitle: Connector Overview
-{product} offers fully-managed {pulsar-reg} connectors.
+Astra Streaming offers fully-managed {pulsar-reg} connectors.
Create, monitor, and manage both source and sink connectors through our simple UI, the `pulsar-admin` CLI, or RESTful API.
Connect popular data sources to {pulsar} topics or sink data from {pulsar-short} topics to popular systems.
-Below is a list of {pulsar} source and sink connectors supported by {product}.
+Below is a list of {pulsar} source and sink connectors supported by Astra Streaming.
[IMPORTANT]
====
-{product} doesn't support custom sink or source connectors.
+Astra Streaming doesn't support custom sink or source connectors.
====
[#sink-connectors]
@@ -154,7 +154,7 @@ xref:connectors/sources/kinesis.adoc[Kinesis source connector documentation]
== Experimental Connectors
-{company} is always experimenting with connectors. Below are the connectors currently in development that have not yet been promoted to official support in *{product}*.
+{company} is always experimenting with connectors. Below are the connectors currently in development that have not yet been promoted to official support in *Astra Streaming*.
To get access to these connectors, contact {support-url}[{company} Support].
@@ -238,7 +238,7 @@ Zeebe +
== Listing Sink Connectors
-To list available sink connectors in your {product} tenant, use any of the following.
+To list available sink connectors in your Astra Streaming tenant, use any of the following.
[tabs]
====
@@ -288,7 +288,7 @@ curl "$WEB_SERVICE_URL/admin/v3/sinks/builtinsinks" -H "Authorization: $ASTRA_ST
== Listing Source Connectors
-To list available source connectors in your {product} tenant, use any of the following.
+To list available source connectors in your Astra Streaming tenant, use any of the following.
[tabs]
====
diff --git a/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc b/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc
index 0c2d9a5..6e4adfd 100644
--- a/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc
@@ -4,7 +4,7 @@
{company} {astra-db} Sink Connector is based on the open-source xref:pulsar-connector:ROOT:index.adoc[{cass-reg} sink connector for {pulsar-reg}]. Depending on how you deploy the connector, it can be used to sink topic messages with a table in {astra-db} or a table in a {cass-short} cluster outside of DB.
-The {product} portal provides simple way to connect this sink and a table in {astra-db} with simply a token. Using `pulsar-admin` or the REST API, you can configure the sink to connect with a {cass-short} connection manually.
+The Astra Streaming portal provides a simple way to connect this sink and a table in {astra-db} with simply a token. Using `pulsar-admin` or the REST API, you can configure the sink to connect with a {cass-short} connection manually.
This reference assumes you are manually connecting to a {cass-short} table.
diff --git a/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc b/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc
index 68979f9..4ea937d 100644
--- a/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc
@@ -141,9 +141,9 @@ include::partial$connectors/sinks/monitoring.adoc[]
== Connector Reference
-With the Cloud Storage Sink there are two sets of parameters: {product} parameters and cloud storage provider parameters.
+With the Cloud Storage Sink there are two sets of parameters: Astra Streaming parameters and cloud storage provider parameters.
-=== {product} parameters for Cloud Storage Sink
+=== Astra Streaming parameters for Cloud Storage Sink
[%header,format=csv,cols="2,1,1,3"]
|===
diff --git a/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc b/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc
index 416c922..9a776ab 100644
--- a/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc
@@ -8,7 +8,7 @@ Use Elasticsearch to store, search, and manage data for logs, metrics, search ba
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://opensearch.org/docs/1.2/clients/java-rest-high-level/[OpenSearch 1.2.4 library] to interact with
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://opensearch.org/docs/1.2/clients/java-rest-high-level/[OpenSearch 1.2.4 library] to interact with
Elasticsearch.
====
@@ -33,7 +33,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -44,7 +44,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} Elasticsearch sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-elasticsearch-sink/#property[connector properties] for a complete list.
+The Astra Streaming Elasticsearch sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-elasticsearch-sink/#property[connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc b/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc
index 6a0682f..9c7952c 100644
--- a/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc
@@ -22,9 +22,9 @@ include::partial$connectors/sinks/monitoring.adoc[]
== Connector Reference
-The BigQuery sink has multiple sets of parameters: the {product} parameters, the Kafka Connect Adapter parameters, and the Google BigQuery parameters. Each set of parameters provides a way to coordinate how data will be streamed from {pulsar-short} to BigQuery.
+The BigQuery sink has multiple sets of parameters: the Astra Streaming parameters, the Kafka Connect Adapter parameters, and the Google BigQuery parameters. Each set of parameters provides a way to coordinate how data will be streamed from {pulsar-short} to BigQuery.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc
index c1cc7c6..061c2c7 100644
--- a/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc
@@ -7,7 +7,7 @@ real-time.
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/ClickHouse/clickhouse-jdbc[Clickhouse 0.3.2 library] to interact with Clickhouse.
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/ClickHouse/clickhouse-jdbc[Clickhouse 0.3.2 library] to interact with Clickhouse.
====
== Get Started
@@ -26,7 +26,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -37,7 +37,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} JDBC Clickhouse sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
+The Astra Streaming JDBC Clickhouse sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
properties]
for a complete list.
diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc
index dfcca38..e961ff2 100644
--- a/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc
@@ -8,7 +8,7 @@ Read more about {pulsar-reg}'s JDBC sink connector https://pulsar.apache.org/doc
[NOTE]
====
-{product} currently supports {pulsar} {pulsar-version}, which uses the https://mariadb.com/kb/en/about-mariadb-connector-j/[MariaDB Connector/J 2.7.5 library] to interact with MariaDB.
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses the https://mariadb.com/kb/en/about-mariadb-connector-j/[MariaDB Connector/J 2.7.5 library] to interact with MariaDB.
====
== Get Started
@@ -27,7 +27,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -38,4 +38,4 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list.
\ No newline at end of file
+The Astra Streaming JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list.
\ No newline at end of file
diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc
index f7f1c8e..66acace 100644
--- a/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc
@@ -10,7 +10,7 @@ The PostgreSQL JDBC Driver is an open source JDBC driver written in Pure Java (T
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://jdbc.postgresql.org/documentation/setup/[PostgreSQL JDBC 42.4.1 library] to interact with PostgreSQL.
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://jdbc.postgresql.org/documentation/setup/[PostgreSQL JDBC 42.4.1 library] to interact with PostgreSQL.
====
== Get Started
@@ -29,7 +29,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -40,7 +40,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} JDBC PostgreSQL sink connector supports all configuration properties provided by {pulsar-short}. Refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
+The Astra Streaming JDBC PostgreSQL sink connector supports all configuration properties provided by {pulsar-short}. Refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc
index 7a7ee1c..cec3bd7 100644
--- a/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc
@@ -12,7 +12,7 @@ SQLite JDBC is a library for accessing and creating SQLite database files in Jav
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/xerial/sqlite-jdbc[Xerial 3.8.11.2 library] to interact with SQLite.
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/xerial/sqlite-jdbc[Xerial 3.8.11.2 library] to interact with SQLite.
====
== Get Started
@@ -31,7 +31,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -42,7 +42,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
+The Astra Streaming JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector
properties]
for a complete list.
diff --git a/modules/pulsar-io/pages/connectors/sinks/kafka.adoc b/modules/pulsar-io/pages/connectors/sinks/kafka.adoc
index 59aeaa2..e0d6548 100644
--- a/modules/pulsar-io/pages/connectors/sinks/kafka.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/kafka.adoc
@@ -6,7 +6,7 @@ Apache Kafka(R) is an open-source distributed event streaming platform used by t
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library] to interact with
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library] to interact with
Kafka.
====
@@ -26,7 +26,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -37,7 +37,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} Kafka sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kafka-sink#property[connector properties] for a complete list.
+The Astra Streaming Kafka sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kafka-sink#property[connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc b/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc
index c9aaa95..a5cf9e5 100644
--- a/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc
@@ -10,7 +10,7 @@ The Amazon Kinesis Client Library for Java (Amazon KCL) enables Java developers
[NOTE]
====
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/awslabs/amazon-kinesis-client[Amazon Kinesis 2.2.8 library] and the https://github.com/aws/aws-sdk-java[AWS Java SDK 0.14.0 library] to interact with Kinesis.
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/awslabs/amazon-kinesis-client[Amazon Kinesis 2.2.8 library] and the https://github.com/aws/aws-sdk-java[AWS Java SDK 0.14.0 library] to interact with Kinesis.
====
== Get Started
@@ -29,7 +29,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -40,7 +40,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} Kinesis sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kinesis-sink#property[connector properties] for a complete list.
+The Astra Streaming Kinesis sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kinesis-sink#property[connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc b/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc
index 73ec8e0..75cb8a7 100644
--- a/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc
+++ b/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc
@@ -22,7 +22,7 @@ include::partial$connectors/sinks/monitoring.adoc[]
There are two sets of parameters that support sink connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -33,7 +33,7 @@ include::example$connectors/sinks/astra.csv[]
These values are provided in the "configs" area.
-The {product} Snowflake sink connector supports all configuration properties provided by {company}. Please refer to the https://github.com/datastax/snowflake-connector#configuration[connector
+The Astra Streaming Snowflake sink connector supports all configuration properties provided by {company}. Please refer to the https://github.com/datastax/snowflake-connector#configuration[connector
properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sources/data-generator.adoc b/modules/pulsar-io/pages/connectors/sources/data-generator.adoc
index df248e0..469dc0c 100644
--- a/modules/pulsar-io/pages/connectors/sources/data-generator.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/data-generator.adoc
@@ -8,7 +8,7 @@ The Data Generator source connector creates fake data on an {pulsar-reg} topic u
The connector will produce data indefinitely while it is running.
-{product} currently supports {pulsar} {pulsar-version}, which uses version 0.5.9 of the jfairy library.
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses version 0.5.9 of the jfairy library.
For a reference of the full "Person" class, https://github.com/apache/pulsar/blob/branch-{pulsar-version}/pulsar-io/data-generator/src/main/java/org/apache/pulsar/io/datagenerator/Person.java[view the source].
@@ -28,7 +28,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc
index d6966e6..b05e385 100644
--- a/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc
@@ -6,7 +6,7 @@ Debezium’s MongoDB connector tracks a MongoDB replica set or a MongoDB sharded
The connector automatically handles the addition or removal of shards in a sharded cluster, changes in membership of each replica set, elections within each replica set, and the resolution of communications problems.
-{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported MongoDB versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation].
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported MongoDB versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation].
== Get Started
@@ -24,7 +24,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc
index d51dd47..3cf3172 100644
--- a/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc
@@ -4,7 +4,7 @@
The Debezium MySQL connector reads the binlog, produces change events for row-level INSERT, UPDATE, and DELETE operations, and emits these change events as messages in an {pulsar-reg} topic.
-{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries.
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries.
== Get Started
@@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -33,7 +33,7 @@ include::example$connectors/sources/astra.csv[]
These values are provided in the "configs" area.
-The {product} MySQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#mysql-connector-properties[Debezium MySQL connector properties] for a complete list.
+The Astra Streaming MySQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#mysql-connector-properties[Debezium MySQL connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc
index 316faad..2634b47 100644
--- a/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc
@@ -6,7 +6,7 @@ Debezium’s Oracle connector captures and records row-level changes that occur
The connector can be configured to emit change events for specific subsets of schemas and tables, or to ignore, mask, or truncate values in specific columns.
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries.
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries.
== Get Started
@@ -24,7 +24,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -33,7 +33,7 @@ include::example$connectors/sources/astra.csv[]
=== Debezium Oracle
-The {product} Oracle source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#oracle-connector-properties[Debezium Oracle connector properties] for a complete list.
+The Astra Streaming Oracle source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#oracle-connector-properties[Debezium Oracle connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc
index 26a92cc..f05326d 100644
--- a/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc
@@ -4,7 +4,7 @@
The PostgreSQL connector produces a change event for every row-level insert, update, and delete operation that it captures, and sends change event records for each table in a separate {pulsar-reg} topic.
-{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported PostgreSQL versions, please refer to the
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported PostgreSQL versions, please refer to the
https://debezium.io/releases/{debezium-version}/[Debezium documentation].
== Get Started
@@ -23,7 +23,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -34,7 +34,7 @@ include::example$connectors/sources/astra.csv[]
These values are provided in the "configs" area.
-The {product} PostgreSQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/postgresql.html#postgresql-connector-properties[Debezium PostgreSQL connector properties] for a complete list.
+The Astra Streaming PostgreSQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/postgresql.html#postgresql-connector-properties[Debezium PostgreSQL connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc
index 28fcf79..1ad9c47 100644
--- a/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc
@@ -6,7 +6,7 @@ The Debezium SQL Server connector is based on the change data capture feature av
The SQL Server capture process monitors designated databases and tables and stores changes into specifically created change tables with stored procedure facades.
-{product} currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported SQL Server versions, please refer to the
+Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported SQL Server versions, please refer to the
https://debezium.io/releases/{debezium-version}/[Debezium documentation].
== Get Started
@@ -51,7 +51,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -62,7 +62,7 @@ include::example$connectors/sources/astra.csv[]
These values are provided in the "configs" area.
-The {product} SQL Server source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/sqlserver.html#sqlserver-connector-properties[Debezium SQL Server connector properties] for a complete list.
+The Astra Streaming SQL Server source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/sqlserver.html#sqlserver-connector-properties[Debezium SQL Server connector properties] for a complete list.
== What's next?
diff --git a/modules/pulsar-io/pages/connectors/sources/kafka.adoc b/modules/pulsar-io/pages/connectors/sources/kafka.adoc
index 287f4a9..d27fc37 100644
--- a/modules/pulsar-io/pages/connectors/sources/kafka.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/kafka.adoc
@@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -60,5 +60,5 @@ The deserializer is set by a specific implementation of https://github.com/apach
|===
-The {product} Kafka source connector supports all configuration properties provided by {pulsar}.
+The Astra Streaming Kafka source connector supports all configuration properties provided by {pulsar}.
For a complete list, see the https://pulsar.apache.org/docs/io-kafka-source#property[Kafka source connector properties].
diff --git a/modules/pulsar-io/pages/connectors/sources/kinesis.adoc b/modules/pulsar-io/pages/connectors/sources/kinesis.adoc
index bb601ea..9c5bde9 100644
--- a/modules/pulsar-io/pages/connectors/sources/kinesis.adoc
+++ b/modules/pulsar-io/pages/connectors/sources/kinesis.adoc
@@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[]
There are two sets of parameters that support source connectors.
-=== {product}
+=== Astra Streaming
[%header,format=csv,cols="2,1,1,3"]
|===
@@ -71,5 +71,5 @@ If `awsCredentialPluginName` set to empty, the Kinesis sink creates a default AW
|===
-The {product} Kinesis source connector supports all configuration properties provided by {pulsar}.
+The Astra Streaming Kinesis source connector supports all configuration properties provided by {pulsar}.
For a complete list, see the https://pulsar.apache.org/docs/io-kinesis-source#configuration[Kinesis source connector properties].
diff --git a/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc b/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc
index 0dab3e4..14ec323 100644
--- a/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc
+++ b/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc
@@ -261,4 +261,4 @@ Status response for individual connector instance:
=== Metrics
-{product} exposes Prometheus formatted metrics for every connector. Refer to the xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail.
\ No newline at end of file
+Astra Streaming exposes Prometheus formatted metrics for every connector. Refer to the xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail.
\ No newline at end of file
diff --git a/modules/pulsar-io/partials/connectors/sources/monitoring.adoc b/modules/pulsar-io/partials/connectors/sources/monitoring.adoc
index c9e4d67..c3bcf2f 100644
--- a/modules/pulsar-io/partials/connectors/sources/monitoring.adoc
+++ b/modules/pulsar-io/partials/connectors/sources/monitoring.adoc
@@ -193,4 +193,4 @@ Status response for individual connector instance:
=== Metrics
-{product} exposes Prometheus formatted metrics for every connector. Refer to xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail.
\ No newline at end of file
+Astra Streaming exposes Prometheus formatted metrics for every connector. Refer to xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail.
\ No newline at end of file
diff --git a/modules/subscriptions/partials/subscription-prereq.adoc b/modules/subscriptions/partials/subscription-prereq.adoc
index 6cacdae..f5070da 100644
--- a/modules/subscriptions/partials/subscription-prereq.adoc
+++ b/modules/subscriptions/partials/subscription-prereq.adoc
@@ -6,11 +6,11 @@ To run this example, you'll need:
* https://openjdk.java.net/install/[Java OpenJDK 11]
-* A configured {product} instance with at least one streaming tenant and one topic. See the xref:astra-streaming:getting-started:index.adoc[{product} quick start] for instructions.
+* A configured Astra Streaming instance with at least one streaming tenant and one topic. See the xref:astra-streaming:getting-started:index.adoc[Astra Streaming quick start] for instructions.
* A local clone of the https://github.com/datastax/pulsar-subscription-example[{company} {pulsar-short} Subscription Example repository]
-* Modify the `src/main/resources/application.properties` in the `pulsar-subscription-example` repo to connect to your {product} cluster, as below:
+* Modify the `src/main/resources/application.properties` in the `pulsar-subscription-example` repo to connect to your Astra Streaming cluster, as below:
+
[source,bash]
----
diff --git a/modules/use-cases-architectures/pages/change-data-capture/consuming-change-data.adoc b/modules/use-cases-architectures/pages/change-data-capture/consuming-change-data.adoc
index 56a295b..2d881b8 100644
--- a/modules/use-cases-architectures/pages/change-data-capture/consuming-change-data.adoc
+++ b/modules/use-cases-architectures/pages/change-data-capture/consuming-change-data.adoc
@@ -14,7 +14,7 @@ Each client handles message consumption a little differently but there is one ov
Below are example implementations for each runtime consuming messages from the CDC data topic.
-While these examples are in the `astra-streaming-examples` repository, they are not {product}-specific.
+While these examples are in the `astra-streaming-examples` repository, they are not Astra Streaming-specific.
You can use these examples to consume CDC data topics in your own {cass-short}/{pulsar-short} clusters.
* svg:common::icons/logos/csharp.svg[role="icon text-xl",name="C#"] https://github.com/datastax/astra-streaming-examples/blob/master/csharp/astra-cdc/Program.cs[{csharp} CDC project example]
@@ -27,7 +27,7 @@ You can use these examples to consume CDC data topics in your own {cass-short}/{
It is very common to have a function consuming the CDC data. Functions usually perform additional processing on the data and pass it to another topic. Similar to a client consumer, it will need to deserialize the message data. Below are examples of different functions consuming messages from the CDC data topic.
-While these examples are in the `astra-streaming-examples` repository, they are not {product}-specific. You can use these examples to consume CDC data topics in your own {cass-short}/{pulsar-short} clusters.
+While these examples are in the `astra-streaming-examples` repository, they are not Astra Streaming-specific. You can use these examples to consume CDC data topics in your own {cass-short}/{pulsar-short} clusters.
* svg:common::icons/logos/go.svg[role="icon text-xl",name="Go"] https://github.com/datastax/astra-streaming-examples/blob/master/go/astra-cdc/main/main.go[Golang CDC project example]
* svg:common::icons/logos/java.svg[role="icon text-xl",name="Java"] https://github.com/datastax/astra-streaming-examples/blob/master/java/astra-cdc/javaexamples/functions/CDCFunction.java[Java CDC function example]
diff --git a/modules/use-cases-architectures/pages/starlight/jms/index.adoc b/modules/use-cases-architectures/pages/starlight/jms/index.adoc
index 2758aae..c6c8221 100644
--- a/modules/use-cases-architectures/pages/starlight/jms/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/jms/index.adoc
@@ -12,16 +12,16 @@ To get started, you need the following:
* A working {pulsar-short} cluster.
* Access to the cluster's admin port 8080 and the binary port 6650.
-This guide uses {product} to get started with Starlight for JMS.
+This guide uses Astra Streaming to get started with Starlight for JMS.
For more information, see the xref:starlight-for-jms:ROOT:index.adoc[Starlight for JMS documentation].
[tabs]
====
-{product}::
+Astra Streaming::
+
--
-If you don't have a tenant in {product}, follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
+If you don't have a tenant in Astra Streaming, follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
--
Luna Streaming::
@@ -38,9 +38,9 @@ Using a standalone cluster? The Starlight for JMS docs provide the "xref:starlig
== Messaging with Starlight for JMS
-=== Retrieve connection properties in {product}
+=== Retrieve connection properties in Astra Streaming
-. In the {product} portal "Connect" tab, the "{pulsar-short}" area provides important connection information.
+. In the Astra Streaming portal "Connect" tab, the "{pulsar-short}" area provides important connection information.
. Scroll down to the "Tenant Details" area to find your {pulsar-short} connection information.
+
@@ -51,7 +51,7 @@ image:pulsar-client-settings.png[]
This example uses Maven for the project structure.
If you prefer Gradle or another tool, this code should still be a good fit.
-For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository].
+For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[Astra Streaming examples repository].
. Create a new Maven project.
+
@@ -81,7 +81,7 @@ cd StarlightForJMSClient
If you cloned the example repository, replace the entire contents of the file with the following code.
Your editor will report an error because this isn't a complete script yet.
+
-Replace placeholders with the values you previously retrieved from {product}.
+Replace placeholders with the values you previously retrieved from Astra Streaming.
+
[source,java]
----
diff --git a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc b/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
index e7a18cd..2767930 100644
--- a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
@@ -20,14 +20,14 @@ Choose the option that best fits your needs.
[tabs]
====
-{product}::
+Astra Streaming::
+
--
If you want a working Kafka extension as quickly as possible, this is your best bet.
This is also a good option for those that already have a streaming tenant and are looking to extend it.
-. Sign in to your {product-short} account and navigate to your streaming tenant.
+. Sign in to your Astra account and navigate to your streaming tenant.
+
TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
@@ -39,12 +39,170 @@ TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-sta
. Select the "Enable Kafka" button to confirm your understanding.
-Your {product} tenant is ready for prime time! Continue to the next section of the guide to see it in action.
+Your Astra Streaming tenant is ready for prime time! Continue to the next section of the guide to see it in action.
--
Luna Streaming::
+
--
The {kafka-for-astra} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster. The Luna helm chart makes deploying the Kafka extension quite easy. Follow the "xref:luna-streaming:components:starlight-for-kafka.adoc[]" guide to create a simple {pulsar-short} cluster with the {kafka-for-astra} extension ready for use.
+
+== Use Starlight for Kafka with Luna Streaming
+
+Starlight for Kafka brings the native Apache Kafka protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers.
+By adding the Starlight for Kafka protocol handler to your {pulsar-short} cluster, you can migrate your existing Kafka applications and services to {pulsar-short} without modifying the code.
+
+The following steps explain how to deploy a Luna Streaming Helm chart with the Starlight for Kafka protocol handler extension.
+
+=== Prerequisites
+
+* https://helm.sh/docs/intro/install/[Helm 3 CLI] (we used version 3.8.0)
+* https://www.apache.org/dyn/closer.cgi?path=/kafka/3.3.1/kafka_2.13-3.3.1.tgz[Kafka CLI] (we used version 3.3.1)
+* https://kubernetes.io/docs/tasks/tools/[Kubectl CLI] (we used version 1.23.4)
+* Enough access to a K8s cluster to create a namespace, deployments, and pods
+
+=== Install Luna Streaming Helm chart
+
+. Add the {company} Helm chart repo to your Helm store.
++
+[source,shell]
+----
+helm repo add datastax-pulsar https://datastax.github.io/pulsar-helm-chart
+----
+
+. Install the Helm chart using a minimalist values file.
+This command creates a Helm release named `my-pulsar-cluster` using the {company} Luna Helm chart, within the K8s namespace `datastax-pulsar`.
+The minimal cluster creates only the essential components and has no ingress or load balanced services.
++
+[source,shell]
+----
+VALUES_URL="https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/starlight-for-kafka/values.yaml"
+helm install \
+ --namespace datastax-pulsar \
+ --create-namespace \
+ --values $VALUES_URL \
+ --version 3.0.4 \
+ my-pulsar-cluster \
+ datastax-pulsar/pulsar
+----
+
+. Wait for the broker pod to be in a running state. You might see a few restarts as your components start up.
++
+[source,shell]
+----
+kubectl -n datastax-pulsar wait --for=condition=Ready pod/pulsar-broker-0 --timeout=120s
+----
+
+=== Forward service port
+
+You'll need to interact with a few of the services in the K8s cluster.
+Map a few ports to those services.
+
+In a new terminal, port forward {pulsar-short}'s admin service:
+
+[source,shell]
+----
+kubectl port-forward -n datastax-pulsar service/pulsar-broker 8080:8080
+----
+
+In a separate terminal window, port forward the Starlight for Kafka service:
+
+[source,shell]
+----
+kubectl port-forward -n datastax-pulsar service/pulsar-proxy 9092:9092
+----
+
+=== Inspect the created namespaces and topics
+
+The Luna Streaming Helm Chart automatically creates a tenant named "public" and a namespace within that tenant named "default".
+
+The Starlight for Kafka extension creates a few namespaces and topics to function correctly.
+
+List the namespaces in the "public" tenant to see what was created:
+
+[source,shell]
+----
+~/apache-pulsar-2.10.1$ ./bin/pulsar-admin namespaces list public
+----
+
+The output should be similar to the following.
+
+[source,shell]
+----
+public/__kafka
+public/__kafka_producerid
+public/default
+----
+
+Notice the namespaces prefixed with "__kafka".
+These are used by the service for different functions.
+To learn more about Starlight for Kafka operations, see the S4K xref:starlight-for-kafka:ROOT:index.adoc[documentation].
+
+=== Produce a message with the Kafka CLI
+
+If you hadn't noticed, we never opened the {pulsar-short} binary port to accept new messages.
+Only the admin port and the Kafka port are open.
+To further show how native Starlight for Kafka is to {pulsar-short}, we will use the Kafka CLI to produce and consume messages from {pulsar-short}.
+
+From within the Kafka directory, run the following command to start the shell:
+
+[source,shell]
+----
+~/kafka_2.13-3.3.1$ ./bin/kafka-console-producer.sh --topic quickstart --bootstrap-server localhost:9092
+----
+
+Type a message, press Enter to send it, then Ctrl-C to exit the producer shell.
+
+[source,shell]
+----
+This my first message
+----
+
+Wait a second! We never created a topic! And where did the "quickstart" topic come from?!
+
+The default behavior of Starlight for Kafka is to create a new single partition, persistent topic when one is not present.
+You can configure this behavior and many other S4K parameters in the https://github.com/datastaxdevs/luna-streaming-examples/blob/main/starlight-for-kafka/values.yaml[Helm chart].
+Learn more about the configuration values xref:starlight-for-kafka:configuration:starlight-kafka-configuration.adoc[here].
+
+Let's have a look at the topic that was created. From your {pulsar-short} home folder, run the following command:
+
+[source,shell]
+----
+~/apache-pulsar-2.10.1$ ./bin/pulsar-admin topics list public/default
+----
+
+The output will include the newly created topic:
+
+[source,shell]
+----
+persistent://public/default/quickstart-partition-0
+----
+
+=== Consume the new message with the Kafka CLI
+
+Let's use the Kafka CLI to consume the message we just produced.
+
+Start the consumer shell from the Kafka home folder with the following command:
+
+[source,shell]
+----
+~/kafka_2.13-3.3.1$ ./bin/kafka-console-consumer.sh --topic quickstart --from-beginning --bootstrap-server localhost:9092
+----
+
+The data of our new message will be output. Enter Ctrl-C to exit the shell.
+
+[source,shell]
+----
+This my first message
+----
+
+=== Next steps
+
+Kafka users and existing applications using Kafka can enjoy the many benefits of a {pulsar-short} cluster, while never having to change tooling or libraries.
+Other folks who are more comfortable with {pulsar-short} tooling and clients can also interact with the same topics. Together, new and legacy applications work together to create modern solutions.
+
+Here are links to other guides and resources you might be interested in.
+
+* xref:streaming-learning:use-cases-architectures:starlight/kafka/index.adoc[Messaging with Starlight for Kafka]
--
Self Managed::
+
@@ -57,14 +215,14 @@ Already have your own {pulsar-short} cluster? Or maybe you're using a standalone
{kafka-for-astra} supports quite a few different use cases. With a {pulsar-short} cluster between producers and consumers you can interchange the type of producer and consumer to fit your needs.
-*The below examples are using an {product} tenant as the Kafka bootstrap server.* If you are using Luna Streaming or a self-managed tenant, switch the bootstrap server URL for your own.
+*The below examples are using an Astra Streaming tenant as the Kafka bootstrap server.* If you are using Luna Streaming or a self-managed tenant, switch the bootstrap server URL for your own.
-=== Retrieve Kafka connection properties in {product}
+=== Retrieve Kafka connection properties in Astra Streaming
-In the {product} portal "Connect" tab, the "kafka" area provides important connection information.
+In the Astra Streaming portal "Connect" tab, the "kafka" area provides important connection information.
You will need this connection information to create a working Kafka client or use the CLI.
-image:kafka-client-settings.png[{product} kafka settings]
+image:kafka-client-settings.png[Astra Streaming kafka settings]
TIP: Click the clipboard icon to copy the Kafka connection values, as well as a working token to paste in code.
@@ -78,7 +236,7 @@ Kafka CLI::
Download the latest Kafka distribution https://www.apache.org/dyn/closer.cgi?path=/kafka/3.3.1/kafka_2.13-3.3.1.tgz[here].
With the tarball extracted, the producer and consumer CLIs are in the 'bin' folder.
-. To get started, let's set a few variables. If you've completed our "xref:astra-streaming:getting-started:index.adoc[Getting started with {product}]" guide, the below values will be a perfect fit for your existing tenant.
+. To get started, let's set a few variables. If you've completed our "xref:astra-streaming:getting-started:index.adoc[Getting started with Astra Streaming]" guide, the below values will be a perfect fit for your existing tenant.
+
[source,shell]
----
@@ -127,7 +285,7 @@ Kafka Client (Java)::
This example uses Maven for the project structure.
If you prefer Gradle or another tool, this code should still be a good fit.
-For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository].
+For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[Astra Streaming examples repository].
. Create a new Maven project.
+
@@ -157,7 +315,7 @@ cd StarlightForKafkaClient
If you cloned the example repo, replace the entire contents of `App.java` with the following code.
Your editor will report an error because this isn't a complete script yet.
+
-Replace placeholders with the values you previously retrieved from {product}.
+Replace placeholders with the values you previously retrieved from Astra Streaming.
+
[source,java]
----
diff --git a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
index fa704ec..c12db8f 100644
--- a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
@@ -21,12 +21,12 @@ Choose the option that best fits your needs.
[tabs]
====
-{product}::
+Astra Streaming::
+
--
If you want a working RabbitMQ extension as quickly as possible, this is your best bet. This is also a good option for those that already have a streaming tenant and are looking to extend it.
-. Sign in to your {product-short} account and navigate to your streaming tenant.
+. Sign in to your Astra account and navigate to your streaming tenant.
+
TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
@@ -38,12 +38,137 @@ TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-sta
. Click the "Enable RabbitMQ" button to confirm your understanding.
-Your {product} tenant is ready for prime time! Continue to the next section of the guide to see it in action.
+Your Astra Streaming tenant is ready for prime time! Continue to the next section of the guide to see it in action.
--
Luna Streaming::
+
--
The {starlight-rabbitmq} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster. The Luna helm chart makes deploying the Kafka extension quite easy. Follow the "xref:luna-streaming:components:starlight-for-rabbitmq.adoc[]" guide to create a simple {pulsar-short} cluster with the {starlight-rabbitmq} extension ready for use.
+
+== Use Starlight for RabbitMQ with Luna Streaming
+
+Starlight for RabbitMQ brings native https://www.rabbitmq.com/[RabbitMQ] protocol support to https://pulsar.apache.org/[{pulsar-reg}] by introducing a RabbitMQ protocol handler on {pulsar-short} brokers or {pulsar-short} proxies.
+By adding the Starlight for RabbitMQ protocol handler to your {pulsar-short} cluster, you can migrate your existing RabbitMQ applications and services to {pulsar-short} without modifying the code.
+
+The following steps explain how to deploy a Luna Streaming Helm chart with the Starlight for RabbitMQ protocol handler extension.
+
+=== Prerequisites
+
+* https://helm.sh/docs/intro/install/[Helm 3 CLI] (we used version 3.8.0)
+* https://kubernetes.io/docs/tasks/tools/[Kubectl CLI] (we used version 1.23.4)
+* Python (we used version 3.8.10)
+* Enough access to a K8s cluster to create a namespace, deployments, and pods
+
+=== Install Luna Streaming Helm chart
+
+. Add the {company} Helm chart repo to your Helm store.
++
+[source,shell]
+----
+helm repo add datastax-pulsar https://datastax.github.io/pulsar-helm-chart
+----
+
+. Install the Helm chart using a minimalist values file.
+This command creates a Helm release named `my-pulsar-cluster` using the {company} Luna Helm chart, within the K8s namespace `datastax-pulsar`.
+The minimal cluster creates only the essential components and has no ingress or load balanced services.
++
+[source,shell]
+----
+VALUES_URL="https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/starlight-for-rabbitmq/values.yaml"
+helm install \
+ --namespace datastax-pulsar \
+ --create-namespace \
+ --values $VALUES_URL \
+ --version 3.0.4 \
+ my-pulsar-cluster \
+ datastax-pulsar/pulsar
+----
+
+. Wait for the broker pod to be in a running state. You might see a few restarts as your components start up.
++
+[source,shell]
+----
+kubectl -n datastax-pulsar wait --for=condition=Ready pod/pulsar-broker-0 --timeout=120s
+----
+
+=== Forward service port
+
+You'll need to interact with a few of the services in the K8s cluster.
+Map a few ports to those services.
+
+In a new terminal, port forward {pulsar-short}'s admin service:
+
+[source,shell]
+----
+kubectl port-forward -n datastax-pulsar service/pulsar-broker 8080:8080
+----
+
+In a separate terminal window, port forward the Starlight for RabbitMQ service:
+
+[source,shell]
+----
+kubectl port-forward -n datastax-pulsar service/pulsar-proxy 5672:5672
+----
+
+=== Produce a message with the RabbitMQ Python client
+
+If you hadn't noticed, we never opened the {pulsar-short} binary port to accept new messages.
+Only the admin port and the RabbitMQ port are open.
+To further demonstrate how native Starlight for RabbitMQ is to {pulsar-short}, we will use the Pika RabbitMQ Python library to produce and consume messages from {pulsar-short}.
+
+Save the following Python script to a safe place as `test-queue.py`.
+The script assumes you have opened the localhost:5672 port.
+
+[source,python]
+----
+#!/usr/bin/env python
+import pika
+
+connection = pika.BlockingConnection(pika.ConnectionParameters(port=5672))
+channel = connection.channel()
+
+try:
+ channel.queue_declare("test-queue")
+ print("created test-queue queue")
+
+ channel.basic_publish(exchange="", routing_key="test-queue", body="test".encode('utf-8'))
+ print("published message test")
+
+ _, _, res = channel.basic_get(queue="test-queue", auto_ack=True)
+ assert res is not None, "should have received a message"
+ print("received message: " + res.decode())
+
+ channel.queue_delete("test-queue")
+ print("deleted test-queue queue")
+
+finally:
+ connection.close()
+----
+
+Open a terminal and return to the safe place where you saved the Python script.
+Run the following command to execute the Python program.
+
+[source,shell]
+----
+python ./test-queue.py
+----
+
+The output should look like the following.
+
+[source,shell]
+----
+created test-queue queue
+published message test
+received message: test
+deleted test-queue queue
+----
+
+=== Next steps
+
+The Luna Helm chart deployed Starlight for RabbitMQ on the {pulsar-short} proxy and opened the correct port.
+Your application will now "talk" to {pulsar-short} as if it were a real RabbitMQ host.
+
+* xref:streaming-learning:use-cases-architectures:starlight/rabbitmq/index.adoc[Messaging with Starlight for RabbitMQ]
--
Self Managed::
+
@@ -57,14 +182,14 @@ Already have your own {pulsar-short} Cluster? Or maybe you're using a standalone
{starlight-rabbitmq} supports quite a few different use cases.
With a {pulsar-short} cluster between publishers and consumers you can interchange the type of publisher and consumer to fit your needs.
-*The below examples are using an {product} tenant as the AMQP listener.* If you are using Luna Streaming or a self-managed tenant, switch the listener URL for your own.
+*The below examples are using an Astra Streaming tenant as the AMQP listener.* If you are using Luna Streaming or a self-managed tenant, switch the listener URL for your own.
-=== Retrieve RabbitMQ connection properties in {product}
+=== Retrieve RabbitMQ connection properties in Astra Streaming
-In the {product} portal "Connect" tab, the "RabbitMQ" area provides important connection information.
+In the Astra Streaming portal "Connect" tab, the "RabbitMQ" area provides important connection information.
You will need this connection information to create a working RabbitMQ client or use the CLI.
-image:rabbitmq-client-settings.png[{product} RabbitMQ settings]
+image:rabbitmq-client-settings.png[Astra Streaming RabbitMQ settings]
TIP: Click the clipboard icon to copy the RabbitMQ connection values, as well as a working token to paste in code.
@@ -73,7 +198,7 @@ TIP: Click the clipboard icon to copy the RabbitMQ connection values, as well as
This example uses Maven for the project structure for a Rabbit MQ Java client.
If you prefer Gradle or another tool, this code should still be a good fit.
-For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository].
+For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[Astra Streaming examples repository].
. Create a new Maven project.
+
@@ -103,7 +228,7 @@ cd StarlightForRabbitMqClient
If you cloned the example repo, replace the entire contents with the following code.
Your editor will report errors because this isn't a complete program yet.
+
-Replace placeholders with the values you previously retrieved from {product}.
+Replace placeholders with the values you previously retrieved from Astra Streaming.
+
[source,java]
----
From 2eae7afba90fb8caccfe9323b40aaf88f2c2258c Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Wed, 7 Jan 2026 15:29:03 -0800
Subject: [PATCH 3/9] revising unified pages b4 move to starlight repos
---
antora.yml | 2 +-
modules/ROOT/nav.adoc | 6 +-
.../pages/starlight/kafka/index.adoc | 222 ++++++++++------
.../pages/starlight/rabbitmq/index.adoc | 248 +++++++++++++-----
4 files changed, 336 insertions(+), 142 deletions(-)
diff --git a/antora.yml b/antora.yml
index 54b8ec9..448e154 100644
--- a/antora.yml
+++ b/antora.yml
@@ -19,7 +19,7 @@ asciidoc:
pulsar-version: '3.1' #DO NOT INCLUDE PATCH VERSION ..
debezium-version: '1.7'
astra-streaming-examples-repo: 'https://raw.githubusercontent.com/datastax/astra-streaming-examples/master'
- kafka-for-astra: 'Starlight for Kafka'
+ starlight-kafka: 'Starlight for Kafka'
starlight-rabbitmq: 'Starlight for RabbitMQ'
cass: Apache Cassandra
cass-short: Cassandra
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 28c1349..0ad3b74 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -7,9 +7,9 @@
* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming]
.Migrate to {pulsar}
-* xref:starlight-for-kafka:ROOT:index.adoc[]
-* xref:starlight-for-rabbitmq:ROOT:index.adoc[]
-* xref:starlight-for-jms:ROOT:index.adoc[]
+* xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka}]
+* xref:starlight-for-rabbitmq:ROOT:index.adoc[{starlight-rabbitmq}]
+* xref:starlight-for-jms:ROOT:index.adoc[Starlight for JMS]
.APIs and References
* Connectors
diff --git a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc b/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
index 2767930..b2ec8fc 100644
--- a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/kafka/index.adoc
@@ -1,68 +1,134 @@
-= Getting started with the {kafka-for-astra} extension
-:navtitle: {kafka-for-astra}
-:description: Learn how to get started using the {kafka-for-astra} extension with {pulsar-reg} and get hands on with Kafka producer and consumer interacting with a topic.
+= Get started with the {starlight-kafka} extension
+:navtitle: {starlight-kafka}
+:description: Use the {starlight-kafka} extension with {pulsar-reg}.
-{kafka-for-astra} brings the native Apache Kafka(R) protocol support to {pulsar-reg} by introducing a Kafka protocol handler on {pulsar-short} brokers. By adding the {kafka-for-astra} protocol handler to your existing {pulsar-short} cluster, you can migrate your existing Kafka applications and services to {pulsar-short} without modifying the code.
+The https://github.com/datastax/starlight-for-kafka[{starlight-kafka} extension] brings the native Apache Kafka(R) protocol support to {pulsar-reg} by introducing a Kafka protocol handler on {pulsar-short} brokers.
+By adding the {starlight-kafka} protocol handler to your existing {pulsar-short} cluster, you can migrate your existing Kafka applications and services to {pulsar-short} without modifying the code.
-If source code is your thing, visit the https://github.com/datastax/starlight-for-kafka[project's repo on GitHub].
+For more information about the {starlight-kafka} architecture, see the xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka} documentation].
-== Architecture reference
+image:s4k-architecture.png[{starlight-kafka} Architecture]
-If you would like to get deep into how {kafka-for-astra} works, xref:starlight-for-kafka:ROOT:index.adoc[read the docs].
+== Establish the Kafka protocol handler
-image:s4k-architecture.png[{kafka-for-astra} Architecture]
-
-== Establishing the Kafka protocol handler
-
-Before a Kafka client can interact with your {pulsar-short} cluster, you need the {kafka-for-astra} protocol handler installed in the cluster.
+Before a Kafka client can interact with your {pulsar-short} cluster, you need the {starlight-kafka} protocol handler installed in the cluster.
Installation looks a bit different depending on where your {pulsar-short} cluster is running.
Choose the option that best fits your needs.
[tabs]
-====
+======
Astra Streaming::
+
--
+Get started producing and consuming Kafka messages on a {pulsar-short} cluster.
+
+=== Enable {starlight-kafka} on an Astra Streaming tenant
-If you want a working Kafka extension as quickly as possible, this is your best bet.
-This is also a good option for those that already have a streaming tenant and are looking to extend it.
+. Sign in to your Astra account, go to Astra Streaming, and then xref:astra-streaming:getting-started:index.adoc[create a tenant] or click an existing tenant.
-. Sign in to your Astra account and navigate to your streaming tenant.
+. Go to your tenant's **Connect** tab, select **Kafka**, and then click **Enable Kafka**.
+
+. Review the information about the {starlight-kafka} extension, and then click **Enable Kafka** to confirm that you want to enable this extension on your tenant.
+
-TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
+[IMPORTANT]
+====
+This action creates a configuration file and the following three namespaces in your Astra Streaming tenant automatically:
-. Go to the "Connect" tab and choose the "Kafka" option.
+* `kafka`: Produces and consumes messages
+* `+__kafka+`: Supports required Kafka functionality
+* `+__kafka_unlimited+`: Stores metadata
+
+These namespaces are required for the {starlight-kafka} extension to function properly.
+These are permanent namespaces that cannot be removed except by deleting the entire tenant and all of its data.
+====
-. Click "Enable Kafka".
+. Save the Kafka configuration details to a file named `ssl.properties`.
+The actual values depend on your Astra Streaming tenant's configuration and cloud provider.
++
+.ssl.properties
+[source,plain,subs="+quotes"]
+----
+username: **TENANT_NAME**
+password: token:***
+bootstrap.servers: kafka-**PROVIDER**-**REGION**.streaming.datastax.com:9093
+schema.registry.url: https://kafka-**PROVIDER**-**REGION**.streaming.datastax.com:8081
+security.protocol: SASL_SSL
+sasl.mechanism: PLAIN
+----
-. A message will let you know of the additions (and restrictions) that come with using {kafka-for-astra}.
+=== Connect Kafka and {pulsar-short}
+// Move to messaging section?
+This example uses tools included with the https://kafka.apache.org/downloads[Apache Kafka tarball].
-. Select the "Enable Kafka" button to confirm your understanding.
+. In Astra Streaming, create a new topic in the `kafka` namespace.
++
+No specific name is required.
-Your Astra Streaming tenant is ready for prime time! Continue to the next section of the guide to see it in action.
---
-Luna Streaming::
+. In your Kafka instance, move your `ssl.properties` file to your `Kafka_2.13-3.1.0/config` folder.
+These values are required for SSL encryption.
+The exact values depend on your Astra Streaming tenant's configuration.
+
---
-The {kafka-for-astra} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster. The Luna helm chart makes deploying the Kafka extension quite easy. Follow the "xref:luna-streaming:components:starlight-for-kafka.adoc[]" guide to create a simple {pulsar-short} cluster with the {kafka-for-astra} extension ready for use.
+[source,properties,subs="+quotes"]
+----
+bootstrap.servers=kafka-**PROVIDER**-**REGION**.streaming.datastax.com:9093
+security.protocol=SASL_SSL
+sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='**TENANT_NAME**' password='token:{pulsar tenant token}'
+sasl.mechanism=PLAIN
+session.timeout.ms=45000
+----
-== Use Starlight for Kafka with Luna Streaming
+. In the `Kafka` directory, create a Kafka producer to produce messages on the topic you created in the `kafka` namespace:
++
+[source,shell,subs="+quotes"]
+----
+bin/kafka-console-producer \
+--broker-list kafka-**PROVIDER**-**REGION**.streaming.datastax.com:9093 \
+--topic **TENANT_NAME**/kafka/**TOPIC_NAME** \
+--producer.config config/ssl.properties
+----
++
+Once the producer is ready, it accepts standard input from the user:
++
+[source,console]
+----
+>hello pulsar
+----
+
+. In a new terminal window, create a Kafka consumer to consume messages from the beginning of your topic:
++
+[source,shell]
+----
+bin/kafka-console-consumer \
+--bootstrap-server kafka-**PROVIDER**-**REGION**.streaming.datastax.com:9093 \
+--topic **TENANT_NAME**/kafka/**TOPIC_NAME** \
+--consumer.config config/ssl.properties \
+--from-beginning
+----
-Starlight for Kafka brings the native Apache Kafka protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers.
-By adding the Starlight for Kafka protocol handler to your {pulsar-short} cluster, you can migrate your existing Kafka applications and services to {pulsar-short} without modifying the code.
+. Send a few messages in the terminal to generate traffic on the tenant.
-The following steps explain how to deploy a Luna Streaming Helm chart with the Starlight for Kafka protocol handler extension.
+. In Astra Streaming, go to your tenant's **Namespaces and Topics** tab to inspect the activity in the `kafka` namespace.
++
+To verify that your Kafka messages are being produced and consumed in your Astra Streaming {pulsar-short} cluster, check the **Data In** metrics.
+The number of messages should be at least equal to the number of messages you sent with your Kafka producer on the command line.
+--
+
+Luna Streaming::
++
+--
+The {starlight-kafka} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster.
+The Luna Helm chart simplifies deployment of the Kafka extension.
-=== Prerequisites
+The following steps explain how to deploy a Luna Streaming Helm chart to create a simple {pulsar-short} cluster with the Starlight for Kafka protocol handler extension.
+. Make sure you meet the following prerequisites:
++
* https://helm.sh/docs/intro/install/[Helm 3 CLI] (we used version 3.8.0)
* https://www.apache.org/dyn/closer.cgi?path=/kafka/3.3.1/kafka_2.13-3.3.1.tgz[Kafka CLI] (we used version 3.3.1)
* https://kubernetes.io/docs/tasks/tools/[Kubectl CLI] (we used version 1.23.4)
* Enough access to a K8s cluster to create a namespace, deployments, and pods
-=== Install Luna Streaming Helm chart
-
-. Add the {company} Helm chart repo to your Helm store.
+. Add the {company} Helm chart repo to your Helm store:
+
[source,shell]
----
@@ -85,60 +151,59 @@ helm install \
datastax-pulsar/pulsar
----
-. Wait for the broker pod to be in a running state. You might see a few restarts as your components start up.
+. Wait for the broker pod to reach a running state.
+It might restart a few times while your components start up.
+
[source,shell]
----
kubectl -n datastax-pulsar wait --for=condition=Ready pod/pulsar-broker-0 --timeout=120s
----
-=== Forward service port
-
-You'll need to interact with a few of the services in the K8s cluster.
-Map a few ports to those services.
-
-In a new terminal, port forward {pulsar-short}'s admin service:
-
+. Forward service ports so you can interact with certain services on the Kubernetes cluster:
++
+.. In a new terminal, port forward {pulsar-short}'s admin service:
++
[source,shell]
----
kubectl port-forward -n datastax-pulsar service/pulsar-broker 8080:8080
----
-In a separate terminal window, port forward the Starlight for Kafka serivce:
-
+.. In a separate terminal window, port forward the Starlight for Kafka service:
++
[source,shell]
----
kubectl port-forward -n datastax-pulsar service/pulsar-proxy 9092:9092
----
-=== Inspect the created namespaces and topics
-
-The Luna Streaming Helm Chart automatically creates a tenant named "public" and a namespace within that tenant named "default".
-
-The Starlight for Kafka extension creates a few namespaces and topics to function correctly.
-
-List the namespaces in the "public" tenant to see what was created:
-
+. Inspect the created namespaces and topics.
++
+The Luna Streaming Helm Chart automatically creates a tenant named `public` and a namespace within that tenant named `default`.
+The Starlight for Kafka extension creates a few namespaces and topics automatically because these are required for the extension to function correctly.
++
+List the namespaces in the `public` tenant:
++
[source,shell]
----
-~/apache-pulsar-2.10.1$ ./bin/pulsar-admin namespaces list public
+~/apache-pulsar-3.1.3$ ./bin/pulsar-admin namespaces list public
----
-
-The output should be similar to the following.
-
++
+The output should be similar to the following:
++
[source,shell]
----
public/__kafka
public/__kafka_producerid
public/default
----
-
-Notice the namespaces prefixed with "__kafka".
++
+The automatically generated namespaces are prefixed with "+__kafka+".
These are used by the service for different functions.
-To learn more about Starlight for Kafka operations, see the S4K xref:starlight-for-kafka:ROOT:index.adoc[documentation].
+For more information, see the xref:starlight-for-kafka:ROOT:index.adoc[Starlight for Kafka documentation].
=== Produce a message with the Kafka CLI
+//Move to messaging section
+
If you hadn't noticed, we never opened the {pulsar-short} binary port to accept new messages.
Only the admin port and the Kafka port are open.
To further show how native Starlight for Kafka is to {pulsar-short}, we will use the Kafka CLI to produce and consume messages from {pulsar-short}.
@@ -157,9 +222,7 @@ Type a message, press Enter to send it, then Ctrl-C to exit the producer shell.
This my first message
----
-Wait a second! We never created a topic! And where did the "quickstart" topic come from?!
-
-The default behavior of Starlight for Kafka is to create a new single partition, persistent topic when one is not present.
+A `quickstart` topic is created automatically because the default behavior of Starlight for Kafka is to create a new single partition, persistent topic when one is not present.
You can configure this behavior and many other S4K parameters in the https://github.com/datastaxdevs/luna-streaming-examples/blob/main/starlight-for-kafka/values.yaml[Helm chart].
Learn more about the configuration values xref:starlight-for-kafka:configuration:starlight-kafka-configuration.adoc[here].
@@ -167,7 +230,7 @@ Let's have a look at the topic that was created. From your {pulsar-short} home f
[source,shell]
----
-~/apache-pulsar-2.10.1$ ./bin/pulsar-admin topics list public/default
+~/apache-pulsar-3.1.3$ ./bin/pulsar-admin topics list public/default
----
The output will include the newly created topic:
@@ -178,7 +241,7 @@ persistent://public/default/quickstart-partition-0
----
== Consume the new message with the Kafka CLI
-
+//Move to messaging section
Let's use the Kafka CLI to consume the message we just produced.
Start the consumer shell from the Kafka home folder with the following command:
@@ -188,37 +251,40 @@ Start the consumer shell from the Kafka home folder with the following command:
~/kafka_2.13-3.3.1$ ./bin/kafka-console-consumer.sh --topic quickstart --from-beginning --bootstrap-server localhost:9092
----
-The data of our new message will be output. Enter Ctrl-C to exit the shell.
+The data of our new message will be output:
[source,shell]
----
This my first message
----
-=== Next steps
-
-Kafka users and existing applications using Kafka can enjoy the many benefits of a {pulsar-short} cluster, while never having to change tooling or libraries.
-Other folks that are more comfortable with {pulsar-short} tooling and clients can also interact with the same topics. Together, new and legacy applications work together to create modern solutions.
-
-Here are links to other guides and resource you might be interested in.
-
-* xref:streaming-learning:use-cases-architectures:starlight/kafka/index.adoc[Messaging with Starlight for Kafka]
+Enter Ctrl-C to exit the shell.
--
+
Self Managed::
+
--
-Already have your own {pulsar-short} cluster? Or maybe you're using a standalone cluster? {kafka-for-astra} can easily be a part of that cluster! Follow the "xref:starlight-for-kafka:installation:starlight-kafka-quickstart.adoc[]" guide.
+Already have your own {pulsar-short} cluster? Or maybe you're using a standalone cluster? {starlight-kafka} can easily be a part of that cluster! Follow the "xref:starlight-for-kafka:installation:starlight-kafka-quickstart.adoc[]" guide.
--
-====
+======
-== Messaging with {kafka-for-astra}
+== Message with {starlight-kafka}
-{kafka-for-astra} supports quite a few different use cases. With a {pulsar-short} cluster between producers and consumers you can interchange the type of producer and consumer to fit your needs.
+{starlight-kafka} supports quite a few different use cases. With a {pulsar-short} cluster between producers and consumers you can interchange the type of producer and consumer to fit your needs.
-*The below examples are using an Astra Streaming tenant as the Kafka bootstrap server.* If you are using Luna Streaming or a self-managed tenant, switch the bootstrap server URL for your own.
+//Use tabs instead of astra only. Compare with what is already in the target Starlight topics.
+[TIP]
+====
+The following examples use an Astra Streaming tenant as the Kafka bootstrap server.
+
+If you are using Luna Streaming or a self-managed tenant, use the bootstrap server URL for your tenant.
+====
=== Retrieve Kafka connection properties in Astra Streaming
+//Already covered on the Astra tab above. Image isn't needed.
+//See if this can be made generic or already provided for both in the Starlight docs
+
In the Astra Streaming portal "Connect" tab, the "kafka" area provides important connection information.
You will need this connection information to create a working Kafka client or use the CLI.
diff --git a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
index c12db8f..2c53427 100644
--- a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
@@ -1,67 +1,196 @@
-= Getting started with the {starlight-rabbitmq} extension
+= Get started with the {starlight-rabbitmq} extension
:navtitle: {starlight-rabbitmq}
-:description: Learn how to get started using the {starlight-rabbitmq} extension with {pulsar-short} and get hands on by publishing and consuming messages from a topic.
+:description: Use the {starlight-rabbitmq} extension with {pulsar-reg}.
-{starlight-rabbitmq} acts as a proxy between your https://www.rabbitmq.com/[RabbitMQ] application and https://pulsar.apache.org/[{pulsar-reg}] cluster.
-It implements the AMQP 0.9.1 protocol used by RabbitMQ clients and translates AMQP frames and concepts to {pulsar-short} concepts.
+The https://github.com/datastax/starlight-for-rabbitmq[{starlight-rabbitmq} extension] enables https://www.rabbitmq.com/[RabbitMQ] protocol support for https://pulsar.apache.org/[{pulsar-reg}] by introducing a RabbitMQ protocol handler on {pulsar-short} brokers or {pulsar-short} proxies.
-If source code is your thing, visit the https://github.com/datastax/starlight-for-rabbitmq[project's repo on GitHub].
+The extension acts as a proxy between your https://www.rabbitmq.com/[RabbitMQ] application and https://pulsar.apache.org/[{pulsar-reg}] cluster.
+It implements the AMQP 0.9.1 protocol used by RabbitMQ clients and translates AMQP frames and concepts to {pulsar-short} concepts.
-== Architecture reference
+By adding the Starlight for RabbitMQ protocol handler to your {pulsar-short} cluster, you can migrate your existing RabbitMQ applications and services to {pulsar-short} without modifying the code.
-If you want to dive deep into how {starlight-rabbitmq} works, xref:starlight-for-rabbitmq:ROOT:index.adoc[read the documentation].
+For more information about the {starlight-rabbitmq} architecture, see the xref:starlight-for-rabbitmq:ROOT:index.adoc[{starlight-rabbitmq} documentation].
image:s4r-architecture.png[{starlight-rabbitmq} Architecture]
-== Establishing the RabbitMQ protocol handler
+== Establish the RabbitMQ protocol handler
Before you can use a RabbitMQ client to interact with your {pulsar-short} cluster, you need the {starlight-rabbitmq} protocol handler installed in the cluster.
Installation looks a bit different depending on where your {pulsar-short} cluster is running.
Choose the option that best fits your needs.
[tabs]
-====
+======
Astra Streaming::
+
--
-If you want a working RabbitMQ extension as quickly as possible, this is your best bet. This is also a good option for those that already have a streaming tenant and are looking to extend it.
+Get started producing and consuming RabbitMQ messages on an Astra Streaming {pulsar-short} cluster.
+
+. Sign in to your Astra account, go to Astra Streaming, and then xref:astra-streaming:getting-started:index.adoc[create a tenant] or click an existing tenant.
-. Sign in to your Astra account and navigate to your streaming tenant.
+. Go to your tenant's **Connect** tab, select **RabbitMQ**, and then click **Enable RabbitMQ**.
+
+. Review the information about the {starlight-rabbitmq} extension, and then click **Enable RabbitMQ** to confirm that you want to enable this extension on your tenant.
+
-TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
+[IMPORTANT]
+====
+This action creates a configuration file and a `rabbitmq` namespace in your Astra Streaming tenant automatically.
-. Go to the "Connect" tab and choose the "RabbitMQ" option.
+The `rabbitmq` namespace is required for the {starlight-rabbitmq} extension to function properly.
+It is a permanent namespace that cannot be removed except by deleting the entire tenant and all of its data.
+====
-. Click "Enable RabbitMQ".
+. Save the RabbitMQ configuration details to a file named `rabbitmq.conf`.
+The actual values depend on your Astra Streaming tenant's configuration and cloud provider.
++
+.rabbitmq.conf
+[source,conf,subs="+quotes"]
+----
+username: **TENANT_NAME**
+password: token:***
+host: rabbitmq-**PROVIDER**-**REGION**.streaming.datastax.com
+port: 5671
+virtual_host: **PROVIDER**/rabbitmq
+----
-. A message will let you know of the additions (and restrictions) that come with using {starlight-rabbitmq}.
+=== Connect RabbitMQ and {pulsar-short}
+//Move to messaging section?
+This example uses a Python script to create a connection between RabbitMQ and your {pulsar-short} tenant.
+It also establishes a message queue named `queuename`, prints ten messages, and then closes the connection.
-. Click the "Enable RabbitMQ" button to confirm your understanding.
+. Create a `connect-test.py` file containing the following code:
++
+.connect-test.py
+[source,python,subs="+quotes"]
+----
+import ssl
+import pika
-Your Astra Streaming tenant is ready for prime time! Continue to the next section of the guide to see it in action.
---
-Luna Streaming::
+virtual_host = "**VIRTUAL_HOST**"
+token = "**TOKEN**"
+
+context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
+context.verify_mode = ssl.CERT_NONE
+context.check_hostname = False
+context.load_default_certs()
+ssl_options = pika.SSLOptions(context)
+
+connection = pika.BlockingConnection(pika.ConnectionParameters(
+ virtual_host=virtual_host,
+ host="**HOST**",
+ ssl_options=ssl_options,
+ port=**PORT**,
+ credentials=pika.PlainCredentials("", token)))
+print("connection success")
+
+channel = connection.channel()
+print("started a channel")
+
+channel.queue_declare(queue='queuename')
+
+for x in range(10):
+ channel.basic_publish(exchange='',
+ routing_key='routingkey',
+ body='message body goes here')
+ print(" sent one")
+
+connection.close()
+----
+
---
-The {starlight-rabbitmq} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster. The Luna helm chart makes deploying the Kafka extension quite easy. Follow the "xref:luna-streaming:components:starlight-for-rabbitmq.adoc[]" guide to create a simple {pulsar-short} cluster with the {starlight-rabbitmq} extension ready for use.
+Replace the following with values from your `rabbitmq.conf` file:
++
+* `**VIRTUAL_HOST**`
+* `**TOKEN**` (from the `password` field)
+* `**HOST**`
+* `**PORT**`
-== Use Starlight for RabbitMQ with Luna Streaming
+. Save the `connect-test.py` file.
-Starlight for RabbitMQ brings native https://www.rabbitmq.com/[RabbitMQ] protocol support to https://pulsar.apache.org/[{pulsar-reg}] by introducing a RabbitMQ protocol handler on {pulsar-short} brokers or {pulsar-short} proxies.
-By adding the Starlight for RabbitMQ protocol handler to your {pulsar-short} cluster, you can migrate your existing RabbitMQ applications and services to {pulsar-short} without modifying the code.
+. Run `connect-test.py`:
++
+[source,shell]
+----
+python3 connect-test.py
+----
+
+. Make sure the result is similar to the following:
++
+[source,console]
+----
+connection success
+started a channel
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+ sent one
+----
+
+. Navigate to your `rabbitmq` namespace dashboard in Astra Streaming, and then monitor the namespace's activity.
++
+If configured correctly, you should have new topics called `amq.default.__queuename` and `amq.default_routingkey` that were created by the Python script, as well as an increasing amount of traffic and messages.
+Your RabbitMQ messages are being published to a {pulsar-short} topic.
+
+=== RabbitMQ exchanges and {pulsar-short} topics
+
+//TBD if duplicated from Starlight docs and/or unique to Astra
+
+{starlight-rabbitmq} maps RabbitMQ _exchanges_ to {pulsar-short} _topics_, as described in the following table:
-The following steps explain how to deploy a Luna Streaming Helm chart with the Starlight for RabbitMQ protocol handler extension.
+[cols="1,1,1,1"]
+|===
+|Exchange |Routing key |{pulsar-short} topic name |Usage example
-=== Prerequisites
+|`amp.direct`
+|used
+|`amq.direct.__{routing key}`
+|`channel.basic_publish(exchange='amp.direct',`
+|`amp.default` or empty string
+|used
+|`amq.default.__{routing key}`
+|`channel.basic_publish(exchange="),`
+
+|`amp.match`
+|not used
+|`amp.match`
+|`channel.basic_publish(exchange=amp.match),`
+
+|`amp.fanout`
+|not used
+|`amp.fanout`
+|`channel.basic_publish(exchange='amp.fanout'),`
+
+|`headers`
+|not used
+|Name of the header
+|`channel.exchange_declare(exchange='header_logs', exchange_type='headers')
+channel.basic_publish(exchange='header_logs'),`
+
+|===
+--
+
+Luna Streaming::
++
+--
+The {starlight-rabbitmq} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster.
+The Luna Helm chart simplifies deployment of the the RabbitMQ extension.
+
+The following steps explain how to deploy a Luna Streaming Helm chart to create a simple {pulsar-short} cluster with the {starlight-rabbitmq} extension ready for use.
+
+. Make sure you meet the following prerequisites:
++
* https://helm.sh/docs/intro/install/[Helm 3 CLI] (we used version 3.8.0)
* https://kubernetes.io/docs/tasks/tools/[Kubectl CLI] (we used version 1.23.4)
* Python (we used version 3.8.10)
* Enough access to a K8s cluster to create a namespace, deployments, and pods
-=== Install Luna Streaming Helm chart
-
-. Add the {company} Helm chart repo to your Helm store.
+. Add the {company} Helm chart repo to your Helm store
+
[source,shell]
----
@@ -84,34 +213,35 @@ helm install \
datastax-pulsar/pulsar
----
-. Wait for the broker pod to be in a running state. You might see a few restarts as your components start up.
+. Wait for the broker pod to reach a running state.
+It might restart a few times while your components start up.
+
[source,shell]
----
kubectl -n datastax-pulsar wait --for=condition=Ready pod/pulsar-broker-0 --timeout=120s
----
-=== Forward service port
-
-You'll need to interact with a few of the services in the K8s cluster.
-Map a few ports to those services.
-
-In a new terminal, port forward {pulsar-short}'s admin service:
-
+. Forward service ports so you can interact with certain services on the Kubernetes cluster:
++
+.. In a new terminal, port forward {pulsar-short}'s admin service:
++
[source,shell]
----
kubectl port-forward -n datastax-pulsar service/pulsar-broker 8080:8080
----
-In a separate terminal window, port forward the Starlight for RabbitMQ service:
-
+.. In a separate terminal window, port forward the Starlight for RabbitMQ service:
++
[source,shell]
----
kubectl port-forward -n datastax-pulsar service/pulsar-proxy 5672:5672
----
-=== Produce a message with the RabbitMQ Python client
+The Luna Helm chart deployed Starlight for RabbitMQ on the {pulsar-short} proxy and opened the correct port.
+Your application can now communicate with {pulsar-short} as if it were a real RabbitMQ host.
+=== Produce a message with the RabbitMQ Python client
+//Move to messaging section
If you hadn't noticed, we never opened the {pulsar-short} binary port to accept new messages.
Only the admin port and the RabbitMQ port are open.
To further demonstrate how native Starlight for RabbitMQ is, we will use the Pika RabbitMQ Python library to produce and consume messages from {pulsar-short}.
@@ -163,29 +293,32 @@ received message: test
deleted test-queue queue
----
-=== Next steps
-
-The Luna Helm chart deployed Starlight for RabbitMQ on the {pulsar-short} proxy and opened the correct port.
-Your application will now "talk" to {pulsar-short} as if it were a real RabbitMQ host.
-
-* xref:streaming-learning:use-cases-architectures:starlight/rabbitmq/index.adoc[Messaging with Starlight for RabbitMQ]
--
+
Self Managed::
+
--
Already have your own {pulsar-short} Cluster? Or maybe you're using a standalone cluster? {starlight-rabbitmq} can easily be a part of that cluster! Follow the "xref:starlight-for-rabbitmq:installation:getting-started.adoc[]" guide.
--
-====
+======
-== Messaging with {starlight-rabbitmq}
+== Message with {starlight-rabbitmq}
{starlight-rabbitmq} supports quite a few different use cases.
With a {pulsar-short} cluster between publishers and consumers you can interchange the type of publisher and consumer to fit your needs.
-*The below examples are using an Astra Streaming tenant as the AMQP listener.* If you are using Luna Streaming or a self-managed tenant, switch the listener URL for your own.
+[TIP]
+====
+The following examples use an Astra Streaming tenant as the AMQP listener.
+
+If you are using Luna Streaming or a self-managed tenant, use the listener URL for your tenant.
+====
=== Retrieve RabbitMQ connection properties in Astra Streaming
+//Already covered on the Astra tab above. Image isn't needed.
+//See if this can be made generic or already provided for both in the Starlight docs
+
In the Astra Streaming portal "Connect" tab, the "RabbitMQ" area provides important connection information.
You will need this connection information to create a working RabbitMQ client or use the CLI.
@@ -230,7 +363,7 @@ Your editor will report errors because this isn't a complete program yet.
+
Replace placeholders with the values you previously retrieved from Astra Streaming.
+
-[source,java]
+[source,java,subs="+quotes"]
----
package org.example;
@@ -245,11 +378,11 @@ import java.util.concurrent.TimeoutException;
public class App {
private static final String username = "";
- private static final String password = "";
- private static final String host = "";
+ private static final String password = "**PULSAR_TOKEN**";
+ private static final String host = "**SERVICE_URL**";
private static final int port = 5671;
- private static final String virtual_host = "/"; //The "rabbitmq" namespace should have been created when you enabled S4R
- private static final String queueName = ""; //This will get created automatically
+ private static final String virtual_host = "**TENANT_NAME**>/rabbitmq"; //The "rabbitmq" namespace should have been created when you enabled S4R
+ private static final String queueName = "**TOPIC_NAME**"; //This will get created automatically if it doesn't already exist
private static final String amqp_URI = String.format("amqps://%s:%s@%s:%d/%s", username, password, host, port, virtual_host.replace("/","%2f"));
public static void main(String[] args) throws IOException, TimeoutException, URISyntaxException, NoSuchAlgorithmException, KeyManagementException, InterruptedException {
@@ -323,9 +456,4 @@ java -jar target/StarlightForRabbitMqClient-1.0-SNAPSHOT-jar-with-dependencies.j
Sent 'Hello World!'
Received 'Hello World!'
----
-====
-
-== Next steps
-
-* xref:starlight-for-rabbitmq:ROOT:index.adoc[{starlight-rabbitmq} documentation]
-* xref:luna-streaming:components:starlight-for-rabbitmq.adoc[]
\ No newline at end of file
+====
\ No newline at end of file
From 08a919a8269f3691673282e2c2a01309d0243e7a Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Thu, 8 Jan 2026 07:14:59 -0800
Subject: [PATCH 4/9] move table
---
.../pages/starlight/rabbitmq/index.adoc | 78 +++++++++----------
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
index 2c53427..5aca196 100644
--- a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
+++ b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
@@ -135,44 +135,6 @@ started a channel
+
If configured correctly, you should have new topics called `amq.default.__queuename` and `amq.default_routingkey` that were created by the Python script, as well as an increasing amount of traffic and messages.
Your RabbitMQ messages are being published to a {pulsar-short} topic.
-
-=== RabbitMQ exchanges and {pulsar-short} topics
-
-//TBD if duplicated from Starlight docs and/or unique to Astra
-
-{starlight-rabbitmq} maps RabbitMQ _exchanges_ to {pulsar-short} _topics_, as described in the following table:
-
-[cols="1,1,1,1"]
-|===
-|Exchange |Routing key |{pulsar-short} topic name |Usage example
-
-|`amp.direct`
-|used
-|`amq.direct.__{routing key}`
-|`channel.basic_publish(exchange='amp.direct',`
-
-|`amp.default` or empty string
-|used
-|`amq.default.__{routing key}`
-|`channel.basic_publish(exchange="),`
-
-|`amp.match`
-|not used
-|`amp.match`
-|`channel.basic_publish(exchange=amp.match),`
-
-|`amp.fanout`
-|not used
-|`amp.fanout`
-|`channel.basic_publish(exchange='amp.fanout'),`
-
-|`headers`
-|not used
-|Name of the header
-|`channel.exchange_declare(exchange='header_logs', exchange_type='headers')
-channel.basic_publish(exchange='header_logs'),`
-
-|===
--
Luna Streaming::
@@ -456,4 +418,42 @@ java -jar target/StarlightForRabbitMqClient-1.0-SNAPSHOT-jar-with-dependencies.j
Sent 'Hello World!'
Received 'Hello World!'
----
-====
\ No newline at end of file
+====
+
+== RabbitMQ exchanges and {pulsar-short} topics
+
+//Move to Publishing messages section under Pulsar protocol handler in Starlight for RabbitMQ docs.
+
+{starlight-rabbitmq} maps RabbitMQ _exchanges_ to {pulsar-short} _topics_, as described in the following table:
+
+[cols="1,1,1,1"]
+|===
+|Exchange |Routing key |{pulsar-short} topic name |Usage example
+
+|`amp.direct`
+|used
+|`amq.direct.__{routing key}`
+|`channel.basic_publish(exchange='amp.direct',`
+
+|`amp.default` or empty string
+|used
+|`amq.default.__{routing key}`
+|`channel.basic_publish(exchange="),`
+
+|`amp.match`
+|not used
+|`amp.match`
+|`channel.basic_publish(exchange=amp.match),`
+
+|`amp.fanout`
+|not used
+|`amp.fanout`
+|`channel.basic_publish(exchange='amp.fanout'),`
+
+|`headers`
+|not used
+|Name of the header
+|`channel.exchange_declare(exchange='header_logs', exchange_type='headers')
+channel.basic_publish(exchange='header_logs'),`
+
+|===
\ No newline at end of file
From 05e83b83b6f074980d226769c7063398190db512 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Thu, 8 Jan 2026 10:51:17 -0800
Subject: [PATCH 5/9] finish revising rabbitmq tabs
---
modules/ROOT/nav.adoc | 2 +-
.../images/rabbitmq-client-settings.png | Bin 31342 -> 0 bytes
.../pages/starlight/kafka/index.adoc | 22 +-
.../pages/starlight/rabbitmq/index.adoc | 540 ++++++++++++------
4 files changed, 378 insertions(+), 186 deletions(-)
delete mode 100644 modules/use-cases-architectures/images/rabbitmq-client-settings.png
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 5b28e03..a77c655 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -1,6 +1,6 @@
.Process data
* xref:cdc-for-cassandra:ROOT:cdc-concepts.adoc[Change Data Capture (CDC)]
-* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}]
+* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming]
.Migrate to {pulsar}
* xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka}]
diff --git a/modules/use-cases-architectures/images/rabbitmq-client-settings.png b/modules/use-cases-architectures/images/rabbitmq-client-settings.png
deleted file mode 100644
index c5b33aeceb609957570b5891b881e59e70d8808b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 31342
zcmce;byOTrvp?pOmo4RTUuQan67axG0&BRsr2
z*?4&P1b2yV?^qiX-@m=!`x1;iCcCyLx-H-H(zNi!!=vc=
z=fEETR=mN(3nA1}RW=E-*vU
zq%WZJVeehiMzlrv}_%!a{1yy^8l-QX6
zriTHaKIp92J7CRE50vPM3cjh8XC?Xy(_y5@AqTI>2<9=|o+#l7*isSw9iJT}8f8
zmBYVydF4S7I?54`xro&F#?&jD5#wbnV?YSKMEgw;TCXknOoQ)}I*y_(lEnZQ@p7T$
zVz@&vORxnuCS?f8_^X2nMggQtGOn67pOk#>%(|(wd~1wb<8PI66Ckeii_o~CXs}(O
zHUdS`ek?xx7?d;vRTNzH=23e1oh+xQ%s`2(+^9R>$MNGovJB2^u?WrLn$<4+ZmdcI
z4axMr;A=wN2+D%knKV6{*yB0I`ZpbHe)T)>Sx1z^D3S!^S2yBcS1BB$_v9l_J#^=CDqco)Pnrhm677+|Mfh$~W#nDh>~JUQNs5B$4*O+I}oZ5yhz{hqQZlW6_aqv?JQ
zW=%xVIvpjLqXOwkgoYD2Vx}-4Hudo?pYTAtx*Jw{M?SNyHQix@YI?1MB#n1mnR+Up
zrxcgF{p2LOD|Ql|Diahb@78l(?#(BW&o%96!Fh*&{6W?kg3Z>ivO1~)6Xc&>S
zhq!ea%Bdzn%Q9(k~|MW2l-*NKW#%F*;|w#&R{$hRdpKaRM*c1U&uKDOb2M>9WH
zxcLc-%kk#BoFkL>K?=clt0uqYMmLHwIs3n1TD};3>Q%?4CL9w5dw$dw5w@b#TyrO0
zZC&<3O5k4q1m(?nhO3hMhq;+BL!pgI#leslo|rq21E-ehIU3oB
zT4KbWovV0TsV!R!rMRzt_rqtUAs;^7^);+5q^T*8B-FpL7xvsLJ7z+$E!w74d|L
zn8tpJq^!y{ln5B+JPaG6?86d7Xz=VJzdX1)x;vVL1V^(VK_4i(t~l!&e3KOtE89mL
z6&Yat7no&LYlfrjE>IDDl-Ed4Mc^AI!MRZUi8pP=j(+{4P!${Ovm>c)|B=>90Kt^#
z+eAYp5}4U+aQR-4@?PMWQYU7~Q7R6({C+Xx%TSu^mXzWlHwAfzaivDVmyEbGmi(U9
z)JcOkl+U!DBy-(tA?qq({)6pq1pSnebO;3EfzQfgR{8FVH0
zVnxCA{Zf7rO$(aFC4a7p{w^IX?=Me=b&GSk(Wr_X8oR!M?>^<@W$@+qu!FaC*T*#HRq*nR
zzX|ILD9YXHOP4^CjR^Qa>T596aJU2oojS1}gwt>^^Iu$b(VX9>8Wv;7t7t6G6F3P+
z#6oAjJ91^{qmFFBh}Cn~La#;>7;-lwYb+J0+8w*!dYwGe$kS~BL1}Nf@pK!+pRRK@
z+qYH2GjSID5T>n@pWCeDKUPE)TLa3PN10AqEKa)hs%(dqP-il-?KX=_q3uJLOo$Vm
zVAhGFL)$3Ub3htL8?fFgEJ#z3?D5C;5ujoY%xyBj?S=H#+LYXxnCn!gCCFkYD~|{J
znsD3SO6w7!kDC7I)$=}!8V)2uy0^9Xzt%_|--kRpRG
zWnGc4?bM{QquWBf&$h8(5$+W^0w}omgR(eiTa4B1(eVi%(!r!fh2tfiL1n>TjO)P-
zT^vXAY9i9@-TV1Lrg<&Fsit;Sj)Oq=-#NT>$Q+B!Q1k02DFL(9jBs_q&tp4LYuQYi
zdrjo&J5KYv+NMQ+2j>|J!o{x^Czc;$yXBf!Bb&~`0*w{PRlG2xGlh}WSEtJL7trH^
zWiH=ntW83n#?|VXXtRBcN0eRb-cgf_xZ<>F7YC}EZeZ;f;3&rr(1z>tZ@rmNyzs=F
z{ZRSwqM5@j6Iz;{xB{OUu*?wdq=2?ryAPC^8JLu2$hi1ZO{f@-*zYQ4#f!5+&95H2pTF3D@?
zppZGmYvNc;Ir;pLuk&|`dWq8*L6`NA`Nb1Tc-82v(Wy82jw9jUs)lhaJir;~Qg}7H
z;4PgoujD9zf8ogSxZSE83|Uo|(TF-~<)zb-8tt#dF@IpHw|DYP(F#Sk8OonD4i2=M
zP33qts^IJ`cZsL*qnVy6v3w+EPhoB(f(G77NlZr%F<G`g-+xdg7C=P
zzz@6Yo+H1XS&r6~C`^R~>aivX{^MedZws_5mzff#ob>0Ii|re&qAptl;pUIYb=4+l
zl|Ii)-Xlx_&EC;5Og=Ij1TJ@Q@#D1Kkk?8CY{GdTp`IT^J3R_t_#EW1nPUlwN<7)f
z6+dX2VV2iZc)-e-n5BXXka2qXKuNa;qUIJpO9l+tGPv}c=AYv|UOP*@%9YoX9dNOQ
zkp$>?6fY9zEoDk9Y_$HaWB3cX^~oJT+(PF&@3VZTjiG1j;?)|)tnGm_^wMN4W~PHi
zqU&v8;obBF(Jo_r-?W_;!vN&YD7|o_gZw1|637R(&+lEbe7I_ly2ve4D%$sSO%O)#
zl02XhjU0QTQD&R`mf`n6JwdKqVNWJsu^W3r%fz96GHo06wh<&)KK6+3-amhbiirO4
z;A3XLmCAC@yy<(tTQ(6*;IJ5dr4VwUqBlQ+|MZ>(l8D*Jcf#tUt`6$H2Mac2S*qcuF|p6Q
z){FC;2oC#Dtn}pk67Em;)vbUm6Al3&v+%oG^E1r*U=6tzH{~MJd&&gVSgSb|mY2q+
z=lVBIqlUBiQOr{~Z3T2C4d_h#5-b=XbMs<>GLgh6rXLo-5hF*hkLGr~F7Q)qWrbQE
zOmr^!6GMT9%~}R8$=4|-P$Sbc1#+RB11gN&kJ7%fK>qQWz>!|=MsO4b9f_s66HH7%
zYBxNLW!Mu9GuxE5#=i$L8xzjJK43b@Mw9mI%h=+uXKP>7zZU!}K>Tpb@+R)GZ_^U;
zsn0k6maxT!a=lt4q{A#O<#zbaoVKX~0drq_OgzRfc1jgTAo00mZ;+n#70+Wf~e$5X5%j2m8LUk-mH&
zEYN)O>>|T)5by-6-!TI!pV8(y`%z1=vIZy<4TI2@QWI
zX@9bjJ?ypj3*bE|{DC#cR7K>#=18hQDLS-Z;hjn|r^d}nL2uV($M{a%3fC@mlL&9B
z_*TA;6ZsExV9asjQx4%yCy(dsmxewNE*
z`(#k46+^~Dx)q)__aKI)
zjyA}Ajt_ED^S@>gee#ldQ^y~CMCYAUc3Z_vaKj4)_7G0E)LiXu&$(e)el*WWyTHe=
z0Psl9w?MsS-dR|4n14ZFVQeBBAC_)&`_W`3nG&nI1xS&;GLuS<=Td96IAh#j7)hjOT}9
zwBhDaW+%R>ifb}^uD(xwYLaatx^50TL@{viI;g*{RH%hu)?f
zZ**s)(>BMXKB#`L6a1oiZUZ@9UvtrHe|Q^xY6At#W*2yr6j?e*`y?F|z9I8;gpZ3X
zH~RKmW~EPAEtBE9JQbZ8CqLi`p=ROH7mKRh#QUsMgqef3)fiq5advtzM~Q~^c_T||
z)Y9A&9^394*V8~zKVe02h7*nR=jix(&GsQ)n%8o`Zep!KAr*f}&JixZwf~j==Vb=#
zN7$uz^Wu)n0zK-}=6hgmp|@590{LQ=X@zEoJE1`PqaMV`hIjfd)Jb!-kl#lrPe$sI
zP1+)kU530Z5?Hu_MylE9{YW?;Q(r&<&-6S&o_wJ5f!zntrySc3KB9}7a&N07-MP_}
zGsjDI3BLQoW+I+Jl0LR!G*2os?9ak!ROc-|ChwkwZWYF9!KgNE^fVRlrLbqg=}MDc
zmuwI_vtM`Q?Xc5rLgC)AxvijY((1?NT@D_Hi+!gZnmHds>!b&qazetI^oRS65v{;n
zab>w#dWaDcjrL*88vhSzq^N->M{Z%QlwavSCaAZvlEb6cXbR$^@K3
z_a#W>t7g2Ujx~kmzJuEmbl6;r+q{Clolz8IK;bs^O7Z{^MVa)4t6eIZ`DgOW-~(ur
zdcZsd#_9!A&{~&T*3qO#(5sRHvsQM^!5@2Ote_@6&G1#nP8^coYYzp9y$L2#v-MNA
z3{cE~%l+uf<=jsRfH^QK8Mtc{&NU|oka#-B6W?5T*GsAMf%4wUC1GRa^+Qh`(-_`7
zsy??*8w{sjbh#D?#}xRh@Mk7=oS7}Xzsx3lN11tRHdtzh=jKeqQ_vG)9=|q=Q3}ya
z#y+WlPLAL6`b8Vl?)RI?nS3U8KDpC&rN$pI?Dw-!A`xdKHa+E{vuocryB#IBzB8G(
zTa_d(C7Crf3Kv%;I`?+h-xr<(fx5U{kYDDKs<<#m{qXr#>t`b?bK~U3q|2&D&nESo
zhJA2O3gE->VN2{t#-^8#;<9Z>v>;g3Mln5cs^IUY|B;hzDC4Sw~9Sg_w1`8(5&u>G2x*<9P_=d5596phf1I7!l5Wc5s^jvI|
zhEceSq5BE?bIGqQ=xsx203b1X;j}Fc8W_0!R%k(;(*f
zbZQ}N!)lsIF0NM+YVsihI!Nz$&GhECSa)H+eb#rtS~}H#1^inHG#^4ht{3)%Z*9{O
zKk)k)D#ENG%yCnJ6+$C@S<%{Fn$MJP9ghC<0C+d)QRq1eRzWv^i0ud*{;*SWSTTO+5dgInbT@Lc5l-4ZvP61+$fZQgCU++
zXPll@yOjoqN(YE!LFJY%-V}@F7V`P+Tov632D(4rJeD%7Z(;GtfF?Zk@rbvz9AkvezZqyniL?JThTNWbU*8w|=h=VAu=oGu1@9wt2F_PM|01{+LK^C1
zJ!C(>rb>nyd{_WZW2nB{p4{h_@1#fXND!SU1mHYUkwF5>kLmXX2kJ9tiez67Jig+q
zotv9^;2y5L=kQsOMZ?hAD#j64VGOZ$JsybrFg~```hD)%Ly!LAhmFutnpsef_8IB>4UD^SYl&+oB=
zR|y+Aq{u&i0#rs`AsO<)bX)opE$G#&7xFEQ%mwVkmd&owTxQ9?Adtj!tC%-|^D?#+
zeKb%G3jI(Ob=?AzG8juV2vOSYQJ3X6&VG#sV*Yrj(zAEYd@~GWYytz`_wlJ6j`tKwi39r9B9
zN*o@ucq1XF`!mg#4hn5$@e3f24obVuAzxkVd-U?QHJfsl{=MlAG+rCRo6f-l5w~Mt
zX&uI!LoxVLqO%snLBY;2P-rY5hGL9Po1mOK>!>bqtLYxK)Fj#6ZUcS2dR^gBmkGq>QyGGj`(qCabt^W_#4
zPBaYkSRxVbxz06t#qD~yMW}5M@>^OybzBAT!C})p<%GiSsT=W41B0zJX}l-uyOI>8
zQk$v}M~O|=TylE+x@Qw*sW1I%Ux7JHdw;IVGkUL$Mx-R&oHJSZy)?0H%*nwB{ofhj
zgM&_gX~Y|Y2~FXoQo|XEg=UjS^K+8^if~B^a|~2sEi%_#@Fz&zj7%Wr|;2gY5?bQ0XoxUCK`o!bRfUCU`HK{Ctr0Kaul8ohWY*uON#YbuC}E<
z$j^%N%qN3(i0zM;)J$F>uc`~IGl2T}ApRuVgO_OmtZ@Tn@%x|7#gvbQFPcVPDxHry
zc%F()Sg@_Un_b022Ru6l$kqoF*%hxhCokpA%-`$Q#|&b23eDEW
z>Qu(Lerzy!CWn_eHWuuLemP9ap7d9TQW^(euj9Lizp`o=M!P|ct?7bn(GGh%pt0XA
zi0&h7;Gx=%v^+PYP{~ty?S|iA0Z5xqzk6u%ZM;>(_u)*(UMXd5K=fc>`PE1Us(bd?
z&6P>$KDj0Xy44#s>0!WEw(^DcYeJh9q#ZiT3Q@kaHA6t?5aPXEc>;t?Em>pAPt9-k
zRI6J2)<|i;U(H>13wmAph6)kaU|xk&ygVAOi)9RwqkAdZ-&(q4-e-+i1FBA_XKqGJ>J(t1%4q$HPCmIObbO?9z@#u_myl1I)DxmHG$SVXWsXk
z7q+&ZTJ!O(-I1a7Bq-x)>M2KcMisrh=>j?Qtt6#a7X}Pw@#V*Vo<(liTm>lDTu+kB
zdFjdfyoD#0`+o8YbCh{Gy*NhU(DUJT)P^%l>3?3#iXHL993pa`%yH@oL6|!a5sIg<
zpS?6isV
z8)wILF{`jD;!C0@(iO`YVMBTG*GVYhV{a!m*Tns0K7|JE
zH*3@^c9pa8`I{ZDUj03v4w1hqZ+ee(^)`v<<4=C*IO64PH5K_YwkNnKkD-$CYe9gG
z&JRf9@f)F&&=$M-8ur!`eDAP^=%N)a?*pYS>W;+y`)lwcA3#RLCdSa+y7O)@2fo?c
z*zk)U=@Q(b@}GlYbl+>J*+Gx~B$}(g1zpv{|743L%3y~VhXg#!nSVO}q6QR(e&)vRi{0
z@Rr~zUhboQU=V}>{Ys*OVW70?S4UMy$6&*(N^h|jP7(~ZOs-xM
z?h&XP8|V-0D^~*;R4>mA+8=?%@*F0Gd-VIrk47Rl5!|N8G9AFPZ@Yf)u4Ja#jfVMe
z-ullx^KaOBh(?SG)>O6pT|6KfYKY)DZB0t%17`5b<+G0o6y~p%b
zM~iC@SSu?qhaR79>W>~AaD14|y?vSGja3xDPEuEkZpW=lY7|!y$#nZ8xsNCc)d`YtQL+FfBWTFIt?L_+E27HE>jSrs=f23tT-I=yYgcx!
zzaFY@iYc3^4A0mEHLmlzx#CCR;#e${s
zJ!JkvRBPra(;kff50_GGTmN(fPVITa?93!f59%SALxTP3ee*@
zy+m*x1bH)b8*7z};^vYmJ96XUM|q;E%kZ}Aj(fgw>
zm@~(;j$(;U%eU?eA>mp#yXX9frDo?H`%?@aYAD?QW?t}*5o@ueM=vPv>6x~ol$4|+
zc7=z}yxP44F*_UQfK$tB`9!_9Ii?G0p-uezUg<#Dzxs?C&0$vDuC39Fg8Vd{zPlpG
z3>oM+eGX($(YI`-DJV|TC26BXex$dg2!eaa-q-!{d}MSqBPZV=f<)=t_|it9=o}r3o^^Y6z0mY*9`llgejTa+McAi`0G34Ueo7m
z2td78M=r@IgwK1Kk)NoKQ!mQ$sK(~9lF+GCr&AWvfc4%6y!2-t}g2bW&C
zjg4_~?}EJZLm*rd70W`N&R4NvslrPca=Mg9ZPkW1V7mO>@zm-9dRz^sDBSv~lPr&UzH8-CGh@Deiv
zWm4Z!oZy@*yw)9}KA)VI8WwxfW^r>6VlwRi1y(25TW5gM*1*=$NBSc!mWs#f=7vSK
zXYV;%w6Qx{TK_H5!8lvax`5HX&ilGX=QPwI{JTsV@
zfJ8_?M)ORkReM{f=SxMqS&}d%uf38^Y!v4!WJ+IGno3+>TYma5&3_UpX`;6@w0m
z-F@E*i`Qp6=+f`-r|qYtV=5hbV}99qPDtx&t$cF
zMlTY5BXeB|8Zer9O|2e_$sU^%9QxX+6x*0Moigh_JD`+y^{4nreK|L$L=EAB^&C0V
zY?E36Fu6#*&xtag5RDWDyk1D_W(={HOD&T-xJEVqZA9a}?4Fp^%!>;6xtFspa{7tv
zk5(E}3wV}RMnWU^w~({(sjb8$d$X2?-Ld7iB`7=Y)0c6$Ug
z`Cn`d1hh*3Y)92+{(LyCTia}zR+F#r2L|st(eXC+(jsI1ma#GtYHjyh#jv&~3*fNg
z>0RU>uhw~aGUmcFL;t+vG5jprWso*KIa=DZn;aVO0NFI#wtos-OJ87nZL4y#_uTz*
zMLjG(uM6HbggU#|+B$6L{{55V9$r8UjlbOxuhldLPmv0?SugCG{E*wwWJD~SkZilP
zMgK*dd*gQ-f&qTF+U*hc*074s)bA?(=yT`K50X8%d=|YQ81?G@;x>jn2y`D<l#X<$x3(fS)g=v3~!>6Dqg<{T`u+!Nv(i@S_mTjq~?2
zjJ%%i5}w0(6Z_QOpm=|m@=j@_(q$0Gf|b~F4!zbEW&kDOZ^F@)*MaI`VQ#EAAN?OJ
zjm;h-m`Odd411KKw_jJ4Bh1>Sg?2ILw3s*3m3ze1dp)3}rDB+Mm&{vSU}nBPkaAa6
z&Ena4$oHjgk^zTx`)xd0Vb2jYkh8^BnU0*Z(X5L`;dDCsiFV4lx7!;%Hm7gNAGQj9
z_BD;LN%?;e)SWsGr3?T+9kNdmD~(;i5?7Y1$RWGjgQr{{fP{7iet>Ce?(P^Z9woME
zZYa1KTwGhaSIZ@=7b!D{?-3{HbG%`WdrLi32lN~msj-;~eyC(M{mKw-#!)0d4y}I_
zfU9P8OTI;=J!%9Bra7+^GbJJ7^AIn%a*!e(R|9pTSVIvgUh7jMyK~;GyX*jX$!R
zLq+;R_77qxlB^xc4`z4M-(eB#)$MPHGH!~Q(JMpS64YLsg4veBbG0WfFhvuR4xq0!
zmY^Wsa~E5?*e_WFk>FkfnXaBVP=0UWP#cb?7I3UqftmMS^ERt3vr1^P@XP*7ZL4I9
zh-3jnBNiEA#>O{nXSQ?18B7czP5lPf@0QA*+qdY=$PVwXNT_Y8Y_4BWB6_8qmm(Zz
z6}sZR42o)}u^#u>3l7M2EVgKmE1PC=`cQr9%lBi3wA?CKD}Uf#5j<6yHXpqTuSlos
zX}i)+K4Vim`HIgf%gM}AB!&*1i0hAmlh2=M+OCdsePNJDMtQ4(e8WIa$TgbxQ;>oB
zP9=MvW?36b$oW12N?p2uO~Uh}ga7JDvwBm7dlE)2CK
zE9HH(xLdoO%l@}~%ZaoNj%IzwFSp+lMe03SJWVK=;uGuofK~B3(Whl=JDb_!i=~xb
zrzwsHZR`0U-=dwj8bT+Q9cSUm6S`u&yvc>qHklVLR(#$?if1ShiXp==|IIAdg@JoA
zsIOK6HFQ#Y?VOGaTE^{qa-^kUvvx-sf`4$igmdjnVH-8)*(j?6DxpmF+XUr_tZlvLPzE
z;Tl!~`ge4D(w^WPXSFDM&j}>mAurJ7{xMK#<+Qw-)(a$CoxL&9cenGerF+4pM$tA0
zA*+a=`?cE+-oA}mHrfe=J})|EiadeQ3GUJUH}+|c*i<`;gVfDe)1yi7ttQaE9QKIb
zWY$A@9v`Q&dA|ck_gA*KdreoTZ}KdhHaXX1otd>nN6z)dKV~}(l{Cm$5Gf}fx+*@-
z88SO*yX=9@dvhCA&ZInAB+UHQPls*l*Mp)|%?yQ3i419%+8cFICUD7{+ss(V7fF;O
za{oz|42}?qsp0mnu~0*0%{Nl}yPNyJA~aA4mThbRbIpj%zOvBpPn%9`b9k4`T$P7m
z&S2AjKBh7`)n!QEkI7MmH;K%KzIC$7&5j&6R=sMYN%0*wq!r}-QO|ZfdU*V;l$Y@5
z!D5jy_7?=xf(c^4k{lowJHIX7H%C`P1AG&DGc-K6%M
z{o-P!ru!f|@Udh`p73aCv&44Hl7eoRjy%0@ctdKfOeoaaVH}lP%!`>@@HklAay5x3
zxDrPv21X|prLbX^CxZOp&31^_-bdNdM_TC;MwVTB`mLzoaJ{nDAn)Vh&qMNsbIE{O
zmj>RwBZVT63e7AaY*}wSd~@*S?vuGcFP|kS2rirV
z4z&E?nH2{Pyce7^Ai2L+4R-reS6r3muE4K)nQSR;)#4w_eQ>qCSZpJ(<}w$1D;vT-cz^
zSHPDuL-{x1RYZO8G#8~WBmN7$0HtW7Zn-#Ft&M>{EMB2dLf)|7a=)GbT*v?M4*jUW
zYB&rGbmXH}-WBN(84F&rpXVCaK5JfdZAa}@oS)X^4lBj5>Q!HcXm7G$cq(ZZvB
z;b030N3l4dSOsSCrX+yYzl1l!k~xRCbD8mlP_aEn1Lwnv%IjF{O>Rg1gDV9~w`-Fz
z<0nJ!H#P7m!rl>4%11Q$m~SleRjp9o*VR+Y6=faJ&!*6l-d(z($Pcl@U#7!U$!Mfa
zjtr-KD4uJ#4zQ;@$@TVKp)WmVuvy}Gv1l$*04N08I2UZ@w^E$mV&wsDhl!F-X-Cdn
zW0?WYE(xC#j9B;=X7`9I!B&6ARo>{0wp6%`Q|yCY%a7rM@D`emm6AknRfY7L9g)%F
zsXF%LM+Xs6khd;T0Eg_mNB*yP1+Cr}b@}jCfO=3m!XGOxp2IuMifynN23hi7>HRJFdSk2~H*5z9mI$6k==#3RmBE`$+DW*Ac12xF
zysBVW>+S3JIdZAP>e6JA3fIIgiX<8V}<_`^vw;?UOh?03dZNX)u*G!(l@xZklByzlY0GDTjJ!%vl8->X)Wx^(rcgYeAgQ2Rw176;A)_~k-PDfd
z@uoXA>i;^luQtRUKNMJee5RZR^Qlu3b~i)Gz5J1G7C5z2Km?{iFBi(Xhj-xxZ-u}d
z+33E^IP)K9xhd5=-!BZuk8<4J3_Zv8lpR;=*9s1Y4fh5OUwBJ7K4rxY)zEklXn8B{
zdp>Dia(-~Dv;el}Xq!dhi)iZ|ewCCR`|o_yLbLsV8;gr1g%?XKJ3Fvhs8((#+PC
zzM5IB(7lX%V$MIDAdiiVul-7`*?v|3&X%`z%uVLL;8DfgGNC5(-ZT4lv+HyfD&g77
zu-JeknLH#LR%L45X_|9IdcY(*@5q4brCQI=SJ`Lf->k}?L|@6A9xqX-cy^Jh1V9FS
zE73EChu36gZjH3(DZ5HS8eO-`0-gyNjsT?``Q$x!MRL5xf)lTwRK7?V~-iA&0?M%w+=nKf|{*P9pnmzVHJtMKfT#Y>{wBV*zid)aUxWCTuQr=^X)PzM!Bel7X`R|t{
zFSNeb{-?3NJSX-(;7POnO=h(B#R{fwbJU~tJk>JnqUq%Z=Rc}V07tZ_KmnUDvw;5Y
zPw5v+2~HxxXKv=r)^L&KPmv^(1Vq`Y_7@Nx_zWtv6Jx}6Ktlwf)f#LwrTx%_In?*1
ztsmxugHl^pkqQx*t&Fc#n|L|}l0&j}nGqL?h?C7j;zg-7HX|PUgX+4A<_!rrPjhft
zt5&<6Dt8%fXzHCNYtnY{Ywrz|m@fWS8XnVZPVv`+J}WtV%A~&)65QPJ{t8zI=bCN)
zw+n(yRsssL-|1YQL7OiM!3OQVa7*p44(>QVaD!Xe__FYvF?!_K?cO
zh5D3M0-myhiBeju!=b*>k1i`JVto6suwW7Zj3bVuJZJ1_>ed)wovNWrRSx3R&6gC|
z{^d}b&$4($@UzIUuT2ar=o_)eq?mVk8vw}QO&A7}$frrig!?M};Mw;;f~1jpjYIYD
z=p&kx6EY@e2F&51TM)8pxHq_Z`Roz5?i2~43>*K@5H)K4u@9HfH~MX+e&c%*sg}ll
zCQ&?ZTQj?@gcoTR!R$8;@7qoMV?syWY%Jd4}HxSc;k
zhk-TKP#r`)+~Tjw)*n%P759~$fs!Q;8NG9bE_Mv{Z6rPNLrUYR8Rf6{>DnT%SaC9&
zD-N3d(mF3qX9>Yp$R_3}ZsuMk=;Of_PwSc&V+tzcz*&;4r!(6@`NOh{w{yfEW4J~M
zu>+>)G`$dNDaVC^GM~JFs)C-UE6B%dTM1(o){hP%1Hw>zxL=Q$M`i=n7DrL5-O;n^
z@{LbjF9nCwOrgZQ#`fN}(H4=uW7-N{q3UjFLc9;ooA~SYD#uzquB0dZOT~@qqa1=T
z3PH;7VIBJOGKp~2^r$|@P>&+=f|a^H#K
z`IbV+A}81r|E(gFh!I36=AhCgRe#T^%jxWgzj)a0=&2Ts2%NE(dPe3EYEev7t6AY@
zxkc^Ub)?04hZ~DrF2Fu4e>WhgqSt>_`K0c*pz1i5eK0@Z5ki5^@S{gnRQp>gyBivl
zO3$pr=Tg`DBY{C;5N0zRhyh!R*10}UV{bi`Pw&@i1<&5e^?qRh%SkPstF;rdaRxuy
zN=8o>ulaHFQ=Z&w@hx@k1^!HNUpM2fXp`&KGf9Fp_nfvB8JP`#`FtgTAil&Qj(Smj
zmtrRZT9rFNvmQKBbFG*(f
zh;9!r(f)SkvkS*^UKd{cd9%t8II+j$H#xkiI$O3CL}VlI
z{Z}~#I&dTo_zHk3x1P+b#kj?+1y5B_&?`lJQzQAa|EW^sX6CVpi-tF)k>-=&((G_JEX(U7uDgNJ`wG@w0&Dl4lU>np_?
z=R(f@0c7YJx!cVL76Wcwko|?#&ZPY*>i@7zhlj60g69;LeLyFeeA(20Q%o6?_>0M#DpHNs}y?O7yGT%y~+9?HovRX
z|1jxI_G)$i``h&N#e_DTn1N8%|G{Fa7TW!iV-K}ryi7FfC)X;@lnk)=VpYe9N($=~Wn
zpXW2bnKSb}bLM;gVP^l}9d}&!E3WH$W2%t`NSZ>c4Ns=NY7+>)r?y^sE&69*vvDeI
zj$iH(6I8Ud((0L^GJaO@0WIMW$)7d$&FhRw{b$zHDM;&oU%D_n^=43@;h+YXz^{3>LYw|C=?-rV=3Km`rZXf9JFH
zo9(w$ZC}@#_g|=LsQ)%u-(S*59Srk
zlYSs*9<%Mhnf~b=Au3^~HkXIS?4%Z4Hf!WAEp|c%l8i=G+SW=&`9DM)eRV|{_`a}c
zSp~E6>2anuYda;y_Eh_eIhjB_^rcm22@in~!Me?t$kW03!UbM|I7J1~xOUOD)&Iao-k=R%+g)uJFU{73aYYdXzz45>*B>9ZW!*MG8j3KoS^7UoYdU
zDb+>|FW%hueP_tu?(Q++?g*S@mU0-glYaX*J=uP;_3}vdl(Ru=R2ZOc-C)e9a)o2Y
zUgd2sN@w_rP~ws0<(M)XJNK8<>;CRp$gK$qpYQ@*sW~xEQFHv=B;QybA;%lM4rT_
zg+BQ;e{!S89q}IutEJj1YaBe>Ca682`=KtuF2o+7W><2x|%L|!TcWT3g=<2){-lh
z{LUJe>VJliyKgCWH;-(Kxa^j$2(^2|T;;Cq;qZ>=Rt^Ehm@h9hJ!j$BZW}cqw4fg%
zw_&QQF3nfEQS
zdGILAOe@dy?ud%)pMU&@8>IHl6r)G&6343C?0uex)7zuxVHpj0;Z~uH3VC6MpN~Ts
zOQyJ4%*@{L*T~Nw*@R`F2#yp0g;+8*qkk5H_7~Tldru|L3Jd4LTMw&t9>|B%a(hu!
zwvdhCl*ns83WX#v73eV)15kLAavV{+r_aI%!XGCC2WaxEBVJJdUENf5&!;T3EHzKr
z8*p~}PlOURlskTAS5$FJV?P8J_r^b;F43VxlF#q^n@JsUOo*FGfxtt(VyBRfBhe#^
zb50awj@%5{_~~R%xT5#kYi6_0Y3>W|s^s=NC%B2X99KQ>>Ff@cUFCJ1+b6uq?|3Cf^nUTS
zJEc_YGZqW+N)oDM9z98!!E~kp)%059>pH9Q-}U$%unJOuBqrk*
zE-uP!#a9Jjo9Y6b)R^D~yfr*3^QPMd_j-^Kq0@9>{s;H8!kuHv-WG+P&5ac!tl_s@bjt!Ks?d$j`I{Nx201*UgR
z;$o#_B5gz3FG&z)oIzUoZ}ja*cu`T$`iQ+fJQPpmH;KAh
zddrTvpoLRL7ke$5LcIrnF#5>jT$dm<^r1U5?kW+#9%whckS%?Vw63P{
z^BG1(HQ_fKV<*J_I*V`uA{Kh{YpmU=(!$>rpJ}r%s7wK@br7{fWos`}{p~WIf6Vsi
zkQJ1J{e-|fSULf6h>OvuerzNEv^VH`*~eG
zXEJ?qf;$G@ZChid>v!XQjasE#rKah3K(ByGauj?Q-1%w
zCazh|DXWE-z;5t(@bWp>RK_?ZgU;~pjSJogXx!atRFq%WIEjjWUeRfDo+k|d_rb<#
z9ZSl)GP@x=&tC&<|1C-q%lj9g$q_rWGmQCA3RJ|)uKx|r8koENe{Ul9zh9~J>j|dx
z#U5clL`N8h>O!`sm!~CKCkf!aZguKt_QAG*6eY8FQTF$=*LJks+c@=hTyDgwf;+X_
zToV#QbzcbA(vC;FjLmZY4r=zVG>RZ0~?<$z+h=KGi8u}8`
z_o_qv?wHjikgu~1czT;fUzzi*E?UsY##9Ayq}#HuDhgjQ_sj-o#mO$8t8wX70N1)j`Qc3diixT9xLdAjqCh|Q*4*VCkndf6)cNZKjIfBSJ#_0C(lmXdP~W=
zTOGy9Ay=M#q@>49**Y5PHCH@Q_mbPdbhRFR6!m_`@Mg(Z1J#)cBj8*v+VCGR=*7kt
zGi3GdGf%&PX3?89q#xg;6^h2gF|F<~WrjozJx_)hcVzYNGui!<(?1)9
zfZp1WAXgfS@nH;33S;N1-!ysi_#g*6ta$J(K=K>*?ug)D|x_B=eKBAw%&`*9?Zl
z=kf2<40C$z1b^bNTVq;-c9k-EA`9NGg5G%3kVz4^13_vvXO}p)(=Y*IwO6wza6ak{
zSED%CS2wD{n2S8>x$t}i!zK7hm#rjtwQ_WHSfH*S$d)azK%pslo3h{(Yt?r*(k8KG
z^dt7A;|EJD7PZ@q;$^#juk0hw`|(s?!x6`CM?UqlI3vS~-?Fakh=Xs|Q`pKkWNT~CB%S_z4<8km
zpRLF#oGoi}XNLtTdMVD-f#ZD!>@ir4FsRvY97kDM!2@
z+r+uO!8uQh3Lz8sd<3Aq?yb7*MS2$u2y%kU@C{ds6JdJ>0)34fO&naE?#+Ff>MBX%
z%g_3jr6{GX^>&g2eO=Iu^Pv_rV{rGj?#2BtJ!CYqhK|Q{`#fjQcZq!_sH^739e}N3
z!79K&QuH`kN>-~Sst0ekRAT3ue1gWepc$Y?BF0=Na*#>Ku<;U
zZ)t~GlN9o^`OeCqWR!I#s9^Uk3myBC`zWS*z;laM>X2oESAEV^^Hsth)6r|ogl}sB
zHaW{Cv+zc}PvuN_mXP}ce
zx?ykgBS=_y$UGuYb0nLEJpTIWrOt^+5*vrNwRbG6>@J&rGpfSFv^fUdDeK&5HpNM1
z^g+ipx?xRi5_VD`a0PGs51sz(vAuw)kA{>{bXp(R($uPf<5&9Bye95W#`jwQZxZvJ
zy(0O}>MC90dQxynL;0Tdk5Q5lp|^l=se)jk>^)w;TEa$G
zw&7_lF#9%Y<)O?*Iz50P)IxgZAWbMCxq1{%p0P*@^8bqb7Ns%F!dmA<7D8pv__%;L
z)@{P>=_eBBXxg^4&;t1!PMTi!T!t)mGe9Wc;5Rb4sH$MjPu9mpq5^6iQKx#V!5wkv
zx&t)thk`UUoB=|>;ZB_iErXDE({dy2TF$C7F?@*O$4>8@jDMwUqNS%ODv%Cl!YyM)OfoBg~osR+9|{6>SH_(4$K!L4)@>*MOE
z>H@ygx_eH(#eN&`QC5&AlxF-1DbJn7FZ_gGOmq1#e!p>d*D5zIhA`oU54l!>%hJ54>9cCN;$+BSD++O
zFsFI-4tB|deD%q_6jF7#uG58#7&+Kk-02m-Ds{a}$8%Tq)>m62)&WoFyhCv{V5b
z*?xlfwziU#)$$B}4_48r%-?>0pi(pt!6=5^{{%wrSB0(pFyL$CZ&bu)_<5lMM%h~b`b0pbcG#Xa;*#0lbUj5KoM@Z#H`C>!hlCw<*
znsZDTBoGqS-+QIsq!_sP^Fm4&_IAQ;xHql`w3X6ove6dZsuI6(o5*BCxShq3Q*LsH
zg)EZq>f2I3BUaHrlmUXX-~UYT6lW}^>bG>1`~t0k>Ji;HF%fz=E7{c}_qnCn4?`V%
zu=S`rVS9UF=rKBMf!po%>slR~X?27JzG;NGgAde@yGP68
z|8*ZxQ8#{rce0b=jk?BCguSx`^O?{rd6i`oC1}nw+Ic`0(GfRssnpYy7GT1yqYyLTJ@+QN=7ER^oLXY%)3wNg2JfEU
z5iP#)tJE;RRX^js``$;>tLY-f7wZyaEB2>i=NIySZb#Pof3ed?L3}_4jt+^#e_!*?
zY%zkDt*QY0V6HzU?(g)5X*~Y#ZA{JE$Fga}zOMC;Uom#e_kX(q#(zJ+XSEoR^T^fK
zumCoC^Sn^~%xz_K`g^-Ti6f$pyKaoKSXNSNeheU*#^|bs_0dyFP=CoMO>B*QYk$a=dUs8&1}FAizGxujM$uVI
zwA@-){ivn*HfXnXCVa&yJw^!4aP2xHuYCO|KX*+2Ze2_)>ey28*Ci^(qfu=VX
zZYxNS7JUJW#ryYZHM^o;3q{X*&pLoXA-5774A+WCh;oAyiyr0+p{=T
zGrY=yh${b&8P(*0{*R8jVJ#{7HIUWyd`=FHE<#ZvvfryEJ%pOOPf&yuoB55R6z?65
zRt1oY=xD}JH*YOg5CCyZv*$-nR3!hgW@$tit91)HeaGGN&NUiUN798BibkZRD8`&BL@YSGd?`6f
zz61X-)&J59Ep9s!lzz8LQ7kfT(VMFmcQqmGil7qGpQq-K+Jn$d2o+SVPS;~?6Op)iv9Q4J
zJD^n~c5l0@GsWxiAI0Wsq7v5Kq&JoQB9KbR^%a|{x&6WV+aNRI*o!rS6Zh?fu4+w0
z;?UEPux(1`83^}wn|w!@D_PkWWH~!K_lKe2O@(0|ac!=<>5Tw2G>KHI$9`m~a7rF^
zam6Np%`l)M&m2(d@q<=1nqhBw{P@v*sO?9ezGUJxcJ6r)uFBmh*3a*Be-ln2e3e?P
ziq`>}x!=c>Iwn^`bm5c*;SIG55_yF*C3h50-h7Hm75r=tO+K-TPaGY)xm2)RECCdm
zYOn{gBO;AIK0ROESY?PX{5000Sr%LWI=5i(XW|28oCi<@wM3KnXkGXoUXf!1X(=b7
z>kg?bF3Er5M|v@}rv@w66WuUBA$L{0&ueol_sHJoqe)m`0k$l4Cg9_)@cRuxbBzinZRP(O^d-6^jQT;HfO%Zz;%r{C|Q
z@w@xpMkLCxVj@Y0yBegfG#xaItaW7zshjJ
HT92e0i{-sYp}3uFO6
z1EWC#4!OFrA{CX|L|8+?&zUrlvDxR&uo}q5}S3
zYRdPxeyM%UHHkR8(5L%0WE}_GT>GULdCt*~>F0C#?4a>!tS2v8KeauhSh}iwX3_cq
zqDDjUjHv?6JrBic%jo3Y)?;cHEKXdinLxaLM~e=Jr0)PU2*De}Ss%*dVfWn4
z`E`^8=Mn9bDVw6;qI!0j)%^WFvW}_$%8tZ
z035k{OxQg{(rX4S8^H*f^rK=PQg*8l;Cf<<66(+)E6CLkA>BFFDrz%h
zHue3ODml&e5h3XR&Xw7}uniXg$OO`>8gPsJ!B*+zLn4fYn{*En#7BS&H)I`h+F4ip
zNta*C4>k<5dl)vei?Lq5N8))G2$TVJLHGkQ)Sr}p5A*oRzn($Va9ntwGP;k_9Ja`%
zC4~^9z0B*2+};07hny!qzkDjZVoX$m8r+(bKX`n6nNXOPY>7JMvww$^mpx+}_V7P=
zCzgjWiTO9jL;s~<6C~Meg=~E3cpY%7qXOLHS_#e$h1%Sr
z;hm2cQ{mL1g}3cIl*TyD#$!&-xp$`U$~dtb0d8?UIop#kIXN;hoRq;|hCk%EN&_mm
z%hg2xFu%0ZbZ_F9(;=VxMQX#aJYopsHzps{!i%6-|LY$|+B~r!o^KuNBiU`AXij=
zDv>S0%SU=uGI8jP@Ag>V1uNXQPVo&-`;Oj^C#H)(CMqO0a`I
zIBCW4`Pk*hN7JP7(H%}14dz}aWO3~zen?GCS!8f_f2YELHmn<}Xnx^UsaIE*bKy^h
zTyKES)SnI(#v1aPFR(fPAQ8fyxZnT4R=x7c_%_GwxLR|Q9c4$yu_AWc#B&`vNxyPT
zwBTada2MN&->!K_{Y4mBLQJ;%nBz)viRECIzl@86;90TJ~cGUOGiQ5tj{djSd$
z;ru@IRu`R|cGNC60-DfG|1C3-+^+*i%eizmd5W=CjZ+TA`aGn>=_-`g5%0LWxD~|)
zv@bMkGh?@bd0qD}PyuUM^`6Ik-;rg{Yew-QEY28B7!JrBeqnhD`tXzEiBy#}(229L
z+e_4fV|RBQwv==?gj7zd+ee_D4uogZRR=XxkW*KDumO*s5?*vD2|Pc&kCwJgnb(te
zmB^;PJ!2`6?{GA3#rrztX@l8XBa{Af+$L(uo|b2m|mG6q8D0%&xJ&1d2<
zE_gkhCY&3rvUqy88aT3Ya@_qzWpK}jqS6n01^1UheB}?bZDg#s6+A!=p_5j~m1k@z
z%*yK&3Ewy0mIw9NVxqmR*B++%@I~dxHjhf%2R#7z(^#iKS;pom0FOjOO>270G=gTW
zj2OsTmpW~iH`3xqO2C!G%$o)ie7)+@I-
zg-XYg77OcbLx0fjyG>Ga1j|&t>BNy!YBXrSfBNS~TuSy#0PT2
zGFgYgWEuQ+DytWeq%-|3j6@qNan4y=TAD>C+yUFR-OWVjhqeo*PDUn^jSC(qNL%@k
zYQZ@t<({+SX?8ed$|ZL>-WSCmFotWF;xCmuVh+4f)QTbGj6b`jRbu=To^I
zIt$4svAq0K*Rmg`T2zjTar-*KOB}Qhc>9w=Hkw8}YjuPG9;tx3zcwzpx$p&8u~dWo
zgQiEQ&_>1Nxw?_y=&n?DtIGo*;f~QFzbH$lGtJLnsrr5kucHkEWaL2ms6#Z9Z8<3Q
ztGY(H2tRnr_3-{hd|m#iEhKW~{fdI_#Y_EsRWhJNa}16BX9l;8EognDf9yp?TOyHa
zj+|pg3*UH%QrPBUiY_$Er^l2Z<9acGZI?vuPA(L
zhE7NUtuOdqW-ol`(brrU3`wq+kH6;xXY{QxdktH=yJ|GcVbv^1W>Y+T*qLt3
zlwSOUF
z)DXNAm=Fu&nPDzOqYbDVhXXbvY=qD(qr(NXvq+)2A(9Rc#d}BR8D?FQwliWDQ`?hs
zGx3AiRGEafr=;8Z^iJHln2XbcDZa2HM~H6kjf8;Z#U3=dGZPLY4YJPg2m{;Yb0VrG
z`ZlWxr!pui;*7XMM?Ua_V4>hc2tLV16G2S_R{=M|#^;AaFHFDS%8<6xV{s8|oOH$z
zX;r@1yfwv&%InfeshItkIb=iKoC!@{^H02Fa!x+Js7@PLDi#fFS=V~iMaPngG-!UQ
zUSl>kZ+sS%cTb0>xqS6qf=$QsbX7UIk>Sdx`5keRP=7&miq8*HS{0cN%AERG@9P*S
zQ1cPFCReX4Ea;9)i)3nWIdd#mxl*(rx?KQ4dvcs|s@r1{OK}QVrhYP-gt_dVNBNkf
zpDU=C#&c0DvgIANl=qgyC-|xK`GBR_F$xQ>L2mfZc8P&^&w~?OQ%7~cJF}MegbmvA
zsMA=khx~L|Cmk9YCE5U^){L=Tl~u;g%}$$>MoJK8T(VxF(HZPOm8MbKBXC5-i~
z#ZfHt-yxEd1wgptsW()cAVO=OlE26c7L-LHQ*JFE};C@eR%TOzws
zZ2hX`JF@>#0sofjIBY{~`e$iOqe=8>8o`H>YR)=EJS5r|RS~^0T{@mfA%3HLvbY7@
z!I({Z(SJ@Y(h$
ze=krrv%Z0+-X;mLwxHlanwl$RDr#y%P4@fTjBb#+(l6WAqp;Y1yrNSml6(6Xt#hjs
z8Ltqhd9!PBe|Lo{UGNB}dz*EZ^HNYDM)kt8LeDo=g(!rjCnB|DUeR$re>S@rwQo~7
zaOBi*g)=NG@XaQD?q
zS{k1%;-}JOE$;S*q}jEU6`j!O_L@z)#7b{?^!3UaQJfdrx4X{VTB{lbK!oZ5CpbBw
zWp}b)5OUroLJ>6jES7bx2uj`7J?8zW;2Ge{65)jO6ECNp2b-)X;vSr*S5Cneyw8Ba
zfaFX>tc?zyxA#~u1(v5lVy$^Jn-!jo^Mv^n?#;W+tc<424#X>My?#?(m8rj*%f+cG|f#lqJexP
z4!T&N@xzZ!W>?c(hORk`of%9_=9FjRS)mtnG8Em64}}U(FICdY>*^%h!6ba9*GnC%dmpZkusm7V5i^nGdMRTk!oY5JGk
ze1jG6@DHjLyDD_UeM1RVieuXTYK>LQlzL!ik)=>4#Ojdq20{g5c1|R6#lgqSjlz<#PAqPg{K#Q`ZT8p3`4UIp5
zgV3i8KX#0nm?AzZTzND?`?UByyjOn;2L#$9OR;YC
z4UjvXXsO+!qiuu?c-Hu|727?bhX|Twap5UWRt$LW1>r!IIZ;ZAM`o!c;3AkVUiCMiRgT9Mg$>8tGWf+g
z?}s#M%$tpnQEwh5NB
z$ZNqvf>TjyG&M<|ioN{a>*HLN_jZGnvUAf5euOrQB-$LJbZbyZIe)7YJJ(Vxj09c(
zz6}3Y+;}t32RAbGbav-3F=8}
z7)y^NOdd^AC|4A=uM0MvR_n=~g2L9(4Whg(Gi<@I`c}z&Kliwv`|fx1=z=3&F=)+M
zHEi{-Z8XtOLf^6;Axcd*e;lMWm;ahDTvbe*l%cNVQZ*O(2AF_x`R_EwwTg_d;0?(V
z0%b%_$lSzB(X%jq&7QIwmAO^QvFFh3h;z;{VJ{^}&|8=)AJ_22O!5ur#IJOWyzfc!
z_1O&1icg(nUI#0abV6T93DPO9;czt6H;-w4>$wHhOHwtobg~0114m=}XRC@*RVH2U
zCFXr-{hT{GYcsD-?Q@*FpnIDhUPN4=eass?8{A-q%zEzH
zr;E@?Brgx!HzY+_0;2EQqJZRB%S~gSeyf
z73Jyr8FKBpDC1513wyMrlBIv++Zi%zeBR2_My@Aqact
z)PG4*!}nFnHtTQhwV7zkia<}BF4u);YxVsflN~hevqmAnqDtRgo>sU)XPIHxfNNGJ
zd&>Vxa$MXBdLxs0_VQC3OM&JI*Jc3PHW(xg)IEOWELLOYRlR5)8whsJZ~%|h>od_x%!jT9;2EY-BS{H@qlm%DFgOOzq-pgj(S;-v~k{>rN@>vxr
z;uNBa9fDbD=WWOzUw!4aS1HyafA7(yNMolWFyKfp%D2QnrVS}WqPsxqxtb|
z=?LP}tW31P#b}@3^-n@=A)`wJ?SI5D^cOqO($HVq{Qrkm{{QV{{#)|&|NJq64NB`@
z<$SMKy6^p4c=lg~bG5G~NEYd?G@ySKz3j2@NWA`^T5RYNxlK>I(4f#(?TWuIxaH2Y
zD#I^DhTF?9hDsM-`gyp2c=|T5Z~{I#@#c-p>T}-xvI+G);~yay9Lr0>J>.
++
+. Create a `connect-test.py` file containing the following code, and then replace the placeholders with the values from your `rabbitmq.conf` file:
+
.connect-test.py
[source,python,subs="+quotes"]
@@ -67,7 +180,7 @@ import ssl
import pika
virtual_host = "**VIRTUAL_HOST**"
-token = "**TOKEN**"
+token = "**PASSWORD**"
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_NONE
@@ -97,23 +210,18 @@ for x in range(10):
connection.close()
----
+
-Replace the following with values from your `rabbitmq.conf` file:
+. Optional: Change the values for `queue`, `routing_key`, and `body`.
+`queuename` and `routingkey` become the names of {pulsar-short} topics in your Astra Streaming tenant.
+The `body` is the content of each message that is sent.
+
-* `**VIRTUAL_HOST**`
-* `**TOKEN**` (from the `password` field)
-* `**HOST**`
-* `**PORT**`
-
-. Save the `connect-test.py` file.
-
-. Run `connect-test.py`:
+. Save and run the `connect-test.py` script:
+
[source,shell]
----
python3 connect-test.py
----
-
-. Make sure the result is similar to the following:
++
+. Make sure the output shows that the connection was successful and ten messages were sent:
+
[source,console]
----
@@ -130,87 +238,180 @@ started a channel
sent one
sent one
----
-
-. Navigate to your `rabbitmq` namespace dashboard in Astra Streaming, and then monitor the namespace's activity.
+
-If configured correctly, you should have new topics called `amq.default.__queuename` and `amq.default_routingkey` that were created by the Python script, as well as an increasing amount of traffic and messages.
-Your RabbitMQ messages are being published to a {pulsar-short} topic.
---
-
-Luna Streaming::
+. In Astra Streaming, go to your tenant's **Namespaces and Topics** tab to inspect the activity in the `rabbitmq` namespace.
+
---
-The {starlight-rabbitmq} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster.
-The Luna Helm chart simplifies deployment of the the RabbitMQ extension.
+If everything was configured correctly, then the `rabbitmq` namespace should have topics named `amq.default.__queuename` and `amq.default_routingkey` that were created by the Python script.
+Additionally, the namespace's metrics should reflect that at least 10 messages were published and consumed by your Astra Streaming {pulsar-short} topics.
-The following steps explain how to deploy a Luna Streaming Helm chart to create a simple {pulsar-short} cluster with the {starlight-rabbitmq} extension ready for use.
-
-. Make sure you meet the following prerequisites:
+Java::
+
-* https://helm.sh/docs/intro/install/[Helm 3 CLI] (we used version 3.8.0)
-* https://kubernetes.io/docs/tasks/tools/[Kubectl CLI] (we used version 1.23.4)
-* Python (we used version 3.8.10)
-* Enough access to a K8s cluster to create a namespace, deployments, and pods
-
-. Add the {company} Helm chart repo to your Helm store
+The following example uses a Java program to create a connection between RabbitMQ and your Astra Streaming tenant, and then it establishes a message queue and sends a message.
++
+. Enable {starlight-rabbitmq} and get the `rabbitmq.conf` connection details, as explained in <>.
++
+. Create a new Maven project:
+
[source,shell]
----
-helm repo add datastax-pulsar https://datastax.github.io/pulsar-helm-chart
+mvn archetype:generate \
+ -DgroupId=org.example \
+ -DartifactId=StarlightForRabbitMqClient \
+ -DarchetypeArtifactId=maven-archetype-quickstart \
+ -DinteractiveMode=false
----
-
-. Install the Helm chart using a minimalist values file.
-This command creates a Helm release named `my-pulsar-cluster` using the {company} Luna Helm chart, within the K8s namespace `datastax-pulsar`.
-The minimal cluster creates only the essential components and has no ingress or load balanced services.
++
+. Change to the new project directory:
+
[source,shell]
----
-VALUES_URL="https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main/starlight-for-rabbitmq/values.yaml"
-helm install \
- --namespace datastax-pulsar \
- --create-namespace \
- --values $VALUES_URL \
- --version 3.0.4 \
- my-pulsar-cluster \
- datastax-pulsar/pulsar
+cd StarlightForRabbitMqClient
+----
++
+. Open the new project in your IDE, and then add the RabbitMQ client dependency to `pom.xml`:
++
+.pom.xml
+[source,xml]
+----
+
+ com.rabbitmq
+ amqp-client
+ 5.16.0
+
+----
++
+. Open the `App.java` file at `src/main/java/org/example/App.java`, and then delete any preexisting code in this file.
+In the next steps, you will add code to this file to create a complete program that produces and consumes messages.
++
+. Paste the following code in the file, and then replace the placeholders with the values from your `rabbitmq.conf` file.
+Your editor will report errors because this isn't a complete program yet.
++
+.App.java
+[source,java,subs="+quotes"]
----
+package org.example;
-. Wait for the broker pod to reach a running state.
-It might restart a few times while your components start up.
+import com.rabbitmq.client.*;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.KeyManagementException;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.TimeoutException;
+
+public class App {
+ private static final String username = "";
+ private static final String password = "**PASSWORD**";
+ private static final String host = "**HOST**";
+ private static final int port = 5671;
+ private static final String virtual_host = "**VIRTUAL_HOST**";
+ private static final String queueName = "queuename";
+ private static final String amqp_URI = String.format("amqps://%s:%s@%s:%d/%s", username, password, host, port, virtual_host.replace("/","%2f"));
+
+ public static void main(String[] args) throws IOException, TimeoutException, URISyntaxException, NoSuchAlgorithmException, KeyManagementException, InterruptedException {
+----
+
-[source,shell]
+. Optional: Replace `queuename` with another name for the queue that publishes and consumes messages.
+This name is also used as the corresponding topic name in Astra Streaming.
+If the topic doesn't exist, Astra Streaming creates it automatically when the producer sends the first message.
++
+. Add the following code to create a connection, channel, and queue that is used by both the producer and consumer:
++
+.App.java
+[source,java]
----
-kubectl -n datastax-pulsar wait --for=condition=Ready pod/pulsar-broker-0 --timeout=120s
+ ConnectionFactory factory = new ConnectionFactory();
+ factory.setUri(amqp_URI);
+
+ /*
+ You could also set each value individually
+ factory.setHost(host);
+ factory.setPort(port);
+ factory.setUsername(username);
+ factory.setPassword(password);
+ factory.setVirtualHost(virtual_host);
+ factory.useSslProtocol();
+ */
+
+ Connection connection = factory.newConnection();
+ Channel channel = connection.createChannel();
+
+ channel.queueDeclare(queueName, false, false, false, null);
+----
++
+. Add the producer code, which is a simple flow that sends a single message and awaits acknowledgment:
++
+.App.java
+[source,java]
+----
+ String publishMessage = "Hello World!";
+ channel.basicPublish("", queueName, null, publishMessage.getBytes());
+ System.out.println(" Sent '" + publishMessage + "'");
+----
++
+. Add the consumer code, which creates a basic consumer with callback on message receipt.
+Because the consumer isn't a blocking thread, the `sleep` allows time for messages to be received and processed.
++
+.App.java
+[source,java]
----
+ DeliverCallback deliverCallback = (consumerTag, delivery) -> {
+ String consumeMessage = new String(delivery.getBody(), StandardCharsets.UTF_8);
+ System.out.println(" Received '" + consumeMessage + "'");
+ };
+
+ channel.basicConsume(queueName, true, deliverCallback, consumerTag -> { });
-. Forward service ports so you can interact with certain services on the Kubernetes cluster:
+ Thread.sleep(4000); // wait a bit for messages to be received
+
+ channel.close();
+ connection.close();
+ }
+}
+----
+
-.. In a new terminal, port forward {pulsar-short}'s admin service:
+. Save `App.java`, and then build and run the JAR file for the complete program:
+
[source,shell]
----
-kubectl port-forward -n datastax-pulsar service/pulsar-broker 8080:8080
+mvn clean package assembly:single
+java -jar target/StarlightForRabbitMqClient-1.0-SNAPSHOT-jar-with-dependencies.jar
----
-
-.. In a separate terminal window, port forward the Starlight for RabbitMQ service:
++
+. Make sure the result shows that a message was sent and received:
+
[source,shell]
----
-kubectl port-forward -n datastax-pulsar service/pulsar-proxy 5672:5672
+Sent 'Hello World!'
+Received 'Hello World!'
----
++
+. In Astra Streaming, go to your tenant's **Namespaces and Topics** tab to inspect the activity in the `rabbitmq` namespace.
++
+If everything was configured correctly, then the `rabbitmq` namespace should have a topic named `amq.default.__queuename` that was created by the Java program.
+Additionally, the namespace's metrics should reflect that at least one message was published and consumed by your Astra Streaming {pulsar-short} topic.
+====
-The Luna Helm chart deployed Starlight for RabbitMQ on the {pulsar-short} proxy and opened the correct port.
-Your application can now communicate with {pulsar-short} as if it were a real RabbitMQ host.
-
-=== Produce a message with the RabbitMQ Python client
-//Move to messaging section
-If you hadn't noticed, we never opened the {pulsar-short} binary port to accept new messages.
-Only the admin port and the RabbitMQ port are open.
-To further demonstrate how native Starlight for RabbitMQ is, we will use the Pika RabbitMQ Python library to produce and consume messages from {pulsar-short}.
+--
-Save the following Python script to a safe place as `test-queue.py`.
-The script assumes you have opened the localhost:5672 port.
+Luna Streaming::
++
+--
+To use a RabbitMQ client with {starlight-rabbitmq}, you use your Luna Streaming {pulsar-short} tenant as the AMQP listener.
+The following examples use a connection on `localhost:5672`.
+For other connection methods, see the documentation for your preferred RabbitMQ client library.
+[tabs]
+====
+Python::
++
+The following example uses the Pika RabbitMQ Python library to produce and consume messages from {pulsar-short}.
++
+. Save the following Python script to a safe place as `test-queue.py`.
+The example script assumes you have opened the `localhost:5672` port, as explained in <>.
++
+.test-queue.py
[source,python]
----
#!/usr/bin/env python
@@ -236,17 +437,18 @@ try:
finally:
connection.close()
----
-
-Open a terminal and return to the safe place where you saved the Python script.
-Run the following command to execute the Python program.
-
++
+. Optional: Replace `test-queue` with another name for the queue and routing key. These names are also used as the corresponding topic names in your {pulsar-short} tenant. If the topic doesn't exist, it is created automatically when the producer sends the first message.
++
+. Save and run the `test-queue.py` script:
++
[source,shell]
----
python ./test-queue.py
----
-
-The output should look like the following.
-
++
+. Make sure the output shows that the queue was created and a message was sent:
++
[source,shell]
----
created test-queue queue
@@ -254,48 +456,15 @@ published message test
received message: test
deleted test-queue queue
----
-
---
-
-Self Managed::
+
---
-Already have your own {pulsar-short} Cluster? Or maybe you're using a standalone cluster? {starlight-rabbitmq} can easily be a part of that cluster! Follow the "xref:starlight-for-rabbitmq:installation:getting-started.adoc[]" guide.
---
-======
-
-== Message with {starlight-rabbitmq}
-
-{starlight-rabbitmq} supports quite a few different use cases.
-With a {pulsar-short} cluster between publishers and consumers you can interchange the type of publisher and consumer to fit your needs.
-
-[TIP]
-====
-The following examples use an Astra Streaming tenant as the AMQP listener.
-
-If you are using Luna Streaming or a self-managed tenant, use the listener URL for your tenant.
-====
-
-=== Retrieve RabbitMQ connection properties in Astra Streaming
-
-//Already covered on the Astra tab above. Image isn't needed.
-//See if this can be made generic or already provided for both in the Starlight docs
-
-In the Astra Streaming portal "Connect" tab, the "RabbitMQ" area provides important connection information.
-You will need this connection information to create a working RabbitMQ client or use the CLI.
-
-image:rabbitmq-client-settings.png[Astra Streaming RabbitMQ settings]
-
-TIP: Click the clipboard icon to copy the RabbitMQ connection values, as well as a working token to paste in code.
-
-=== Produce and consume a message
+. Use the {pulsar} admin CLI or the Luna Streaming {pulsar-short} Admin Console to inspect your tenant's activity.
+Make sure the `test-queue` topic was created and a message was published and consumed.
-This example uses Maven for the project structure for a Rabbit MQ Java client.
-If you prefer Gradle or another tool, this code should still be a good fit.
-
-For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[Astra Streaming examples repository].
-
-. Create a new Maven project.
+Java::
++
+The following example uses a Java program to create a connection between RabbitMQ and your Luna Streaming {pulsar-short} tenant, and then it establishes a message queue and sends a message.
++
+. Create a new Maven project:
+
[source,shell]
----
@@ -304,12 +473,18 @@ mvn archetype:generate \
-DartifactId=StarlightForRabbitMqClient \
-DarchetypeArtifactId=maven-archetype-quickstart \
-DinteractiveMode=false
-
+----
++
+. Change to the new project directory:
++
+[source,shell]
+----
cd StarlightForRabbitMqClient
----
-
-. Open the new project in your IDE or text editor, and then add the RabbitMQ client dependency to `pom.xml`:
+
+. Open the new project in your IDE, and then add the RabbitMQ client dependency to `pom.xml`:
++
+.pom.xml
[source,xml]
----
@@ -318,14 +493,17 @@ cd StarlightForRabbitMqClient
5.16.0
----
-
-. Open the file `src/main/java/org/example/App.java`, and then enter the following code.
-If you cloned the example repo, replace the entire contents with the following code.
-Your editor will report errors because this isn't a complete program yet.
+
-Replace placeholders with the values you previously retrieved from Astra Streaming.
+. Open the `App.java` file at `src/main/java/org/example/App.java`, and then delete any preexisting code in this file.
+In the next steps, you will add code to this file to create a complete program that produces and consumes messages.
+
-[source,java,subs="+quotes"]
+. Paste the following code in the file.
+This code creates a connection, channel, and queue that is used by both the producer and consumer.
+It uses the default connection values to connect on `localhost:5672`, which was port forwarded in <>.
+Your editor will report errors because this isn't a complete program yet.
++
+.App.java
+[source,java]
----
package org.example;
@@ -339,52 +517,42 @@ import java.security.NoSuchAlgorithmException;
import java.util.concurrent.TimeoutException;
public class App {
- private static final String username = "";
- private static final String password = "**PULSAR_TOKEN**";
- private static final String host = "**SERVICE_URL**";
- private static final int port = 5671;
- private static final String virtual_host = "**TENANT_NAME**>/rabbitmq"; //The "rabbitmq" namespace should have been created when you enabled S4R
- private static final String queueName = "**TOPIC_NAME**"; //This will get created automatically if it doesn't already exist
- private static final String amqp_URI = String.format("amqps://%s:%s@%s:%d/%s", username, password, host, port, virtual_host.replace("/","%2f"));
+ private static final String queueName = "queuename";
public static void main(String[] args) throws IOException, TimeoutException, URISyntaxException, NoSuchAlgorithmException, KeyManagementException, InterruptedException {
-----
-. Add the code to create a connection, channel, and queue that will be used by both the producer and consumer:
-+
-[source,java]
-----
+ // Use the default values to connect on localhost:5672
ConnectionFactory factory = new ConnectionFactory();
- factory.setUri(amqp_URI);
-
- /*
- You could also set each value individually
- factory.setHost(host);
- factory.setPort(port);
- factory.setUsername(username);
- factory.setPassword(password);
- factory.setVirtualHost(virtual_host);
- factory.useSslProtocol();
- */
+ factory.setUsername(userName);
+ factory.setPassword(password);
+ factory.setVirtualHost(virtualHost);
+ factory.setHost(hostName);
+ factory.setPort(portNumber);
Connection connection = factory.newConnection();
Channel channel = connection.createChannel();
channel.queueDeclare(queueName, false, false, false, null);
----
-
++
+. Optional: Replace `queuename` with another name for the queue that publishes and consumes messages.
+This name is also used as the corresponding topic name in Astra Streaming.
+If the topic doesn't exist, it is created automatically when the producer sends the first message.
++
. Add the producer code, which is a simple flow that sends a single message and awaits acknowledgment:
+
+.App.java
[source,java]
----
String publishMessage = "Hello World!";
channel.basicPublish("", queueName, null, publishMessage.getBytes());
System.out.println(" Sent '" + publishMessage + "'");
----
-
++
. Add the consumer code, which creates a basic consumer with callback on message receipt.
Because the consumer isn't a blocking thread, the `sleep` allows time for messages to be received and processed.
+
+.App.java
[source,java]
----
DeliverCallback deliverCallback = (consumerTag, delivery) -> {
@@ -401,8 +569,8 @@ Because the consumer isn't a blocking thread, the `sleep` allows time for messag
}
}
----
-
-. Build and run the JAR file for the complete program:
++
+. Save `App.java`, and then build and run the JAR file for the complete program:
+
[source,shell]
----
@@ -410,50 +578,70 @@ mvn clean package assembly:single
java -jar target/StarlightForRabbitMqClient-1.0-SNAPSHOT-jar-with-dependencies.jar
----
+
-.Result
-[%collapsible]
-====
+. Make sure the result shows that a message was sent and received:
++
[source,shell]
----
Sent 'Hello World!'
Received 'Hello World!'
----
++
+. Use the {pulsar-short} Admin CLI or the Luna Streaming {pulsar-short} Admin Console to inspect your tenant's activity.
+Make sure the `queuename` topic was created and a message was published and consumed.
====
+--
+
+Self-managed::
++
+--
+To use a RabbitMQ client with {starlight-rabbitmq}, you use your {pulsar-short} tenant as the AMQP listener.
+You can also connect on `localhost` if you have port forwarded the RabbitMQ port (5672) to your local machine.
+
+For `localhost` examples, see the Luna Streaming tab.
+However, the specific connection details and valid connection methods depend on your cluster's configuration.
+
+For more information, see the documentation for your preferred RabbitMQ client library.
+--
+======
+
== RabbitMQ exchanges and {pulsar-short} topics
//Move to Publishing messages section under Pulsar protocol handler in Starlight for RabbitMQ docs.
{starlight-rabbitmq} maps RabbitMQ _exchanges_ to {pulsar-short} _topics_, as described in the following table:
-[cols="1,1,1,1"]
+[cols=4]
|===
|Exchange |Routing key |{pulsar-short} topic name |Usage example
|`amp.direct`
-|used
+|Used
|`amq.direct.__{routing key}`
-|`channel.basic_publish(exchange='amp.direct',`
+|`channel.basic_publish(exchange='amp.direct'),`
|`amp.default` or empty string
-|used
+|Used
|`amq.default.__{routing key}`
|`channel.basic_publish(exchange=''),`
|`amp.match`
-|not used
+|Not used
|`amp.match`
|`channel.basic_publish(exchange='amp.match'),`
|`amp.fanout`
-|not used
+|Not used
|`amp.fanout`
|`channel.basic_publish(exchange='amp.fanout'),`
|`headers`
-|not used
-|Name of the header
-|`channel.exchange_declare(exchange='header_logs', exchange_type='headers')
-channel.basic_publish(exchange='header_logs'),`
-
+|Not used
+|Header name
+a|
+[source,python]
+----
+channel.exchange_declare(exchange='header_logs', exchange_type='headers')
+channel.basic_publish(exchange='header_logs'),
+----
|===
\ No newline at end of file
From 55b1b4dd8008657775ef9baf7a1269130ebec7f1 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Thu, 8 Jan 2026 17:06:19 -0800
Subject: [PATCH 6/9] revise and reorg starlight topics b4 move
---
.../images/kafka-client-settings.png | Bin 23894 -> 0 bytes
.../pages/starlight/kafka/index.adoc | 477 +++++++++---------
.../pages/starlight/rabbitmq/index.adoc | 36 +-
3 files changed, 260 insertions(+), 253 deletions(-)
delete mode 100644 modules/use-cases-architectures/images/kafka-client-settings.png
diff --git a/modules/use-cases-architectures/images/kafka-client-settings.png b/modules/use-cases-architectures/images/kafka-client-settings.png
deleted file mode 100644
index 331392e005138c1cf0668b3c9e6f551b31966216..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 23894
zcmd43cUV(d_b;yFjOZvbib@p*3kXPW(nnExks4`1q?dqz2mu1)s30gsI-z8Q&=Mfh
zTS8Qtln^N)5K2ZMK@xfhB_ZMBjPt(V`{#Y0`@7HY{E_FJefHUP?a%tGwbxGKJxe3r
zpM`%ua^wiFiSZqqBS(IOA31XLMmT12BuNH
zij4k@h7Y4R;=4K(^JJCy_#+=|rp;%BJxe=dptuOU(i`%Fsp6IOqdua=AfAD%8;zp!
z$CU%umj^o3Pbj7d{&8?uQgHeA1i^Lj^+b={BOAGaqW5NMR%?LvSeAW6bt4f7j9$Y7
zSf6H*NP1j&1OyOXec<`a?GqBu|M?Mq%gxw7^6%5_6K`A3$DjWnuPYVd|IPi`_=Uua
z|2`VZoRLTIX}>tp9L!4y94@o*ySOn#$!^+9cC!JCB7pT{(T&sn^wGIA|Z>}893
z$^Zar7CZ~S)w_5bL409%@t(KOXk4frI8L%Zl|%N7joh^btGZ!H_qz@>
zy!#d^*BNidFU5~t|F=l%;INVxcBoU8H~#63+#Pr0($kfA%e>y8j*$uCNg~((qPN{!
zl~l5guuVYRwKK09Ru$ofZ-+K6j|E5CB}V0o$ibsrjlFLDU4XBI|LWoAdWz9;=>C|c
zwW>;6(SwhT-jiLHVOuVOH&VzdFNZFvq|StA?9n&Fn`LosZp}l1L}0$`M-Z+KHWs
z;1v6f$HIz963j`*`iMD_at%sX-a$?C0wtQVk?cLG=2q1ov8SJ_0D&MUMaFDFKbW|<
zUZbrp%s1zny~3lQk&`j|j%$oO{%BnGS_^AeBy3V`07vU3pZ?etO5y($rOBy#8J*?G
zUQkk*Pgwe#$4Sl2B61A!BG^%L_HJa~rUOGIMj*K*uF-L1SYw}+w}(4ZPwI9ZW2U!1
zfo9xl<@u^XwGG=cLGjw|y^~>2$b@A>{9g|&XG&s8xearM33u!-fb2j>6xg*D3
zWGBlTGoxL7S6p10v}f2%go4@$-UD1d=)5a)d}02Rdp(7Uz{Y^0*J2#Y>n=ek$<1vA
zIl<~E^jfOK)IqaDxG#NYBm5Q=DW2nDru3RkJ7}cov|wlm9kfe#bwj=_0OQavq;TwY
z=MGOSpwnVk@}N5gStPxHg21DV(I^7xc8f#aKr9*{7pR
zTgNP)!#B>E2;0Ceg4>#2D}W65DEZ1$JLteBs~Qy|A=-ku-KiCjLh{&gakEUy209j
zz?G#t>S_nqW&u`ei-V%zt{b)C%oh(sJ3xX$_cXjc|qAsS4tC#$YZ&lu$c
zG%)XKR$aAm=!llUDLGkC3mr$20u_p@OYvtBg*sqSNn4eZ;W^1Wl8iLPBfz|OAuL$j
zgQ7htXFmHv-K_;n#%En5E+;VV9lH#(SXi|MDBrpXd|0#3?)Tgs0n}(XJDrxsrUD?o
zC60R(|3GcqO5`je#?H^Fv#Sg|rP3%t=~x8~qzp@Wlp^|d*v1E{La
z;=d(?nwq26`z89W&_IBdwf^1wwHezjtX5SZKe0)DcA)>#PWhjs(J1p;C9@o|iodp04fS^4DCdPA{ZsGpYeL;D#pJ5IgwVcf#a*54Bhju*S&vsRE6rWX6>N4hZs0ZY0v!?
z)!6fUv&|wCXI~%-+mf0uI{KGYoh#=PFQxkAp1t#GxHmxc>=0c;1^Z6+MRsmubQGj^
zKjy$Nj8e*tb!!sK&i!;>rA9x2$-lGsr1ayCy;ah*X~hGw^3-^-u3oHn_=VNp;P?&A
zlJ{<2*VxGN7SW~c&&;QjvGu&z@#Rf+3*8_NOF*weM9UZB#8C1L2-XL=Hzoqj9{Tgu
zKIrNh@$FP%Tp&9c*Zy*GdX{E%rbC&+DK1jhJ6yo>m=>${0O)v-O=T2LTw&=6vF8ij
zIar1U&H^uk6dt@Sajs6pcI~eluhrX4Ie__*T=TZ?(EfJg^R%E1Q*zcVTBpD~AFrn-
ze!#8To5;V@y%CLKeFE4NN6uVTFUYmGk&Hw{EvE1)Sy
zRv6vafrLc^&)jyx)sPF+@Y5`fkH0)Z(EG9`DPYP_%Ng!FVimWa>D@P8bW~^&>FU25
zOE<}2dl~7cTyYL({8r;sNjaU;bN+KrRCSA}+{)X2O6k{w%lm`kNdY};2K7>w_=6a5
zOC8b<)HBnlp}t8AHM!zwt22AsK>Oaz8r-@VR`MF(!8&lS*HdowZ*0i76GhpJ&zDxu
zl!QzaYgNVJ%$q>PF;R}+3B`PIs}kHhsqc*%pdVl)rZg27K3HIqdn06@Y`qo-9
zB$<@5M}w5aEm4S2g4ODvO8m5#joXuGN$^&H@l1Wmyd`Oq*C2(mL4%(+U`fCy1RaoCIRG_ad5e8&lWRP_UZ;-tbCmSxXd$
zA-M*68G5J`vO^6yLnhirLNLx)gxSQn$8BqLDN9hnf6~8{Hox^Hl&{hodnsI^b~0HxF<|V;AON4{zx}!nCF_bn|3O_?
zyBl_3Sx*!WdO{(t?LY~pu8)On)T`-1%4YHwSedUtLfdjrGPM?lb+H^@@vkSwwV^Q(
z;U(r-A{Le1v~JfuELLVW7W=k1%9JTrLTSH-vTh4^jp3ZFA6P9f{jxoQV5Ife+ZCXm
zRDLPhk58gWmv%Z2T(Gv+=^Zq1JG(-*!IpaJIeqM<`1++@gv5B3J30p&0)ZOK9p3$A
z^C3|yuy(W?6aNHuxfCN86s$3dj|`A5ufiIVH2Zg#%R8JuHi_{k&B+mZBdrG<%#eIL
z(9Ik`zMNG@AP~%c=6_)YPrM6p!RMP~D+r1G!z5KN_N&6Pdr>AguSp*3@a8`1M{^g(
zy))>MZfUoqBKxn_SWhXrMMVXXv#t%()d?Nilp>W`;Pot;Dq9UsBizRU)veJOOk7G*
zB6$C2Yttx51FA{WON`yy5(G!67jS;A%$QpCjC&q$=R;Ps5bN=KVP|J0-Ktv>sA*-M
z74PGl^0+FEq#tE~81{idRf4gBVr})ak6qB}!sSFR*m>52sWR)-FT~WAS`jU^YFf~g
z@%dK*GSSycv#K5J#F@z7b5N&tciGLHxSlJ&p=&fXVU~<{bqXx2`{-WZ}%#
z%^)vk$(5EZRS4p{O!OVov0fDQ316iOHnOXVeA5e9>uryXzS76qP;HO3MO|!roHfa1
z|LR%Qqzm_+Rh?QmI0jC5sgBY#t}Cz!64Kd%cf-u}Fm;6X9X
z2Uvchzn(JQ&wen>(}YObqn-h1USv
zT?$&fEZOL%(}wfgfEx6LheHpXs0V>;A0OQ8CSYkU(Yi^(tCTZRq;j1b8`koLkth>5
z3r7~MbLUe&FZjlK1s6_U5ESj)&BC;CGuaghHUhlfD*xNGVJ&t
zs^Ko$i%kh-`pe3)^bO$-Nua3^{RiYP5VC4>_nnA#%(N1$@U{d
zdDRjWPJdC$>h^(bbO?r;Do`ERSj>9l4tRlG-%H4$b5&0p$oxms>x_YnFKD$wvRViA
z)~7fwtCNA_Q}z-&Eu6ObPwa2;Q9eXxw&Ro25K0U&^V0-L+
z3K4r|Ygp`LtDff$e;vdZMds6p=P23-hhW$38s=Gb5KP@Hwk9fTB3n(7fDC<-dWh=&
zoSMuhL(9CC>ut`iRt5nfHE
zW43kw@`2iW*%DqCICEnHHEKl2;@NsS2$b&pl`j~&!au7ZMfkGAB0m=ar4N&zmyNZ0
z+;t+{DVx^ew1rb^$;-)7kqD;|WcXIOM%L1C>L6PoUp$sW>i@N6UwklqZQ`UAX6Nf32q1#nT>{cOs4KYGe$g32&qCg*uCW{
zFsJXEZD?OOp%b@6cV_D3KmzSY4+dCqo47}fFYSk);HT|HigR9Ni9<;r7sqWAQi3q!
z4IM$HW1Z>kN!%*R40WluPAugDWORFDaNBn{Pxr2PTdf%2$&I+yBz;;!;Zd_
zY3xLc*W%G%d6-3(oX{))7;zNur)*D(T
zle)lvn}<&IXrM)OWzM2!o-K3C;k)tv`6ob=gxcEHw(wA4C|Ss3NXKz4`O40#TfRz5>keUrQUG+S$gIlq&MBj{
z6~$)I`Dh)X_U5rhihunO=CAME#`ih&AU<;{W^yHNVP!3BDW@35Y%&frd5-GJ!9
zo#UeBK1FSKmpI}m^|j9L=U5q<%z7T^cH~#WT*)YVaBRFBH!RWEw;ujkXJM0+FuU6a
zRazB^Q#FF+soJ)avv^2q+n^V1cH09m(M<7XfBBj)b@6~AR`tGMBdwXb*UrLT#0eDY
zZx(=kLt&bXR-L(}tx<0rZYOWiYTy{hPoA}PpGHV{6}{3)<=xx+>7L`FM|zGr@5-#}
zHbGMm^M@*%$_FWnKq`8lr6*=nCF(X`FYFt2yc;v@GG%i-%
zZ#MUh)E4S4iH_c{*hcerwW-HYhl1UWnNnVXa_YP{kMrmSXMao+b~$wWu?e&aalaIND>8&bF9U4e&$(11MD?<_}Xufl+Ve*sE~x$}?c@
zZ;e7terw_wKI9fBSX@s_(XZoRMW&;aj3x#~(R|l=lcEgH6}I$GHB9d8O4bL0r(SGT
zX>#ima8RuSGc2ahTH_QbWCkF6}$hj~m)4!8-X?ZQ>d
zpv|~iU=%o1$lcf?m7NIJX}vfydV5{6eoiD@dwSz{@Wzb3TEa}{y53Y-b9-Hok`*?5
z{1yB&56|F2MA&GJd|Pa*i*CDmREe({pm|_5asoX)=G;aOCqxMCuv~$#o8C{(d$kb`
zyaT*plaK3{11#-HSxVLiWCf%CcWI!k;nlaZ8Va-ZlEOJ&QxgflHv11Zeb8mLICFLD
zX+~B&s7VCBh^BRQZ|AFMadl)M%s5k8*D@(&BU7WMk?V=CcXxq}_QV=FBWPa-CYivV
zFt@BH)~NwvdJllp1vRfs!&~VMpqaI)i&nGkHP){4xa}{Y=2bK#VV*je9o`eek7FYOq?v>PM7r0h#^Vn~u0x
zXKei*g1%V2bh#!PH~L{p%)TQ>cRCt=f>hbkWOd1Aw%6Cr6~7u+6Tyq-iqwHzb0Ji2
zohv3}!hr}wd#YcA1+%;&b!AXqJIl)ALJ*gnod?9Ypt$!s*k~PpoW8Dxucrq%t^@>f^xb3ci7UX6uh=6-
zAk-3czX=bjw-Yal%kuWI_xS|Uq^~wwh$@~q8B%~%r+@BoZa|Jw?Pqi}4gB#_!THGtj0YPBh_QMz~U)WO-~`wJ1?i&O(Dk}UJ_d`tjC
zn`)P_wT2sNXP;4_SqXzfWJ-Ysmf&tIEszSQ@!*O*ruWw68KkG3)YpZg
zXj?2$J-n-6Z`UA
zCF*PqMBrM#;j><2X^b5GtwH!MJ~h`wThKv(f2sKO;*h8kJ~;P!|9v>+xHW{|&B+|N
zdXNXv-JF>#bLxs_>pl;sAyb@#;XHZ^}GUOC0~hWkn;q*PKuMGZ%w!J
zAha-!dMP7`PBm*Mep)ni=YTVkMd;3=*`QnC{s_l3~Lxu~{K_!9y
z6qdgY8{8{SPMHeMa?p2A5jfBXlmnXHR3Ai_(K~7f0_*tfmj%Tb%=(oOIA!k+*|PQLWx1m4?l?XkPQO
z>N=p5qNT>CaY{o`6P4mEIkUj&=Mn7Vk1NJ*p6u;z_lD7}!bMuRkLgrbYUCTdicyie
z2#8Hmf3%W5oL$?-4h3QkhS9Zzq6k=CRHIC1fHR#3lg%@xf}e<#94u1?M(CbEX=oBF
zsy4jCxq)w*uXq~`_e5MIlETDcYKx5lK7Q256=xB>TbKfHO+`-enUt|OpRH>c*jJDG
z`nBVT)Agl5*LJOqHHqLrSALz{$ISL31P?s&t|4lFF{y4c08FsF>ufpM1e)T3t50BS
zrD-bp+ig6PGOt&nMa1JOEAmSS%&PjDc~)APt7?svK43Ns5O~hx##bH!at|jMy;HiW
z)!xw3s~!#=E;mRfLbSk;)v7T5vPBx==B)V-$}|)kD5>hvyJbe`WUcZID1oxVE$;Qs
zuhXUq5H*D$2x4ustj67uxD?wHBL^_Lo4&KZR{K@DZ|f!i!9s*{N0*C&mwDp
z8w#t%mt@;l*nahQ#0wefwyP>!&h8+8)0ruhy{nf+lY5z^E)bROzY5MDd{yJ-=(Y1A
z#YeR=YVJ}m+r@U<|0g2EyY~$_n+a@8KIk@w#L2AAkjV}p-q3v?zRd86Z3pupG=>cd
z(VuEC1Y0tC9vJF&YXtzO_Ax_?O*PycwTs#)4lC^5;`B|mCaqE1v8?Rknhk5yt;WOI
zt(dFkEuS2!P4`eNw|YENZxO-vAw0e3Wqe8XfD$LB)SeROIn)u+Z*C!~3aDZwgX@=6
z#B&R~eTXqT^AI3d=q!V?cujUs7$>4V@OFtq~vkdElf2q?5q=`eFJz*@CgmIHEw@4(NGI!qm)p
zmtH)0-G(7)Z*^kf1t}t7pn;M+4hfhOyG;8a+!F>r&EN1;RW%cj;CcK8$XSU(SmGSn
zAkOnVodt-4PUP%Tp{?tm-O0>PSg2PAeT`HMYp}S~?3-G5<}L>SS|b^+ML04!AtBZb
z2PeBFvcUARm4(F}Fikr4mvoL%Rv>oz$&NjBhVAfy<7%~)qPLIvJXVX4de)bS%!-+i
zX2_77-)LDTAdrI|^#$C2N2J@BCyhXdKcW2@IYCx`;X3b-ku7nG%ZK5uG
zwCn#G(Xk+?FdE?S=8~E%Ioqp(x51MQ*ukxfU`fA%Mk+skiZ--#9oVj9JzG}6gh003
zGH;UWWINFTO*0XuO=J`&HhZ}ILNWnDGHumc90RYR>+i-sR6)0MLsz|^h}b!@Mt#jm
zQl#iZNRGQ_VWOVeeQ;L5{qgWNtdg54@(LRS@f#Uid@N)`BR3vj4$xeY?Ty@zs+l7)U#$vz17~CiHDfD6YLKspLEB{X?K$kv5a`Ot!vW%!Oi8D
zh0fcWj*m8cboX<$Qs?Hb=*bQ=ZEJp+>k(+SOw^%F|H@UDI;|ZgWJmrJ|u*9aCAN~
zx}F69oxP36>-XC;DQtkRRQT9egu2BaTy(1wpf
zI&HGjXRU1Z{OIjT`6kTrR);}voZZet@MZ^HD0?uZ*51*Z4K^QprWdQ5JPIMn(8}||fv}y2
zt^HSLZR_i~E`oWz*Y#~e^!wT*eK*cjcd)MBS7`EA-yT4g8~c;b?EyNkZxTax<*6Ohg~#qRwc=Eg
z&jjoG*5pR^{us=~6{v~;#8mk*iPfwuOd)p1!1IvhWO*6S>^*P(VAy(RQeM0&TY5+S
zMB=TTy#&gv=_-bK9QB5hT%6w0cL$;1`NZZ5PYK<3_$O`AY0W|leEvOU-SWqdf9!?@
z%{e$leej7hogSNibI9XPWg~)MStS&(`k+b|1B0EyfF`@B~{eHdA*F~iqL1Pu|`1@JM@3qc-f^r*c
zHxRDUUi!$OZA`EjgOv`nTH1%RYszQ?Cj<0TWD-}9g2lcglCs0YhrF*;?yLqLA>k^}!Y8?xoi%k!=_&j&X
z2_gVlJySZ=u=x9Uq^dMWZC-2UAXlRg{o~${u{O8e;d~tMP&1QnFV2#@R(H1VIWjVG
zM%bTDSFfmbLsZLWZ3{&^QrS)qD$09q^5SMyGfB5y9LJaFqS#k+?`xv!5Vm7#=`;Er&y@*oUEp5fl
z=hJX%w`(_+Ti3QZ_2;9}DI3b{bDb;=>=
zj>r2u-}Ph;Vk%%150v3Dcf2diU+g+3u#A3YfhYQ;Q-`N&w04fxuN@0`aEgY(k1>+W
zT3tie`io|=JCs?W4wxLTm*n8!XhFnWa&X-(M-`Oyq1;_B!QXFX@J+@7ch)x566S=r
z%OJk%^FwsAD1E;^tGYE>9TW4I{$bNi09Gy1zBV>I_f$%;L77!(2ZqViIQlH>cB?ca
z+S-WAH;cO{+mBzu-ROEzJ;7$*;4Ggt>%(xY$vnaKneaiVzwEFWtC`e~>Wev_wbALX
zk1fzE{5&yTUR@jubzEesP0h~ZFGWt=a0ZSQMqt;f^aTv5O8u#J(^hpgC!Np+$TdBd
zU4RZWGr(~Bo3cbL-mOyXGtp-KD{^jbu!G?kyERbV<4&_-(>WW1fM|;laU7!gZDU^K
znE|weSC&0@rr9QAQImG6?f`-4_+5G@j{TlczuXzs;S7uRJ{w$OHCw$?2vxKVp5)u4
zN!v^YUBZKeI11(5b`+=Xj0|Nkxv`c|2&_Uk+zwAoMYtfFCNYdTbU_5)GHS^CyM*;K
zMs}79w%2N&-JglK!;JVbLc+HBd_27MRN5r6!e;j!5+o>Ml2S=WCB!?rEq=T7SlRsA
zo_vI+;(|8*QnYavf_%ZP%>@n1-|L83noi;b46QWP_@U~)tmV*lfQZpKLOZ{r(QbtaDM^qap^!gFLC?mQroUp;|wLu}dWSb<73IZ!D-_Nv(9>7pO%EcN$3
z$7G@v?fBkkMjQ9h-5p$=v*Ks(ydm!jUt^mOiP?Bz+e=%5_B^USxAM6{g&7xax=8BMCIn=9%KK~wd%fS3Y;
z=s<;sBGT&dbbS~}zx)h5^mJvt!am8RmiAIpwBd^nbvZ#^p-qASITWgL2``bl>0bi(
zFjQ{&GD_}zGIRATV{V~r(>`7e|6Ichd{kCDg;VJ=;0XIU~|4y!E6vWw&{U;fXKb&n3Hs1&1HfHPvvGo+aucSv@)UrL+_=^x2CKW5a
zEHZn?x&6-orqh|1PYg`CBd)gP*84@YqnL-IVbrhfcZJUAxU0`zEXoLbDs~-YE8?gz
zmG7-I$-YT^>p<(3Deo{NAdhrw_*0HWO!1;JL*FWB=-iMGjgAW#g3F()s}7xVwVg?z
z#3we%5!~M1UrDYk!??-xGAjYFm$A~7^f@{_m4D|{c#zB1$fE(DnF(wd-Hd=zYjvw#
z?~Q%p9M%7I0L+v>+zB1DuD9B2yjZy|&^N0VtRr^NZ#ASQ6M$Ajus2<|b7aU?vCiJ5
z%r*agX-PLrHyd_hmDyW6GQm#&0SiTKO*GoK@QuK}vWK(>#O0aPTz&*vBK5iAgXCx?
zacKy#%cy33J`mfS5n#YQRu^RKMF_8C*|#VUJyN?h=F^2*uAHBCt)CGLgFFE%xuZ00
zd|DcnP9A_bVGfOi)@3pi-=<;cWylF952)HhVkW5&>T6uSHHl~nc=W~EE6jvxDVBNm9^inwS{6d064TcKdFX_cF!a`78Lk^%jB
zB1jLU3g$l8??u0`_^RGtq#gnT)E(v&CIQ8X4t)J)1b;&JM?W$A+X_yR@bb3I<5@<{
zx4I3mJ9((>mnZ?V|{(^nX&lY?_6T9qJRTBKi`0Abc~(rHysq+N1rE=)m)A?G+bo4#?zuZGu#Eq$J>-6&259@I
z`Fld_JkSynso+CwpPB!&2zxK9f33?24u-}0<
zN3CC;&ASYv-ul55;s58Xk%F@-shIlYa!_JrMa~U8-}xNm(Mw`2eVvS1*UZjRK3?6%
zdY(@FS~N4voecj=dLXTi^&dAcV*7U%Z6`I?HTb{?qUdhy*jRLE@%R`gSMTFot*?UL
z)}mTY3Xr}0_~=y_gbo;)
z$5;&iVL!%$Aa=FzNvIYerwpR+GdzbZKElxk~%s&WgtQ?Q%N|(LB2blG1l@
z`}3e<80}LSMVUh*ex;Tx!Y{tPXiT;^Wze6<-ugJU=Vk?U4h}=Xw}IjJYnhKWvy`42
zmgwh|@LAV4132GOMWs(BS8D?netj