diff --git a/antora.yml b/antora.yml index 07823f3..448e154 100644 --- a/antora.yml +++ b/antora.yml @@ -9,8 +9,6 @@ nav: asciidoc: attributes: company: 'DataStax' - product: 'Astra Streaming' - product-short: 'Astra' astra-db: 'Astra DB' astra-ui: 'Astra Portal' astra-url: 'https://astra.datastax.com' @@ -21,7 +19,7 @@ asciidoc: pulsar-version: '3.1' #DO NOT INCLUDE PATCH VERSION .. debezium-version: '1.7' astra-streaming-examples-repo: 'https://raw.githubusercontent.com/datastax/astra-streaming-examples/master' - kafka-for-astra: 'Starlight for Kafka' + starlight-kafka: 'Starlight for Kafka' starlight-rabbitmq: 'Starlight for RabbitMQ' cass: Apache Cassandra cass-short: Cassandra diff --git a/example-guide.adoc b/example-guide.adoc index 507185c..8969c71 100644 --- a/example-guide.adoc +++ b/example-guide.adoc @@ -9,7 +9,7 @@ A guide is a document offering step by step instruction to reach some goal. Guides are technical in nature and tend to make assumptions about the consumer's environment. To help create guides that will work in most environments, please follow these ideas. * *Keep links to a minimum* - when someone is learning a new concept for the first time and xref:README.adoc[every] other xref:README.adoc[word] is linked it xref:README.adoc[makes] things xref:README.adoc[confusing] and hard to get a good flow going. Instead, annotate a word or phrase and provide a "Resources" area at the bottom of the guide. -* *Separate products and runtimes in tabs* - it is common to reach the same result through multiple ways. An example is creating a tenant/namespace/topic in {product} and Luna Streaming. Both have the same result but get there in very different ways. Offer each as a tab and let the consumer choose their path. The step after the tabbed step can assume the consumer has complete the previious step and is in a known state. Runtimes follow the same pattern. Weather one is using Java or C#, they are still creating a {pulsar-short} client to interact with the cluster. Create a single step in the guide with multiple tabs for each runtime. +* *Separate products and runtimes in tabs* - it is common to reach the same result in multiple ways. An example is creating a tenant/namespace/topic in Astra Streaming and Luna Streaming. Both have the same result but get there in very different ways. Offer each as a tab and let the consumer choose their path. The step after the tabbed step can assume the consumer has completed the previous step and is in a known state. Runtimes follow the same pattern. Whether one is using Java or C#, they are still creating a {pulsar-short} client to interact with the cluster. Create a single step in the guide with multiple tabs for each runtime. * *Be thoughtful about the names you use* - if you are leaning a new concept or feature with no background on the product, words matter. Labeling a tab as "Luna Helm" and then referring to it as "{pulsar-short} Helm Chart" are two distinct things to that reader. The author of the document has such deep understanding that they consider those things the same - and technically they are at {company}. But the read isn't from {company}, so be mindful of their context. * *Talk in first person* - humans create the guides and humans consume the guides. Write as if you are paired with your consumer in doing what ever the guide does. Use "we", "us", "you". 
==== diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc index 581aa15..a77c655 100644 --- a/modules/ROOT/nav.adoc +++ b/modules/ROOT/nav.adoc @@ -1,12 +1,11 @@ -.Processing data +.Process data * xref:cdc-for-cassandra:ROOT:cdc-concepts.adoc[Change Data Capture (CDC)] -* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}] +* xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming] -.Migrating to {pulsar} -* xref:use-cases-architectures:starlight/index.adoc[] -* xref:use-cases-architectures:starlight/kafka/index.adoc[] -* xref:use-cases-architectures:starlight/rabbitmq/index.adoc[] -* xref:use-cases-architectures:starlight/jms/index.adoc[] +.Migrate to {pulsar} +* xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka}] +* xref:starlight-for-rabbitmq:ROOT:index.adoc[{starlight-rabbitmq}] +* xref:starlight-for-jms:ROOT:index.adoc[Starlight for JMS] .APIs and References * Connectors diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc index 4d4de4d..7e7adc6 100644 --- a/modules/ROOT/pages/index.adoc +++ b/modules/ROOT/pages/index.adoc @@ -19,7 +19,7 @@ We've included best practices for Apache Pulsar, a full connector reference, and examples for getting the most out of CDC. - xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with {product}] + xref:astra-streaming:getting-started:real-time-data-pipelines-tutorial.adoc[Build real-time data pipelines with Astra Streaming] @@ -106,8 +106,8 @@
 xref:starlight-for-jms::index.adoc[Starlight for JMS]
-xref:starlight-for-kafka::index.adoc[]
-xref:starlight-for-rabbitmq::index.adoc[]
+xref:starlight-for-kafka::index.adoc[Starlight for Kafka]
+xref:starlight-for-rabbitmq::index.adoc[Starlight for RabbitMQ]
diff --git a/modules/functions/pages/astream-functions.adoc b/modules/functions/pages/astream-functions.adoc index 0caf19b..817c164 100644 --- a/modules/functions/pages/astream-functions.adoc +++ b/modules/functions/pages/astream-functions.adoc @@ -4,7 +4,7 @@ Functions are lightweight compute processes that enable you to process each message received on a topic. You can apply custom logic to that message, transforming or enriching it, and then output it to a different topic. -Functions run inside {product} and are therefore serverless. +Functions run inside Astra Streaming and are therefore serverless. You write the code for your function in Java, Python, or Go, then upload the code. It is automatically run for each message published to the specified input topic. @@ -12,13 +12,13 @@ Functions are implemented using https://pulsar.apache.org/docs/en/functions-over [IMPORTANT] ==== -Custom functions require a xref:astra-streaming:operations:astream-pricing.adoc[paid {product} plan]. +Custom functions require a xref:astra-streaming:operations:astream-pricing.adoc[paid Astra Streaming plan]. ==== == Deploy Python functions in a zip file -{product} supports Python-based {pulsar-short} functions. -These functions can be packaged in a zip file and deployed to {product} or {pulsar-short}. +Astra Streaming supports Python-based {pulsar-short} functions. +These functions can be packaged in a zip file and deployed to Astra Streaming or {pulsar-short}. The same zip file can be deployed to either environment. To demonstrate this, the following steps create function configuration YAML file, package all necessary function files as a zip archive, and then use the `pulsar-admin` CLI to deploy the zip. @@ -152,8 +152,8 @@ Replace the following: * `**INPUT_TOPIC_NAME**`: The input topic for the function * `**OUTPUT_TOPIC_NAME**`: The output topic for the function -. Use `pulsar-admin` to deploy the Python zip to {product} or {pulsar-short}. -The command below assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster. If you are using {product}, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information. +. Use `pulsar-admin` to deploy the Python zip to Astra Streaming or {pulsar-short}. +The command below assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster. If you are using Astra Streaming, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information. + [source,console] ---- @@ -163,7 +163,7 @@ bin/pulsar-admin functions create --function-config-file /absolute/path/to/func- . Verify that the function was deployed: + * Go to the {astra-ui} to see your newly deployed function listed under the **Functions** tab for your tenant. -See <> for more information on testing and monitoring your function in {product}. +See <> for more information on testing and monitoring your function in Astra Streaming. * Use the `pulsar-admin` CLI to list functions for a specific tenant and namespace: + [source,bash,subs="+quotes"] @@ -173,8 +173,8 @@ bin/pulsar-admin functions list --tenant **TENANT_NAME** --namespace **NAMESPACE == Deploy Java functions in a JAR file -{product} supports Java-based {pulsar-short} functions which are packaged in a JAR file. -The JAR can be deployed to {product} or {pulsar-short}. +Astra Streaming supports Java-based {pulsar-short} functions which are packaged in a JAR file. 
+The JAR can be deployed to Astra Streaming or {pulsar-short}. The same JAR file can be deployed to either environment. In this example, you'll create a function JAR file using Maven, then use the `pulsar-admin` CLI to deploy the JAR. @@ -289,15 +289,15 @@ userConfig: + [IMPORTANT] ==== -{product} requires the `inputs` topic to have a message schema defined before deploying the function. +Astra Streaming requires the `inputs` topic to have a message schema defined before deploying the function. Otherwise, deployment errors may occur. Use the {astra-ui} to define the message schema for a topic. ==== -. Use the `pulsar-admin` CLI to deploy your function JAR to {product} or {pulsar-short}. +. Use the `pulsar-admin` CLI to deploy your function JAR to Astra Streaming or {pulsar-short}. + The following command assumes you've properly configured the `client.conf` file for `pulsar-admin` commands against your {pulsar-short} cluster. -If you are using {product}, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information. +If you are using Astra Streaming, see xref:astra-streaming:developing:configure-pulsar-env.adoc[] for more information. + [source,bash] ---- @@ -307,7 +307,7 @@ bin/pulsar-admin functions create --function-config-file /absolute/path/to/func . Verify that the function was deployed: + * Go to the {astra-ui} to see your newly deployed function listed under the **Functions** tab for your tenant. -See <> for more information on testing and monitoring your function in {product}. +See <> for more information on testing and monitoring your function in Astra Streaming. * Use the `pulsar-admin` CLI to list functions for a specific tenant and namespace: + [source,bash,subs="+quotes"] @@ -315,9 +315,9 @@ See <> for more information bin/pulsar-admin functions list --tenant **TENANT_NAME** --namespace **NAMESPACE_NAME** ---- -== Add functions in {product} dashboard +== Add functions in Astra Streaming dashboard -Add functions in the **Functions** tab of the {product} dashboard. +Add functions in the **Functions** tab of the Astra Streaming dashboard. . Select *Create Function* to get started. @@ -326,7 +326,7 @@ Add functions in the **Functions** tab of the {product} dashboard. image::astream-name-function.png[Function and Namespace] . Select the file you want to pull the function from and which function you want to use within that file. -{product} generates a list of acceptable classes. +Astra Streaming generates a list of acceptable classes. + image::astream-exclamation-function.png[Exclamation Function] + @@ -416,7 +416,7 @@ If you want to use different topics, change `in`, `out`, and `log` accordingly. . Verify that the response is `Created Successfully!`. This indicates that the function was deployed and ready to run when triggered by incoming messages. + -If the response is `402 Payment Required` with `Reason: only qualified organizations can create functions`, then you must upgrade to a xref:astra-streaming:operations:astream-pricing.adoc[paid {product} plan]. +If the response is `402 Payment Required` with `Reason: only qualified organizations can create functions`, then you must upgrade to a xref:astra-streaming:operations:astream-pricing.adoc[paid Astra Streaming plan]. + You can also verify that a function was created by checking the **Functions** tab or by running `./pulsar-admin functions list --tenant **TENANT_NAME**`. 
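For reference while following the Java deployment steps above, here is a minimal sketch of the kind of function class that gets packaged into the JAR. It assumes the standard `org.apache.pulsar.functions.api` interface; the package and class names are illustrative placeholders, echoing the exclamation function shown in the screenshots.

[source,java]
----
package org.example;

import org.apache.pulsar.functions.api.Context;
import org.apache.pulsar.functions.api.Function;

// A minimal Pulsar function: receives a String from the input topic,
// appends "!", and returns it for publication to the configured output topic.
public class ExclamationFunction implements Function<String, String> {
    @Override
    public String process(String input, Context context) throws Exception {
        return input + "!";
    }
}
----

Once compiled and packaged (for example, with `mvn clean package`), a class like this is what the `--function-config-file` deployment step uploads.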
@@ -512,4 +512,4 @@ A *Function-name Deleted Successfully!* message confirms the function was perman == Next steps -Learn more about developing functions for {product} and {pulsar-short} https://pulsar.apache.org/docs/en/functions-develop/[here]. \ No newline at end of file +Learn more about developing functions for Astra Streaming and {pulsar-short} https://pulsar.apache.org/docs/en/functions-develop/[here]. \ No newline at end of file diff --git a/modules/functions/pages/deploy-in-sink.adoc b/modules/functions/pages/deploy-in-sink.adoc index a1c5516..5e6f718 100644 --- a/modules/functions/pages/deploy-in-sink.adoc +++ b/modules/functions/pages/deploy-in-sink.adoc @@ -5,11 +5,11 @@ Before this update, functions transformed data either after it was written to a This required either an intermediate topic, with additional storage, IO, and latency, or a custom connector. + Now, functions can be deployed at sink creation and apply preprocessing to sink topic writes. + -== Create sink function in {product} +== Create sink function in Astra Streaming Creating a sink function is similar to creating a sink in the {astra-ui}, but with a few additional steps. -. xref:pulsar-io:connectors/index.adoc[Create a sink] as described in the {product} documentation. +. xref:pulsar-io:connectors/index.adoc[Create a sink] as described in the Astra Streaming documentation. . During sink creation, select the transform function you want to run inside the sink. + diff --git a/modules/functions/pages/index.adoc b/modules/functions/pages/index.adoc index d9d14c2..44485b9 100644 --- a/modules/functions/pages/index.adoc +++ b/modules/functions/pages/index.adoc @@ -158,7 +158,7 @@ transform-function-2 ====== [#deploy-as] -== Deploy with {product} +== Deploy with Astra Streaming Deploy transform functions in the *Functions* tab of the {astra-ui}. diff --git a/modules/pulsar-io/pages/connectors/index.adoc b/modules/pulsar-io/pages/connectors/index.adoc index 119d70f..e553573 100644 --- a/modules/pulsar-io/pages/connectors/index.adoc +++ b/modules/pulsar-io/pages/connectors/index.adoc @@ -1,16 +1,16 @@ = Connectors :navtitle: Connector Overview -{product} offers fully-managed {pulsar-reg} connectors. +Astra Streaming offers fully-managed {pulsar-reg} connectors. Create, monitor, and manage both source and sink connectors through our simple UI, the `pulsar-admin` CLI, or RESTful API. Connect popular data sources to {pulsar} topics or sink data from {pulsar-short} topics to popular systems. -Below is a list of {pulsar} source and sink connectors supported by {product}. +Below is a list of {pulsar} source and sink connectors supported by Astra Streaming. [IMPORTANT] ==== -{product} doesn't support custom sink or source connectors. +Astra Streaming doesn't support custom sink or source connectors. ==== [#sink-connectors] @@ -154,7 +154,7 @@ xref:connectors/sources/kinesis.adoc[Kinesis source connector documentation] == Experimental Connectors -{company} is always experimenting with connectors. Below are the connectors currently in development that have not yet been promoted to official support in *{product}*. +{company} is always experimenting with connectors. Below are the connectors currently in development that have not yet been promoted to official support in *Astra Streaming*. To get access to these connectors, contact {support-url}[{company} Support]. @@ -238,7 +238,7 @@ Zeebe + == Listing Sink Connectors -To list available sink connectors in your {product} tenant, use any of the following. 
+To list available sink connectors in your Astra Streaming tenant, use any of the following. [tabs] ==== @@ -288,7 +288,7 @@ curl "$WEB_SERVICE_URL/admin/v3/sinks/builtinsinks" -H "Authorization: $ASTRA_ST == Listing Source Connectors -To list available source connectors in your {product} tenant, use any of the following. +To list available source connectors in your Astra Streaming tenant, use any of the following. [tabs] ==== diff --git a/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc b/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc index 0c2d9a5..6e4adfd 100644 --- a/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/astra-db.adoc @@ -4,7 +4,7 @@ {company} {astra-db} Sink Connector is based on the open-source xref:pulsar-connector:ROOT:index.adoc[{cass-reg} sink connector for {pulsar-reg}]. Depending on how you deploy the connector, it can be used to sink topic messages with a table in {astra-db} or a table in a {cass-short} cluster outside of DB. -The {product} portal provides simple way to connect this sink and a table in {astra-db} with simply a token. Using `pulsar-admin` or the REST API, you can configure the sink to connect with a {cass-short} connection manually. +The Astra Streaming portal provides a simple way to connect this sink to a table in {astra-db} with just a token. Using `pulsar-admin` or the REST API, you can configure the sink to connect with a {cass-short} connection manually. This reference assumes you are manually connecting to a {cass-short} table. diff --git a/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc b/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc index 68979f9..4ea937d 100644 --- a/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/cloud-storage.adoc @@ -141,9 +141,9 @@ include::partial$connectors/sinks/monitoring.adoc[] == Connector Reference -With the Cloud Storage Sink there are two sets of parameters: {product} parameters and cloud storage provider parameters. +With the Cloud Storage Sink there are two sets of parameters: Astra Streaming parameters and cloud storage provider parameters. -=== {product} parameters for Cloud Storage Sink +=== Astra Streaming parameters for Cloud Storage Sink [%header,format=csv,cols="2,1,1,3"] |=== diff --git a/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc b/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc index 416c922..9a776ab 100644 --- a/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/elastic-search.adoc @@ -8,7 +8,7 @@ Use Elasticsearch to store, search, and manage data for logs, metrics, search ba [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://opensearch.org/docs/1.2/clients/java-rest-high-level/[OpenSearch 1.2.4 library] to interact with +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://opensearch.org/docs/1.2/clients/java-rest-high-level/[OpenSearch 1.2.4 library] to interact with Elasticsearch. ==== @@ -33,7 +33,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -44,7 +44,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. 
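Alongside the `pulsar-admin` and REST examples, the same "configs" area can be populated programmatically with the {pulsar-short} Java admin client. The following is a sketch only: the service URL, token, tenant, topic, and Elasticsearch settings are hypothetical placeholders, and it assumes the `builtin://` archive convention used for bundled connectors.

[source,java]
----
import java.util.Collections;
import java.util.Map;

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.api.AuthenticationFactory;
import org.apache.pulsar.common.io.SinkConfig;

public class CreateElasticsearchSink {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("https://<your-web-service-url>")            // placeholder
                .authentication(AuthenticationFactory.token("<your-token>")) // placeholder
                .build()) {

            SinkConfig sinkConfig = SinkConfig.builder()
                    .tenant("my-tenant")
                    .namespace("default")
                    .name("es-sink")
                    .archive("builtin://elastic_search") // reference the built-in connector
                    .inputs(Collections.singletonList(
                            "persistent://my-tenant/default/in-topic"))
                    // These key/value pairs land in the "configs" area described above.
                    .configs(Map.<String, Object>of(
                            "elasticSearchUrl", "https://<es-host>:9200",
                            "indexName", "my-index"))
                    .build();

            // null file name: the archive is a built-in, so no upload is needed.
            admin.sinks().createSink(sinkConfig, null);
        }
    }
}
----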
-The {product} Elasticsearch sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-elasticsearch-sink/#property[connector properties] for a complete list. +The Astra Streaming Elasticsearch sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-elasticsearch-sink/#property[connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc b/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc index 6a0682f..9c7952c 100644 --- a/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/google-bigquery.adoc @@ -22,9 +22,9 @@ include::partial$connectors/sinks/monitoring.adoc[] == Connector Reference -The BigQuery sink has multiple sets of parameters: the {product} parameters, the Kafka Connect Adapter parameters, and the Google BigQuery parameters. Each set of parameters provides a way to coordinate how data will be streamed from {pulsar-short} to BigQuery. +The BigQuery sink has multiple sets of parameters: the Astra Streaming parameters, the Kafka Connect Adapter parameters, and the Google BigQuery parameters. Each set of parameters provides a way to coordinate how data will be streamed from {pulsar-short} to BigQuery. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc index c1cc7c6..061c2c7 100644 --- a/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-clickhouse.adoc @@ -7,7 +7,7 @@ real-time. [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/ClickHouse/clickhouse-jdbc[Clickhouse 0.3.2 library] to interact with Clickhouse. +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/ClickHouse/clickhouse-jdbc[Clickhouse 0.3.2 library] to interact with Clickhouse. ==== == Get Started @@ -26,7 +26,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -37,7 +37,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} JDBC Clickhouse sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector +The Astra Streaming JDBC Clickhouse sink connector supports all configuration properties provided by {pulsar}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list. diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc index dfcca38..e961ff2 100644 --- a/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-mariadb.adoc @@ -8,7 +8,7 @@ Read more about {pulsar-reg}'s JDBC sink connector https://pulsar.apache.org/doc [NOTE] ==== -{product} currently supports {pulsar} {pulsar-version}, which uses the https://mariadb.com/kb/en/about-mariadb-connector-j/[MariaDB Connector/J 2.7.5 library] to interact with MariaDB. 
+Astra Streaming currently supports {pulsar} {pulsar-version}, which uses the https://mariadb.com/kb/en/about-mariadb-connector-j/[MariaDB Connector/J 2.7.5 library] to interact with MariaDB. ==== == Get Started @@ -27,7 +27,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -38,4 +38,4 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list. \ No newline at end of file +The Astra Streaming JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list. \ No newline at end of file diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc index f7f1c8e..66acace 100644 --- a/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-postgres.adoc @@ -10,7 +10,7 @@ The PostgreSQL JDBC Driver is an open source JDBC driver written in Pure Java (T [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://jdbc.postgresql.org/documentation/setup/[PostgreSQL JDBC 42.4.1 library] to interact with PostgreSQL. +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://jdbc.postgresql.org/documentation/setup/[PostgreSQL JDBC 42.4.1 library] to interact with PostgreSQL. ==== == Get Started @@ -29,7 +29,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -40,7 +40,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} JDBC PostgreSQL sink connector supports all configuration properties provided by {pulsar-short}. Refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector +The Astra Streaming JDBC PostgreSQL sink connector supports all configuration properties provided by {pulsar-short}. Refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc b/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc index 7a7ee1c..cec3bd7 100644 --- a/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/jdbc-sqllite.adoc @@ -12,7 +12,7 @@ SQLite JDBC is a library for accessing and creating SQLite database files in Jav [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/xerial/sqlite-jdbc[Xerial 3.8.11.2 library] to interact with SQLite. +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/xerial/sqlite-jdbc[Xerial 3.8.11.2 library] to interact with SQLite. ==== == Get Started @@ -31,7 +31,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. 
-=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -42,7 +42,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} JDBC MariaDB sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector +The Astra Streaming JDBC SQLite sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-jdbc-sink#property[connector properties] for a complete list. diff --git a/modules/pulsar-io/pages/connectors/sinks/kafka.adoc b/modules/pulsar-io/pages/connectors/sinks/kafka.adoc index 59aeaa2..e0d6548 100644 --- a/modules/pulsar-io/pages/connectors/sinks/kafka.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/kafka.adoc @@ -6,7 +6,7 @@ Apache Kafka(R) is an open-source distributed event streaming platform used by t [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library] to interact with +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/apache/kafka/tree/2.7[Kafka 2.7.2 library] to interact with Kafka. ==== @@ -26,7 +26,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -37,7 +37,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} Kafka sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kafka-sink#property[connector properties] for a complete list. +The Astra Streaming Kafka sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kafka-sink#property[connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc b/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc index c9aaa95..a5cf9e5 100644 --- a/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/kinesis.adoc @@ -10,7 +10,7 @@ The Amazon Kinesis Client Library for Java (Amazon KCL) enables Java developers [NOTE] ==== -{product} currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/awslabs/amazon-kinesis-client[Amazon Kinesis 2.2.8 library] and the https://github.com/aws/aws-sdk-java[AWS Java SDK 0.14.0 library] to interact with Kinesis. +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses the https://github.com/awslabs/amazon-kinesis-client[Amazon Kinesis 2.2.8 library] and the https://github.com/aws/aws-sdk-java[AWS Java SDK 0.14.0 library] to interact with Kinesis. ==== == Get Started @@ -29,7 +29,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -40,7 +40,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} Kinesis sink connector supports all configuration properties provided by {pulsar-short}. 
Please refer to the https://pulsar.apache.org/docs/io-kinesis-sink#property[connector properties] for a complete list. +The Astra Streaming Kinesis sink connector supports all configuration properties provided by {pulsar-short}. Please refer to the https://pulsar.apache.org/docs/io-kinesis-sink#property[connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc b/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc index 73ec8e0..75cb8a7 100644 --- a/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc +++ b/modules/pulsar-io/pages/connectors/sinks/snowflake.adoc @@ -22,7 +22,7 @@ include::partial$connectors/sinks/monitoring.adoc[] There are two sets of parameters that support sink connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -33,7 +33,7 @@ include::example$connectors/sinks/astra.csv[] These values are provided in the "configs" area. -The {product} Snowflake sink connector supports all configuration properties provided by {company}. Please refer to the https://github.com/datastax/snowflake-connector#configuration[connector +The Astra Streaming Snowflake sink connector supports all configuration properties provided by {company}. Please refer to the https://github.com/datastax/snowflake-connector#configuration[connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sources/data-generator.adoc b/modules/pulsar-io/pages/connectors/sources/data-generator.adoc index df248e0..469dc0c 100644 --- a/modules/pulsar-io/pages/connectors/sources/data-generator.adoc +++ b/modules/pulsar-io/pages/connectors/sources/data-generator.adoc @@ -8,7 +8,7 @@ The Data Generator source connector creates fake data on an {pulsar-reg} topic u The connector will produce data indefinitely while it is running. -{product} currently supports {pulsar} {pulsar-version}, which uses version 0.5.9 of the jfairy library. +Astra Streaming currently supports {pulsar} {pulsar-version}, which uses version 0.5.9 of the jfairy library. For a reference of the full "Person" class, https://github.com/apache/pulsar/blob/branch-{pulsar-version}/pulsar-io/data-generator/src/main/java/org/apache/pulsar/io/datagenerator/Person.java[view the source]. @@ -28,7 +28,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc index d6966e6..b05e385 100644 --- a/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc +++ b/modules/pulsar-io/pages/connectors/sources/debezium-mongodb.adoc @@ -6,7 +6,7 @@ Debezium’s MongoDB connector tracks a MongoDB replica set or a MongoDB sharded The connector automatically handles the addition or removal of shards in a sharded cluster, changes in membership of each replica set, elections within each replica set, and the resolution of communications problems. -{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported MongoDB versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation]. +Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. 
For a list of supported MongoDB versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation]. == Get Started @@ -24,7 +24,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc index d51dd47..3cf3172 100644 --- a/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc +++ b/modules/pulsar-io/pages/connectors/sources/debezium-mysql.adoc @@ -4,7 +4,7 @@ The Debezium MySQL connector reads the binlog, produces change events for row-level INSERT, UPDATE, and DELETE operations, and emits these change events as messages in an {pulsar-reg} topic. -{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. +Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. == Get Started @@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -33,7 +33,7 @@ include::example$connectors/sources/astra.csv[] These values are provided in the "configs" area. -The {product} MySQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#mysql-connector-properties[Debezium MySQL connector properties] for a complete list. +The Astra Streaming MySQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#mysql-connector-properties[Debezium MySQL connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc index 316faad..2634b47 100644 --- a/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc +++ b/modules/pulsar-io/pages/connectors/sources/debezium-oracle.adoc @@ -6,7 +6,7 @@ Debezium’s Oracle connector captures and records row-level changes that occur The connector can be configured to emit change events for specific subsets of schemas and tables, or to ignore, mask, or truncate values in specific columns. -{product} currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. == Get Started @@ -24,7 +24,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -33,7 +33,7 @@ include::example$connectors/sources/astra.csv[] === Debezium Oracle -The {product} Oracle source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/mysql.html#oracle-connector-properties[Debezium Oracle connector properties] for a complete list. 
+The Astra Streaming Oracle source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/oracle.html#oracle-connector-properties[Debezium Oracle connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc index 26a92cc..f05326d 100644 --- a/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc +++ b/modules/pulsar-io/pages/connectors/sources/debezium-postgres.adoc @@ -4,7 +4,7 @@ The PostgreSQL connector produces a change event for every row-level insert, update, and delete operation that it captures, and sends change event records for each table in a separate {pulsar-reg} topic. -{product} currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported PostgreSQL versions, please refer to the +Astra Streaming currently supports {pulsar} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported PostgreSQL versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation]. == Get Started @@ -23,7 +23,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -34,7 +34,7 @@ include::example$connectors/sources/astra.csv[] These values are provided in the "configs" area. -The {product} PostgreSQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/postgresql.html#postgresql-connector-properties[Debezium PostgreSQL connector properties] for a complete list. +The Astra Streaming PostgreSQL source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/postgresql.html#postgresql-connector-properties[Debezium PostgreSQL connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc b/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc index 28fcf79..1ad9c47 100644 --- a/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc +++ b/modules/pulsar-io/pages/connectors/sources/debezium-sqlserver.adoc @@ -6,7 +6,7 @@ The Debezium SQL Server connector is based on the change data capture feature av The SQL Server capture process monitors designated databases and tables and stores changes into specifically created change tables with stored procedure facades. -{product} currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported SQL Server versions, please refer to the +Astra Streaming currently supports {pulsar-reg} {pulsar-version}, which uses Debezium {debezium-version} libraries. For a list of supported SQL Server versions, please refer to the https://debezium.io/releases/{debezium-version}/[Debezium documentation]. == Get Started @@ -51,7 +51,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. 
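The same pattern works on the source side: the Debezium properties linked above are passed through the source's "configs" map. Again a sketch rather than a drop-in sample: every connection value below is a hypothetical placeholder, and it assumes the bundled MySQL connector is addressable as `builtin://debezium-mysql`.

[source,java]
----
import java.util.Map;

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.api.AuthenticationFactory;
import org.apache.pulsar.common.io.SourceConfig;

public class CreateDebeziumMySqlSource {
    public static void main(String[] args) throws Exception {
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("https://<your-web-service-url>")            // placeholder
                .authentication(AuthenticationFactory.token("<your-token>")) // placeholder
                .build()) {

            SourceConfig sourceConfig = SourceConfig.builder()
                    .tenant("my-tenant")
                    .namespace("default")
                    .name("mysql-cdc")
                    .archive("builtin://debezium-mysql")
                    .topicName("persistent://my-tenant/default/mysql-events")
                    // Debezium connector properties, as documented in the links above.
                    .configs(Map.<String, Object>of(
                            "database.hostname", "<mysql-host>",
                            "database.port", "3306",
                            "database.user", "<user>",
                            "database.password", "<password>",
                            "database.server.name", "my-db-server"))
                    .build();

            // null file name: the archive is a built-in, so no upload is needed.
            admin.sources().createSource(sourceConfig, null);
        }
    }
}
----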
-=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -62,7 +62,7 @@ include::example$connectors/sources/astra.csv[] These values are provided in the "configs" area. -The {product} SQL Server source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/sqlserver.html#sqlserver-connector-properties[Debezium SQL Server connector properties] for a complete list. +The Astra Streaming SQL Server source connector supports all configuration properties provided in Debezium's connector. Please refer to https://debezium.io/documentation/reference/{debezium-version}/connectors/sqlserver.html#sqlserver-connector-properties[Debezium SQL Server connector properties] for a complete list. == What's next? diff --git a/modules/pulsar-io/pages/connectors/sources/kafka.adoc b/modules/pulsar-io/pages/connectors/sources/kafka.adoc index 287f4a9..d27fc37 100644 --- a/modules/pulsar-io/pages/connectors/sources/kafka.adoc +++ b/modules/pulsar-io/pages/connectors/sources/kafka.adoc @@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -60,5 +60,5 @@ The deserializer is set by a specific implementation of https://github.com/apach |=== -The {product} Kafka source connector supports all configuration properties provided by {pulsar}. +The Astra Streaming Kafka source connector supports all configuration properties provided by {pulsar}. For a complete list, see the https://pulsar.apache.org/docs/io-kafka-source#property[Kafka source connector properties]. diff --git a/modules/pulsar-io/pages/connectors/sources/kinesis.adoc b/modules/pulsar-io/pages/connectors/sources/kinesis.adoc index bb601ea..9c5bde9 100644 --- a/modules/pulsar-io/pages/connectors/sources/kinesis.adoc +++ b/modules/pulsar-io/pages/connectors/sources/kinesis.adoc @@ -22,7 +22,7 @@ include::partial$connectors/sources/monitoring.adoc[] There are two sets of parameters that support source connectors. -=== {product} +=== Astra Streaming [%header,format=csv,cols="2,1,1,3"] |=== @@ -71,5 +71,5 @@ If `awsCredentialPluginName` set to empty, the Kinesis sink creates a default AW |=== -The {product} Kinesis source connector supports all configuration properties provided by {pulsar}. +The Astra Streaming Kinesis source connector supports all configuration properties provided by {pulsar}. For a complete list, see the https://pulsar.apache.org/docs/io-kinesis-source#configuration[Kinesis source connector properties]. diff --git a/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc b/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc index 0dab3e4..14ec323 100644 --- a/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc +++ b/modules/pulsar-io/partials/connectors/sinks/monitoring.adoc @@ -261,4 +261,4 @@ Status response for individual connector instance: === Metrics -{product} exposes Prometheus formatted metrics for every connector. Refer to the xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail. \ No newline at end of file +Astra Streaming exposes Prometheus formatted metrics for every connector. Refer to the xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail. 
\ No newline at end of file diff --git a/modules/pulsar-io/partials/connectors/sources/monitoring.adoc b/modules/pulsar-io/partials/connectors/sources/monitoring.adoc index c9e4d67..c3bcf2f 100644 --- a/modules/pulsar-io/partials/connectors/sources/monitoring.adoc +++ b/modules/pulsar-io/partials/connectors/sources/monitoring.adoc @@ -193,4 +193,4 @@ Status response for individual connector instance: === Metrics -{product} exposes Prometheus formatted metrics for every connector. Refer to xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail. \ No newline at end of file +Astra Streaming exposes Prometheus formatted metrics for every connector. Refer to xref:astra-streaming:operations:astream-scrape-metrics.adoc[scrape metrics with Prometheus] page for more detail. \ No newline at end of file diff --git a/modules/subscriptions/partials/subscription-prereq.adoc b/modules/subscriptions/partials/subscription-prereq.adoc index 6cacdae..f5070da 100644 --- a/modules/subscriptions/partials/subscription-prereq.adoc +++ b/modules/subscriptions/partials/subscription-prereq.adoc @@ -6,11 +6,11 @@ To run this example, you'll need: * https://openjdk.java.net/install/[Java OpenJDK 11] -* A configured {product} instance with at least one streaming tenant and one topic. See the xref:astra-streaming:getting-started:index.adoc[{product} quick start] for instructions. +* A configured Astra Streaming instance with at least one streaming tenant and one topic. See the xref:astra-streaming:getting-started:index.adoc[Astra Streaming quick start] for instructions. * A local clone of the https://github.com/datastax/pulsar-subscription-example[{company} {pulsar-short} Subscription Example repository] -* Modify the `src/main/resources/application.properties` in the `pulsar-subscription-example` repo to connect to your {product} cluster, as below: +* Modify the `src/main/resources/application.properties` in the `pulsar-subscription-example` repo to connect to your Astra Streaming cluster, as below: + [source,bash] ---- diff --git a/modules/use-cases-architectures/images/kafka-client-settings.png b/modules/use-cases-architectures/images/kafka-client-settings.png deleted file mode 100644 index 331392e..0000000 Binary files a/modules/use-cases-architectures/images/kafka-client-settings.png and /dev/null differ diff --git a/modules/use-cases-architectures/images/pulsar-client-settings.png b/modules/use-cases-architectures/images/pulsar-client-settings.png deleted file mode 100644 index 7830eba..0000000 Binary files a/modules/use-cases-architectures/images/pulsar-client-settings.png and /dev/null differ diff --git a/modules/use-cases-architectures/images/rabbitmq-client-settings.png b/modules/use-cases-architectures/images/rabbitmq-client-settings.png deleted file mode 100644 index c5b33ae..0000000 Binary files a/modules/use-cases-architectures/images/rabbitmq-client-settings.png and /dev/null differ diff --git a/modules/use-cases-architectures/images/s4k-architecture.png b/modules/use-cases-architectures/images/s4k-architecture.png deleted file mode 100644 index 47af0ce..0000000 Binary files a/modules/use-cases-architectures/images/s4k-architecture.png and /dev/null differ diff --git a/modules/use-cases-architectures/images/s4r-architecture.png b/modules/use-cases-architectures/images/s4r-architecture.png deleted file mode 100644 index da4692b..0000000 Binary files a/modules/use-cases-architectures/images/s4r-architecture.png and /dev/null differ diff --git 
a/modules/use-cases-architectures/pages/starlight/index.adoc b/modules/use-cases-architectures/pages/starlight/index.adoc deleted file mode 100644 index 2ca23cd..0000000 --- a/modules/use-cases-architectures/pages/starlight/index.adoc +++ /dev/null @@ -1,23 +0,0 @@ -= {company} Starlight Suite of {pulsar-reg} Extensions -:navtitle: Starlight Extensions - -The Starlight suite of extensions is a collection of {pulsar-reg} protocol handlers that extend an existing {pulsar-short} cluster. -The goal of all the extensions is to create a native, seamless interaction with a {pulsar-short} cluster using existing tooling and clients. - -== {kafka-for-astra} - -{kafka-for-astra} brings native Apache Kafka(R) protocol support to {pulsar} by introducing a Kafka protocol handler on {pulsar-short} brokers. - -xref:use-cases-architectures:starlight/kafka/index.adoc[Get started now] | xref:starlight-for-kafka:ROOT:index.adoc[Configuring] | https://github.com/datastax/starlight-for-kafka[Source Code] - -== {starlight-rabbitmq} - -{starlight-rabbitmq} combines the industry-standard AMQP 0.9.1 (RabbitMQ) API with the cloud-native and horizontally scalable {pulsar-short} streaming platform, providing a powerful way to modernize your RabbitMQ infrastructure, improve performance, and reduce costs. - -xref:use-cases-architectures:starlight/rabbitmq/index.adoc[Get started now] | xref:starlight-for-rabbitmq:ROOT:index.adoc[Configuring] | https://github.com/datastax/starlight-for-rabbitmq[Source Code] - -== Starlight for JMS - -Starlight for JMS allows enterprises to take advantage of the scalability and resiliency of a modern streaming platform to run their existing JMS applications. Because {pulsar-short} is open-source and cloud-native, Starlight for JMS enables enterprises to move their JMS applications to run on-premises and in any cloud environment. - -xref:use-cases-architectures:starlight/jms/index.adoc[Get started now] | xref:starlight-for-jms:ROOT:index.adoc[Configuring] | https://github.com/datastax/pulsar-jms[Source Code] \ No newline at end of file diff --git a/modules/use-cases-architectures/pages/starlight/jms/index.adoc b/modules/use-cases-architectures/pages/starlight/jms/index.adoc deleted file mode 100644 index 2758aae..0000000 --- a/modules/use-cases-architectures/pages/starlight/jms/index.adoc +++ /dev/null @@ -1,174 +0,0 @@ -= Getting started with Starlight for JMS -:navtitle: Starlight for JMS -:description: Learn how to get started using the Starlight for JMS API and get hands on with a producer and consumer interacting with a topic. - -Starlight for JMS is a highly compliant JMS implementation designed to run on a modern streaming platform. -This guide will get you up and running with a simple Java JMS client that can talk to an {pulsar-reg} streaming instance. - -== Prerequisites - -To get started, you need the following: - -* A working {pulsar-short} cluster. -* Access to the cluster's admin port 8080 and the binary port 6650. - -This guide uses {product} to get started with Starlight for JMS. -For more information, see the xref:starlight-for-jms:ROOT:index.adoc[Starlight for JMS documentation]. - -[tabs] -==== -{product}:: -+ --- - -If you don't have a tenant in {product}, follow our "xref:astra-streaming:getting-started:index.adoc[]" guide. - --- -Luna Streaming:: -+ --- -Follow the "xref:luna-streaming:install-upgrade:quickstart-helm-installs.adoc[]" guide to get a cluster going. --- -Self Managed:: -+ --- -Using a standalone cluster? 
The Starlight for JMS docs provide the "xref:starlight-for-jms:jms-migration:pulsar-jms-quickstart-sa.adoc[]" guide. --- -==== - -== Messaging with Starlight for JMS - -=== Retrieve connection properties in {product} - -. In the {product} portal "Connect" tab, the "{pulsar-short}" area provides important connection information. - -. Scroll down to the "Tenant Details" area to find your {pulsar-short} connection information. -+ -image:pulsar-client-settings.png[] - -=== Produce and consume a message - -This example uses Maven for the project structure. -If you prefer Gradle or another tool, this code should still be a good fit. - -For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository]. - -. Create a new Maven project. -+ -[source,shell] ----- -mvn archetype:generate \ - -DgroupId=org.example \ - -DartifactId=StarlightForJMSClient \ - -DarchetypeArtifactId=maven-archetype-quickstart \ - -DinteractiveMode=false - -cd StarlightForJMSClient ----- - -. Open the new project in your favorite IDE or text editor and add the jms dependency to "pom.xml". -+ -[source,xml] ----- - - com.datastax.oss - pulsar-jms-all - 1.0.0 - ----- - -. Open the file `src/main/java/org/example/App.java`, and then enter the following contents. -If you cloned the example repository, replace the entire contents of the file with the following code. -Your editor will report an error because this isn't a complete script yet. -+ -Replace placeholders with the values you previously retrieved from {product}. -+ -[source,java] ----- -package org.example; - -import com.datastax.oss.pulsar.jms.PulsarConnectionFactory; - -import javax.jms.JMSContext; -import javax.jms.Message; -import javax.jms.MessageListener; -import javax.jms.Queue; -import java.util.HashMap; -import java.util.Map; - -public class App -{ - private static String webServiceUrl = ""; - private static String brokerServiceUrl = ""; - private static String pulsarToken = ""; - private static String tenantName = ""; - private static final String namespace = ""; - private static final String topicName = ""; - private static final String topic = String.format("persistent://%s/%s/%s", tenantName,namespace,topicName); - public static void main( String[] args ) throws Exception - { ----- - -. Add the following code to build the configuration that will be used by both the producer and consumer: -+ -[source,java] ----- - Map properties = new HashMap<>(); - properties.put("webServiceUrl",webServiceUrl); - properties.put("brokerServiceUrl",brokerServiceUrl); - properties.put("authPlugin","org.apache.pulsar.client.impl.auth.AuthenticationToken"); - properties.put("authParams",pulsarToken); ----- - -. Add the following code that defines a simple 'PulsarConnectionFactory' that creates a JMS queue using the full {pulsar-short} topic address, then creates a message listener callback function that watches the queue, and then produces a single message on the queue. 
-+ -[source,java] ----- - try (PulsarConnectionFactory factory = new PulsarConnectionFactory(properties); ){ - JMSContext context = factory.createContext(); - Queue queue = context.createQueue(topic); - - context.createConsumer(queue).setMessageListener(new MessageListener() { - @Override - public void onMessage(Message message) { - try { - System.out.println("Received: " + message.getBody(String.class)); - } catch (Exception err) { - err.printStackTrace(); - } - } - }); - - String message = "Hello there!"; - System.out.println("Sending: "+message); - context.createProducer().send(queue, message); - - Thread.sleep(4000); //wait for the message to be consumed - } - } -} ----- - -. Build and run a JAR file for this program: -+ -[source,shell] ----- -mvn clean package assembly:single -java -jar target/StarlightForJMSClient-1.0-SNAPSHOT-jar-with-dependencies.jar ----- -+ -.Result -[%collapsible] -==== -[source,console] ----- -Sending: Hello there! -Received: Hello there! ----- -==== - -== Next steps - -* xref:starlight-for-jms:examples:pulsar-jms-implementation.adoc[] -* xref:starlight-for-jms:reference:pulsar-jms-mappings.adoc[] -* xref:starlight-for-jms:reference:pulsar-jms-reference.adoc[] \ No newline at end of file diff --git a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc b/modules/use-cases-architectures/pages/starlight/kafka/index.adoc deleted file mode 100644 index e7a18cd..0000000 --- a/modules/use-cases-architectures/pages/starlight/kafka/index.adoc +++ /dev/null @@ -1,275 +0,0 @@ -= Getting started with the {kafka-for-astra} extension -:navtitle: {kafka-for-astra} -:description: Learn how to get started using the {kafka-for-astra} extension with {pulsar-reg} and get hands on with Kafka producer and consumer interacting with a topic. - -{kafka-for-astra} brings the native Apache Kafka(R) protocol support to {pulsar-reg} by introducing a Kafka protocol handler on {pulsar-short} brokers. By adding the {kafka-for-astra} protocol handler to your existing {pulsar-short} cluster, you can migrate your existing Kafka applications and services to {pulsar-short} without modifying the code. - -If source code is your thing, visit the https://github.com/datastax/starlight-for-kafka[project's repo on GitHub]. - -== Architecture reference - -If you would like to get deep into how {kafka-for-astra} works, xref:starlight-for-kafka:ROOT:index.adoc[read the docs]. - -image:s4k-architecture.png[{kafka-for-astra} Architecture] - -== Establishing the Kafka protocol handler - -Before a Kafka client can interact with your {pulsar-short} cluster, you need the {kafka-for-astra} protocol handler installed in the cluster. -Installation looks a bit different depending on where your {pulsar-short} cluster is running. -Choose the option that best fits your needs. - -[tabs] -==== -{product}:: -+ --- - -If you want a working Kafka extension as quickly as possible, this is your best bet. -This is also a good option for those that already have a streaming tenant and are looking to extend it. - -. Sign in to your {product-short} account and navigate to your streaming tenant. -+ -TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide. - -. Go to the "Connect" tab and choose the "Kafka" option. - -. Click "Enable Kafka". - -. A message will let you know of the additions (and restrictions) that come with using {kafka-for-astra}. - -. Select the "Enable Kafka" button to confirm your understanding. - -Your {product} tenant is ready for prime time! 
---
-
-Kafka Client (Java)::
-+
---
-This example uses Maven for the project structure.
-If you prefer Gradle or another tool, this code should still be a good fit.
-
-For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository].
-
-. Create a new Maven project.
-+
-[source,shell]
-----
-mvn archetype:generate \
-    -DgroupId=org.example \
-    -DartifactId=StarlightForKafkaClient \
-    -DarchetypeArtifactId=maven-archetype-quickstart \
-    -DinteractiveMode=false
-
-cd StarlightForKafkaClient
-----
-
-. Open the new project in your IDE or text editor, and then add the Kafka client dependency to `pom.xml`:
-+
-[source,xml]
-----
-<dependency>
-    <groupId>org.apache.kafka</groupId>
-    <artifactId>kafka-clients</artifactId>
-    <version>3.3.1</version>
-</dependency>
-----
-
-. Open the file `src/main/java/org/example/App.java`, and then enter the following code.
-If you cloned the example repo, replace the entire contents of `App.java` with the following code.
-Your editor will report an error because this isn't a complete program yet.
-+
-Replace the placeholders with the values you previously retrieved from {product}.
-+
-[source,java]
-----
-package org.example;
-
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.producer.*;
-import org.apache.kafka.common.serialization.LongSerializer;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.apache.kafka.common.serialization.StringSerializer;
-
-import java.time.Duration;
-import java.util.Collections;
-import java.util.Properties;
-
-public class App {
-    private static String bootstrapServers = "";
-    private static String pulsarToken = "";
-    private static String tenantName = "";
-    private static final String namespace = "";
-    private static final String topicName = "";
-    private static final String topic = String.format("persistent://%s/%s/%s", tenantName, namespace, topicName);
-
-    public static void main(String[] args) {
-----
-
-. Add the following code, which builds the configuration that both the producer and consumer will use:
-+
-[source,java]
-----
-        Properties config = new Properties();
-        config.put("bootstrap.servers", bootstrapServers);
-        config.put("security.protocol", "SASL_SSL");
-        config.put("sasl.jaas.config", String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username='%s' password='token:%s';", tenantName, pulsarToken));
-        config.put("sasl.mechanism", "PLAIN");
-        config.put("session.timeout.ms", "45000");
-        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
-        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
-        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
-        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
-        config.put("group.id", "my-consumer-group");
-----
-
-. Add the producer code, which is a simple flow that sends a single message and awaits acknowledgment:
-+
-[source,java]
-----
-        KafkaProducer<Long, String> producer = new KafkaProducer<>(config);
-
-        final ProducerRecord<Long, String> producerRecord = new ProducerRecord<>(topic, System.currentTimeMillis(), "Hello World");
-        producer.send(producerRecord, new Callback() {
-            public void onCompletion(RecordMetadata metadata, Exception e) {
-                if (e != null)
-                    System.out.println(String.format("Send failed for record, %s. \nRecord data: %s", e.getMessage(), producerRecord));
-                else
-                    System.out.println("Successfully sent message");
-            }
-        });
-
-        producer.flush();
-        producer.close();
-----
-
-. Add the consumer code, which creates a basic subscription and retrieves the latest messages on the topic:
-+
-[source,java]
-----
-        // Note: keys were written with LongSerializer; StringDeserializer renders the raw key bytes as text.
-        final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config);
-
-        consumer.subscribe(Collections.singletonList(topic));
-        ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(5000));
-
-        System.out.println(String.format("Found %d total record(s)", consumerRecords.count()));
-
-        for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
-            System.out.println(consumerRecord);
-        }
-
-        consumer.commitSync();
-        consumer.close();
-    }
-}
-----
-
-. Build and run a JAR file for the complete program:
-+
-[source,shell]
-----
-mvn clean package assembly:single
-java -jar target/StarlightForKafkaClient-1.0-SNAPSHOT-jar-with-dependencies.jar
-----
-+
-.Result
-[%collapsible]
-====
-[source,shell]
-----
-Successfully sent message
-
-Found 1 total record(s)
-ConsumerRecord(topic = persistent://my-tenant-007/my-namespace/my-topic, partition = 0, leaderEpoch = null, offset = 22, CreateTime = 1673545962124, serialized key size = 8, serialized value size = 11, headers = RecordHeaders(headers = [], isReadOnly = false), key = xxxxx, value = Hello World)
-----
-====
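-
-The consumer above polls once and exits.
-In a long-running service you'd typically poll in a loop; here's a minimal sketch of ours (not from the example repository) that reuses the same `config` and `topic`:
-
-[source,java]
-----
-        // Poll indefinitely, printing each record and committing offsets after each batch.
-        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config)) {
-            consumer.subscribe(Collections.singletonList(topic));
-            while (true) {
-                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
-                for (ConsumerRecord<String, String> record : records) {
-                    System.out.printf("offset=%d value=%s%n", record.offset(), record.value());
-                }
-                consumer.commitSync();
-            }
-        }
-----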
---
-======
-
-== See also
-
-* xref:starlight-for-kafka:operations:starlight-kafka-kstreams.adoc[]
-* xref:starlight-for-kafka:operations:starlight-kafka-implementation.adoc[]
-* xref:starlight-for-kafka:operations:starlight-kafka-monitor.adoc[]
-* xref:starlight-for-kafka:operations:starlight-kafka-security.adoc[]
\ No newline at end of file
diff --git a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc b/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
deleted file mode 100644
index fa704ec..0000000
--- a/modules/use-cases-architectures/pages/starlight/rabbitmq/index.adoc
+++ /dev/null
@@ -1,206 +0,0 @@
-= Getting started with the {starlight-rabbitmq} extension
-:navtitle: {starlight-rabbitmq}
-:description: Learn how to get started using the {starlight-rabbitmq} extension with {pulsar-short} and get hands-on by publishing and consuming messages from a topic.
-
-{starlight-rabbitmq} acts as a proxy between your https://www.rabbitmq.com/[RabbitMQ] application and https://pulsar.apache.org/[{pulsar-reg}] cluster.
-It implements the AMQP 0.9.1 protocol used by RabbitMQ clients and translates AMQP frames and concepts to {pulsar-short} concepts.
-
-If source code is your thing, visit the https://github.com/datastax/starlight-for-rabbitmq[project's repo on GitHub].
-
-== Architecture reference
-
-If you want to dive deep into how {starlight-rabbitmq} works, xref:starlight-for-rabbitmq:ROOT:index.adoc[read the documentation].
-
-image:s4r-architecture.png[{starlight-rabbitmq} Architecture]
-
-== Establishing the RabbitMQ protocol handler
-
-Before you can use a RabbitMQ client to interact with your {pulsar-short} cluster, you need the {starlight-rabbitmq} protocol handler installed in the cluster.
-Installation looks a bit different depending on where your {pulsar-short} cluster is running.
-Choose the option that best fits your needs.
-
-[tabs]
-====
-{product}::
-+
---
-If you want a working RabbitMQ extension as quickly as possible, this is your best bet. This is also a good option for those that already have a streaming tenant and are looking to extend it.
-
-. Sign in to your {product-short} account and navigate to your streaming tenant.
-+
-TIP: Don't have a streaming tenant? Follow our "xref:astra-streaming:getting-started:index.adoc[]" guide.
-
Go to the "Connect" tab and choose the "RabbitMQ" option. - -. Click "Enable RabbitMQ". - -. A message will let you know of the additions (and restrictions) that come with using {starlight-rabbitmq}. - -. Click the "Enable RabbitMQ" button to confirm your understanding. - -Your {product} tenant is ready for prime time! Continue to the next section of the guide to see it in action. --- -Luna Streaming:: -+ --- -The {starlight-rabbitmq} extension is included in the `luna-streaming-all` image used to deploy a Luna cluster. The Luna helm chart makes deploying the Kafka extension quite easy. Follow the "xref:luna-streaming:components:starlight-for-rabbitmq.adoc[]" guide to create a simple {pulsar-short} cluster with the {starlight-rabbitmq} extension ready for use. --- -Self Managed:: -+ --- -Already have your own {pulsar-short} Cluster? Or maybe you're using a standalone cluster? {starlight-rabbitmq} can easily be a part of that cluster! Follow the "xref:starlight-for-rabbitmq:installation:getting-started.adoc[]" guide. --- -==== - -== Messaging with {starlight-rabbitmq} - -{starlight-rabbitmq} supports quite a few different use cases. -With a {pulsar-short} cluster between publishers and consumers you can interchange the type of publisher and consumer to fit your needs. - -*The below examples are using an {product} tenant as the AMQP listener.* If you are using Luna Streaming or a self-managed tenant, switch the listener URL for your own. - -=== Retrieve RabbitMQ connection properties in {product} - -In the {product} portal "Connect" tab, the "RabbitMQ" area provides important connection information. -You will need this connection information to create a working RabbitMQ client or use the CLI. - -image:rabbitmq-client-settings.png[{product} RabbitMQ settings] - -TIP: Click the clipboard icon to copy the RabbitMQ connection values, as well as a working token to paste in code. - -=== Produce and consume a message - -This example uses Maven for the project structure for a Rabbit MQ Java client. -If you prefer Gradle or another tool, this code should still be a good fit. - -For complete source code examples, see the https://github.com/datastax/astra-streaming-examples[{product} examples repository]. - -. Create a new Maven project. -+ -[source,shell] ----- -mvn archetype:generate \ - -DgroupId=org.example \ - -DartifactId=StarlightForRabbitMqClient \ - -DarchetypeArtifactId=maven-archetype-quickstart \ - -DinteractiveMode=false - -cd StarlightForRabbitMqClient ----- - -. Open the new project in your IDE or text editor, and then add the RabbitMQ client dependency to `pom.xml`: -+ -[source,xml] ----- - - com.rabbitmq - amqp-client - 5.16.0 - ----- - -. Open the file `src/main/java/org/example/App.java`, and then enter the following code. -If you cloned the example repo, replace the entire contents with the following code. -Your editor will report errors because this isn't a complete program yet. -+ -Replace placeholders with the values you previously retrieved from {product}. 
-
-== Next steps
-
-* xref:starlight-for-rabbitmq:ROOT:index.adoc[{starlight-rabbitmq} documentation]
-* xref:luna-streaming:components:starlight-for-rabbitmq.adoc[]
\ No newline at end of file