From 1d98f4f1590076a712c4a2dfe5897868c57658fa Mon Sep 17 00:00:00 2001 From: atovpeko Date: Wed, 5 Nov 2025 17:08:35 +0200 Subject: [PATCH 01/13] draft --- docs.json | 24 +- .../code/start-coding-with-tigerdata.mdx | 44 +- .../connectors/destination/tigerlake.mdx | 348 +++++++- .../connectors/source/stream-from-kafka.mdx | 223 +++++ .../connectors/source/sync-from-postgres.mdx | 568 +++++++++++- .../connectors/source/sync-from-s3.mdx | 162 +++- integrations/find-connection-details.mdx | 91 ++ integrations/index.mdx | 259 ------ integrations/integrate/amazon-sagemaker.mdx | 78 +- integrations/integrate/apache-airflow.mdx | 150 +--- integrations/integrate/apache-kafka.mdx | 195 +--- integrations/integrate/aws-lambda.mdx | 209 +---- integrations/integrate/aws.mdx | 37 +- integrations/integrate/azure-data-studio.mdx | 54 +- integrations/integrate/cloudwatch.mdx | 36 +- .../integrate/corporate-data-center.mdx | 40 +- integrations/integrate/datadog.mdx | 149 +--- integrations/integrate/dbeaver.mdx | 50 +- integrations/integrate/debezium.mdx | 91 +- integrations/integrate/decodable.mdx | 76 +- .../integrate/find-connection-details.mdx | 77 -- integrations/integrate/fivetran.mdx | 93 +- integrations/integrate/google-cloud.mdx | 44 +- integrations/integrate/grafana.mdx | 187 +--- integrations/integrate/kubernetes.mdx | 41 +- integrations/integrate/microsoft-azure.mdx | 43 +- integrations/integrate/pgadmin.mdx | 44 +- integrations/integrate/postgresql.mdx | 13 +- integrations/integrate/power-bi.mdx | 52 +- integrations/integrate/prometheus.mdx | 14 +- integrations/integrate/psql.mdx | 265 +----- integrations/integrate/qstudio.mdx | 53 +- integrations/integrate/supabase.mdx | 271 +----- integrations/integrate/tableau.mdx | 32 +- integrations/integrate/telegraf.mdx | 4 + integrations/integrate/terraform.mdx | 192 ++-- integrations/integrations.mdx | 658 +++++++++++--- .../migration/migrate-with-downtime.mdx | 39 - integrations/troubleshooting.mdx | 30 +- snippets/changes/_not-supported-for-azure.mdx | 5 + snippets/coding/_start-coding-golang.mdx | 2 +- snippets/coding/_start-coding-java.mdx | 1 + snippets/coding/_start-coding-node.mdx | 1 + snippets/coding/_start-coding-python.mdx | 2 +- snippets/coding/_start-coding-ruby.mdx | 2 +- .../_integration-prereqs-cloud-only.mdx | 7 + .../integrations/_livesync-limitations.mdx | 42 + .../integrations/_livesync-prereqs-cloud.mdx | 14 + .../_livesync-prereqs-terminal.mdx | 30 + .../code/_start-coding-golang.mdx | 840 ++++++++++++++++++ .../integrations/code/_start-coding-java.mdx | 574 ++++++++++++ .../integrations/code/_start-coding-node.mdx | 329 +++++++ .../code/_start-coding-python.mdx | 378 ++++++++ .../integrations/code/_start-coding-ruby.mdx | 394 ++++++++ ...ate-hypertable-columnstore-policy-note.mdx | 20 + .../_kubernetes-install-self-hosted.mdx | 169 ++++ snippets/vars.mdx | 2 +- 57 files changed, 5003 insertions(+), 2845 deletions(-) create mode 100644 integrations/connectors/source/stream-from-kafka.mdx create mode 100644 integrations/find-connection-details.mdx delete mode 100644 integrations/index.mdx delete mode 100644 integrations/integrate/find-connection-details.mdx create mode 100644 integrations/integrate/telegraf.mdx delete mode 100644 integrations/migration/migrate-with-downtime.mdx create mode 100644 snippets/changes/_not-supported-for-azure.mdx create mode 100644 snippets/integrations/_integration-prereqs-cloud-only.mdx create mode 100644 snippets/integrations/_livesync-limitations.mdx create mode 100644 
snippets/integrations/_livesync-prereqs-cloud.mdx create mode 100644 snippets/integrations/_livesync-prereqs-terminal.mdx create mode 100644 snippets/integrations/code/_start-coding-golang.mdx create mode 100644 snippets/integrations/code/_start-coding-java.mdx create mode 100644 snippets/integrations/code/_start-coding-node.mdx create mode 100644 snippets/integrations/code/_start-coding-python.mdx create mode 100644 snippets/integrations/code/_start-coding-ruby.mdx create mode 100644 snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx create mode 100644 snippets/procedures/_kubernetes-install-self-hosted.mdx diff --git a/docs.json b/docs.json index 7515571..7d3b3c0 100644 --- a/docs.json +++ b/docs.json @@ -328,7 +328,10 @@ "groups": [ { "group": " ", - "pages": ["integrations/integrations"] + "pages": [ + "integrations/integrations", + "integrations/find-connection-details" + ] }, { "group": "Destination connectors", @@ -338,13 +341,21 @@ "group": "Source connectors", "pages": [ "integrations/connectors/source/sync-from-postgres", - "integrations/connectors/source/sync-from-s3" + "integrations/connectors/source/sync-from-s3", + "integrations/connectors/source/stream-from-kafka" ] }, { "group": "Coding", "pages": ["integrations/code/start-coding-with-tigerdata"] }, + { + "group": "Business intelligence and data visualization", + "pages": [ + "integrations/integrate/power-bi", + "integrations/integrate/tableau" + ] + }, { "group": "Configuration and deployment", "pages": [ @@ -374,7 +385,7 @@ "integrations/integrate/datadog", "integrations/integrate/grafana", "integrations/integrate/prometheus", - "integrations/integrate/tableau" + "integrations/integrate/telegraf" ] }, { @@ -385,7 +396,8 @@ "integrations/integrate/pgadmin", "integrations/integrate/postgresql", "integrations/integrate/psql", - "integrations/integrate/qstudio" + "integrations/integrate/qstudio", + "integrations/integrate/supabase" ] }, { @@ -396,6 +408,10 @@ "integrations/integrate/google-cloud", "integrations/integrate/microsoft-azure" ] + }, + { + "group": " ", + "pages": ["integrations/troubleshooting"] } ] }, diff --git a/integrations/code/start-coding-with-tigerdata.mdx b/integrations/code/start-coding-with-tigerdata.mdx index 362f6ee..013f93a 100644 --- a/integrations/code/start-coding-with-tigerdata.mdx +++ b/integrations/code/start-coding-with-tigerdata.mdx @@ -1,43 +1,44 @@ --- title: Start coding with Tiger Data -description: Integrate Tiger Cloud with your app using your preferred programming language. Connect to a service, - create and manage hypertables, then and ingest and query data -products: [cloud, self_hosted, mst] +description: Integrate Tiger Cloud with your app using your preferred programming language. 
Connect to a service, create and manage hypertables, then ingest and query data --- -import StartCodingRuby from "/snippets/coding/_start-coding-ruby.mdx"; -import StartCodingPython from "/snippets/coding/_start-coding-python.mdx"; -import StartCodingNode from "/snippets/coding/_start-coding-node.mdx"; -import StartCodingGoLang from "/snippets/coding/_start-coding-golang.mdx"; -import StartCodingJava from "/snippets/coding/_start-coding-java.mdx"; +import StartCodingRuby from '/snippets/integrations/code/_start-coding-ruby.mdx'; +import StartCodingPython from '/snippets/integrations/code/_start-coding-python.mdx'; +import StartCodingNode from '/snippets/integrations/code/_start-coding-node.mdx'; +import StartCodingGoLang from '/snippets/integrations/code/_start-coding-golang.mdx'; +import StartCodingJava from '/snippets/integrations/code/_start-coding-java.mdx'; +Easily integrate your app with Tiger Cloud or self-hosted TimescaleDB. Use your favorite programming language to connect to your +service, create and manage hypertables, then ingest and query data. -Easily integrate your app with {CLOUD_LONG} or {SELF_LONG}. Use your favorite programming language to connect to your -{SERVICE_LONG}, create and manage hypertables, then ingest and query data. + - - - + - + + - + + - + + - + + @@ -45,10 +46,5 @@ Easily integrate your app with {CLOUD_LONG} or {SELF_LONG}. Use your favorite pr - - -You are not limited to these languages. {CLOUD_LONG} is based on {PG}, you can interface -with {TIMESCALE_DB} and {CLOUD_LONG} using any [{PG} client driver][postgres-drivers]. - - -[postgres-drivers]: https://wiki.postgresql.org/wiki/List_of_drivers +You are not limited to these languages. Tiger Cloud is based on {PG}, you can interface +with TimescaleDB and Tiger Cloud using any [{PG} client driver](https://wiki.postgresql.org/wiki/List_of_drivers). \ No newline at end of file diff --git a/integrations/connectors/destination/tigerlake.mdx b/integrations/connectors/destination/tigerlake.mdx index 31725a6..6d1af12 100644 --- a/integrations/connectors/destination/tigerlake.mdx +++ b/integrations/connectors/destination/tigerlake.mdx @@ -1,8 +1,346 @@ --- -title: Connect service data to your data lake -description: Simulate and analyze a transport dataset in your Tiger Cloud service -products: [cloud, mst, self_hosted] -keywords: [IoT, simulate] +title: Integrate with data lakes +description: Unify your Tiger Cloud operational architecture with data lakes --- -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +import { LAKE_LONG, LAKE_SHORT, SERVICE_SHORT, HYPERTABLE, HYPERTABLE_CAP, CONSOLE, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +{LAKE_LONG} enables you to build real-time applications alongside efficient data pipeline management within a single +system. {LAKE_LONG} unifies the {CLOUD_LONG} operational architecture with data lake architectures. + +![Tiger Lake architecture](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-lake-integration-tiger.svg) + +{LAKE_LONG} is a native integration enabling synchronization between {HYPERTABLE}s and relational tables +running in {SERVICE_LONG}s to Iceberg tables running in [Amazon S3 Tables](https://aws.amazon.com/s3/features/tables/) in your AWS account. + + +Tiger Lake is currently in private beta. 
Please contact us to request access. + + +## Prerequisites + + + + + +## Integrate a data lake with your {SERVICE_LONG} + +To connect a {SERVICE_LONG} to your data lake: + + + + + +1. **Set the AWS region to host your table bucket** + 1. In [AWS CloudFormation](https://console.aws.amazon.com/cloudformation/), select the current AWS region at the top-right of the page. + 1. Set it to the Region you want to create your table bucket in. + + **This must match the region your {SERVICE_LONG} is running in**: if the regions do not match AWS charges you for + cross-region data transfer. + +2. **Create your CloudFormation stack** + 1. Click `Create stack`, then select `With new resources (standard)`. + 1. In `Amazon S3 URL`, paste the following URL, then click `Next`. + + ```http + https://tigerlake.s3.us-east-1.amazonaws.com/tigerlake-connect-cloudformation.yaml + ``` + + 1. In `Specify stack details`, enter the following details, then click `Next`: + * `Stack Name`: a name for this CloudFormation stack + * `BucketName`: a name for this S3 table bucket + * `ProjectID` and `ServiceID`: enter the [connection details](/integrations/find-connection-details#find-your-project-and-service-id) for your {LAKE_LONG} {SERVICE_SHORT} + 1. In `Configure stack options` check `I acknowledge that AWS CloudFormation might create IAM resources`, then + click `Next`. + 1. In `Review and create`, click `Submit`, then wait for the deployment to complete. + AWS deploys your stack and creates the S3 table bucket and IAM role. + 1. Click `Outputs`, then copy all four outputs. + +3. **Connect your {SERVICE_SHORT} to the data lake** + + 1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click + `Connectors`. + + 1. Select the Apache Iceberg connector and supply the: + - ARN of the S3Table bucket + - ARN of a role with permissions to write to the table bucket + + Provisioning takes a couple of minutes. + + + + + +1. **Create your CloudFormation stack** + + Replace the following values in the command, then run it from the terminal: + + * `Region`: region of the S3 table bucket + * `StackName`: the name for this CloudFormation stack + * `BucketName`: the name of the S3 table bucket to create + * `ProjectID`: enter your {SERVICE_LONG} [connection details](/integrations/find-connection-details#find-your-project-and-service-id) + * `ServiceID`: enter your {SERVICE_LONG} [connection details](/integrations/find-connection-details#find-your-project-and-service-id) + + ```bash + aws cloudformation create-stack \ + --capabilities CAPABILITY_IAM \ + --template-url https://tigerlake.s3.us-east-1.amazonaws.com/tigerlake-connect-cloudformation.yaml \ + --region \ + --stack-name \ + --parameters \ + ParameterKey=BucketName,ParameterValue="" \ + ParameterKey=ProjectID,ParameterValue="" \ + ParameterKey=ServiceID,ParameterValue="" + ``` + + Setting up the integration through {CONSOLE} in {CLOUD_LONG}, provides a convenient copy-paste option with the + placeholders populated. + +2. **Connect your {SERVICE_SHORT} to the data lake** + + 1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click + `Connectors`. + + 1. Select the Apache Iceberg connector and supply the: + - ARN of the S3Table bucket + - ARN of a role with permissions to write to the table bucket + + Provisioning takes a couple of minutes. + + + + + +1. **Create a S3 Bucket** + + 1. 
Set the AWS region to host your table bucket
        1. In [Amazon S3 console](https://console.aws.amazon.com/s3/), select the current AWS region at the top-right of the page.
        2. Set it to the Region you want to create your table bucket in.

        **This must match the region your {SERVICE_LONG} is running in**: if the regions do not match, AWS charges you for
        cross-region data transfer.
    1. In the left navigation pane, click `Table buckets`, then click `Create table bucket`.
    1. Enter `Table bucket name`, then click `Create table bucket`.
    1. Copy the `Amazon Resource Name (ARN)` for your table bucket.

2. **Create an ARN role**
    1. In [IAM Dashboard](https://console.aws.amazon.com/iamv2/home), click `Roles`, then click `Create role`.
    1. In `Select trusted entity`, click `Custom trust policy`, replace the **Custom trust policy** code block with the
       following:

       ```json
       {
           "Version": "2012-10-17",
           "Statement": [
               {
                   "Effect": "Allow",
                   "Principal": {
                       "AWS": "arn:aws:iam::142548018081:root"
                   },
                   "Action": "sts:AssumeRole",
                   "Condition": {
                       "StringEquals": {
                           "sts:ExternalId": "/"
                       }
                   }
               }
           ]
       }
       ```

       `"Principal": { "AWS": "arn:aws:iam::123456789012:root" }` does not mean `root` access. This delegates
       permissions to the entire AWS account, not just the root user.

    1. Replace `` and `` with the [connection details](/integrations/find-connection-details#find-your-project-and-service-id) for your {LAKE_LONG}
       {SERVICE_SHORT}, then click `Next`.

    1. In `Permissions policies`, click `Next`.
    1. In `Role details`, enter `Role name`, then click `Create role`.
    1. In `Roles`, select the role you just created, then click `Add Permissions` > `Create inline policy`.
    1. Select `JSON`, then replace the `Policy editor` code block with the following:

       ```json
       {
           "Version": "2012-10-17",
           "Statement": [
               {
                   "Sid": "BucketOps",
                   "Effect": "Allow",
                   "Action": [
                       "s3tables:*"
                   ],
                   "Resource": ""
               },
               {
                   "Sid": "BucketTableOps",
                   "Effect": "Allow",
                   "Action": [
                       "s3tables:*"
                   ],
                   "Resource": "/table/*"
               }
           ]
       }
       ```
    1. Replace `` with the `Amazon Resource Name (ARN)` for the table bucket you just created.
    1. Click `Next`, then give the inline policy a name and click `Create policy`.

3. **Connect your {SERVICE_SHORT} to the data lake**

    1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click
       `Connectors`.

    1. Select the Apache Iceberg connector and supply the:
       - ARN of the S3Table bucket
       - ARN of a role with permissions to write to the table bucket

       Provisioning takes a couple of minutes.

## Stream data from your {SERVICE_LONG} to your data lake

When you start streaming, all data in the table is synchronized to Iceberg. Records are imported in time order, from
oldest to youngest. The write throughput is approximately 40,000 records per second. For larger tables, a full import can
take some time.

For Iceberg to perform update or delete statements, your {HYPERTABLE} or relational table must have a primary key.
This includes composite primary keys.
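
For example, here is a minimal sketch of a table that meets this requirement. The `metrics` table, its columns, and the one-day chunk interval are hypothetical; the composite primary key includes the time column, so the same table can also be converted into a {HYPERTABLE}:

```sql
-- Hypothetical table with a composite primary key that includes the time column.
CREATE TABLE metrics (
    device_id TEXT             NOT NULL,
    ts        TIMESTAMPTZ      NOT NULL,
    value     DOUBLE PRECISION,
    PRIMARY KEY (device_id, ts)
);

-- Convert it to a hypertable partitioned on the time column before enabling the Iceberg sync.
SELECT create_hypertable('metrics', by_range('ts', '1 day'::interval));
```
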
To stream data from a {PG} relational table, or a {HYPERTABLE} in your {SERVICE_LONG}, to your data lake, run the following
statement:

```sql
ALTER TABLE SET (
    tigerlake.iceberg_sync = true | false,
    tigerlake.iceberg_partitionby = '',
    tigerlake.iceberg_namespace = '',
    tigerlake.iceberg_table = ''
)
```

* `tigerlake.iceberg_sync`: `boolean`, set to `true` to start streaming, or `false` to stop the stream. A stream
  **cannot** resume after being stopped.
* `tigerlake.iceberg_partitionby`: optional property to define a partition specification in Iceberg. By default, the
  Iceberg table is partitioned as `day()`. This default behavior is only applicable
  to {HYPERTABLE}s. For more information, see [partitioning](#partitioning-intervals).
* `tigerlake.iceberg_namespace`: optional property to set a namespace. The default is `timescaledb`.
* `tigerlake.iceberg_table`: optional property to specify a different table name. If no name is specified, the {PG} table name is used.

### Partitioning intervals

By default, the partition interval for an Iceberg table is one day on the time column for a {HYPERTABLE}.
{PG} table sync does not enable any partitioning in Iceberg for non-hypertables. You can set it using
[tigerlake.iceberg_partitionby](#sample-code). The following partition intervals and specifications are supported:

| Interval | Description | Source types |
| ------------- |---------------------------------------------------------------------------| --- |
| `hour` | Extract a timestamp hour, as hours from epoch. Epoch is 1970-01-01 00:00:00. | `date`, `timestamp`, `timestamptz` |
| `day` | Extract a date or timestamp day, as days from epoch. | `date`, `timestamp`, `timestamptz` |
| `month` | Extract a date or timestamp month, as months from epoch. | `date`, `timestamp`, `timestamptz` |
| `year` | Extract a date or timestamp year, as years from epoch. | `date`, `timestamp`, `timestamptz` |
| `truncate[W]` | Value truncated to width `W`, see [options](https://iceberg.apache.org/spec/#truncate-transform-details) | |

These partitions define the behavior using the [Iceberg partition specification](https://iceberg.apache.org/spec/#partition-transforms).

### Sample code

The following samples show you how to tune data sync from a {HYPERTABLE} or a {PG} relational table to your
data lake:

- **Sync a {HYPERTABLE} with the default one-day partitioning interval on the `ts_column` column**

    To start syncing data from a {HYPERTABLE} to your data lake using the default one-day chunk interval as the
    partitioning scheme for the Iceberg table, run the following statement:

    ```sql
    ALTER TABLE my_hypertable SET (tigerlake.iceberg_sync = true);
    ```

    This is equivalent to `day(ts_column)`.

- **Specify a custom partitioning scheme for a {HYPERTABLE}**

    You use the `tigerlake.iceberg_partitionby` property to specify a different partitioning scheme for the Iceberg
    table at sync start. For example, to enforce an hourly partition scheme from the chunks on `ts_column` on a
    {HYPERTABLE}, run the following statement:

    ```sql
    ALTER TABLE my_hypertable SET (
      tigerlake.iceberg_sync = true,
      tigerlake.iceberg_partitionby = 'hour(ts_column)'
    );
    ```

- **Set the partition to sync relational tables**

    {PG} relational tables do not forward a partitioning scheme to Iceberg, so you must specify the partitioning scheme using
    `tigerlake.iceberg_partitionby` when you start the sync.
    For example, for a standard {PG} table to sync to the Iceberg
    table with daily partitioning, run the following statement:

    ```sql
    ALTER TABLE my_postgres_table SET (
      tigerlake.iceberg_sync = true,
      tigerlake.iceberg_partitionby = 'day(timestamp_col)'
    );
    ```

- **Stop sync to an Iceberg table for a {HYPERTABLE} or a {PG} relational table**

    ```sql
    ALTER TABLE my_hypertable SET (tigerlake.iceberg_sync = false);
    ```

- **Update or add the partitioning scheme of an Iceberg table**

    To change the partitioning scheme of an Iceberg table, you specify the desired partitioning scheme using the `tigerlake.iceberg_partitionby` property.
    For example, if the `samples` table has an hourly (`hour(ts)`) partition on the `ts` timestamp column,
    to change to daily partitioning, run the following statement:

    ```sql
    ALTER TABLE samples SET (tigerlake.iceberg_partitionby = 'day(ts)');
    ```

    This statement is also correct for Iceberg tables without a partitioning scheme.
    When you change the partition, you **do not** have to pause the sync to Iceberg.
    Apache Iceberg handles the partitioning operation as part of its internal implementation.

- **Specify a different namespace**

    By default, tables are created in the `timescaledb` namespace. To specify a different namespace when you start the sync, use the `tigerlake.iceberg_namespace` property. For example:

    ```sql
    ALTER TABLE my_hypertable SET (
      tigerlake.iceberg_sync = true,
      tigerlake.iceberg_namespace = 'my_namespace'
    );
    ```

- **Specify a different Iceberg table name**

    The table name in Iceberg is the same as the source table name in {CLOUD_LONG}.
    Some services do not allow mixed case, or have other constraints for table names.
    To define a different table name for the Iceberg table at sync start, use the `tigerlake.iceberg_table` property. For example:

    ```sql
    ALTER TABLE Mixed_CASE_TableNAME SET (
      tigerlake.iceberg_sync = true,
      tigerlake.iceberg_table = 'my_table_name'
    );
    ```

## Limitations

* Your {SERVICE_SHORT} must run {PG} 17.6 or above.
* Consistent ingestion rates of over 30,000 records per second can lead to a lost replication slot. Bursts above this rate can be absorbed over time.
* Only the [Amazon S3 Tables Iceberg REST](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-open-source.html) catalog is supported.
* To capture deletes made to data in the columnstore, certain columnstore optimizations are disabled for {HYPERTABLE}s.
* Direct Compress is not supported.
* The `TRUNCATE` statement is not supported, and does not truncate data in the corresponding Iceberg table.
* Data in a {HYPERTABLE} that has been moved to the low-cost object storage tier is not synced.
* Writing to the same S3 table bucket from multiple services is not supported; the bucket-to-service mapping is one-to-one.
* Iceberg snapshots are pruned automatically if their number exceeds 2500.
\ No newline at end of file diff --git a/integrations/connectors/source/stream-from-kafka.mdx b/integrations/connectors/source/stream-from-kafka.mdx new file mode 100644 index 0000000..208d74d --- /dev/null +++ b/integrations/connectors/source/stream-from-kafka.mdx @@ -0,0 +1,223 @@ +--- +title: Stream from Kafka +description: Stream data from Kafka into a Tiger Cloud service +--- + +import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; +import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + + Early access + +You use the Kafka source connector to stream events from Kafka into your {SERVICE_SHORT}. The connector connects to your Confluent Cloud Kafka cluster and Schema Registry using SASL/SCRAM authentication and service account–based API keys. Only the Avro format is currently supported [with some limitations](#known-limitations-and-unsupported-types). + +This page explains how to connect to your Confluent Cloud Kafka cluster. + + +The Kafka source connector is not yet supported for production use. + + +## Prerequisites + + + +- [Sign up](https://www.confluent.io/get-started/) for Confluent Cloud. +- [Create](https://docs.confluent.io/cloud/current/clusters/create-cluster.html) a Kafka cluster in Confluent Cloud. + + + +## Access your Kafka cluster in Confluent Cloud + +Take the following steps to prepare your Kafka cluster for connection: + +1. **Create a service account** + + If you already have a service account, you can reuse it. To create a new service account: + + 1. Log in to [Confluent Cloud](https://confluent.cloud/). + 2. Click the burger menu at the top-right of the pane, then press + `Access control` > `Service accounts` >`Add service account`. + 3. Enter the following details: + + - Name: `tigerdata-access` + - Description: `Service account for the Tiger Cloud source connector` + + 4. Add the service account owner role, then click `Next`. + + 5. Select a role assignment, then click `Add` + + 6. Click `Next`, then click `Create service account`. + +2. **Create API keys** + + 1. In Confluent Cloud, click `Home` > `Environments` > Select your environment > Select your cluster. + 2. Under `Cluster overview` in the left sidebar, select `API Keys`. + 3. Click `Add key`, choose `Service Account` and click `Next`. + 4. Select `tigerdata-access`, then click `Next`. + 5. For your cluster, choose the `Operation` and select the following `Permission`s, then click `Next`: + - `Resource type`: `Cluster` + - `Operation`: `DESCRIBE` + - `Permission`: `ALLOW` + 6. Click `Download and continue`, then securely store the ACL. + 7. Use the same procedure to add the following keys: + - ACL 2: Topic access + - `Resource type`: `Topic` + - `Topic name`: Select the topics that Tiger Cloud should read + - `Pattern type`: `LITERAL` + - `Operation`: `READ` + - `Permission`: `ALLOW` + - ACL 3: Consumer group access + - `Resource type`: `Consumer group` + - `Consumer group ID`: `tigerdata-kafka/`. See [Find your connection details](/integrations/find-connection-details#find-your-project-and-service-id) for where to find your {PROJECT_SHORT} ID + - `Pattern type`: `PREFIXED` + - `Operation`: `READ` + - `Permission`: `ALLOW` + You need these to configure your Kafka source connector. + +## Configure Confluent Cloud Schema Registry + +The connector requires access to the Schema Registry to fetch schemas for Kafka topics. To configure the Schema Registry: + +1. 
**Navigate to Schema Registry** + + In Confluent Cloud, click `Environments` and select your environment, then click `Stream Governance`. + +2. **Create a Schema Registry API key** + + 1. Click `API Keys`, then click `Add API Key`. + 2. Choose `Service Account`, select `tigerdata-access`, then click `Next`. + 3. Under `Resource scope`, choose `Schema Registry`, select the `default` environment, then click `Next`. + 4. In `Create API Key`, add the following, then click `Create API Key` : + + - `Name`: `tigerdata-schema-registry-access` + - `Description`: `API key for Tiger Cloud schema registry access` + + 5. Click `Download API Key` and securely store the API key and secret, then click `Complete`. + +3. **Assign roles for Schema Registry** + + 1. Click the burger menu at the top-right of the pane, then press + `Access control` > `Accounts & access` > `Service accounts`. + 2. Select the `tigerdata-access` service account. + 3. In the `Access` tab, add the following role assignments for `All schema subjects`: + + - `ResourceOwner` on the service account. + - `DeveloperRead` on schema subjects. + + Choose `All schema subjects` or restrict to specific subjects as required. + 4. Save the role assignments. + +Your Confluent Cloud Schema Registry is now accessible using the API key and secret. + +## Add Kafka source connector + +Take the following steps to create a Kafka source connector in {CONSOLE}. + +1. **In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select your {SERVICE_SHORT}** +2. **Go to `Connectors` > `Source connectors`. Click `New Connector`, then select `Kafka`** +3. **Click the pencil icon, then set the connector name** +4. **Set up Kafka authentication** + + Enter the name of your cluster in Confluent Cloud and the information from the first `api-key-*.txt` that you + downloaded, then click `Authenticate`. +5. **Set up the Schema Registry** + + Enter the service account ID and the information from the second `api-key-*.txt` that you + downloaded, then click `Authenticate`. +6. **Select topics to sync** + + Add the schema and table, map the columns in the table, and click `Create connector`. + + +Your Kafka connector is configured and ready to stream events. + +## Known limitations and unsupported types + +The following Avro schema types are not supported: + +### Union types + +Multi-type non-nullable unions are blocked. + +Examples: + +- Multiple type union: + + ```json + { + "type": "record", + "name": "Message", + "fields": [ + {"name": "content", "type": ["string", "bytes", "null"]} + ] + } + ``` + +- Union as root schema: + + ```json + ["null", "string"] + ``` + +### Reference types (named type references) + +Referencing a previously defined named type by name, instead of inline, is not supported. + +Examples: + +- Named type definition: + + ```json + { + "type": "record", + "name": "Address", + "fields": [ + {"name": "street", "type": "string"}, + {"name": "city", "type": "string"} + ] + } + ``` + +- Failing reference: + + ```json + { + "type": "record", + "name": "Person", + "fields": [ + {"name": "name", "type": "string"}, + {"name": "address", "type": "Address"} + ] + } + ``` + +### Unsupported logical types + +Only the logical types in the hardcoded supported list are supported. 
This includes: + +* decimal, date, time-millis, time-micros + +* timestamp-millis, timestamp-micros, timestamp-nanos + +* local-timestamp-millis, local-timestamp-micros, local-timestamp-nanos + +* uuid, duration + +Unsupported examples: + +```json +{ + "type": "int", + "logicalType": "date-time" +} + +{ + "type": "string", + "logicalType": "json" +} + +{ + "type": "bytes", + "logicalType": "custom-type" +} +``` \ No newline at end of file diff --git a/integrations/connectors/source/sync-from-postgres.mdx b/integrations/connectors/source/sync-from-postgres.mdx index 77d8a18..0b8ac48 100644 --- a/integrations/connectors/source/sync-from-postgres.mdx +++ b/integrations/connectors/source/sync-from-postgres.mdx @@ -1,8 +1,566 @@ --- -title: Connect data in Postgres to your service -description: Simulate and analyze a transport dataset in your Tiger Cloud service -products: [cloud, mst, self_hosted] -keywords: [IoT, simulate] +title: Sync from Postgres +description: Sync updates to your primary Postgres database with your Tiger Cloud service in real time --- -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +import { HYPERTABLE, CONSOLE } from '/snippets/vars.mdx'; +import LivesyncPrereqsCloud from '/snippets/integrations/_livesync-prereqs-cloud.mdx'; +import LivesyncPrereqsTerminal from '/snippets/integrations/_livesync-prereqs-terminal.mdx'; +import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; + + Early access + +You use the {PG} connector to synchronize all data or specific tables from a {PG} database instance to your +{SERVICE_SHORT}, in real time. You run the connector continuously, turning {PG} into a primary database with your +{SERVICE_SHORT} as a logical replica. This enables you to leverage real-time analytics capabilities on +your replica data. + +![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) + +The {PG} connector leverages the well-established {PG} logical replication protocol. By relying on this protocol, +it ensures compatibility, familiarity, and a broader knowledge base—making it easier for you to adopt the connector +and integrate your data. + +You use the {PG} connector for data synchronization, rather than migration. This includes: + +* Copy existing data from a {PG} instance: + - Copy data at up to 150 GB/hr. + + You need at least a 4 CPU/16 GB source database, and a 4 CPU/16 GB target {SERVICE_SHORT}. + - Copy the publication tables in parallel. + + Large tables are still copied using a single connection. Parallel copying is in the backlog. + - Forget foreign key relationships. + + The connector disables foreign key validation during the sync. For example, if a `metrics` table refers to + the `id` column on the `tags` table, you can still sync only the `metrics` table without worrying about their + foreign key relationships. + - Track progress. + + {PG} exposes `COPY` progress under `pg_stat_progress_copy`. + +* Synchronize real-time changes from a {PG} instance. +* Add and remove tables on demand using the [{PG} PUBLICATION interface](https://www.postgresql.org/docs/current/sql-createpublication.html). +* Enable features such as hypertables, columnstore, and continuous aggregates on your logical replica. + + +This source Postgres connector is not yet supported for production use. 
If you have any questions or feedback, talk to us in [#livesync in the Tiger Community](https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88). + + + + + + +## Prerequisites + + + +## Limitations + +* The source {PG} instance must be accessible from the Internet. + + Services hosted behind a firewall or VPC are not supported. This functionality is on the roadmap. + +* Indexes, including the primary key and unique constraints, are not migrated to the target. + + We recommend that, depending on your query patterns, you create only the necessary indexes on the target. + + + +## Set your connection string + +This variable holds the connection information for the source database. In the terminal on your migration machine, +set the following: + +```bash +export SOURCE="postgres://:@:/" +``` + + +Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. This tool +requires a direct connection to the database to function properly. + + +## Tune your source database + + + + + +1. **Set the `rds.logical_replication` parameter to `1`** + + In the AWS console, navigate to your RDS instance parameter group and set `rds.logical_replication` to `1`. This enables logical replication on the RDS instance. + + After changing this parameter, restart your RDS instance for the changes to take effect. + +2. **Create a user for the connector and assign permissions** + + 1. Create ``: + + ```sql + psql $SOURCE -c "CREATE USER PASSWORD ''" + ``` + + You can use an existing user. However, you must ensure that the user has the following permissions. + + 2. Grant permissions to create a replication slot: + + ```sql + psql $SOURCE -c "ALTER ROLE REPLICATION" + ``` + + 3. Grant permissions to create a publication: + + ```sql + psql $SOURCE -c "GRANT CREATE ON DATABASE TO " + ``` + + 4. Assign the user permissions on the source database: + + ```sql + psql $SOURCE <; + GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; + EOF + ``` + + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: + ```sql + psql $SOURCE < TO ; + GRANT SELECT ON ALL TABLES IN SCHEMA TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; + EOF + ``` + + 5. On each table you want to sync, make `` the owner: + + ```sql + psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' + ``` + You can skip this step if the replicating user is already the owner of the tables. + +3. **Enable replication `DELETE` and `UPDATE` operations** + + For the connector to replicate `DELETE` and `UPDATE` operations, enable `REPLICA IDENTITY` on each table: + + ```sql + psql $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY FULL;' + ``` + + + + + +1. **Tune the Write Ahead Log (WAL) on the {PG} source database** + + ```sql + psql $SOURCE <`: + + ```sql + psql $SOURCE -c "CREATE USER PASSWORD ''" + ``` + + You can use an existing user. However, you must ensure that the user has the following permissions. + + 2. Grant permissions to create a replication slot: + + ```sql + psql $SOURCE -c "ALTER ROLE REPLICATION" + ``` + + 3. Grant permissions to create a publication: + + ```sql + psql $SOURCE -c "GRANT CREATE ON DATABASE TO " + ``` + + 4. Assign the user permissions on the source database: + + ```sql + psql $SOURCE <; + GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; + EOF + ``` + + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: + ```sql + psql $SOURCE < TO ; + GRANT SELECT ON ALL TABLES IN SCHEMA TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; + EOF + ``` + + 5. On each table you want to sync, make `` the owner: + + ```sql + psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' + ``` + You can skip this step if the replicating user is already the owner of the tables. + +3. **Enable replication `DELETE` and `UPDATE` operations** + + For the connector to replicate `DELETE` and `UPDATE` operations, enable `REPLICA IDENTITY` on each table: + + ```sql + psql $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY FULL;' + ``` + + + + + +## Synchronize data + +To sync data from your {PG} database using {CONSOLE}: + +1. **Connect to your {SERVICE_SHORT}** + + In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} to sync live data to. + +2. **Connect the source database and the target {SERVICE_SHORT}** + + ![Postgres connector wizard](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-wizard-tiger-console.png) + + 1. Click `Connectors` > `PostgreSQL`. + 2. Set the name for the new connector by clicking the pencil icon. + 3. Check the boxes for `Set wal_level to logical` and `Update your credentials`, then click `Continue`. + 4. Enter your database credentials or a {PG} connection string, then click `Connect to database`. + This is the connection string for ``. The console connects to the source database and retrieves the schema information. + +3. **Optimize the data to synchronize in hypertables** + + ![Postgres connector start](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-start-tiger-console.png) + + 1. In the `Select table` dropdown, select the tables to sync. + 2. Click `Select tables +`. + + The console checks the table schema and, if possible, suggests the column to use as the time dimension in a {HYPERTABLE}. + 3. Click `Create Connector`. + + The console starts the connector between the source database and the target {SERVICE_SHORT} and displays the progress. + +4. **Monitor synchronization** + + ![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) + + 1. To view the amount of data replicated, click `Connectors`. The diagram in `Connector data flow` gives you an overview of the connectors you have created, their status, and how much data has been replicated. + + 2. To review the syncing progress for each table, click `Connectors` > `Source connectors`, then select the name of your connector in the table. + +5. **Manage the connector** + + ![Edit a Postgres connector](https://assets.timescale.com/docs/images/tiger-on-azure/edit-pg-connector-tiger-console.png) + + 1. To edit the connector, click `Connectors` > `Source connectors`, then select the name of your connector in the table. You can rename the connector, delete or add new tables for syncing. + + 2. To pause a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Pause`. + + 3. To delete a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Delete`. You must pause the connector before deleting it. + +And that is it, you are using the connector to synchronize all the data, or specific tables, from a {PG} database +instance in real time. + + + + + +## Prerequisites + + + +## Limitations + +- The schema is not migrated by the connector, you use `pg_dump`/`pg_restore` to migrate it. + + + +## Set your connection strings + +The `` in the `SOURCE` connection must have the replication role granted in order to create a replication slot. + +These variables hold the connection information for the source database and target. In Terminal on your migration machine, set the following: + +```bash +export SOURCE="postgres://:@:/" +export TARGET="postgres://tsdbadmin:@:/tsdb?sslmode=require" +``` + +You find the connection information in the configuration file you +downloaded when you created the service. 
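
For example, a quick sanity check (assuming `psql` is installed on the migration machine) that both connection strings resolve and authenticate before you continue:

```bash
# Confirm that the source and target connection strings both work.
psql $SOURCE -c "SELECT version();"
psql $TARGET -c "SELECT version();"
```
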
+ + +Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. This tool requires a direct connection to the database to function properly. + + +## Tune your source database + +Follow the same tuning steps from the Console tab above. + +## Migrate the table schema + +Use `pg_dump` to: + +1. **Download the schema from the source database** + + ```bash + pg_dump $SOURCE \ + --no-privileges \ + --no-owner \ + --no-publications \ + --no-subscriptions \ + --no-table-access-method \ + --no-tablespaces \ + --schema-only \ + --file=schema.sql + ``` + +2. **Apply the schema on the target {SERVICE_SHORT}** + ```bash + psql $TARGET -f schema.sql + ``` + +## Convert partitions and tables with time-series data into hypertables + +For efficient querying and analysis, you can convert tables which contain time-series or +events data, and tables that are already partitioned using {PG} declarative partition into +hypertables. + +1. **Convert tables to hypertables** + + Run the following on each table in the target to convert it to a hypertable: + + ```bash + psql -X -d $TARGET -c "SELECT public.create_hypertable('
', by_range('', ''::interval));" + ``` + + For example, to convert the *metrics* table into a hypertable with *time* as a partition column and + *1 day* as a partition interval: + + ```bash + psql -X -d $TARGET -c "SELECT public.create_hypertable('public.metrics', by_range('time', '1 day'::interval));" + ``` + +2. **Convert {PG} partitions to hypertables** + + Rename the partition and create a new regular table with the same name as the partitioned table, then + convert to a hypertable: + + ```bash + psql $TARGET -f - <<'EOF' + BEGIN; + ALTER TABLE public.events RENAME TO events_part; + CREATE TABLE public.events(LIKE public.events_part INCLUDING ALL); + SELECT create_hypertable('public.events', by_range('time', '1 day'::interval)); + COMMIT; +EOF + ``` + +## Specify the tables to synchronize + +After the schema is migrated, you [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html) on the source database that +specifies the tables to synchronize. + +1. **Create a publication that specifies the table to synchronize** + + A `PUBLICATION` enables you to synchronize some or all the tables in the schema or database. + + ```sql + CREATE PUBLICATION FOR TABLE , ; + ``` + + To add tables after to an existing publication, use [ALTER PUBLICATION](https://www.postgresql.org/docs/current/sql-alterpublication.html) + + ```sql + ALTER PUBLICATION ADD TABLE ; + ``` + +2. **Publish the {PG} declarative partitioned table** + + ```sql + ALTER PUBLICATION SET(publish_via_partition_root=true); + ``` + +3. **Stop syncing a table in the `PUBLICATION`, use `DROP TABLE`** + + ```sql + ALTER PUBLICATION DROP TABLE ; + ``` + +## Synchronize data + +You use the connector docker image to synchronize changes in real time from a {PG} database +instance: + +1. **Start the connector** + + As you run the connector continuously, best practice is to run it as a Docker daemon. + + ```bash + docker run -d --rm --name livesync timescale/live-sync:v0.1.25 run \ + --publication --subscription \ + --source $SOURCE --target $TARGET --table-map + ``` + + `--publication`: The name of the publication as you created in the previous step. To use multiple publications, repeat the `--publication` flag. + + `--subscription`: The name that identifies the subscription on the target. + + `--source`: The connection string to the source {PG} database. + + `--target`: The connection string to the target. + + `--table-map`: (Optional) A JSON string that maps source tables to target tables. If not provided, the source and target table names are assumed to be the same. + For example, to map the source table `metrics` to the target table `metrics_data`: + + ``` + --table-map '{"source": {"schema": "public", "table": "metrics"}, "target": {"schema": "public", "table": "metrics_data"}}' + ``` + To map only the schema, use: + + ``` + --table-map '{"source": {"schema": "public"}, "target": {"schema": "analytics"}}' + ``` + This flag can be repeated for multiple table mappings. + +2. **Capture logs** + + Once the connector is running as a docker daemon, you can also capture the logs: + ```bash + docker logs -f livesync + ``` + +3. **View the progress of tables being synchronized** + + List the tables being synchronized by the connector using the `_ts_live_sync.subscription_rel` table in the target: + + ```bash + psql $TARGET -c "SELECT * FROM _ts_live_sync.subscription_rel" + ``` + + The `state` column indicates the current state of the table synchronization. 
+ Possible values for `state` are: + + | state | description | + |-------|-------------| + | d | initial table data sync | + | f | initial table data sync completed | + | s | catching up with the latest changes | + | r | table is ready, syncing live changes | + + To see the replication lag, run the following against the SOURCE database: + + ```bash + psql $SOURCE -f - <<'EOF' + SELECT + slot_name, + pg_size_pretty(pg_current_wal_flush_lsn() - confirmed_flush_lsn) AS lag + FROM pg_replication_slots + WHERE slot_name LIKE 'live_sync_%' AND slot_type = 'logical' +EOF + ``` + +4. **Add or remove tables from the publication** + + To add tables, use [ALTER PUBLICATION .. ADD TABLE](https://www.postgresql.org/docs/current/sql-alterpublication.html) + + ```sql + ALTER PUBLICATION ADD TABLE ; + ``` + + To remove tables, use [ALTER PUBLICATION .. DROP TABLE](https://www.postgresql.org/docs/current/sql-alterpublication.html) + + ```sql + ALTER PUBLICATION DROP TABLE ; + ``` + +5. **Update table statistics** + + If you have a large table, you can run `ANALYZE` on the target + to update the table statistics after the initial sync is complete. + + This helps the query planner make better decisions for query execution plans. + + ```bash + vacuumdb --analyze --verbose --dbname=$TARGET + ``` + +6. **Stop the connector** + + ```bash + docker stop live-sync + ``` + +7. **(Optional) Reset sequence nextval on the target** + + The connector does not automatically reset the sequence nextval on the target. + + Run the following script to reset the sequence for all tables that have a + serial or identity column in the target: + + ```bash + psql $TARGET -f - <<'EOF' + DO $$ + DECLARE + rec RECORD; + BEGIN + FOR rec IN ( + SELECT + sr.target_schema AS table_schema, + sr.target_table AS table_name, + col.column_name, + pg_get_serial_sequence( + sr.target_schema || '.' || sr.target_table, + col.column_name + ) AS seqname + FROM _ts_live_sync.subscription_rel AS sr + JOIN information_schema.columns AS col + ON col.table_schema = sr.target_schema + AND col.table_name = sr.target_table + WHERE col.column_default LIKE 'nextval(%' -- only serial/identity columns + ) LOOP + EXECUTE format( + 'SELECT setval(%L, + COALESCE((SELECT MAX(%I) FROM %I.%I), 0) + 1, + false + );', + rec.seqname, -- the sequence identifier + rec.column_name, -- the column to MAX() + rec.table_schema, -- schema for MAX() + rec.table_name -- table for MAX() + ); + END LOOP; + END; + $$ LANGUAGE plpgsql; +EOF + ``` + +8. **Clean up** + + Use the `--drop` flag to remove the replication slots created by the connector on the source database. + + ```bash + docker run -it --rm --name livesync timescale/live-sync:v0.1.25 run \ + --publication --subscription \ + --source $SOURCE --target $TARGET \ + --drop + ``` + + + + \ No newline at end of file diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index b136146..a991ba1 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -1,8 +1,160 @@ --- -title: Connect data in S3 to your service -description: Simulate and analyze a transport dataset in your Tiger Cloud service -products: [cloud, mst, self_hosted] -keywords: [IoT, simulate] +title: Sync from S3 +description: Sync data from S3 to your Tiger Cloud service in real time --- -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
+import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; +import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + + Early access + +You use the {S3_CONNECTOR} to synchronize CSV and Parquet files from an S3 bucket in real time. The connector runs continuously, enabling you to leverage real-time analytics capabilities with data constantly synced from S3. This lets you take full advantage of real-time analytics capabilities without having to develop or manage custom ETL solutions between S3 and your database. + +![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) + +You can use the {S3_CONNECTOR} to synchronize your existing and new data. Here's what the connector can do: + +* Sync data from an S3 bucket: + - Use glob patterns to identify the objects to sync. + - Watch an S3 bucket for new files and import them automatically. It runs on a configurable schedule and tracks processed files. + - **Important**: The connector processes files in [lexicographical order](https://en.wikipedia.org/wiki/Lexicographic_order). It uses the name of the last file processed as a marker and fetches only files later in the alphabet in subsequent queries. Files added with names earlier in the alphabet than the marker are skipped and never synced. For example, if you add the file Bob when the marker is at Elephant, Bob is never processed. + - For large backlogs, check every minute until caught up. + +* Sync data from multiple file formats: + - CSV: check for compression in GZ and ZIP format, then process using [timescaledb-parallel-copy](https://github.com/timescale/timescaledb-parallel-copy). + - Parquet: convert to CSV, then process using [timescaledb-parallel-copy](https://github.com/timescale/timescaledb-parallel-copy). + +* The {S3_CONNECTOR} offers an option to enable a {HYPERTABLE} during the file-to-table schema mapping setup. You can enable [columnstore](/use-timescale/compression/about-compression) and [continuous aggregates](/use-timescale/continuous-aggregates/about-continuous-aggregates) through the SQL editor once the connector has started running. + +* The connector offers a default 1-minute polling interval. This checks the S3 source every minute for new data. You can customize this interval by setting up a cron expression. + +The {S3_CONNECTOR} continuously imports data from an Amazon S3 bucket into your database. It monitors your S3 bucket for new files matching a specified pattern and automatically imports them into your designated database table. + + +The connector currently only syncs existing and new files—it does not support updating or deleting records based on updates and deletes from S3 to tables. + + + +This source S3 connector is not supported for production use. If you have any questions or feedback, talk to us in [#livesync in the Tiger Community](https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88). + + +## Prerequisites + + + +- Ensure access to a standard Amazon S3 bucket containing your data files. + + Directory buckets are not supported. +- Configure access credentials for the S3 bucket. + The following credentials are supported: + - [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console). + + - Configure the trust policy. 
Set the: + + - `Principal`: `arn:aws:iam::142548018081:role/timescale-s3-connections`. + - `ExternalID`: set to the [{PROJECT_SHORT} and {SERVICE_SHORT} ID](/integrations/find-connection-details#find-your-project-and-service-id) of the + {SERVICE_SHORT} you are syncing to in the format `/`. + + This is to avoid the [confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html). + - Give the following access permissions: + + - `s3:GetObject`. + - `s3:ListBucket`. + + - [Public anonymous user](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-anonymous-user). + + + +## Limitations + +- **File naming**: + Files must follow lexicographical ordering conventions. Files with names that sort earlier than already-processed files are permanently skipped. Example: if `file_2024_01_15.csv` has been processed, a file named `file_2024_01_10.csv` added later will never be synced. + Recommended naming patterns: timestamps (for example, `YYYY-MM-DD-HHMMSS`), sequential numbers with fixed padding (for example, `file_00001`, `file_00002`). + +- **CSV**: + - Maximum file size: 1 GB + + To increase this limit, contact sales@tigerdata.com + - Maximum row size: 2 MB + - Supported compressed formats: + - GZ + - ZIP + - Advanced settings: + - Delimiter: the default character is `,`, you can choose a different delimiter + - Skip header: skip the first row if your file has headers +- **Parquet**: + - Maximum file size: 1 GB + - Maximum row size: 2 MB +- **Sync iteration**: + + To prevent system overload, the connector tracks up to 100 files for each sync iteration. Additional checks only fill + empty queue slots. + +## Synchronize data + +To sync data from your S3 bucket using {CONSOLE}: + +1. **Connect to your {SERVICE_SHORT}** + + In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} to sync live data to. + +2. **Connect the source S3 bucket to the target {SERVICE_SHORT}** + + ![Connect to S3 bucket](https://assets.timescale.com/docs/images/tiger-on-azure/s3-connector-tiger-console.png) + + 1. Click `Connectors` > `Amazon S3`. + 2. Click the pencil icon, then set the name for the new connector. + 3. Set the `Bucket name` and `Authentication method`, then click `Continue`. + + For instruction on creating the IAM role to connect your S3 bucket, click `Learn how`. The console connects to the source bucket. + 4. In `Define files to sync`, choose the `File type` and set the `Glob pattern`. + + Use the following patterns: + - `/*`: match all files in a folder. Also, any pattern ending with `/` is treated as `/*`. + - `/**`: match all recursively. + - `/**/*.csv`: match a specific file type. + + The {S3_CONNECTOR} uses prefix filters where possible, place patterns carefully at the end of your glob expression. + AWS S3 doesn't support complex filtering. If your expression filters too many files, the list operation may time out. + + 5. Click the search icon. You see the files to sync. Click `Continue`. + +3. **Optimize the data to synchronize in hypertables** + + ![S3 connector table selection](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-s3-connector-create-tables.png) + + The console checks the file schema and, if possible, suggests the column to use as the time dimension in a + {HYPERTABLE}. + + 1. Choose `Create a new table for your data` or `Ingest data to an existing table`. + 2. Choose the `Data type` for each column, then click `Continue`. + 3. 
Choose the interval. This can be a minute, an hour, or use a [cron expression](https://en.wikipedia.org/wiki/Cron#Cron_expression). + 4. Click `Start Connector`. + + The console starts the connection between the source database and the target {SERVICE_SHORT} and displays the progress. + +4. **Monitor synchronization** + + 1. To view the amount of data replicated, click `Connectors`. The diagram in `Connector data flow` gives you an overview of the connectors you have created, their status, and how much data has been replicated. + + ![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) + + 2. To view file import statistics and logs, click `Connectors` > `Source connectors`, then select the name of your connector in the table. + + ![S3 connector stats](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-s3-connector-import-details.png) + + +5. **Manage the connector** + + 1. To pause the connector, click `Connectors` > `Source connectors`. Open the three-dot menu next to your connector in the table, then click `Pause`. + + ![Edit S3 connector](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-s3-connector-pause.png) + + 2. To edit the connector, click `Connectors` > `Source connectors`. Open the three-dot menu next to your connector in the table, then click `Edit` and scroll down to `Modify your Connector`. You must pause the connector before editing it. + + ![S3 connector change config](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-s3-connector-edit.png) + + 3. To pause or delete the connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select an option. You must pause the connector before deleting it. + +And that is it, you are using the {S3_CONNECTOR} to synchronize all the data, or specific files, from an S3 bucket in real time. \ No newline at end of file diff --git a/integrations/find-connection-details.mdx b/integrations/find-connection-details.mdx new file mode 100644 index 0000000..477c1b4 --- /dev/null +++ b/integrations/find-connection-details.mdx @@ -0,0 +1,91 @@ +--- +title: Find your connection details +description: You connect to Tiger Cloud, self-hosted TimescaleDB, or MST using your connection details +--- + +import { SERVICE_LONG, SELF_LONG, SELF_LONG_CAP, SERVICE_SHORT, CONSOLE, PROJECT_LONG, PROJECT_SHORT, PROJECT_SHORT_CAP, CLOUD_LONG, PG, MST_CONSOLE_LONG, MST_LONG, MST_SERVICE_SHORT } from '/snippets/vars.mdx'; + +To connect to {CLOUD_LONG}, {SELF_LONG}, or {MST_LONG}, you need at least the following: + +- Hostname +- Port +- Username +- Password +- Database name + +Find the connection details based on your deployment type: + + + + + +## Connect to your {SERVICE_SHORT} + +Retrieve the connection details for your {SERVICE_LONG}: + +- **In `-credentials.txt`**: + + All connection details are supplied in the configuration file you download when you create a new {SERVICE_SHORT}. + +- **In {CONSOLE}**: + + Open the [`Services`][console-services] page and select your {SERVICE_SHORT}. The connection details, except the password, are available in `Service info` > `Connection info` > `More details`. If necessary, click `Forgot your password?` to get a new one. 
+ + ![{SERVICE_LONG} connection details](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-service-connection-details.png) + +## Find your {PROJECT_SHORT} and {SERVICE_SHORT} ID + +To retrieve the connection details for your {PROJECT_LONG} and {SERVICE_LONG}: + +1. **Retrieve your {PROJECT_SHORT} ID**: + + In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click `Copy` next to the {PROJECT_SHORT} ID. + ![Retrive the {PROJECT_SHORT} id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-project-id.png) + +2. **Retrieve your {SERVICE_SHORT} ID**: + + Click the dots next to the {SERVICE_SHORT}, then click `Copy` next to the {SERVICE_SHORT} ID. + ![Retrive the {SERVICE_SHORT} id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-service-id.png) + +## Create client credentials + +You use client credentials to obtain access tokens outside of the user context. + +To retrieve the connection details for your {CLOUD_LONG} {PROJECT_SHORT} for programmatic usage +such as Terraform or the [{CLOUD_LONG} REST API][api-reference]: + +1. **Open the settings for your {PROJECT_SHORT}**: + + In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click `{PROJECT_SHORT_CAP} settings`. + +2. **Create client credentials**: + + 1. Click `Create credentials`, then copy `Public key` and `Secret key` locally. + + ![Create client credentials in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-console-client-credentials.png) + + This is the only time you see the `Secret key`. After this, only the `Public key` is visible in this page. + + 2. Click `Done`. + + + + + +Find the connection details in the [{PG} configuration file][pg-config-file] or by asking your database administrator. The `postgres` superuser, created during {PG} installation, has all the permissions required to run procedures in this documentation. However, it is recommended to create other users and assign permissions on the need-only basis. + + + + + +In the `Services` page of the {MST_CONSOLE_LONG}, click the {MST_SERVICE_SHORT} you want to connect to. You see the connection details: + +![MST connection details](https://assets.timescale.com/docs/images/mst-connection-info.png) + + + + + +[api-reference]: /api-reference +[console-services]: https://console.cloud.timescale.com/dashboard/services +[pg-config-file]: https://www.postgresql.org/docs/current/runtime-config-file-locations.html \ No newline at end of file diff --git a/integrations/index.mdx b/integrations/index.mdx deleted file mode 100644 index 1182f5d..0000000 --- a/integrations/index.mdx +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: Integrations -description: Built on Postgres, Tiger Cloud can integrate with the same array of third-party solutions. See integration procedures for the most popular and requested third-party services -products: [cloud, self_hosted] -keywords: [integrations] -tags: [integrations] ---- - -import { CLOUD_LONG, COMPANY, PG, SERVICE_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; - -You can integrate your {SERVICE_LONG} with third-party solutions to expand and extend what you can do with your data. - -## Integrates with {PG}? Integrates with your {SERVICE_SHORT}! - -A {SERVICE_LONG} is a {PG} database instance extended by {COMPANY} with custom capabilities. 
This means that any third-party solution that you can integrate with {PG}, you can also integrate with {CLOUD_LONG}. See the full list of {PG} integrations [here][postgresql-integrations]. - - - -## Authentication and security - - -| Name | Description | -|:-----------------------------------------------------------------------------------------------------------------------------------:|---------------------------------------------------------------------------| -| auth-logo[Auth.js][auth-js] | Implement authentication and authorization for web applications. | -| auth0-logo[Auth0][auth0] | Securely manage user authentication and access controls for applications. | -| okta-logo[Okta][okta] | Secure authentication and user identity management for applications. | - -## Business intelligence and data visualization - -| Name | Description | -|:----------------------------------------------------------------------------------------------------------------------------------:|-------------------------------------------------------------------------| -| cubejs-logo[Cube.js][cube-js] | Build and optimize data APIs for analytics applications. | -| looker-logo[Looker][looker] | Explore, analyze, and share business insights with a BI platform. | -| metabase-logo[Metabase][metabase] | Create dashboards and visualize business data without SQL expertise. | -| power-bi-logo[Power BI][power-bi] | Visualize data, build interactive dashboards, and share insights. | -| superset-logo[Superset][superset] | Create and explore data visualizations and dashboards. | - -## Configuration and deployment - -| Name | Description | -|:----------------------------------:|--------------------------------------------------------------------------------| -| azure-functions-logo[Azure Functions][azure-functions] | Run event-driven serverless code in the cloud without managing infrastructure. | -| deno-deploy-logo[Deno Deploy][deno-deploy] | Deploy and run JavaScript and TypeScript applications at the edge. | -| flyway-logo[Flyway][flyway] | Manage and automate database migrations using version control. | -| liquibase-logo[Liquibase][liquibase] | Track, version, and automate database schema changes. | -| pulimi-logo[Pulumi][pulumi] | Define and manage cloud infrastructure using code in multiple languages. | -| render-logo[Render][render] | Deploy and scale web applications, databases, and services easily. | -| terraform-logo[Terraform][terraform] | Safely and predictably provision and manage infrastructure in any cloud. | -| kubernets-logo[Kubernetes][kubernetes] | Deploy, scale, and manage containerized applications automatically. | - - -## Data engineering and extract, transform, load - -| Name | Description | -|:------------------------------------:|------------------------------------------------------------------------------------------| -| airbyte-logo[Airbyte][airbyte] | Sync data between various sources and destinations. | -| amazon-sagemaker-logo[Amazon SageMaker][amazon-sagemaker] | Build, train, and deploy ML models into a production-ready hosted environment. | -| airflow-logo[Apache Airflow][apache-airflow] | Programmatically author, schedule, and monitor workflows. | -| beam-logo[Apache Beam][apache-beam] | Build and execute batch and streaming data pipelines across multiple processing engines. | -| kafka-logo[Apache Kafka][kafka] | Stream high-performance data pipelines, analytics, and data integration. 
| -| lambda-logo[AWS Lambda][aws-lambda] | Run code without provisioning or managing servers, scaling automatically as needed. | -| dbt-logo[dbt][dbt] | Transform and model data in your warehouse using SQL-based workflows. | -| debezium-logo[Debezium][debezium] | Capture and stream real-time changes from databases. | -| decodable-logo[Decodable][decodable] | Build, run, and manage data pipelines effortlessly. | -| delta-lake-logo[DeltaLake][deltalake] | Enhance data lakes with ACID transactions and schema enforcement. | -| firebase-logo[Firebase Wrapper][firebase-wrapper] | Simplify interactions with Firebase services through an abstraction layer. | -| stitch-logo[Stitch][stitch] | Extract, load, and transform data from various sources to data warehouses. | - -## Data ingestion and streaming - -| Name | Description | -|:----------------------------:|-----------------------------------------------------------------------------------------------------------------------------------| -| spark-logo[Apache Spark][apache-spark] | Process large-scale data workloads quickly using distributed computing. | -| confluent-logo[Confluent][confluent] | Manage and scale Apache Kafka-based event streaming applications. You can also [set up {PG} as a source][confluent-source]. | -| electric-sql-logo[ElectricSQL][electricsql] | Enable real-time synchronization between databases and frontend applications. | -| emqx-logo[EMQX][emqx] | Deploy an enterprise-grade MQTT broker for IoT messaging. | -| estuary-logo[Estuary][estuary] | Stream and synchronize data in real time between different systems. | -| flink-logo[Flink][flink] | Process real-time data streams with fault-tolerant distributed computing. | -| fivetran-logo[Fivetran][fivetran] | Sync data from multiple sources to your data warehouse. | -| red-panda-logo[Redpanda][redpanda] | Stream and process real-time data as a Kafka-compatible platform. | -| strimm-logo[Striim][striim] | Ingest, process, and analyze real-time data streams. | - -## Development tools - -| Name | Description | -|:---------------------------------------:|--------------------------------------------------------------------------------------| -| deepnote-logo[Deepnote][deepnote] | Collaborate on data science projects with a cloud-based notebook platform. | -| django-logo[Django][django] | Develop scalable and secure web applications using a Python framework. | -| long-chain-logo[LangChain][langchain] | Build applications that integrate with language models like GPT. | -| rust-logo[Rust][rust] | Build high-performance, memory-safe applications with a modern programming language. | -| streamlit-logo[Streamlit][streamlit] | Create interactive data applications and dashboards using Python. | - -## Language-specific integrations - -| Name | Description | -|:------------------:|---------------------------------------------------| -| golang-logo[Golang][golang] | Integrate {CLOUD_LONG} with a Golang application. | -| java-logo[Java][java] | Integrate {CLOUD_LONG} with a Java application. | -| node-logo[Node.js][node-js] | Integrate {CLOUD_LONG} with a Node.js application. | -| python-logo[Python][python] | Integrate {CLOUD_LONG} with a Python application. | -| ruby-logo[Ruby][ruby] | Integrate {CLOUD_LONG} with a Ruby application. 
| - -## Logging and system administration - -| Name | Description | -|:----------------------:|---------------------------------------------------------------------------| -| rsyslog-logo[RSyslog][rsyslog] | Collect, filter, and forward system logs for centralized logging. | -| schemaspy-logo[SchemaSpy][schemaspy] | Generate database schema documentation and visualization. | - -## Observability and alerting - -| Name | Description | -|:------------------------------------------------------:|-----------------------------------------------------------------------------------------------------------------------------------------------------------| -| cloudwatch-logo[Amazon Cloudwatch][cloudwatch] | Collect, analyze, and act on data from applications, infrastructure, and services running in AWS and on-premises environments. | -| skywalking-logo[Apache SkyWalking][apache-skywalking] | Monitor, trace, and diagnose distributed applications for improved observability. You can also [set up {PG} as storage][apache-skywalking-storage]. | -| azure-monitor-logo[Azure Monitor][azure-monitor] | Collect and analyze telemetry data from cloud and on-premises environments. | -| datadog-logo[Datadog][datadog] | Gain comprehensive visibility into applications, infrastructure, and systems through real-time monitoring, logging, and analytics. | -| grafana-logo[Grafana][grafana] | Query, visualize, alert on, and explore your metrics and logs. | -| instana-logo[IBM Instana][ibm-instana] | Monitor application performance and detect issues in real-time. | -| jaeger-logo[Jaeger][jaeger] | Trace and diagnose distributed transactions for observability. | -| new-relic-logo[New Relic][new-relic] | Monitor applications, infrastructure, and logs for performance insights. | -| open-telemetery-logo[OpenTelemetry Beta][opentelemetry] | Collect and analyze telemetry data for observability across systems. | -| prometheus-logo[Prometheus][prometheus] | Track the performance and health of systems, applications, and infrastructure. | -| signoz-logo[SigNoz][signoz] | Monitor application performance with an open-source observability tool. | -| tableau-logo[Tableau][tableau] | Connect to data sources, analyze data, and create interactive visualizations and dashboards. | - -## Query and administration - -| Name | Description | -|:--------------------------------------------------------------------------------------------------------------------------------------------:|-------------------------------------------------------------------------------------------------------------------------------------------| -| azure-data-studio-logo[Azure Data Studio][ads] | Query, manage, visualize, and develop databases across SQL Server, Azure SQL, and {PG}. | -| dbeaver-logo[DBeaver][dbeaver] | Connect to, manage, query, and analyze multiple database in a single interface with SQL editing, visualization, and administration tools. | -| forest-admin-logo[Forest Admin][forest-admin] | Create admin panels and dashboards for business applications. | -| hasura-logo[Hasura][hasura] | Instantly generate GraphQL APIs from databases with access control. | -| mode-logo[Mode Analytics][mode-analytics] | Analyze data, create reports, and share insights with teams. | -| neon-logo[Neon][neon] | Run a cloud-native, serverless {PG} database with automatic scaling. | -| pgadmin-logo[pgAdmin][pgadmin] | Manage, query, and administer {PG} databases through a graphical interface. 
| -| postgresql-logo[{PG}][postgresql] | Access and query data from external sources as if they were regular {PG} tables. | -| prisma-logo[Prisma][prisma] | Simplify database access with an open-source ORM for Node.js. | -| psql-logo[psql][psql] | Run SQL queries, manage databases, automate tasks, and interact directly with {PG}. | -| qlik-logo[Qlik Replicate][qlik-replicate] | Move and synchronize data across multiple database platforms. You an also [set up {PG} as a source][qlik-source]. | -| qstudio-logo[qStudio][qstudio] | Write and execute SQL queries, manage database objects, and analyze data in a user-friendly interface. | -| redash-logo[Redash][redash] | Query, visualize, and share data from multiple sources. | -| sqlalchemy-logo[SQLalchemy][sqlalchemy] | Manage database operations using a Python SQL toolkit and ORM. | -| sequelize-logo[Sequelize][sequelize] | Interact with SQL databases in Node.js using an ORM. | -| stepzen-logo[StepZen][stepzen] | Build and deploy GraphQL APIs with data from multiple sources. | -| typeorm-logo[TypeORM][typeorm] | Work with databases in TypeScript and JavaScript using an ORM. | - -## Secure connectivity to {CLOUD_LONG} - -| Name | Description | -|:------------------------------------:|-----------------------------------------------------------------------------| -| aws-logo[Amazon Web Services][aws] | Connect your other services and applications running in AWS to {CLOUD_LONG}. | -| corporate-data-center-logo[Corporate data center][data-center] | Connect your on-premise data center to {CLOUD_LONG}. -| google-cloud-logo[Google Cloud][google-cloud] | Connect your Google Cloud infrastructure to {CLOUD_LONG}. | -| azure-logo[Microsoft Azure][azure] | Connect your Microsoft Azure infrastructure to {CLOUD_LONG}. | - -## Workflow automation and no-code tools - -| Name | Description | -|:--------------------:|---------------------------------------------------------------------------| -| appsmith-logo[Appsmith][appsmith] | Create internal business applications with a low-code platform. | -| n8n-logo[n8n][n8n] | Automate workflows and integrate services with a no-code platform. | -| retool-logo[Retool][retool] | Build custom internal tools quickly using a drag-and-drop interface. | -| tooljet-logo[Tooljet][tooljet] | Develop internal tools and business applications with a low-code builder. | -| zapier-logo[Zapier][zapier] | Automate workflows by connecting different applications and services. 
| - -[ads]: /integrations/:currentVersion:/azure-data-studio/ -[airbyte]: https://docs.airbyte.com/integrations/sources/postgres -[amazon-sagemaker]: /integrations/:currentVersion:/amazon-sagemaker -[apache-airflow]: /integrations/:currentVersion:/apache-airflow -[apache-beam]: https://beam.apache.org/releases/javadoc/current/org/apache/beam/sdk/io/jdbc/JdbcIO.html -[apache-skywalking]: https://skywalking.apache.org/docs/main/next/en/setup/backend/backend-postgresql-monitoring/ -[apache-skywalking-storage]: https://skywalking.apache.org/docs/main/next/en/setup/backend/storages/postgresql/ -[apache-spark]: https://spark.apache.org/docs/3.5.4/sql-data-sources-jdbc.html -[appsmith]: https://docs.appsmith.com/connect-data/reference/querying-postgres -[auth-js]: https://authjs.dev/getting-started/adapters/pg?framework=next-js -[auth0]: https://auth0.com/blog/configuring-postgresql-as-auth0-custom-database/ -[aws]: /integrations/:currentVersion:/aws -[aws-lambda]: /integrations/:currentVersion:/aws-lambda -[azure]: /integrations/:currentVersion:/microsoft-azure -[azure-functions]: https://github.com/Azure/azure-functions-postgresql-extension -[azure-monitor]: https://learn.microsoft.com/en-us/azure/postgresql/flexible-server/concepts-monitoring -[cloudwatch]: /integrations/:currentVersion:/cloudwatch/ -[confluent]: https://docs.confluent.io/cloud/current/connectors/cc-postgresql-sink.html -[confluent-source]: https://docs.confluent.io/cloud/current/connectors/cc-postgresql-source.html -[cube-js]: https://cube.dev/integrations/Timescale-API -[data-center]: /integrations/:currentVersion:/corporate-data-center -[datadog]: /integrations/:currentVersion:/datadog/ -[dbt]: https://dbt-timescaledb.debruyn.dev/ -[dbeaver]: /integrations/:currentVersion:/dbeaver/ -[debezium]: /integrations/:currentVersion:/debezium/ -[decodable]: /integrations/:currentVersion:/decodable -[deepnote]: https://deepnote.com/docs/postgresql -[deltalake]: https://github.com/delta-io/delta/blob/master/connectors/sql-delta-import/readme.md -[deno-deploy]: https://docs.deno.com/deploy/manual/postgres/ -[django]: https://docs.djangoproject.com/en/5.1/ref/databases/#postgresql-notes -[electricsql]: https://electric-sql.com/docs/intro -[emqx]: https://docs.emqx.com/en/emqx/latest/data-integration/data-bridge-timescale.html -[estuary]: https://docs.estuary.dev/reference/Connectors/materialization-connectors/timescaledb/ -[firebase-wrapper]: https://firebase.google.com/products/data-connect -[fivetran]: /integrations/:currentVersion:/fivetran -[flink]: https://nightlies.apache.org/flink/flink-cdc-docs-release-3.1/docs/connectors/flink-sources/postgres-cdc/ -[flyway]: https://documentation.red-gate.com/flyway/reference/database-driver-reference/timescaledb -[forest-admin]: https://www.forestadmin.com/integrations/postgresql -[golang]: /getting-started/:currentVersion:/start-coding-with-timescale/ -[google-cloud]: /integrations/:currentVersion:/google-cloud -[grafana]: /integrations/:currentVersion:/grafana/ -[hasura]: https://hasura.io/docs/2.0/databases/postgres/timescale-cloud/ -[ibm-instana]: https://www.ibm.com/docs/en/instana-observability/current?topic=technologies-monitoring-postgresql -[jaeger]: https://www.jaegertracing.io/docs/2.0/storage/ -[java]: /getting-started/:currentVersion:/start-coding-with-timescale/ -[kafka]: /integrations/:currentVersion:/apache-kafka -[langchain]: https://api.python.langchain.com/en/latest/postgres/index.html# -[liquibase]: https://docs.liquibase.com/start/tutorials/postgresql/postgresql.html 
-[looker]: https://cloud.google.com/looker/docs/db-config-postgresql -[metabase]: https://www.metabase.com/data_sources/postgresql -[mode-analytics]: https://mode.com/integrations/postgresql/ -[n8n]: https://n8n.io/integrations/redis/and/timescaledb/ -[neon]: https://neon.com/docs/extensions/timescaledb -[new-relic]: https://docs.newrelic.com/docs/infrastructure/host-integrations/host-integrations-list/postgresql/postgresql-integration/ -[node-js]: /getting-started/:currentVersion:/start-coding-with-timescale/ -[okta]: https://help.okta.com/oag/en-us/content/topics/access-gateway/integrate-app-datastores.htm -[opentelemetry]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/postgresqlreceiver -[pgadmin]: /integrations/:currentVersion:/pgadmin/ -[postgresql]: /integrations/:currentVersion:/postgresql -[postgresql-integrations]: https://slashdot.org/software/p/PostgreSQL/integrations/ -[power-bi]: /integrations/:currentVersion:/power-bi -[prisma]: https://www.prisma.io/docs/orm/overview/databases/postgresql -[prometheus]: /integrations/:currentVersion:/prometheus -[kubernetes]: /integrations/:currentVersion:/kubernetes -[psql]: /integrations/:currentVersion:/psql/ -[pulumi]: https://www.pulumi.com/registry/packages/timescale/ -[python]: /getting-started/:currentVersion:/start-coding-with-timescale/ -[qlik-replicate]: https://help.qlik.com/en-US/replicate/November2024/Content/Replicate/Main/PostgreSQL/postgresql.htm#ar_postgresds_802412600_1325150 -[qlik-source]: https://help.qlik.com/en-US/replicate/November2024/Content/Replicate/Main/PostgreSQL/postgresql_source.htm -[qstudio]: /integrations/:currentVersion:/qstudio/ -[redash]: https://redash.io/data-sources/postgresql/ -[redpanda]: https://www.redpanda.com/blog/build-data-stream-detect-anomalies-timescale-kafka-connect -[render]: https://render.com/docs/postgresql -[retool]: https://retool.com/integrations/postgresql -[rsyslog]: https://www.rsyslog.com/doc/configuration/modules/ompgsql.html -[ruby]: /getting-started/:currentVersion:/start-coding-with-timescale/ -[rust]: https://github.com/sfackler/rust-postgres -[schemaspy]: https://wiki.postgresql.org/wiki/SchemaSpy -[signoz]: https://signoz.io/docs/integrations/postgresql/ -[sqlalchemy]: https://docs.sqlalchemy.org/en/20/dialects/postgresql.html -[sequelize]: https://sequelize.org/docs/v7/databases/postgres/ -[stepzen]: https://stepzen.com/docs/quick-start/with-database-postgresql -[stitch]: https://stitch-docs.netlify.app/docs/integrations/databases/postgresql -[streamlit]: https://docs.streamlit.io/develop/tutorials/databases/postgresql -[striim]: https://www.striim.com/connectors/postgresql/ -[superset]: https://superset.apache.org/docs/configuration/databases#timescaledb -[tableau]: /integrations/:currentVersion:/tableau/ -[terraform]: /integrations/:currentVersion:/terraform -[tooljet]: https://docs.tooljet.ai/docs/data-sources/postgresql/ -[typeorm]: https://typeorm.biunav.com/en/connection-options.html#postgres-cockroachdb-connection-options -[zapier]: https://zapier.com/apps/postgresql/integrations - diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index 98a4df6..73f9cfd 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -1,23 +1,22 @@ --- -title: Integrate Amazon Sagemaker with Tiger Cloud -sidebarTitle: Amazon Sagemaker +title: Integrate Amazon Sagemaker with Tiger +sidebarTitle: Amazon SageMaker description: Amazon SageMaker is a 
fully managed machine learning service. Integrate Amazon SageMaker with Tiger Cloud to store and analyze ML model data -products: [cloud, self_hosted] --- -import { CLOUD_LONG, CONSOLE, SERVICE_SHORT } from '/snippets/vars.mdx'; -import IntegrationPrereqCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import OldCreateHypertable from "/snippets/changes/_old-api-create-hypertable.mdx"; +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; -[Amazon SageMaker AI][Amazon Sagemaker] is a fully managed machine learning (ML) service. With SageMaker AI, data -scientists and developers can quickly and confidently build, train, and deploy ML models into a production-ready +[Amazon SageMaker AI][amazon-sagemaker] is a fully managed machine learning (ML) service. With SageMaker AI, data +scientists and developers can quickly and confidently build, train, and deploy ML models into a production-ready hosted environment. This page shows you how to integrate Amazon Sagemaker with a {SERVICE_LONG}. ## Prerequisites - + * Set up an [AWS Account][aws-sign-up] @@ -25,13 +24,11 @@ This page shows you how to integrate Amazon Sagemaker with a {SERVICE_LONG}. Create a table in {SERVICE_LONG} to store model predictions generated by SageMaker. - - 1. **Connect to your {SERVICE_LONG}** - For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][open-console]. For {SELF_LONG}, use [`psql`][psql]. + For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][console]. For {SELF_LONG}, use [`psql`][psql]. -1. **For better performance and easier real-time analytics, create a hypertable** +2. **For better performance and easier real-time analytics, create a hypertable** [Hypertables][about-hypertables] are {PG} tables that automatically partition your data by time. You interact with hypertables in the same way as regular {PG} tables, but with extra features that makes managing your @@ -43,27 +40,22 @@ Create a table in {SERVICE_LONG} to store model predictions generated by SageMak model_name TEXT NOT NULL, prediction DOUBLE PRECISION NOT NULL ) WITH ( - tsdb.hypertable, - tsdb.partition_column='time' + tsdb.hypertable ); ``` - - - + ## Create the code to inject data into a {SERVICE_LONG} - - 1. **Create a SageMaker Notebook instance** 1. In [Amazon SageMaker > Notebooks and Git repos][aws-notebooks-git-repos], click `Create Notebook instance`. - 1. Follow the wizard to create a default Notebook instance. + 2. Follow the wizard to create a default Notebook instance. -1. **Write a Notebook script that inserts data into your {SERVICE_LONG}** +2. **Write a Notebook script that inserts data into your {SERVICE_LONG}** - 1. When your Notebook instance is `inService,` click `Open JupyterLab` and click `conda_python3`. - 1. Update the following script with your [connection details][connection-info], then paste it in the Notebook. + 1. When your Notebook instance is `inService,` click `Open JupyterLab` and click `conda_python3`. + 2. Update the following script with your [connection details][connection-info], then paste it in the Notebook. ```python import psycopg2 @@ -103,10 +95,10 @@ Create a table in {SERVICE_LONG} to store model predictions generated by SageMak ) ``` -1. **Test your SageMaker script** +3. **Test your SageMaker script** 1. 
Run the script in your SageMaker notebook. - 1. Verify that the data is in your {SERVICE_SHORT} + 2. Verify that the data is in your {SERVICE_SHORT} Open an [SQL editor][run-queries] and check the `sensor_data` table: @@ -119,29 +111,17 @@ Create a table in {SERVICE_LONG} to store model predictions generated by SageMak | -- | -- | -- | |2025-02-06 16:56:34.370316+00| timescale-cloud-model| 0.95| - - -Now you can seamlessly integrate Amazon SageMaker with {CLOUD_LONG} to store and analyze time-series data generated by -machine learning models. You can also untegrate visualization tools like [Grafana][grafana-integration] or +Now you can seamlessly integrate Amazon SageMaker with {CLOUD_LONG} to store and analyze time-series data generated by +machine learning models. You can also untegrate visualization tools like [Grafana][grafana-integration] or [Tableau][tableau-integration] with {CLOUD_LONG} to create real-time dashboards of your model predictions. - - - - - -[Amazon Sagemaker]: https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html +[about-hypertables]: /use-timescale/hypertables/ +[amazon-sagemaker]: https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html +[aws-notebooks-git-repos]: https://console.aws.amazon.com/sagemaker/home#/notebooks-and-git-repos [aws-sign-up]: https://signin.aws.amazon.com/signup?request_type=register -[install-aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html -[install-python]: https://www.python.org/downloads/ -[install-postgresql]: https://www.postgresql.org/download/ -[console]: https://console.cloud.timescale.com/ -[grafana-integration]: /integrations/:currentVersion:/grafana/ -[tableau-integration]: /integrations/:currentVersion:/tableau/ -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[open-console]: https://console.cloud.timescale.com/dashboard/services -[psql]: /integrations/:currentVersion:/psql/ -[about-hypertables]: /use-timescale/:currentVersion:/hypertables/ -[aws-notebooks-git-repos]:https://console.aws.amazon.com/sagemaker/home#/notebooks-and-git-repos -[secure-vpc-aws]: /use-timescale/:currentVersion:/vpc/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ +[connection-info]: /integrations/find-connection-details +[console]: https://console.cloud.timescale.com/dashboard/services +[grafana-integration]: /integrations/integrate/grafana +[psql]: /integrations/integrate/psql +[run-queries]: /getting-started/run-queries-from-console +[tableau-integration]: /integrations/integrate/tableau \ No newline at end of file diff --git a/integrations/integrate/apache-airflow.mdx b/integrations/integrate/apache-airflow.mdx index 117da7e..b1bc6bf 100644 --- a/integrations/integrate/apache-airflow.mdx +++ b/integrations/integrate/apache-airflow.mdx @@ -1,150 +1,4 @@ --- -title: Integrate Apache Airflow with Tiger Cloud -sidebarTitle: Apache Airflow -description: Apache Airflow is a platform to programmatically author, schedule, and monitor workflows. Integrate Apache Airflow with Tiger Cloud and create a data pipeline -products: [cloud, self_hosted] -keywords: [connect, integrate, apache, airflow] +title: Apache Airflow +description: TBD --- - -import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -Apache Airflow® is a platform created by the community to programmatically author, schedule, and monitor workflows. 
- -A [DAG (Directed Acyclic Graph)][Airflow-DAG] is the core concept of Airflow, collecting [Tasks][Airflow-Task] together, -organized with dependencies and relationships to say how they should run. You declare a DAG in a Python file -in the `$AIRFLOW_HOME/dags` folder of your Airflow instance. - -This page shows you how to use a Python connector in a DAG to integrate Apache Airflow with a {SERVICE_LONG}. - -## Prerequisites - - - -* Install [Python3 and pip3][install-python-pip] -* Install [Apache Airflow][install-apache-airflow] - - Ensure that your Airflow instance has network access to {CLOUD_LONG}. - -This example DAG uses the `company` table you create in [Optimize time-series data in hypertables][create-a-table-in-timescale] - -## Install python connectivity libraries - -To install the Python libraries required to connect to {CLOUD_LONG}: - - - -1. **Enable {PG} connections between Airflow and {CLOUD_LONG}** - - ```bash - pip install psycopg2-binary - ``` - -1. **Enable {PG} connection types in the Airflow UI** - - ```bash - pip install apache-airflow-providers-postgres - ``` - - - -## Create a connection between Airflow and your {SERVICE_LONG} - -In your Airflow instance, securely connect to your {SERVICE_LONG}: - - - -1. **Run Airflow** - - On your development machine, run the following command: - - ```bash - airflow standalone - ``` - - The username and password for Airflow UI are displayed in the `standalone | Login with username` - line in the output. - -1. **Add a connection from Airflow to your {SERVICE_LONG}** - - 1. In your browser, navigate to `localhost:8080`, then select `Admin` > `Connections`. - 1. Click `+` (Add a new record), then use your [connection info][connection-info] to fill in - the form. The `Connection Type` is `Postgres`. - - - -## Exchange data between Airflow and your {SERVICE_LONG} - -To exchange data between Airflow and your {SERVICE_LONG}: - - - -1. **Create and execute a DAG** - - To insert data in your {SERVICE_LONG} from Airflow: - 1. In `$AIRFLOW_HOME/dags/timescale_dag.py`, add the following code: - - ```python - from airflow import DAG - from airflow.operators.python_operator import PythonOperator - from airflow.hooks.postgres_hook import PostgresHook - from datetime import datetime - - def insert_data_to_timescale(): - hook = PostgresHook(postgres_conn_id='the ID of the connenction you created') - conn = hook.get_conn() - cursor = conn.cursor() - """ - This could be any query. This example inserts data into the table - you create in: - - https://docs.tigerdata.com/getting-started/latest/try-key-features-timescale-products/#optimize-time-series-data-in-hypertables - """ - cursor.execute("INSERT INTO crypto_assets (symbol, name) VALUES (%s, %s)", - ('NEW/Asset','New Asset Name')) - conn.commit() - cursor.close() - conn.close() - - default_args = { - 'owner': 'airflow', - 'start_date': datetime(2023, 1, 1), - 'retries': 1, - } - - dag = DAG('timescale_dag', default_args=default_args, schedule_interval='@daily') - - insert_task = PythonOperator( - task_id='insert_data', - python_callable=insert_data_to_timescale, - dag=dag, - ) - ``` - This DAG uses the `company` table created in [Create regular {PG} tables for relational data][create-a-table-in-timescale]. - - 1. In your browser, refresh the Airflow UI. - 1. In `Search DAGS`, type `timescale_dag` and press ENTER. - 1. Press the play icon and trigger the DAG: - ![daily eth volume of assets](https://assets.timescale.com/docs/images/integrations-apache-airflow.png) -1. 
**Verify that the data appears in {CLOUD_LONG}** - - 1. In [{CONSOLE}][console], navigate to your service and click `SQL editor`. - 1. Run a query to view your data. For example: `SELECT symbol, name FROM company;`. - - You see the new rows inserted in the table. - - - -You have successfully integrated Apache Airflow with {CLOUD_LONG} and created a data pipeline. - - -[create-a-table-in-timescale]: /getting-started/:currentVersion:/try-key-features-timescale-products/#optimize-time-series-data-in-hypertables -[install-apache-airflow]: https://airflow.apache.org/docs/apache-airflow/stable/start.html -[install-python-pip]: https://docs.python.org/3/using/index.html -[console]: https://console.cloud.timescale.com/ -[create-service]: /cloud/get-started/create-services -[enable-timescaledb]: /self-hosted/:currentVersion:/install/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[Airflow-DAG]: https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/dags.html#dags -[Airflow-Task]:https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/tasks.html -[Airflow_UI]: localhost:8080 diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index 8d98595..748043f 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -1,195 +1,4 @@ --- -title: Integrate Apache Kafka with Tiger Cloud -sidebarTitle: Apache Kafka -description: Apache Kafka is a distributed event streaming platform used for high-performance data pipelines. Learn how to integrate Apache Kafka with Tiger Cloud to manage and analyze streaming data -products: [cloud, self_hosted] -keywords: [Apache Kafka, integrations] +title: Apache Kafka +description: TBD --- - -import { CLOUD_LONG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import IntegrationApacheKafka from "/snippets/integrations/_integration-apache-kafka-install.mdx"; -import OldCreateHypertable from "/snippets/changes/_old-api-create-hypertable.mdx"; - -[Apache Kafka][apache-kafka] is a distributed event streaming platform used for high-performance data pipelines, -streaming analytics, and data integration. [Apache Kafka Connect][kafka-connect] is a tool to scalably and reliably -stream data between Apache Kafka® and other data systems. Kafka Connect is an ecosystem of pre-written and maintained -Kafka Producers (source connectors) and Kafka Consumers (sink connectors) for data products and platforms like -databases and message brokers. - -This guide explains how to set up Kafka and Kafka Connect to stream data from a Kafka topic into your {SERVICE_LONG}. - -## Prerequisites - - - -- [Java8 or higher][java-installers] to run Apache Kafka - -## Install and configure Apache Kafka - -To install and configure Apache Kafka: - - - - - - - -Keep these terminals open, you use them to test the integration later. - -## Install the sink connector to communicate with {CLOUD_LONG} - -To set up Kafka Connect server, plugins, drivers, and connectors: - - - -1. **Install the {PG} connector** - - In another Terminal window, navigate to ``, then download and configure the {PG} sink and driver. 
- ```bash - mkdir -p "plugins/camel-postgresql-sink-kafka-connector" - curl https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-postgresql-sink-kafka-connector/3.21.0/camel-postgresql-sink-kafka-connector-3.21.0-package.tar.gz \ - | tar -xzf - -C "plugins/camel-postgresql-sink-kafka-connector" --strip-components=1 - curl -H "Accept: application/zip" https://jdbc.postgresql.org/download/postgresql-42.7.5.jar -o "plugins/camel-postgresql-sink-kafka-connector/postgresql-42.7.5.jar" - echo "plugin.path=`pwd`/plugins/camel-postgresql-sink-kafka-connector" >> "config/connect-distributed.properties" - echo "plugin.path=`pwd`/plugins/camel-postgresql-sink-kafka-connector" >> "config/connect-standalone.properties" - ``` - -1. **Start Kafka Connect** - - ```bash - export CLASSPATH=`pwd`/plugins/camel-postgresql-sink-kafka-connector/* - ./bin/connect-standalone.sh config/connect-standalone.properties - ``` - - Use the `-daemon` flag to run this process in the background. - -1. **Verify Kafka Connect is running** - - In yet another another Terminal window, run the following command: - ```bash - curl http://localhost:8083 - ``` - You see something like: - ```bash - {"version":"3.9.0","commit":"a60e31147e6b01ee","kafka_cluster_id":"J-iy4IGXTbmiALHwPZEZ-A"} - ``` - - - -## Create a table in your {SERVICE_LONG} to ingest Kafka events - -To prepare your {SERVICE_LONG} for Kafka integration: - - - -1. **[Connect][connect] to your {SERVICE_LONG}** - -1. **Create a hypertable to ingest Kafka events** - - ```sql - CREATE TABLE accounts ( - created_at TIMESTAMPTZ DEFAULT NOW(), - name TEXT, - city TEXT - ) WITH ( - tsdb.hypertable, - tsdb.partition_column='created_at' - ); - ``` - - - - -## Create the {CLOUD_LONG} sink - -To create a {CLOUD_LONG} sink in Apache Kafka: - - - - -1. **Create the connection configuration** - - 1. In the terminal running Kafka Connect, stop the process by pressing `Ctrl+C`. - - 1. Write the following configuration to `/config/timescale-standalone-sink.properties`, then update the `` with your [connection details][connection-info]. - - ```properties - name=timescale-standalone-sink - connector.class=org.apache.camel.kafkaconnector.postgresqlsink.CamelPostgresqlsinkSinkConnector - errors.tolerance=all - errors.deadletterqueue.topic.name=deadletter - tasks.max=10 - value.converter=org.apache.kafka.connect.storage.StringConverter - key.converter=org.apache.kafka.connect.storage.StringConverter - topics=accounts - camel.kamelet.postgresql-sink.databaseName= - camel.kamelet.postgresql-sink.username= - camel.kamelet.postgresql-sink.password= - camel.kamelet.postgresql-sink.serverName= - camel.kamelet.postgresql-sink.serverPort= - camel.kamelet.postgresql-sink.query=INSERT INTO accounts (name,city) VALUES (:#name,:#city) - ``` - 1. Restart Kafka Connect with the new configuration: - ```bash - export CLASSPATH=`pwd`/plugins/camel-postgresql-sink-kafka-connector/* - ./bin/connect-standalone.sh config/connect-standalone.properties config/timescale-standalone-sink.properties - ``` - -1. **Test the connection** - - To see your sink, query the `/connectors` route in a GET request: - - ```bash - curl -X GET http://localhost:8083/connectors - ``` - You see: - - ```bash - #["timescale-standalone-sink"] - ``` - - - -## Test the integration with {CLOUD_LONG} - -To test this integration, send some messages onto the `accounts` topic. You can do this using the kafkacat or kcat utility. - - - -1. 
**In the terminal running `kafka-console-producer.sh` enter the following json strings** - - ```bash - {"name":"Lola","city":"Copacabana"} - {"name":"Holly","city":"Miami"} - {"name":"Jolene","city":"Tennessee"} - {"name":"Barbara Ann ","city":"California"} - ``` - Look in your terminal running `kafka-console-consumer` to see the messages being processed. - -1. **Query your {SERVICE_LONG} for all rows in the `accounts` table** - - ```sql - SELECT * FROM accounts; - ``` - You see something like: - - | created_at | name | city | - | -- | --| -- | - |2025-02-18 13:55:05.147261+00 | Lola | Copacabana | - |2025-02-18 13:55:05.216673+00 | Holly | Miami | - |2025-02-18 13:55:05.283549+00 | Jolene | Tennessee | - |2025-02-18 13:55:05.35226+00 | Barbara Ann | California | - - - -You have successfully integrated Apache Kafka with {CLOUD_LONG}. - -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[apache-kafka]: https://kafka.apache.org/documentation/ -[install-kafka]: https://kafka.apache.org/quickstart -[java-installers]: https://www.oracle.com/java/technologies/downloads/ -[kafka-connect]: https://docs.confluent.io/platform/current/connect/index.html -[kraft]: https://developer.confluent.io/learn/kraft/ -[connect]: /getting-started/:currentVersion:/run-queries-from-console/ -[kcat]: https://github.com/edenhill/kcat diff --git a/integrations/integrate/aws-lambda.mdx b/integrations/integrate/aws-lambda.mdx index 9e425fb..5555027 100644 --- a/integrations/integrate/aws-lambda.mdx +++ b/integrations/integrate/aws-lambda.mdx @@ -1,209 +1,4 @@ --- -title: Integrate AWS Lambda with Tiger Cloud -sidebarTitle: AWS Lambda -description: With AWS Lambda, you can run code without provisioning or managing servers, and scale automatically. Integrate AWS Lambda with Tiger Cloud and inject data into your service -products: [cloud, self_hosted] -keywords: [connect, integrate, aws, lambda] +title: AWS Lambda +description: TBD --- - -import { CLOUD_LONG, CONSOLE, SERVICE_SHORT } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import OldCreateHypertable from "/snippets/changes/_old-api-create-hypertable.mdx"; - -[AWS Lambda][AWS-Lambda] is a serverless computing service provided by Amazon Web Services (AWS) that allows you to run -code without provisioning or managing servers, scaling automatically as needed. - -This page shows you how to integrate AWS Lambda with {SERVICE_LONG} to process and store time-series data efficiently. - -## Prerequisites - - - -* Set up an [AWS Account][aws-sign-up]. -* Install and configure [AWS CLI][install-aws-cli]. -* Install [NodeJS v18.x or later][install-nodejs]. - - -## Prepare your {SERVICE_LONG} to ingest data from AWS Lambda - -Create a table in {SERVICE_LONG} to store time-series data. - - - -1. **Connect to your {SERVICE_LONG}** - - For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][open-console]. For {SELF_LONG}, use [`psql`][psql]. - -1. **Create a hypertable to store sensor data** - - [Hypertables][about-hypertables] are {PG} tables that automatically partition your data by time. You interact - with hypertables in the same way as regular {PG} tables, but with extra features that make managing your - time-series data much easier. 
- - ```sql - CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id TEXT NOT NULL, - value DOUBLE PRECISION NOT NULL - ) WITH ( - tsdb.hypertable, - tsdb.partition_column='time' - ); - ``` - - - - -## Create the code to inject data into a {SERVICE_LONG} - -Write an AWS Lambda function in a Node.js project that processes and inserts time-series data into a {SERVICE_LONG}. - - - -1. **Initialize a new Node.js project to hold your Lambda function** - - ```shell - mkdir lambda-timescale && cd lambda-timescale - npm init -y - ``` - -1. **Install the {PG} client library in your project** - - ```shell - npm install pg - ``` - -1. **Write a Lambda Function that inserts data into your {SERVICE_LONG}** - - Create a file named `index.js`, then add the following code: - - ```javascript - const { - Client - } = require('pg'); - - exports.handler = async (event) => { - const client = new Client({ - host: process.env.TIMESCALE_HOST, - port: process.env.TIMESCALE_PORT, - user: process.env.TIMESCALE_USER, - password: process.env.TIMESCALE_PASSWORD, - database: process.env.TIMESCALE_DB, - }); - - try { - await client.connect(); - // - const query = ` - INSERT INTO sensor_data (time, sensor_id, value) - VALUES ($1, $2, $3); - `; - - const data = JSON.parse(event.body); - const values = [new Date(), data.sensor_id, data.value]; - - await client.query(query, values); - - return { - statusCode: 200, - body: JSON.stringify({ - message: 'Data inserted successfully!' - }), - }; - } catch (error) { - console.error('Error inserting data:', error); - return { - statusCode: 500, - body: JSON.stringify({ - error: 'Failed to insert data.' - }), - }; - } finally { - await client.end(); - } - - }; - ``` - - - -## Deploy your Node project to AWS Lambda - -To create an AWS Lambda function that injects data into your {SERVICE_LONG}: - - - -1. **Compress your code into a `.zip`** - - ```shell - zip -r lambda-timescale.zip . - ``` - -1. **Deploy to AWS Lambda** - - In the following example, replace `` with your [AWS IAM credentials][aws-iam-role], then use - AWS CLI to create a Lambda function for your project: - - ```shell - aws lambda create-function \ - --function-name TimescaleIntegration \ - --runtime nodejs14.x \ - --role \ - --handler index.handler \ - --zip-file fileb://lambda-timescale.zip - ``` - -1. **Set up environment variables** - - In the following example, use your [connection details][connection-info] to add your {SERVICE_LONG} connection settings to your Lambda function: - ```shell - aws lambda update-function-configuration \ - --function-name TimescaleIntegration \ - --environment "Variables={TIMESCALE_HOST=,TIMESCALE_PORT=, \ - TIMESCALE_USER=,TIMESCALE_PASSWORD=, \ - TIMESCALE_DB=}" - ``` - -1. **Test your AWS Lambda function** - - 1. Invoke the Lambda function and send some data to your {SERVICE_LONG}: - - ```shell - aws lambda invoke \ - --function-name TimescaleIntegration \ - --payload '{"body": "{\"sensor_id\": \"sensor-123\", \"value\": 42.5}"}' \ - --cli-binary-format raw-in-base64-out \ - response.json - ``` - - 1. Verify that the data is in your {SERVICE_SHORT}. - - Open an [SQL editor][run-queries] and check the `sensor_data` table: - - ```sql - SELECT * FROM sensor_data; - ``` - You see something like: - - | time | sensor_id | value | - |-- |-- |--------| - | 2025-02-10 10:58:45.134912+00 | sensor-123 | 42.5 | - - - - -You can now seamlessly ingest time-series data from AWS Lambda into {CLOUD_LONG}. 
- -[AWS-Lambda]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html -[lambda-functions]: https://console.aws.amazon.com/lambda/home#/functions -[aws-sign-up]: https://signin.aws.amazon.com/signup?request_type=register -[install-aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html -[install-nodejs]: https://nodejs.org/en/download -[install-postgresql]: https://www.postgresql.org/download/ -[console]: https://console.cloud.timescale.com/ -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[psql]: /integrations/:currentVersion:/psql/ -[about-hypertables]: /use-timescale/:currentVersion:/hypertables/ -[aws-iam-role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access-keys-admin-managed.html#admin-list-access-key -[open-console]: https://console.cloud.timescale.com/dashboard/services -[connection-info]: /integrations/:currentVersion:/find-connection-details/ diff --git a/integrations/integrate/aws.mdx b/integrations/integrate/aws.mdx index e520c9a..e8e3618 100644 --- a/integrations/integrate/aws.mdx +++ b/integrations/integrate/aws.mdx @@ -1,37 +1,4 @@ --- -title: Integrate Amazon Web Services with Tiger Cloud -sidebarTitle: Amazon Web Services -description: AWS enables you to build, run, and manage applications across cloud, hybrid, and edge environments with AI, analytics, security, and scalable infrastructure. Integrate AWS with Tiger Cloud using AWS Transit Gateway -products: [cloud] -price_plans: [scale, enterprise] -keywords: [AWS, integrations] +title: AWS +description: TBD --- - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import TransitGateway from "/snippets/integrations/_transit-gateway.mdx"; - -[Amazon Web Services (AWS)][aws] is a comprehensive cloud computing platform that provides on-demand infrastructure, storage, databases, AI, analytics, and security services to help businesses build, deploy, and scale applications in the cloud. - -This page explains how to integrate your AWS infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. - -## Prerequisites - - - -- Set up [AWS Transit Gateway][gtw-setup]. - -## Connect your AWS infrastructure to your {SERVICE_LONG}s - -To connect to {CLOUD_LONG}: - - - - - - - -You have successfully integrated your AWS infrastructure with {CLOUD_LONG}. - -[aws]: https://aws.amazon.com/ -[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ -[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index fc8cd5e..5f034b5 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -1,54 +1,4 @@ --- -title: Integrate Azure Data Studio with Tiger Cloud -sidebarTitle: Azure Data Studio -description: Azure Data Studio is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. Integrate Azure Data Studio with Tiger Cloud -products: [cloud, self_hosted] -keywords: [integrate] +title: Azure Data Studio +description: TBD --- - -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -[Azure Data Studio][azure-data-studio] is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. - -This page explains how to integrate Azure Data Studio with {CLOUD_LONG}. 
- -## Prerequisites - - - -* Download and install [Azure Data Studio][ms-azure-data-studio]. -* Install the [{PG} extension for Azure Data Studio][postgresql-azure-data-studio]. - -## Connect to your {SERVICE_LONG} with Azure Data Studio - -To connect to {CLOUD_LONG}: - - - -1. **Start `Azure Data Studio`** -1. **In the `SERVERS` page, click `New Connection`** -1. **Configure the connection** - 1. Select `PostgreSQL` for `Connection type`. - 1. Configure the server name, database, username, port, and password using your [connection details][connection-info]. - 1. Click `Advanced`. - - If you configured your {SERVICE_LONG} to connect using [stricter SSL mode][ssl-mode], set `SSL mode` to the - configured mode, then type the location of your SSL root CA certificate in `SSL root certificate filename`. - - 1. In the `Port` field, type the port number and click `OK`. - -1. **Click `Connect`** - - - - - -You have successfully integrated Azure Data Studio with {CLOUD_LONG}. - -[ms-azure-data-studio]: https://learn.microsoft.com/en-us/azure-data-studio/download-azure-data-studio?view=sql-server-ver16#install-azure-data-studio -[postgresql-azure-data-studio]: https://learn.microsoft.com/en-us/azure-data-studio/extensions/postgres-extension?view=sql-server-ver16 -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[azure-data-studio]: https://azure.microsoft.com/en-us/products/data-studio -[ssl-mode]: /use-timescale/:currentVersion:/security/strict-ssl/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ - diff --git a/integrations/integrate/cloudwatch.mdx b/integrations/integrate/cloudwatch.mdx index a4dfb30..c6de9f8 100644 --- a/integrations/integrate/cloudwatch.mdx +++ b/integrations/integrate/cloudwatch.mdx @@ -1,36 +1,4 @@ --- -title: Integrate Amazon CloudWatch with Tiger Cloud -sidebarTitle: Amazon CloudWatch -description: Amazon CloudWatch is a monitoring and observability service. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Amazon CloudWatch +title: Amazon CloudWatch +description: TBD --- - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import CloudWatchExporter from "/snippets/integrations/_cloudwatch-data-exporter.mdx"; -import ManageDataExporter from "/snippets/integrations/_manage-a-data-exporter.mdx"; - -[Amazon CloudWatch][cloudwatch] is a monitoring and observability service designed to help collect, analyze, and act on data from applications, infrastructure, and services running in AWS and on-premises environments. - -You can export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to CloudWatch. The available metrics include CPU usage, RAM usage, and storage. This integration is available for [Scale and Enterprise][pricing-plan-features] pricing tiers. - -This pages explains how to export telemetry data from your {SERVICE_LONG} into CloudWatch by creating a {CLOUD_LONG} data exporter, then attaching it to the {SERVICE_SHORT}. - -## Prerequisites - - - -- Sign up for [Amazon CloudWatch][cloudwatch-signup]. - -## Create a data exporter - -A {CLOUD_LONG} data exporter sends telemetry data from a {SERVICE_LONG} to a third-party monitoring -tool. 
You create an exporter on the [project level][projects], in the same AWS region as your {SERVICE_SHORT}: - - - - - -[projects]: /use-timescale/:currentVersion:/members/ -[pricing-plan-features]: /about/:currentVersion:/pricing-and-account-management/#features-included-in-each-plan -[cloudwatch]: https://aws.amazon.com/cloudwatch/ -[cloudwatch-signup]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/GettingSetup.html - diff --git a/integrations/integrate/corporate-data-center.mdx b/integrations/integrate/corporate-data-center.mdx index 110ffea..d41f6e8 100644 --- a/integrations/integrate/corporate-data-center.mdx +++ b/integrations/integrate/corporate-data-center.mdx @@ -1,40 +1,4 @@ --- -title: Integrate your data center with Tiger Cloud -sidebarTitle: Corporate data center -description: Integrate your on-premise data center with Tiger Cloud using AWS Transit Gateway -products: [cloud] -price_plans: [scale, enterprise] -keywords: [on-premise, integrations] +title: Corporate data center +description: TBD --- - - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import TransitGateway from "/snippets/integrations/_transit-gateway.mdx"; - -This page explains how to integrate your corporate on-premise infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. - -## Prerequisites - - - -- Set up [AWS Transit Gateway][gtw-setup]. - -## Connect your on-premise infrastructure to your {SERVICE_LONG}s - -To connect to {CLOUD_LONG}: - - - -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between your on-premise infrastructure and AWS. See the [Centralize network connectivity using AWS Transit Gateway][aws-onprem]. - - - - - -You have successfully integrated your Microsoft Azure infrastructure with {CLOUD_LONG}. - -[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ -[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html -[aws-onprem]: https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/centralize-network-connectivity-using-aws-transit-gateway.html diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index 51ca9ab..4b2004f 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -1,149 +1,4 @@ --- -title: Integrate Datadog with Tiger Cloud -sidebarTitle: Datadog -description: Datadog is a cloud-based monitoring and analytics platform. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Datadog -products: [cloud] -price_plans: [scale, enterprise] -keywords: [integrate] +title: Datadog +description: TBD --- - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import DataDogExporter from "/snippets/integrations/_datadog-data-exporter.mdx"; -import ManageDataExporter from "/snippets/integrations/_manage-a-data-exporter.mdx"; - -[Datadog][datadog] is a cloud-based monitoring and analytics platform that provides comprehensive visibility into -applications, infrastructure, and systems through real-time monitoring, logging, and analytics. - -This page explains how to: - -- [Monitor {SERVICE_LONG} metrics with Datadog][datadog-monitor-cloud] - - This integration is available for [Scale and Enterprise][pricing-plan-features] pricing plans. - -- Configure Datadog Agent to collect metrics for your {SERVICE_LONG} - - This integration is available for all pricing plans. 
- - -## Prerequisites - - - -- Sign up for [Datadog][datadog-signup]. - - You need your [Datadog API key][datadog-api-key] to follow this procedure. - -- Install [Datadog Agent][datadog-agent-install]. - -## Monitor {SERVICE_LONG} metrics with Datadog - -Export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to -Datadog using a {CLOUD_LONG} data exporter. The available metrics include CPU usage, RAM usage, and storage. - -### Create a data exporter - -A {CLOUD_LONG} data exporter sends telemetry data from a {SERVICE_LONG} to a third-party monitoring -tool. You create an exporter on the [project level][projects], in the same AWS region as your {SERVICE_SHORT}: - - - -### Manage a data exporter - -This section shows you how to attach, monitor, edit, and delete a data exporter. - - - -## Configure Datadog Agent to collect metrics for your {SERVICE_LONG}s - -Datadog Agent includes a [{PG} integration][datadog-postgres] that you use to collect detailed {PG} database -metrics about your {SERVICE_LONG}s. - -1. **Connect to your {SERVICE_LONG}** - - For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][open-console]. For {SELF_LONG}, use [`psql`][psql]. - -1. **Add the `datadog` user to your {SERVICE_LONG}** - - ```sql - create user datadog with password ''; - ``` - - ```sql - grant pg_monitor to datadog; - ``` - - ```sql - grant SELECT ON pg_stat_database to datadog; - ``` - -1. **Test the connection and rights for the datadog user** - - Update the following command with your [connection details][connection-info], then run it from the command line: - - ```bash - psql "postgres://datadog:@:/tsdb?sslmode=require" -c \ - "select * from pg_stat_database LIMIT(1);" \ - && echo -e "\e[0;32mPostgres connection - OK\e[0m" || echo -e "\e[0;31mCannot connect to Postgres\e[0m" - ``` - You see the output from the `pg_stat_database` table, which means you have given the correct rights to `datadog`. - -1. **Connect Datadog to your {SERVICE_LONG}** - - 1. Open the datadog agent {PG} configuration file, usually located at: - - **Linux**: `/etc/datadog-agent/conf.d/postgres.d/conf.yaml` - - **MacOS**: `/opt/datadog-agent/etc/conf.d/postgres.d/conf.yaml` - - **Windows**: `C:\ProgramData\Datadog\conf.d\postgres.d\conf.yaml` - - 1. Integrate Datadog Agent with your {SERVICE_LONG} - - Use your [connection details][connection-info] to update the following and add it to the datadog agent {PG} - configuration file: - - ```yaml - init_config: - - instances: - - host: - port: - username: datadog - password: > - dbname: tsdb - disable_generic_tags: true - ``` - -1. **Add {CLOUD_LONG} metrics** - - Tags to make it easier for build Datadog dashboards that combine metrics from the {CLOUD_LONG} data exporter and - Datadog Agent. Use your [connection details][connection-info] to update the following and add it to - `/datadog.yaml`: - - ```yaml - tags: - - project-id: - - service-id: - - region: - ``` - -1. **Restart Datadog Agent** - - See how to [Start, stop, and restart Datadog Agent][datadog-agent-restart]. - -Metrics for your {SERVICE_LONG} are now visible in Datadog. Check the Datadog {PG} integration documentation for a -comprehensive list of [metrics][datadog-postgres-metrics] collected. 
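If Datadog Agent reports no {PG} metrics after the restart, a quick sanity check from `psql` can confirm that the monitoring role is set up as described above. This is only a sketch: it assumes the role is named `datadog`, as in the steps on this page.

```sql
-- Confirm the datadog role is a member of pg_monitor (run as an admin user).
SELECT pg_has_role('datadog', 'pg_monitor', 'member') AS has_pg_monitor;

-- Run as the datadog user: confirms it can read database statistics.
SELECT datname, numbackends, xact_commit
FROM pg_stat_database
LIMIT 5;
```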
- -[datadog]: https://www.datadoghq.com/ -[datadog-agent-install]: https://docs.datadoghq.com/getting_started/agent/#installation -[datadog-postgres]: https://docs.datadoghq.com/integrations/postgres/ -[datadog-postgres-metrics]:https://docs.datadoghq.com/integrations/postgres/?tab=host#metrics -[datadog-postgres-setup]: https://docs.datadoghq.com/integrations/postgres/?tab=host#configuration -[datadog-signup]: https://www.datadoghq.com/ -[datadog-monitor-cloud]: /integrations/:currentVersion:/datadog/#monitor-timescale-cloud-service-metrics-with-datadog -[datadog-agent]: /integrations/:currentVersion:/datadog/#configure-datadog-agent-to-collect-metrics-for-your-timescale-cloud-services -[datadog-agent-restart]: https://docs.datadoghq.com/agent/configuration/agent-commands/#start-stop-and-restart-the-agent -[projects]: /use-timescale/:currentVersion:/members/ -[datadog-api-key]: https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token -[pricing-plan-features]: /about/:currentVersion:/pricing-and-account-management/#features-included-in-each-plan -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[open-console]: https://console.cloud.timescale.com/dashboard/services -[psql]: /integrations/:currentVersion:/psql/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index 3a87e66..7984641 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -1,50 +1,4 @@ --- -title: Integrate DBeaver with Tiger Cloud -sidebarTitle: DBeaver -description: DBeaver is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. Integrate DBeaver with Tiger Cloud -products: [cloud, self_hosted] -keywords: [integrate] +title: DBeaver +description: TBD --- - - -[DBeaver][dbeaver] is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. DBeaver provides an SQL editor, administration features, data and schema migration, and the ability to monitor database connection sessions. - -This page explains how to integrate DBeaver with your {SERVICE_LONG}. - -## Prerequisites - - - -* Download and install [DBeaver][dbeaver-downloads]. - -## Connect DBeaver to your {SERVICE_LONG} - -To connect to {CLOUD_LONG}: - - - -1. **Start `DBeaver`** -1. **In the toolbar, click the plug+ icon** -1. **In `Connect to a database` search for `TimescaleDB`** -1. **Select `TimescaleDB`, then click `Next`** -1. **Configure the connection** - - Use your [connection details][connection-info] to add your connection settings. - ![DBeaver integration](https://assets.timescale.com/docs/images/integrations-dbeaver.png) - - If you configured your {SERVICE_SHORT} to connect using a [stricter SSL mode][ssl-mode], in the `SSL` tab check - `Use SSL` and set `SSL mode` to the configured mode. Then, in the `CA Certificate` field type the location of the SSL - root CA certificate. - -1. **Click `Test Connection`. When the connection is successful, click `Finish`** - - Your connection is listed in the `Database Navigator`. - - - -You have successfully integrated DBeaver with {CLOUD_LONG}. 
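After connecting, you can run a quick check in the DBeaver SQL editor to confirm you are talking to the expected database and that the {TIMESCALE_DB} extension is installed. This is a minimal sketch and is not required for the integration:

```sql
-- Show the server version and the installed TimescaleDB extension, if any.
SELECT version();

SELECT extname, extversion
FROM pg_extension
WHERE extname = 'timescaledb';
```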
- -[dbeaver]: https://dbeaver.io/ -[dbeaver-downloads]: https://dbeaver.io/download/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[ssl-mode]: /use-timescale/:currentVersion:/security/strict-ssl/ diff --git a/integrations/integrate/debezium.mdx b/integrations/integrate/debezium.mdx index f6a17e0..6e00eae 100644 --- a/integrations/integrate/debezium.mdx +++ b/integrations/integrate/debezium.mdx @@ -1,91 +1,4 @@ --- -title: Integrate Debezium with Tiger Cloud -sidebarTitle: Debezium -description: Integrate Debezium with Tiger Cloud to enable change data capture in your Tiger Cloud service and streaming to Redis Streams -products: [self_hosted] -keywords: [Debezium, integrate] +title: Debezium +description: TBD --- - -import { CLOUD_LONG, COMPANY, SERVICE_LONG, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqsSelfOnly from "/snippets/prerequisites/_integration-prereqs-self-only.mdx"; -import IntegrationDebeziumDocker from "/snippets/integrations/_integration-debezium-docker.mdx"; -import IntegrationDebeziumSelfHostedConfig from "/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx"; - - -[Debezium][debezium] is an open-source distributed platform for change data capture (CDC). -It enables you to capture changes in a {SELF_LONG} instance and stream them to other systems in real time. - -Debezium can capture events about: - -- [Hypertables][hypertables]: captured events are rerouted from their chunk-specific topics to a single logical topic -named according to the following pattern: `..` -- [Continuous aggregates][caggs]: captured events are rerouted from their chunk-specific topics to a single logical topic -named according to the following pattern: `..` -- [Hypercore][hypercore]: If you enable hypercore, the Debezium {TIMESCALE_DB} connector does not apply any special -processing to data in the columnstore. Compressed chunks are forwarded unchanged to the next downstream job in the -pipeline for further processing as needed. Typically, messages with compressed chunks are dropped, and are not -processed by subsequent jobs in the pipeline. - -This limitation only affects changes to chunks in the columnstore. Changes to data in the rowstore work correctly. - - -This page explains how to capture changes in your database and stream them using Debezium on Apache Kafka. - -## Prerequisites - - - -- [Install Docker][install-docker] on your development machine. - -## Configure your database to work with Debezium - - - - - - To set up {SELF_LONG} to communicate with Debezium: - - - - - - - - ## Configure Debezium to work with your database - - Set up Kafka Connect server, plugins, drivers, and connectors: - - - - - - - - - - - - Debezium requires logical replication to be enabled. Currently, this is not enabled by default on {SERVICE_LONG}s. - We are working on enabling this feature as you read. As soon as it is live, these docs will be updated. - - - - - -And that is it, you have configured Debezium to interact with {COMPANY} products. 
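On {SELF_LONG}, you can check whether logical replication is already enabled and, if not, turn it on. A minimal sketch: `ALTER SYSTEM` requires superuser rights, and a change to `wal_level` only takes effect after a server restart.

```sql
-- Debezium requires wal_level = 'logical'.
SHOW wal_level;

-- Self-hosted only: enable logical replication, then restart PostgreSQL.
ALTER SYSTEM SET wal_level = 'logical';
```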
- -[hypertables]: /use-timescale/:currentVersion:/hypertables/ -[hypercore]: /use-timescale/:currentVersion:/hypercore/ -[caggs]: /use-timescale/:currentVersion:/continuous-aggregates/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[debezium]: https://debezium.io/ -[java-installers]: https://www.oracle.com/java/technologies/downloads/ -[debezium-install]: https://debezium.io/documentation/reference/stable/operations/debezium-server.html#_installation -[console]: https://console.cloud.timescale.com/dashboard/services -[redis-local]: https://redis.io/docs/getting-started/ -[redis-cloud]: https://redis.com/try-free/ -[connect]: /getting-started/:currentVersion:/run-queries-from-console/ -[kafka-install-configure]: /integrations/:currentVersion:/debezium#install-and-configure-apache-kafka -[debezium-configure-database]: /integrations/:currentVersion:/debezium##configure-your-database-to-work-with-debezium -[psql-connect]: /integrations/:currentVersion:/psql/#connect-to-your-service -[install-docker]: https://docs.docker.com/engine/install/ diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 7f98f25..69beb26 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -1,76 +1,4 @@ --- -title: Integrate Decodable with Tiger Cloud -sidebarTitle: Decodable -description: Decodable enables you to build, run, and manage data pipelines effortlessly. Seamlessly integrate Decodable with Tiger Cloud to unlock real-time data processing capabilities -products: [cloud, self_hosted] -keywords: [Decodable, Tiger Cloud] +title: Decodable +description: TBD --- - -import { CLOUD_LONG, CONSOLE } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - - -[Decodable][decodable] is a real-time data platform that allows you to build, run, and manage data pipelines effortlessly. - -![Decodable workflow](https://assets.timescale.com/docs/images/integrations-decodable-configuration.png) - -This page explains how to integrate Decodable with your {SERVICE_LONG} to enable efficient real-time streaming and analytics. - -## Prerequisites - - - -- Sign up for [Decodable][sign-up-decodable]. - - This page uses the pipeline you create using the [Decodable Quickstart Guide][decodable-quickstart]. - -## Connect Decodable to your {SERVICE_LONG} - -To stream data gathered in Decodable to a {SERVICE_LONG}: - - - -1. **Create the sync to pipe a Decodable data stream into your {SERVICE_LONG}** - - 1. Log in to your [Decodable account][decodable-app]. - 1. Click `Connections`, then click `New Connection`. - 1. Select a `PostgreSQL sink` connection type, then click `Connect`. - 1. Using your [connection details][connection-info], fill in the connection information. - - Leave `schema` and `JDBC options` empty. - 1. Select the `http_events` source stream, then click `Next`. - - Decodable creates the table in your {SERVICE_LONG} and starts streaming data. - - - -1. **Test the connection** - - 1. Connect to your {SERVICE_LONG}. - - For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][open-console]. For {SELF_LONG}, use [`psql`][psql]. - - 1. Check the data from Decodable is streaming into your {SERVICE_LONG}. - - ```sql - SELECT * FROM http_events; - ``` - You see something like: - - ![Decodable workflow](https://assets.timescale.com/docs/images/integrations-decodable-data-in-service.png) - - - - -You have successfully integrated Decodable with {CLOUD_LONG}. 
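Once rows are arriving, an aggregate query is a convenient way to confirm that the stream keeps flowing. The column name below is a placeholder — use the timestamp column Decodable created in `http_events`:

```sql
-- Count events per minute; `event_time` is a hypothetical column name.
SELECT time_bucket('1 minute', event_time) AS bucket,
       count(*) AS events
FROM http_events
GROUP BY bucket
ORDER BY bucket DESC
LIMIT 10;
```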
- - -[decodable]: https://www.decodable.co/ -[decodable-app]:https://app.decodable.co/-/accounts -[sign-up-decodable]: https://auth.decodable.co/u/signup/ -[decodable-quickstart]: https://docs.decodable.co/get-started/quickstart.html -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[open-console]: https://console.cloud.timescale.com/dashboard/services -[psql]: /integrations/:currentVersion:/psql/ -[about-hypertables]: /use-timescale/:currentVersion:/hypertables/ diff --git a/integrations/integrate/find-connection-details.mdx b/integrations/integrate/find-connection-details.mdx deleted file mode 100644 index b4f87f7..0000000 --- a/integrations/integrate/find-connection-details.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Find your connection details -sidebarTitle: Connection details -description: You connect to Tiger Cloud or self-hosted TimescaleDB using your connection details. Learn where to find them -products: [cloud, mst, self_hosted] -keywords: [connect, Managed Service for TimescaleDB, Timescale] ---- - -import { CLOUD_LONG, CONSOLE, MST_CONSOLE_LONG, PG, SELF_LONG, SERVICE_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; - - -To connect to your {SERVICE_LONG} or {SELF_LONG}, you need at least the following: - -- Hostname -- Port -- Username -- Password -- Database name - -Find the connection details based on your deployment type: - - - - - -## Connect to your service - -Retrieve the connection details for your {SERVICE_LONG}: - -- **In `-credentials.txt`**: - - All connection details are supplied in the configuration file you download when you create a new {SERVICE_SHORT}. - -- **In {CONSOLE}**: - - Open the [`Services`][console-services] page and select your {SERVICE_SHORT}. The connection details, except the password, are available in `Service info` > `Connection info` > `More details`. If necessary, click `Forgot your password?` to get a new one. - - ![Tiger Cloud service connection details](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-service-connection-details.png) - -## Find your project and service ID - -To retrieve the connection details for your {CLOUD_LONG} project and {SERVICE_LONG}: - - - -1. **Retreive your project ID**: - - In [{CONSOLE}][console-services], click your project name in the upper left corner, then click `Copy` next to the project ID. - ![Retrive the project id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-console-project-id.png) - -1. **Retrieve your service ID**: - - Click the dots next to the service, then click `Copy` next to the service ID. - ![Retrive the service id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-console-service-id.png) - - - - - - - -Find the connection details in the [{PG} configuration file][postgres-config] or by asking your database administrator. The `postgres` superuser, created during {PG} installation, has all the permissions required to run procedures in this documentation. However, it is recommended to create other users and assign permissions on the need-only basis. - - - - - -In the `Services` page of the {MST_CONSOLE_LONG}, click the service you want to connect to. 
You see the connection details: - -![MST connection details](https://assets.timescale.com/docs/images/mst-connection-info.png) - - - - - -[console-services]: https://console.cloud.timescale.com/dashboard/services -[postgres-config]: https://www.postgresql.org/docs/current/runtime-config-file-locations.html diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index 329fd59..f13c87e 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -1,93 +1,4 @@ --- -title: Integrate Fivetran with Tiger Cloud -sidebarTitle: Fivetran -description: Fivetran is a fully managed data pipeline platform that simplifies extract, transform, and load processes. Integrate Fivetran with Tiger Cloud for seamless data synchronization -products: [cloud, self_hosted] -keywords: [Fivetran, PostgreSQL, connection, integrate] - +title: Fivetran +description: TBD --- - -import { CLOUD_LONG, CONSOLE, SERVICE_SHORT } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -[Fivetran][fivetran] is a fully managed data pipeline platform that simplifies ETL (Extract, Transform, Load) processes -by automatically syncing data from multiple sources to your data warehouse. - -![Fivetran data in a service](https://assets.timescale.com/docs/images/integrations-fivetran-sync-data.png) - -This page shows you how to inject data from data sources managed by Fivetran into a {SERVICE_LONG}. - -## Prerequisites - - - -* Sign up for [Fivetran][sign-up-fivetran] - -## Set your {SERVICE_LONG} as a destination in Fivetran - -To be able to inject data into your {SERVICE_LONG}, set it as a destination in Fivetran: - -![Fivetran data destination](https://assets.timescale.com/docs/images/integrations-fivetran-destination-timescal-cloud.png) - - - -1. In [Fivetran Dashboard > Destinations][fivetran-dashboard-destinations], click `Add destination`. -1. Search for the `PostgreSQL` connector and click `Select`. Add the destination name and click `Add`. -1. In the `PostgreSQL` setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. - - Fivetran validates the connection settings and sets up any security configurations. -1. Click `View Destination`. - - The `Destination Connection Details` page opens. - - - -## Set up a Fivetran connection as your data source - -In a real world scenario, you can select any of the over 600 connectors available in Fivetran to sync data with your -{SERVICE_LONG}. This section shows you how to inject the logs for your Fivetran connections into your {SERVICE_LONG}. - -![Fivetran data source](https://assets.timescale.com/docs/images/integrations-fivetran-data-source.png) - - - -1. In [Fivetran Dashboard > Connections][fivetran-dashboard-connectors], click `Add connector`. -1. Search for the `Fivetran Platform` connector, then click `Setup`. -1. Leave the default schema name, then click `Save & Test`. - - You see `All connection tests passed!` -1. Click `Continue`, enable `Add Quickstart Data Model` and click `Continue`. - - Your Fivetran connection is connected to your {SERVICE_LONG} destination. -1. Click `Start Initial Sync`. - - Fivetran creates the log schema in your {SERVICE_SHORT} and syncs the data to your {SERVICE_SHORT}. - - - -## View Fivetran data in your {SERVICE_LONG} - -To see data injected by Fivetran into your {SERVICE_LONG}: - - - -1. 
In [data mode][portal-data-mode] in {CONSOLE}, select your {SERVICE_SHORT}, then run the following query: - ```sql - SELECT * - FROM fivetran_log.account - LIMIT 10; - ``` - You see something like the following: - - ![Fivetran data in a service](https://assets.timescale.com/docs/images/integrations-fivetran-view-data-in-service.png) - - - -You have successfully integrated Fivetran with {CLOUD_LONG}. - -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[fivetran]: https://fivetran.com/docs/getting-started -[sign-up-fivetran]: https://www.fivetran.com/ -[fivetran-dashboard-destinations]: https://fivetran.com/dashboard/destinations -[fivetran-dashboard-connectors]: https://fivetran.com/dashboard/connections -[portal-data-mode]: https://console.cloud.timescale.com/dashboard/services?popsql diff --git a/integrations/integrate/google-cloud.mdx b/integrations/integrate/google-cloud.mdx index d11a5f1..ab6cbd6 100644 --- a/integrations/integrate/google-cloud.mdx +++ b/integrations/integrate/google-cloud.mdx @@ -1,44 +1,4 @@ --- -title: Integrate Google Cloud with Tiger Cloud -sidebarTitle: Google Cloud -description: Google Cloud enables you to deploy, manage, and scale cloud-based applications, databases, and data processing workflows. Integrate Google Cloud with Tiger Cloud using AWS Transit Gateway -products: [cloud] -price_plans: [scale, enterprise] -keywords: [Google Cloud, integrations] +title: Google Cloud +description: TBD --- - - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import TransitGateway from "/snippets/integrations/_transit-gateway.mdx"; - - -[Google Cloud][google-cloud] is a suite of cloud computing services, offering scalable infrastructure, AI, analytics, databases, security, and developer tools to help businesses build, deploy, and manage applications. - -This page explains how to integrate your Google Cloud infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. - -## Prerequisites - - - -- Set up [AWS Transit Gateway][gtw-setup]. - -## Connect your Google Cloud infrastructure to your {SERVICE_LONG}s - -To connect to {CLOUD_LONG}: - - - -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between Google Cloud and AWS. See [Connect HA VPN to AWS peer gateways][gcp-aws]. - - - - - -You have successfully integrated your Google Cloud infrastructure with {CLOUD_LONG}. - -[google-cloud]: https://cloud.google.com/?hl=en -[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ -[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html -[gcp-aws]: https://cloud.google.com/network-connectivity/docs/vpn/how-to/connect-ha-vpn-aws-peer-gateway diff --git a/integrations/integrate/grafana.mdx b/integrations/integrate/grafana.mdx index 8e3510e..89826e3 100644 --- a/integrations/integrate/grafana.mdx +++ b/integrations/integrate/grafana.mdx @@ -1,187 +1,4 @@ --- -title: Integrate Grafana and Tiger Cloud -sidebarTitle: Grafana -description: Grafana enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they’re stored. 
Integrate Grafana with Tiger Cloud -products: [cloud, self_hosted] -keywords: [Grafana, visualizations, analytics, monitoring] +title: Grafana +description: TBD --- - -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import GrafanaConnect from "/snippets/integrations/_grafana-connect.mdx"; - -[Grafana](https://grafana.com/docs/) enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they’re stored. - -This page shows you how to integrate Grafana with a {SERVICE_LONG}, create a dashboard and panel, then visualize geospatial data. - -## Prerequisites - - - -* Install [self-managed Grafana][grafana-self-managed] or sign up for [Grafana Cloud][grafana-cloud]. - - - -## Create a Grafana dashboard and panel - -Grafana is organized into dashboards and panels. A dashboard represents a -view into the performance of a system, and each dashboard consists of one or -more panels, which represent information about a specific metric related to -that system. - -To create a new dashboard: - - - -1. **On the `Dashboards` page, click `New` and select `New dashboard`** - -1. **Click `Add visualization`** - -1. **Select the data source** - - Select your {SERVICE_SHORT} from the list of pre-configured data sources or configure a new one. - -1. **Configure your panel** - - Select the visualization type. The type defines specific fields to configure in addition to standard ones, such as the panel name. - -1. **Run your queries** - - You can edit the queries directly or use the built-in query editor. If you are visualizing time-series data, select `Time series` in the `Format` drop-down. - -1. **Click `Save dashboard`** - - You now have a dashboard with one panel. Add more panels to a dashboard by clicking `Add` at the top right and selecting `Visualization` from the drop-down. - - - -## Use the time filter function - -Grafana time-series panels include a time filter: - - - -1. **Call `$__timefilter()` to link the user interface construct in a Grafana panel with the query** - - For example, to set the `pickup_datetime` column as the filtering range for your visualizations: - - ```sql - SELECT - --1-- - time_bucket('1 day', pickup_datetime) AS "time", - --2-- - COUNT(*) - FROM rides - WHERE $__timeFilter(pickup_datetime) - ``` - -1. **Group your visualizations and order the results by [time buckets][time-buckets]** - - In this case, the `GROUP BY` and `ORDER BY` statements reference `time`. - - For example: - - ```sql - SELECT - --1-- - time_bucket('1 day', pickup_datetime) AS time, - --2-- - COUNT(*) - FROM rides - WHERE $__timeFilter(pickup_datetime) - GROUP BY time - ORDER BY time - ``` - - When you visualize this query in Grafana, you see this: - - ![Tiger Cloud service and Grafana query results](https://assets.timescale.com/docs/images/grafana_query_results.png) - - You can adjust the `time_bucket` function and compare the graphs: - - ```sql - SELECT - --1-- - time_bucket('5m', pickup_datetime) AS time, - --2-- - COUNT(*) - FROM rides - WHERE $__timeFilter(pickup_datetime) - GROUP BY time - ORDER BY time - ``` - - When you visualize this query, it looks like this: - - ![Tiger Cloud service and Grafana query results in time buckets](https://assets.timescale.com/docs/images/grafana_query_results_5m.png) - - - -## Visualize geospatial data - -Grafana includes a Geomap panel so you can see geospatial data -overlaid on a map. This can be helpful to understand how data -changes based on its location. 
- -This section visualizes taxi rides in Manhattan, where the distance traveled -was greater than 5 miles. It uses the same query as the [NYC Taxi Cab][nyc-taxi] -tutorial as a starting point. - - - -1. **Add a geospatial visualization** - - 1. In your Grafana dashboard, click `Add` > `Visualization`. - - 1. Select `Geomap` in the visualization type drop-down at the top right. - -1. **Configure the data format** - - 1. In the `Queries` tab below, select your data source. - - 1. In the `Format` drop-down, select `Table`. - - 1. In the mode switcher, toggle `Code` and enter the query, then click `Run`. - - For example: - - ```sql - SELECT time_bucket('5m', rides.pickup_datetime) AS time, - rides.trip_distance AS value, - rides.pickup_latitude AS latitude, - rides.pickup_longitude AS longitude - FROM rides - WHERE rides.trip_distance > 5 - GROUP BY time, - rides.trip_distance, - rides.pickup_latitude, - rides.pickup_longitude - ORDER BY time - LIMIT 500; - ``` - -1. **Customize the Geomap settings** - - With default settings, the visualization uses green circles of the fixed size. Configure at least the following for a more representative view: - - - `Map layers` > `Styles` > `Size` > `value`. - - This changes the size of the circle depending on the value, with bigger circles representing bigger values. - - - `Map layers` > `Styles` > `Color` > `value`. - - - `Thresholds` > Add `threshold`. - - Add thresholds for 7 and 10, to mark rides over 7 and 10 miles in different colors, respectively. - - You now have a visualization that looks like this: - - ![Tiger Cloud service and Grafana integration](https://assets.timescale.com/docs/images/timescale-grafana-integration.png) - - - - -[nyc-taxi]: /tutorials/real-time-analytics-transport/ -[grafana-website]: https://www.grafana.com -[time-buckets]: /use-timescale/time-buckets/ -[grafana-self-managed]: https://grafana.com/get/?tab=self-managed -[grafana-cloud]: https://grafana.com/get/ diff --git a/integrations/integrate/kubernetes.mdx b/integrations/integrate/kubernetes.mdx index bc19ea5..270cb6b 100644 --- a/integrations/integrate/kubernetes.mdx +++ b/integrations/integrate/kubernetes.mdx @@ -1,18 +1,16 @@ --- -title: Integrate Kubernetes with Tiger Cloud +title: Integrate Kubernetes with Tiger sidebarTitle: Kubernetes description: Learn how to integrate Kubernetes with Tiger Cloud to enable seamless deployment and scaling of your Postgres workloads -products: [cloud, self_hosted] -keywords: [Kubernetes, Tiger Cloud, PostgreSQL, container orchestration] --- import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; -import KubernetesPrereqs from "/snippets/prerequisites/_kubernetes-prereqs.mdx"; -import KubernetesInstallSelf from "/snippets/integrations/_kubernetes-install-self-hosted.mdx"; +import KubernetesPrereqs from '/snippets/prerequisites/_kubernetes-prereqs.mdx'; +import KubernetesInstallSelf from '/snippets/procedures/_kubernetes-install-self-hosted.mdx'; [Kubernetes][kubernetes] is an open-source container orchestration system that automates the deployment, scaling, and management of containerized applications. You can connect Kubernetes to {CLOUD_LONG}, and deploy {TIMESCALE_DB} within your Kubernetes clusters. -This guide explains how to connect a Kubernetes cluster to {CLOUD_LONG}, configure persistent storage, and deploy {TIMESCALE_DB} in your kubernetes cluster. +This guide explains how to connect a Kubernetes cluster to {CLOUD_LONG}, configure persistent storage, and deploy {TIMESCALE_DB} in your kubernetes cluster. 
## Prerequisites @@ -20,16 +18,14 @@ To follow the steps on this page: -## Integrate {TIMESCALE_DB} in a Kubernetes cluster +## Integrate {TIMESCALE_DB} in a Kubernetes cluster - + To connect your Kubernetes cluster to your {SERVICE_LONG}: - - 1. **Create a default namespace for your {CLOUD_LONG} components** 1. Create a namespace: @@ -38,7 +34,7 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: kubectl create namespace timescale ``` - 1. Set this namespace as the default for your session: + 2. Set this namespace as the default for your session: ```shell kubectl config set-context --current --namespace=timescale @@ -46,7 +42,7 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: For more information, see [Kubernetes Namespaces][kubernetes-namespace]. -1. **Create a Kubernetes secret that stores your {SERVICE_LONG} credentials** +2. **Create a Kubernetes secret that stores your {SERVICE_LONG} credentials** Update the following command with your [connection details][connection-info], then run it: @@ -59,12 +55,12 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: --from-literal=PGPASSWORD= ``` -1. **Configure network access to {CLOUD_LONG}** +3. **Configure network access to {CLOUD_LONG}** - - **Managed Kubernetes**: outbound connections to external databases like {CLOUD_LONG} work by default. - Make sure your cluster’s security group or firewall rules allow outbound traffic to {CLOUD_LONG} IP. + - **Managed Kubernetes**: outbound connections to external databases like {CLOUD_LONG} work by default. + Make sure your cluster's security group or firewall rules allow outbound traffic to {CLOUD_LONG} IP. - - **Self-hosted Kubernetes**: If your cluster is behind a firewall or running on-premise, you may need to allow + - **Self-hosted Kubernetes**: If your cluster is behind a firewall or running on-premise, you may need to allow egress traffic to {CLOUD_LONG}. Test connectivity using your [connection details][connection-info]: ```shell @@ -73,7 +69,7 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: If the connection fails, check your firewall rules. -1. **Create a Kubernetes deployment that can access your {CLOUD_LONG}** +4. **Create a Kubernetes deployment that can access your {CLOUD_LONG}** Run the following command to apply the deployment: @@ -102,9 +98,9 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: EOF ``` -1. **Test the connection** +5. **Test the connection** - 1. Create and run a pod that uses the [connection details][connection-info] you added to `timescale-secret` in + 1. Create and run a pod that uses the [connection details][connection-info] you added to `timescale-secret` in the `timescale` namespace: ```shell @@ -125,11 +121,9 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: You start a `psql` session connected to your {SERVICE_LONG}. - - - + @@ -139,5 +133,6 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: You have successfully integrated Kubernetes with {CLOUD_LONG}. 
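From the `psql` session opened in the test pod, you can optionally confirm that {TIMESCALE_DB} features work from inside the cluster. The sketch below mirrors the hypertable syntax used elsewhere in these docs and assumes a recent {TIMESCALE_DB} version (on older versions, use `create_hypertable()` instead); `k8s_smoke_test` is just an example name:

```sql
-- Throwaway smoke test run from the test pod's psql session.
CREATE TABLE k8s_smoke_test (
    time timestamptz NOT NULL DEFAULT now(),
    value double precision
) WITH (
    tsdb.hypertable,
    tsdb.partition_column='time'
);
INSERT INTO k8s_smoke_test (value) VALUES (42);
SELECT * FROM k8s_smoke_test;
DROP TABLE k8s_smoke_test;
```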
-[connection-info]: /integrations/:currentVersion:/find-connection-details/ +[connection-info]: /integrations/find-connection-details [kubernetes]: https://kubernetes.io/ +[kubernetes-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ \ No newline at end of file diff --git a/integrations/integrate/microsoft-azure.mdx b/integrations/integrate/microsoft-azure.mdx index 3067304..7bf8212 100644 --- a/integrations/integrate/microsoft-azure.mdx +++ b/integrations/integrate/microsoft-azure.mdx @@ -1,43 +1,4 @@ --- -title: Integrate Microsoft Azure with Tiger Cloud -sidebarTitle: Microsoft Azure -description: Microsoft Azure enables you to build, deploy, and manage applications across cloud, hybrid, and edge environments. Integrate Microsoft Azure with Tiger Cloud using AWS Transit Gateway -products: [cloud] -price_plans: [scale, enterprise] -keywords: [Azure, integrations] +title: Microsoft Azure +description: TBD --- - -import IntegrationPrereqsCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; -import TransitGateway from "/snippets/integrations/_transit-gateway.mdx"; - - -[Microsoft Azure][azure] is a cloud computing platform and services suite, offering infrastructure, AI, analytics, security, and developer tools to help businesses build, deploy, and manage applications. - -This page explains how to integrate your Microsoft Azure infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. - -## Prerequisites - - - -- Set up [AWS Transit Gateway][gtw-setup]. - -## Connect your Microsoft Azure infrastructure to your {SERVICE_LONG}s - -To connect to {CLOUD_LONG}: - - - -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between Azure and AWS. See the [AWS architectural documentation][azure-aws] for details. - - - - - -You have successfully integrated your Microsoft Azure infrastructure with {CLOUD_LONG}. - -[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ -[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html -[azure]: https://azure.microsoft.com/en-gb/ -[azure-aws]: https://aws.amazon.com/blogs/modernizing-with-aws/designing-private-network-connectivity-aws-azure/ diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index ebefa39..fd7188f 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -1,44 +1,4 @@ --- -title: Integrate pgAdmin with Tiger Cloud -sidebarTitle: pgAdmin -description: pgAdmin is a feature-rich open-source administration and development platform for PostgreSQL. Integrate pgadmin with Tiger Cloud -products: [cloud, self_hosted] -keywords: [integrate] +title: pgAdmin +description: TBD --- - - -[pgAdmin][pgadmin] is a feature-rich open-source administration and development platform for {PG}. It is available for Chrome, Firefox, Edge, and -Safari browsers, or can be installed on Microsoft Windows, Apple macOS, or various Linux flavors. - -![Tiger Cloud pgadmin](https://assets.timescale.com/docs/images/timescale-cloud-pgadmin.png) - -This page explains how to integrate pgAdmin with your {SERVICE_LONG}. - -## Prerequisites - - - -- [Download][download-pgadmin] and install pgAdmin. - -## Connect pgAdmin to your {SERVICE_LONG} - -To connect to {CLOUD_LONG}: - - - -1. **Start pgAdmin** -1. **In the `Quick Links` section of the `Dashboard` tab, click `Add New Server`** -1. 
**In `Register - Server` > `General`, fill in the `Name` and `Comments` fields with the server name and description, respectively** -1. **Configure the connection** - 1. In the `Connection` tab, configure the connection using your [connection details][connection-info]. - 1. If you configured your {SERVICE_SHORT} to connect using a [stricter SSL mode][ssl-mode], then in the `SSL` tab check `Use SSL`, set `SSL mode` to the configured mode, and in the `CA Certificate` field type the location of the SSL root CA certificate to use. -1. **Click `Save`** - - - -You have successfully integrated pgAdmin with {CLOUD_LONG}. - -[pgadmin]: https://www.pgadmin.org/ -[download-pgadmin]: https://www.pgadmin.org/download/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[ssl-mode]: /use-timescale/:currentVersion:/security/strict-ssl/ diff --git a/integrations/integrate/postgresql.mdx b/integrations/integrate/postgresql.mdx index b4a0322..f90f693 100644 --- a/integrations/integrate/postgresql.mdx +++ b/integrations/integrate/postgresql.mdx @@ -1,13 +1,4 @@ --- -title: Integrate with PostgreSQL -sidebarTitle: PostgreSQL -description: Query any other Postgres database or another Tiger Cloud service from your service by using Postgres foreign data wrappers -products: [cloud, self_hosted] -keywords: [integrate, foreign data wrappers, fdw] -tags: [change] +title: PostgreSQL +description: TBD --- - -import FDW from "/snippets/integrations/_foreign-data-wrappers.mdx"; - - - diff --git a/integrations/integrate/power-bi.mdx b/integrations/integrate/power-bi.mdx index d89f7cd..54d7eaf 100644 --- a/integrations/integrate/power-bi.mdx +++ b/integrations/integrate/power-bi.mdx @@ -1,74 +1,62 @@ --- title: Integrate Power BI with Tiger Cloud sidebarTitle: Power BI -description: Integrate Tiger Cloud with Power BI for advanced data visualization. -products: [cloud, self_hosted] -keywords: [Power BI, visualizations, analysis, real-time] +description: Integrate Power BI with Tiger Cloud --- -import { CLOUD_LONG, PG, SERVICE_LONG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - +import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; [Power BI][power-bi] is a business analytics tool for visualizing data, creating interactive reports, and sharing insights across an organization. -This page explains how to integrate Power BI with {CLOUD_LONG} using the {PG} ODBC driver, so that you can build interactive reports based on the data in your {SERVICE_LONG}. +This page explains how to integrate Power BI with {CLOUD_LONG} using the {PG} ODBC driver, so that you can build interactive reports based on the data in your {SERVICE_LONG}. ## Prerequisites - Download [Power BI Desktop][power-bi-install] on your Microsoft Windows machine. -- Install the [PostgreSQL ODBC driver][postgresql-odbc-driver]. +- Install the [{PG} ODBC driver][postgresql-odbc-driver]. ## Add your {SERVICE_LONG} as an ODBC data source -Use the PostgreSQL ODBC driver to connect Power BI to {CLOUD_LONG}. - - +Use the {PG} ODBC driver to connect Power BI to {CLOUD_LONG}. 1. **Open the ODBC data sources** On your Windows machine, search for and select `ODBC Data Sources`. -1. **Connect to your {SERVICE_LONG}** +2. **Connect to your {SERVICE_LONG}** 1. Under `User DSN`, click `Add`. - 1. Choose `PostgreSQL Unicode` and click `Finish`. - 1. 
Use your [connection details][connection-info] to configure the data source. - 1. Click `Test` to ensure the connection works, then click `Save`. + 2. Choose `PostgreSQL Unicode` and click `Finish`. + 3. Use your [connection details][connection-info] to configure the data source. + 4. Click `Test` to ensure the connection works, then click `Save`. - - -## Import the data from your your {SERVICE_LONG} into Power BI +## Import the data from your your {SERVICE_LONG} into Power BI Establish a connection and import data from your {SERVICE_LONG} into Power BI: - - 1. **Connect Power BI to your {SERVICE_LONG}** 1. Open Power BI, then click `Get data from other sources`. - 1. Search for and select `ODBC`, then click `Connect`. - 1. In `Data source name (DSN)`, select the {CLOUD_LONG} data source and click `OK`. - 1. Use your [connection details][connection-info] to enter your `User Name` and `Password`, then click `Connect`. + 2. Search for and select `ODBC`, then click `Connect`. + 3. In `Data source name (DSN)`, select the {CLOUD_LONG} data source and click `OK`. + 4. Use your [connection details][connection-info] to enter your `User Name` and `Password`, then click `Connect`. - After connecting, `Navigator` displays the available tables and schemas. + After connecting, `Navigator` displays the available tables and schemas. -1. **Import your data into Power BI** +2. **Import your data into Power BI** - 1. Select the tables to import and click `Load`. + 1. Select the tables to import and click `Load`. The `Data` pane shows your imported tables. - - 1. To visualize your data and build reports, drag fields from the tables onto the canvas. - + 2. To visualize your data and build reports, drag fields from the tables onto the canvas. You have successfully integrated Power BI with {CLOUD_LONG}. -[timescale-on-windows]: https://docs.tigerdata.com/self-hosted/latest/install/installation-windows/ -[connection-info]: /integrations/find-connection-details/ +[connection-info]: /integrations/find-connection-details [power-bi]: https://www.microsoft.com/en-us/power-platform/products/power-bi/ [power-bi-install]: https://www.microsoft.com/en-us/power-platform/products/power-bi/downloads -[postgresql-odbc-driver]: https://www.postgresql.org/ftp/odbc/releases/ +[postgresql-odbc-driver]: https://www.postgresql.org/ftp/odbc/releases/ \ No newline at end of file diff --git a/integrations/integrate/prometheus.mdx b/integrations/integrate/prometheus.mdx index d8c0f6a..4956d66 100644 --- a/integrations/integrate/prometheus.mdx +++ b/integrations/integrate/prometheus.mdx @@ -1,14 +1,4 @@ --- -title: Integrate Prometheus with Tiger Cloud -sidebarTitle: Prometheus -description: Prometheus is an open-source monitoring system with a modern alerting approach. Export telemetry metrics from your Tiger Cloud service to Prometheus -products: [cloud, self_hosted] -price_plans: [scale, enterprise] -keywords: [integrate] +title: Prometheus +description: TBD --- - - -import PrometheusIntegrate from "/snippets/integrations/_prometheus-integrate.mdx"; - - - diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx index 6f35d37..a68be2b 100644 --- a/integrations/integrate/psql.mdx +++ b/integrations/integrate/psql.mdx @@ -1,265 +1,4 @@ --- -title: Connect to a Tiger Cloud service with psql -description: psql enables you to type in queries interactively, issue them to Postgres, and see the query results. 
Connect to your Tiger Cloud service using psql -products: [cloud, self_hosted] -keywords: [connect, psql] +title: psql +description: TBD --- - -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -[`psql`][psql-docs] is a terminal-based frontend to {PG} that enables you to type in queries interactively, issue them to Postgres, and see the query results. - -This page shows you how to use the `psql` command line tool to interact with your {SERVICE_LONG}. - -## Prerequisites - - - -## Check for an existing installation - -On many operating systems, `psql` is installed by default. To use the functionality described in this page, best practice is to use the latest version of `psql`. To check the version running on your system: - - - - - - -```bash -psql --version -``` - - - - - - -```powershell -wmic -/output:C:\list.txt product get name, version -``` - - - - - -If you already have the latest version of `psql` installed, proceed to the [Connect to your {SERVICE_SHORT}][connect-database] section. - -## Install psql - -If there is no existing installation, take the following steps to install `psql`: - - - - - -Install using Homebrew. `libpqxx` is the official C++ client API for {PG}. - - - -1. Install Homebrew, if you don't already have it: - - ```bash - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - ``` - - For more information about Homebrew, including installation instructions, see the [Homebrew documentation][homebrew]. - -1. Make sure your Homebrew repository is up to date: - - ```bash - brew doctor - brew update - ``` - -1. Install `psql`: - - ```bash - brew install libpq - ``` - -1. Update your path to include the `psql` tool: - - ```bash - brew link --force libpq - ``` - -On Intel chips, the symbolic link is added to `/usr/local/bin`. On Apple Silicon, the symbolic link is added to `/opt/homebrew/bin`. - - - - - - - -Install using MacPorts. `libpqxx` is the official C++ client API for {PG}. - - - -1. [Install MacPorts][macports] by downloading and running the package installer. - -1. Make sure MacPorts is up to date: - - ```bash - sudo port selfupdate - ``` - -1. Install the latest version of `libpqxx`: - - ```bash - sudo port install libpqxx - ``` - -1. View the files that were installed by `libpqxx`: - - ```bash - port contents libpqxx - ``` - - - - - - - -Install `psql` on Debian and Ubuntu with the `apt` package manager. - - - -1. Make sure your `apt` repository is up to date: - - ```bash - sudo apt-get update - ``` - -1. Install the `postgresql-client` package: - - ```bash - sudo apt-get install postgresql-client - ``` - - - - - - - -`psql` is installed by default when you install {PG}. This procedure uses the interactive installer provided by {PG} and EnterpriseDB. - - - -1. Download and run the {PG} installer from [www.enterprisedb.com][windows-installer]. - -1. In the `Select Components` dialog, check `Command Line Tools`, along with any other components you want to install, and click `Next`. - -1. Complete the installation wizard to install the package. - - - - - - - -## Connect to your {SERVICE_SHORT} - -To use `psql` to connect to your {SERVICE_SHORT}, you need the connection details. See [Find your connection details][connection-info]. - -Connect to your {SERVICE_SHORT} with either: - -- The parameter flags: - - ```bash - psql -h -p -U -W -d - ``` - -- The {SERVICE_SHORT} URL: - - ```bash - psql "postgres://@:/?sslmode=require" - ``` - - You are prompted to provide the password. 
- -- The {SERVICE_SHORT} URL with the password already included and [a stricter SSL mode][ssl-mode] enabled: - - ```bash - psql "postgres://:@:/?sslmode=verify-full" - ``` - -## Useful psql commands - -When you start using `psql`, these are the commands you are likely to use most frequently: - -|Command|Description| -|-|-| -|`\c `|Connect to a new database| -|`\d `|Show the details of a table| -|`\df`|List functions in the current database| -|`\df+`|List all functions with more details| -|`\di`|List all indexes from all tables| -|`\dn`|List all schemas in the current database| -|`\dt`|List available tables| -|`\du`|List {PG} database roles| -|`\dv`|List views in current schema| -|`\dv+`|List all views with more details| -|`\dx`|Show all installed extensions| -|`ef `|Edit a function| -|`\h`|Show help on syntax of SQL commands| -|`\l`|List available databases| -|`\password `|Change the password for the user| -|`\q`|Quit `psql`| -|`\set`|Show system variables list| -|`\timing`|Show how long a query took to execute| -|`\x`|Show expanded query results| -|`\?`|List all `psql` slash commands| - -For more on `psql` commands, see the [{COMPANY} psql cheat sheet][psql-cheat-sheet] and [psql documentation][psql-docs]. - -## Save query results to a file - -When you run queries in `psql`, the results are shown in the terminal by default. -If you are running queries that have a lot of results, you might like to save -the results into a comma-separated `.csv` file instead. You can do this using -the `COPY` command. For example: - -```sql -\copy (SELECT * FROM ...) TO '/tmp/output.csv' (format CSV); -``` - -This command sends the results of the query to a new file called `output.csv` in -the `/tmp/` directory. You can open the file using any spreadsheet program. - -## Run long queries - -To run multi-line queries in `psql`, use the `EOF` delimiter. For example: - -```sql -psql -d $TARGET -f -v hypertable= - <<'EOF' -SELECT public.alter_job(j.id, scheduled=>true) -FROM _timescaledb_config.bgw_job j -JOIN _timescaledb_catalog.hypertable h ON h.id = j.hypertable_id -WHERE j.proc_schema IN ('_timescaledb_internal', '_timescaledb_functions') -AND j.proc_name = 'policy_columnstore' -AND j.id >= 1000 -AND format('%I.%I', h.schema_name, h.table_name)::text::regclass = :'hypertable'::text::regclass; -EOF -``` - -## Edit queries in a text editor - -Sometimes, queries can get very long, and you might make a mistake when you try -typing it the first time around. If you have made a mistake in a long query, -instead of retyping it, you can use a built-in text editor, which is based on -`Vim`. Launch the query editor with the `\e` command. Your previous query is -loaded into the editor. When you have made your changes, press `Esc`, then type -`:`+`w`+`q` to save the changes, and return to the command prompt. Access the -edited query by pressing `↑`, and press `Enter` to run it. 
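As a counterpart to saving query results to a file, `\copy` can also load a CSV file into an existing table. In this sketch, `conditions` and the file path are placeholders:

```sql
-- Load rows from a local CSV file (with a header row) into an existing table.
\copy conditions FROM '/tmp/input.csv' WITH (FORMAT csv, HEADER true);
```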
- -[psql-cheat-sheet]: https://www.timescale.com/learn/postgres-cheat-sheet -[psql-docs]: https://www.postgresql.org/docs/current/app-psql.html -[ssl-mode]: /use-timescale/:currentVersion:/security/strict-ssl/ -[homebrew]: https://docs.brew.sh/Installation -[macports]: https://guide.macports.org/#installing.macports -[windows-installer]: https://www.postgresql.org/download/windows/ -[connect-database]:/integrations/:currentVersion:/psql/#connect-to-your-service -[connection-info]: /integrations/:currentVersion:/find-connection-details/ - diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx index 8c71da3..d91fecd 100644 --- a/integrations/integrate/qstudio.mdx +++ b/integrations/integrate/qstudio.mdx @@ -1,53 +1,4 @@ --- -title: Integrate qStudio with Tiger Cloud -sidebarTitle: qStudio -description: qStudio is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. Integrate qStudio with Tiger Cloud -products: [cloud, self_hosted] -keywords: [integrate] +title: qStudio +description: TBD --- - -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - - -[qStudio][qstudio] is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. You can use it to run queries, browse tables, and create charts for your {SERVICE_LONG}. - -This page explains how to integrate qStudio with {CLOUD_LONG}. - -## Prerequisites - - - -* [Download][qstudio-downloads] and install qStudio. - -## Connect qStudio to your {SERVICE_LONG} - -To connect to {CLOUD_LONG}: - - - -1. **Start qStudio** -1. **Click `Server` > `Add Server`** -1. **Configure the connection** - - * For `Server Type`, select `Postgres`. - * For `Connect By`, select `Host`. - * For `Host`, `Port`, `Database`, `Username`, and `Password`, use - your [connection details][connection-info]. - - ![qStudio integration](https://assets.timescale.com/docs/images/integrations-qstudio.png) - -1. **Click `Test`** - - qStudio indicates whether the connection works. - -1. **Click `Add`** - - The server is listed in the `Server Tree`. - - - -You have successfully integrated qStudio with {CLOUD_LONG}. - -[qstudio]: https://www.timestored.com/qstudio/ -[qstudio-downloads]: https://www.timestored.com/qstudio/download -[connection-info]: /integrations/:currentVersion:/find-connection-details/ diff --git a/integrations/integrate/supabase.mdx b/integrations/integrate/supabase.mdx index b6d2b09..9ecdeca 100644 --- a/integrations/integrate/supabase.mdx +++ b/integrations/integrate/supabase.mdx @@ -1,271 +1,4 @@ --- -title: Integrate Supabase with Tiger Cloud -sidebarTitle: Supabase -description: Supabase is an open source Firebase alternative. Integrate Supabase with Tiger Cloud -products: [cloud, self_hosted] -keywords: [integrate] +title: Supabase +description: TBD --- - -import { CLOUD_LONG, PG, SERVICE_LONG, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import OldCreateHypertable from "/snippets/changes/_old-api-create-hypertable.mdx"; - -[Supabase][supabase] is an open source Firebase alternative. This page shows how to run real-time analytical queries -against a {SERVICE_LONG} through Supabase using a foreign data wrapper (fdw) to bring aggregated data from your -{SERVICE_LONG}. 
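Before you start, you can optionally confirm that the extensions this page relies on are available: `postgres_fdw` on the Supabase side and `timescaledb` on the {SERVICE_LONG} side. A minimal check, run on each database:

```sql
-- Lists the extensions if they are available for installation.
SELECT name, default_version, installed_version
FROM pg_available_extensions
WHERE name IN ('timescaledb', 'postgres_fdw');
```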
- -## Prerequisites - - - -- Create a [Supabase project][supabase-new-project] - -## Set up your {SERVICE_LONG} - -To set up a {SERVICE_LONG} optimized for analytics to receive data from Supabase: - - - -1. **Optimize time-series data in hypertables** - - Time-series data represents how a system, process, or behavior changes over time. [Hypertables][hypertables-section] - are {PG} tables that help you improve insert and query performance by automatically partitioning your data by - time. - - 1. [Connect to your {SERVICE_LONG}][connect] and create a table that will point to a Supabase database: - - ```sql - CREATE TABLE signs ( - time timestamptz NOT NULL DEFAULT now(), - origin_time timestamptz NOT NULL, - name TEXT - ) WITH ( - tsdb.hypertable, - tsdb.partition_column='time' - ); - ``` - - -1. **Optimize cooling data for analytics** - - Hypercore is the hybrid row-columnar storage engine in {TIMESCALE_DB}, designed specifically for real-time analytics - and powered by time-series data. The advantage of hypercore is its ability to seamlessly switch between row-oriented - and column-oriented storage. This flexibility enables {TIMESCALE_DB} to deliver the best of both worlds, solving the - key challenges in real-time analytics. - - ```sql - ALTER TABLE signs SET ( - timescaledb.enable_columnstore = true, - timescaledb.segmentby = 'name'); - ``` - -1. **Create optimized analytical queries** - - Continuous aggregates are designed to make queries on very large datasets run - faster. Continuous aggregates in {CLOUD_LONG} use {PG} [materialized views][postgres-materialized-views] to - continuously, and incrementally refresh a query in the background, so that when you run the query, - only the data that has changed needs to be computed, not the entire dataset. - - 1. Create a continuous aggregate pointing to the Supabase database. - - ```sql - CREATE MATERIALIZED VIEW IF NOT EXISTS signs_per_minute - WITH (timescaledb.continuous) - AS - SELECT time_bucket('1 minute', time) as ts, - name, - count(*) as total - FROM signs - GROUP BY 1, 2 - WITH NO DATA; - ``` - - 1. Setup a delay stats comparing `origin_time` to `time`. - - ```sql - CREATE MATERIALIZED VIEW IF NOT EXISTS _signs_per_minute_delay - WITH (timescaledb.continuous) - AS - SELECT time_bucket('1 minute', time) as ts, - stats_agg(extract(epoch from origin_time - time)::float8) as delay_agg, - candlestick_agg(time, extract(epoch from origin_time - time)::float8, 1) as delay_candlestick - FROM signs GROUP BY 1 - WITH NO DATA; - ``` - - 1. Setup a view to recieve the data from Supabase. - - ```sql - CREATE VIEW signs_per_minute_delay - AS - SELECT ts, - average(delay_agg) as avg_delay, - stddev(delay_agg) as stddev_delay, - open(delay_candlestick) as open, - high(delay_candlestick) as high, - low(delay_candlestick) as low, - close(delay_candlestick) as close - FROM _signs_per_minute_delay - ``` - -1. **Add refresh policies for your analytical queries** - - You use `start_offset` and `end_offset` to define the time range that the continuous aggregate will cover. Assuming - that the data is being inserted without any delay, set the `start_offset` to `5 minutes` and the `end_offset` to - `1 minute`. This means that the continuous aggregate is refreshed every minute, and the refresh covers the last 5 - minutes. - You set `schedule_interval` to `INTERVAL '1 minute'` so the continuous aggregate refreshes on your {SERVICE_LONG} - every minute. 
The data is accessed from Supabase, and the continuous aggregate is refreshed every minute in - the other side. - - ```sql - SELECT add_continuous_aggregate_policy('signs_per_minute', - start_offset => INTERVAL '5 minutes', - end_offset => INTERVAL '1 minute', - schedule_interval => INTERVAL '1 minute'); - ``` - Do the same thing for data inserted with a delay: - ```sql - SELECT add_continuous_aggregate_policy('_signs_per_minute_delay', - start_offset => INTERVAL '5 minutes', - end_offset => INTERVAL '1 minute', - schedule_interval => INTERVAL '1 minute'); - ``` - - - - -## Set up a Supabase database - -To set up a Supabase database that injects data into your {SERVICE_LONG}: - - - -1. **Connect a foreign server in Supabase to your {SERVICE_LONG}** - - 1. Connect to your Supabase project using Supabase dashboard or psql. - 1. Enable the `postgres_fdw` extension. - - ```sql - CREATE EXTENSION postgres_fdw; - ``` - 1. Create a foreign server that points to your {SERVICE_LONG}. - - Update the following command with your [connection details][connection-info], then run it - in the Supabase database: - - ```sql - CREATE SERVER timescale - FOREIGN DATA WRAPPER postgres_fdw - OPTIONS ( - host '', - port '', - dbname '', - sslmode 'require', - extensions 'timescaledb' - ); - ``` - -1. **Create the user mapping for the foreign server** - - Update the following command with your [connection details][connection-info], the run it - in the Supabase database: - - ```sql - CREATE USER MAPPING FOR CURRENT_USER - SERVER timescale - OPTIONS ( - user '', - password '' - ); - ``` - -1. **Create a foreign table that points to a table in your {SERVICE_LONG}.** - - This query introduced the following columns: - - `time`: with a default value of `now()`. This is because the `time` column is used by {CLOUD_LONG} to optimize data - in the columnstore. - - `origin_time`: store the original timestamp of the data. - - Using both columns, you understand the delay between Supabase (`origin_time`) and the time the data is - inserted into your {SERVICE_LONG} (`time`). - - ```sql - CREATE FOREIGN TABLE signs ( - TIME timestamptz NOT NULL DEFAULT now(), - origin_time timestamptz NOT NULL, - NAME TEXT) - SERVER timescale OPTIONS ( - schema_name 'public', - table_name 'signs' - ); - ``` - -1. **Create a foreign table in Supabase** - - 1. Create a foreign table that matches the `signs_per_minute` view in your {SERVICE_LONG}. It represents a top level - view of the data. - - ```sql - CREATE FOREIGN TABLE signs_per_minute ( - ts timestamptz, - name text, - total int - ) - SERVER timescale OPTIONS (schema_name 'public', table_name 'signs_per_minute'); - ``` - - 1. Create a foreign table that matches the `signs_per_minute_delay` view in your {SERVICE_LONG}. - - ```sql - CREATE FOREIGN TABLE signs_per_minute_delay ( - ts timestamptz, - avg_delay float8, - stddev_delay float8, - open float8, - high float8, - low float8, - close float8 - ) SERVER timescale OPTIONS (schema_name 'public', table_name 'signs_per_minute_delay'); - ``` - - - -## Test the integration - -To inject data into your {SERVICE_LONG} from a Supabase database using a foreign table: - - - -1. **Insert data into your Supabase database** - - Connect to Supabase and run the following query: - - ```sql - INSERT INTO signs (origin_time, name) VALUES (now(), 'test') - ``` - -1. 
**Check the data in your {SERVICE_LONG}** - - [Connect to your {SERVICE_LONG}][connect] and run the following query: - - ```sql - SELECT * from signs; - ``` - You see something like: - - | origin_time | time | name | - |-------------|------|------| - | 2025-02-27 16:30:04.682391+00 | 2025-02-27 16:30:04.682391+00 | test | - - - -You have successfully integrated Supabase with your {SERVICE_LONG}. - -[supabase]: https://supabase.com/ -[supabase-new-project]: https://supabase.com/dashboard/new -[hypertables-section]: /use-timescale/:currentVersion:/hypertables/ -[connect]: /getting-started/:currentVersion:/run-queries-from-console/ -[hypercore]: /use-timescale/:currentVersion:/hypercore/ -[postgres-materialized-views]: https://www.postgresql.org/docs/current/rules-materializedviews.html -[connection-info]: /integrations/:currentVersion:/find-connection-details/ diff --git a/integrations/integrate/tableau.mdx b/integrations/integrate/tableau.mdx index 5e7fa94..0b9dfeb 100644 --- a/integrations/integrate/tableau.mdx +++ b/integrations/integrate/tableau.mdx @@ -1,12 +1,11 @@ --- title: Integrate Tableau and Tiger Cloud sidebarTitle: Tableau -description: Tableau is a popular analytics platform that helps you gain greater intelligence about your business. Integrate Tableau with Tiger Cloud -products: [cloud, self_hosted] -keywords: [visualizations, analytics, Tableau] +description: Integrate Tableau with Tiger Cloud --- -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; +import { CLOUD_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; [Tableau][tableau] is a popular analytics platform that helps you gain greater intelligence about your business. You can use it to visualize data stored in {CLOUD_LONG}. @@ -21,32 +20,27 @@ data stored in {CLOUD_LONG}. To connect the data in your {SERVICE_LONG} to Tableau: - - 1. **Log in to Tableau** - - Tableau Cloud: [sign in][tableau-login], then click `Explore` and select a project. + - Tableau Cloud: [sign in][tableau-login], then click `Explore` and select a project. - Tableau Desktop: sign in, then open a workbook. -1. **Configure Tableau to connect to your {SERVICE_LONG}** - 1. Add a new data source: +2. **Configure Tableau to connect to your {SERVICE_LONG}** + 1. Add a new data source: - Tableau Cloud: click `New` > `Virtual Connection`. - Tableau Desktop: click `Data` > `New Data Source`. - 1. Search for and select `PostgreSQL`. - + 2. Search for and select `PostgreSQL`. + For Tableau Desktop download the driver and restart Tableau. - 1. Configure the connection: + 3. Configure the connection: - `Server`, `Port`, `Database`, `Username`, `Password`: configure using your [connection details][connection-info]. - `Require SSL`: tick the checkbox. - -1. **Click `Sign In` and connect Tableau to your {SERVICE_SHORT}** - +3. **Click `Sign In` and connect Tableau to your {SERVICE_SHORT}** You have successfully integrated Tableau with {CLOUD_LONG}. 
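+
+As a quick check that the connection returns data, you can add a Custom SQL query as a data source in Tableau. The following is a minimal sketch only — it assumes a hypothetical hypertable named `conditions` with `time`, `device_id`, and `temperature` columns, which is not created by this integration:
+
+```sql
+-- Hypothetical hypertable: conditions(time TIMESTAMPTZ, device_id TEXT, temperature DOUBLE PRECISION)
+-- Aggregate readings into 15-minute buckets for visualization in Tableau
+SELECT time_bucket('15 minutes', time) AS bucket,
+       device_id,
+       avg(temperature) AS avg_temperature
+FROM conditions
+GROUP BY bucket, device_id
+ORDER BY bucket;
+```
+
+If the query returns rows in Tableau, the connection to your {SERVICE_SHORT} is working and you can build visualizations on top of it.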
+[connection-info]: /integrations/find-connection-details +[tableau]: https://www.tableau.com/ [tableau-cloud]: https://www.tableau.com/products/trial -[tableau-server]: https://www.tableau.com/support/releases/server/2024.2.6#esdalt [tableau-login]: http://online.tableau.com/ -[cloud-login]: https://console.cloud.timescale.com/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[tableau]: https://www.tableau.com/ +[tableau-server]: https://www.tableau.com/support/releases/server/2024.2.6#esdalt \ No newline at end of file diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx new file mode 100644 index 0000000..b0d7541 --- /dev/null +++ b/integrations/integrate/telegraf.mdx @@ -0,0 +1,4 @@ +--- +title: Telegraf +description: TBD +--- diff --git a/integrations/integrate/terraform.mdx b/integrations/integrate/terraform.mdx index 51d2b8b..3e664b3 100644 --- a/integrations/integrate/terraform.mdx +++ b/integrations/integrate/terraform.mdx @@ -2,12 +2,10 @@ title: Integrate Terraform with Tiger Cloud sidebarTitle: Terraform description: Manage your Tiger Cloud services with a Terraform provider -products: [cloud, self_hosted] -keywords: [Terraform, configuration, deployment] --- -import { CLOUD_LONG, COMPANY, CONSOLE, PG, VPC } from '/snippets/vars.mdx'; -import IntegrationPrereqCloud from "/snippets/prerequisites/_integration-prereqs-cloud-only.mdx"; +import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; [Terraform][terraform] is an infrastructure-as-code tool that enables you to safely and predictably provision and manage infrastructure. @@ -15,138 +13,134 @@ This page explains how to configure Terraform to manage your {SERVICE_LONG} or { ## Prerequisites - + + * [Download and install][terraform-install] Terraform. ## Configure Terraform Configure Terraform based on your deployment type: - - - + - You use the [{COMPANY} Terraform provider][terraform-provider] to manage {SERVICE_LONG}s: + - +You use the [{COMPANY} Terraform provider][terraform-provider] to manage {SERVICE_LONG}s: - 1. **Generate client credentials for programmatic use** +1. **Generate client credentials for programmatic use** - 1. In [{CONSOLE}][console], click `Projects` and save your `Project ID`, then click `Project settings`. + 1. In [{CONSOLE}][console], click `Projects` and save your `Project ID`, then click `Project settings`. - 1. Click `Create credentials`, then save `Public key` and `Secret key`. + 2. Click `Create credentials`, then save `Public key` and `Secret key`. - 1. **Configure {COMPANY} Terraform provider** +2. **Configure {COMPANY} Terraform provider** - 1. Create a `main.tf` configuration file with at least the following content. Change `x.y.z` to the [latest version][terraform-provider] of the provider. + 1. Create a `main.tf` configuration file with at least the following content. Change `x.y.z` to the [latest version][terraform-provider] of the provider. - ```hcl - terraform { - required_providers { - timescale = { - source = "timescale/timescale" - version = "x.y.z" - } - } - } + ```hcl + terraform { + required_providers { + timescale = { + source = "timescale/timescale" + version = "x.y.z" + } + } + } - # Authenticate using client credentials generated in Tiger Console. - # When required, these credentials will change to a short-lived JWT to do the calls. 
- provider "timescale" { - project_id = var.ts_project_id - access_key = var.ts_access_key - secret_key = var.ts_secret_key - } + # Authenticate using client credentials generated in Tiger Cloud Console. + # When required, these credentials will change to a short-lived JWT to do the calls. + provider "timescale" { + project_id = var.ts_project_id + access_key = var.ts_access_key + secret_key = var.ts_secret_key + } - variable "ts_project_id" { - type = string - } + variable "ts_project_id" { + type = string + } - variable "ts_access_key" { - type = string - } + variable "ts_access_key" { + type = string + } - variable "ts_secret_key" { - type = string - } - ``` + variable "ts_secret_key" { + type = string + } + ``` - 1. Create a `terraform.tfvars` file in the same directory as your `main.tf` to pass in the variable values: + 2. Create a `terraform.tfvars` file in the same directory as your `main.tf` to pass in the variable values: - ```hcl - export TF_VAR_ts_project_id="" - export TF_VAR_ts_access_key="" - export TF_VAR_ts_secret_key="" - ``` + ```hcl + export TF_VAR_ts_project_id="" + export TF_VAR_ts_access_key="" + export TF_VAR_ts_secret_key="" + ``` - 1. **Add your resources** +3. **Add your resources** - Add your {SERVICE_LONG}s or {VPC} connections to the `main.tf` configuration file. For example: + Add your {SERVICE_LONG}s or {VPC} connections to the `main.tf` configuration file. For example: - ```hcl - resource "timescale_service" "test" { - name = "test-service" - milli_cpu = 500 - memory_gb = 2 - region_code = "us-east-1" - enable_ha_replica = false + ```hcl + resource "timescale_service" "test" { + name = "test-service" + milli_cpu = 500 + memory_gb = 2 + region_code = "us-east-1" + enable_ha_replica = false - timeouts = { - create = "30m" - } - } + timeouts = { + create = "30m" + } + } - resource "timescale_vpc" "vpc" { - cidr = "10.10.0.0/16" - name = "test-vpc" - region_code = "us-east-1" - } - ``` + resource "timescale_vpc" "vpc" { + cidr = "10.10.0.0/16" + name = "test-vpc" + region_code = "us-east-1" + } + ``` - You can now manage your resources with Terraform. See more about [available resources][terraform-resources] and [data sources][terraform-data-sources]. +You can now manage your resources with Terraform. See more about [available resources][terraform-resources] and [data sources][terraform-data-sources]. - + - + - +You use the [`cyrilgdn/postgresql`][pg-provider] {PG} provider to connect to your {SELF_LONG} instance. - You use the [`cyrilgdn/postgresql`][pg-provider] {PG} provider to connect to your {SELF_LONG} instance. 
+Create a `main.tf` configuration file with the following content, using your [connection details][connection-info]: - Create a `main.tf` configuration file with the following content, using your [connection details][connection-info]: - - ```hcl - terraform { - required_providers { - postgresql = { - source = "cyrilgdn/postgresql" - version = ">= 1.15.0" - } - } +```hcl + terraform { + required_providers { + postgresql = { + source = "cyrilgdn/postgresql" + version = ">= 1.15.0" + } } + } - provider "postgresql" { - host = "your-timescaledb-host" - port = "your-timescaledb-port" - database = "your-database-name" - username = "your-username" - password = "your-password" - sslmode = "require" # Or "disable" if SSL isn't enabled - } - ``` + provider "postgresql" { + host = "your-timescaledb-host" + port = "your-timescaledb-port" + database = "your-database-name" + username = "your-username" + password = "your-password" + sslmode = "require" # Or "disable" if SSL isn't enabled + } +``` - You can now manage your database with Terraform. +You can now manage your database with Terraform. - + -[terraform-install]: https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli -[terraform]: https://developer.hashicorp.com/terraform +[connection-info]: /integrations/find-connection-details [console]: https://console.cloud.timescale.com/dashboard/services -[terraform-provider]: https://registry.terraform.io/providers/timescale/timescale/latest/docs -[connection-info]: /integrations/:currentVersion:/find-connection-details/ -[terraform-resources]: https://registry.terraform.io/providers/timescale/timescale/latest/docs/resources/peering_connection -[terraform-data-sources]: https://registry.terraform.io/providers/timescale/timescale/latest/docs/data-sources/products [pg-provider]: https://registry.terraform.io/providers/cyrilgdn/postgresql/latest - +[terraform]: https://developer.hashicorp.com/terraform +[terraform-data-sources]: https://registry.terraform.io/providers/timescale/timescale/latest/docs/data-sources/products +[terraform-install]: https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli +[terraform-provider]: https://registry.terraform.io/providers/timescale/timescale/latest/docs +[terraform-resources]: https://registry.terraform.io/providers/timescale/timescale/latest/docs/resources/peering_connection \ No newline at end of file diff --git a/integrations/integrations.mdx b/integrations/integrations.mdx index 428377a..e5154db 100644 --- a/integrations/integrations.mdx +++ b/integrations/integrations.mdx @@ -1,14 +1,16 @@ --- title: Integrations -description: You can integrate your Tiger Cloud service with third-party solutions to expand and extend what you can do with your data. +description: You can integrate your {SERVICE_LONG} with third-party solutions to expand and extend what you can do with your data. products: [cloud, mst, self_hosted] keywords: [IoT, simulate] mode: "wide" --- +import { SERVICE_LONG, PG, COMPANY, CLOUD_LONG } from '/snippets/vars.mdx'; - - A Tiger Cloud service is a Postgres database instance extended by Tiger Data with custom capabilities. This means that any third-party solution that you can integrate with Postgres, you can also integrate with Tiger Cloud. See the full list of Postgres integrations here. + + + A {SERVICE_LONG} is a {PG} database instance extended by {COMPANY} with custom capabilities. This means that any third-party solution that you can integrate with {PG}, you can also integrate with {CLOUD_LONG}. 
See the full list of {PG} integrations here. @@ -16,30 +18,37 @@ mode: "wide" - Synchronization all the data, or specific tables in your Tiger Cloud service to Iceberg tables running in Amazon S3 Tables in real time. + Synchronize data from your {SERVICE_LONG} to Iceberg tables in Amazon S3 in real time. ## Source connectors - + - Synchronization all the data, or specific tables in your Tiger Cloud service to Iceberg tables running in Amazon S3 Tables in real time. + Synchronize data from Amazon S3 to your {SERVICE_LONG} in real time. - Synchronize all the data, or specific tables, from a Postgres database instance to your Tiger Cloud service in real time. + Synchronize data from a {PG} database to your {SERVICE_LONG} in real time. + + + Stream data from Apache Kafka topics to your {SERVICE_LONG} in real time. @@ -47,129 +56,504 @@ mode: "wide" - Get started with coding examples and SDKs for Tiger Data . + Get started with {COMPANY} using coding examples and SDKs. +## Authentication and security + + + + Implement authentication and authorization for web applications. + + + Securely manage user authentication and access controls for applications. + + + Manage authentication and user identity securely for applications. + + + +## Business intelligence and data visualization + + + + Build and optimize data APIs for analytics applications. + + + Explore, analyze, and share business insights with a BI platform. + + + Create dashboards and visualize business data without SQL expertise. + + + Visualize data, build interactive dashboards, and share insights. + + + Create and explore data visualizations and dashboards. + + + Connect to data sources, analyze data, and create interactive visualizations and dashboards. + + + ## Configuration and deployment - + + Run event-driven serverless code in the cloud without managing infrastructure. + + + Deploy and run JavaScript and TypeScript applications at the edge. + + - Infrastructure as code for your Tiger Data deployment. + Manage and automate database migrations using version control. - Deploy and manage Tiger Data on Kubernetes. + Deploy, scale, and manage containerized applications automatically. + + + Track, version, and automate database schema changes. + + + Define and manage cloud infrastructure using code in multiple languages. + + + Deploy and scale web applications, databases, and services easily. + + + Safely and predictably provision and manage infrastructure in any cloud. ## Data engineering and extract, transform, load + + Sync data between various sources and destinations. + - Machine learning integration with Amazon SageMaker. + Build, train, and deploy ML models into a production-ready hosted environment. - Workflow orchestration with Apache Airflow. + Programmatically author, schedule, and monitor workflows. + + + Build and execute batch and streaming data pipelines across multiple processing engines. - Real-time data streaming with Apache Kafka. + Stream high-performance data pipelines, analytics, and data integration. - Serverless computing with AWS Lambda. + Run code without provisioning or managing servers, scaling automatically as needed. + + + Transform and model data in your warehouse using SQL-based workflows. - Change data capture with Debezium. + Capture and stream real-time changes from databases. - Real-time stream processing with Decodable. + Build, run, and manage data pipelines effortlessly. + + + Enhance data lakes with ACID transactions and schema enforcement. 
+ + + Simplify interactions with Firebase services through an abstraction layer. + + + Extract, load, and transform data from various sources to data warehouses. ## Data ingestion and streaming - + + + Process large-scale data workloads quickly using distributed computing. + + + Manage and scale Apache Kafka-based event streaming applications. + + + Enable real-time synchronization between databases and frontend applications. + + + Deploy an enterprise-grade MQTT broker for IoT messaging. + + + Stream and synchronize data in real time between different systems. + + + Process real-time data streams with fault-tolerant distributed computing. + - Automated data integration with Fivetran. + Sync data from multiple sources to your data warehouse. + + + Connect operational technology sources, model the data, and stream it into Postgres. + + + Stream and process real-time data as a Kafka-compatible platform. + + + Ingest, process, and analyze real-time data streams. +## Development tools + + + + Collaborate on data science projects with a cloud-based notebook platform. + + + Develop scalable and secure web applications using a Python framework. + + + Build applications that integrate with language models like GPT. + + + Build high-performance, memory-safe applications with a modern programming language. + + + Create interactive data applications and dashboards using Python. + + + +## Language-specific integrations + + + + Integrate {CLOUD_LONG} with a Golang application. + + + Integrate {CLOUD_LONG} with a Java application. + + + Integrate {CLOUD_LONG} with a Node.js application. + + + Integrate {CLOUD_LONG} with a Python application. + + + Integrate {CLOUD_LONG} with a Ruby application. + + + +## Logging and system administration + + + + Collect, filter, and forward system logs for centralized logging. + + + Generate database schema documentation and visualization. + + + ## Observability and alerting - AWS monitoring and logging with CloudWatch. + Collect, analyze, and act on data from applications, infrastructure, and services running in AWS and on-premises environments. + + + Monitor, trace, and diagnose distributed applications for improved observability. + + + Collect and analyze telemetry data from cloud and on-premises environments. + + + Monitor applications with OpenTelemetry-native observability built on CNCF Open Standards like PromQL, Perses, and OTLP. - Application performance monitoring with Datadog. + Gain comprehensive visibility into applications, infrastructure, and systems through real-time monitoring, logging, and analytics. - Data visualization and monitoring with Grafana. + Query, visualize, alert on, and explore your metrics and logs. + + + Monitor application performance and detect issues in real-time. + + + Trace and diagnose distributed transactions for observability. + + + Monitor applications, infrastructure, and logs for performance insights. + + + Collect and analyze telemetry data for observability across systems. - Metrics collection and alerting with Prometheus. + Track the performance and health of systems, applications, and infrastructure. + + + Monitor application performance with an open-source observability tool. - Business intelligence with Tableau. + Collect, process, and ship metrics and events into databases or monitoring platforms. @@ -178,127 +562,201 @@ mode: "wide" - Cross-platform database tool from Microsoft. + Query, manage, visualize, and develop databases across SQL Server, Azure SQL, and Postgres. - Universal database tool for developers. 
+ Connect to, manage, query, and analyze multiple databases in a single interface with SQL editing, visualization, and administration tools. + + + Create admin panels and dashboards for business applications. + + + Instantly generate GraphQL APIs from databases with access control. + + + Analyze data, create reports, and share insights with teams. + + + Run a cloud-native, serverless Postgres database with automatic scaling. - Web-based PostgreSQL administration tool. + Manage, query, and administer Postgres databases through a graphical interface. - Native PostgreSQL client tools. + Access and query data from external sources as if they were regular Postgres tables. + + + Simplify database access with an open-source ORM for Node.js. - Command-line interface for PostgreSQL. + Run SQL queries, manage databases, automate tasks, and interact directly with Postgres. + + + Move and synchronize data across multiple database platforms. - SQL analytics and visualization tool. + Write and execute SQL queries, manage database objects, and analyze data in a user-friendly interface. + + + Query, visualize, and share data from multiple sources. + + + Manage database operations using a Python SQL toolkit and ORM. + + + Interact with SQL databases in Node.js using an ORM. + + + Build and deploy GraphQL APIs with data from multiple sources. + + + Build applications with an open-source Firebase alternative powered by Postgres. + + + Work with databases in TypeScript and JavaScript using an ORM. -## Secure connectivity to Tiger Cloud +## Secure connectivity to {CLOUD_LONG} - Secure connectivity from AWS environments. + Establish secure connectivity from AWS environments. Connect from your corporate network. - Secure connectivity from Google Cloud Platform. + Establish secure connectivity from Google Cloud Platform. - Secure connectivity from Microsoft Azure. + Establish secure connectivity from Microsoft Azure. - -### Authentication and security +## Workflow automation and no-code tools - Implement authentication and authorization for web applications. + Create internal business applications with a low-code platform. - Securely manage user authentication and access controls for applications. + Automate workflows and integrate services with a no-code platform. - Secure authentication and user identity management for applications. + Build custom internal tools quickly using a drag-and-drop interface. - - -### Business intelligence and data visualization - - + Develop internal tools and business applications with a low-code builder. + Automate workflows by connecting different applications and services. - - - + \ No newline at end of file diff --git a/integrations/migration/migrate-with-downtime.mdx b/integrations/migration/migrate-with-downtime.mdx deleted file mode 100644 index 9de3818..0000000 --- a/integrations/migration/migrate-with-downtime.mdx +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Import from a database with downtime -description: Simulate and analyze a transport dataset in your Tiger Cloud service -products: [cloud, mst, self_hosted] -keywords: [IoT, simulate] ---- - -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - - - - - 1. Do this - 2. Do that - - - - - - 1. Do this - 2. Do that - - - - - - 1. Do this - 2. Do that - - - - - - 1. Do this - 2. 
Do that - - - - diff --git a/integrations/troubleshooting.mdx b/integrations/troubleshooting.mdx index 67e4c95..5efb347 100644 --- a/integrations/troubleshooting.mdx +++ b/integrations/troubleshooting.mdx @@ -1,32 +1,4 @@ --- title: Troubleshooting -sidebarTitle: Troubleshooting -description: Troubleshoot common problems that occur when integrating Tiger Cloud services with third-party solutions -products: [cloud, self_hosted] -keywords: [troubleshooting] +description: TBD --- - -import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; - -## JDBC authentication type is not supported - -When connecting to {SERVICE_LONG} with a Java Database Connectivity (JDBC) -driver, you might get this error message: - -```text -Check that your connection definition references your JDBC database with correct URL syntax, -username, and password. The authentication type 10 is not supported. -``` - -Your {CLOUD_LONG} authentication type doesn't match your JDBC driver's -supported authentication types. The recommended approach is to upgrade your JDBC -driver to a version that supports `scram-sha-256` encryption. If that isn't an -option, you can change the authentication type for your {SERVICE_LONG} -to `md5`. Note that `md5` is less secure, and is provided solely for -compatibility with older clients. - -For information on changing your authentication type, see the documentation on -[resetting your service password][password-reset]. - -[password-reset]: /use-timescale/:currentVersion:/services/service-management/#reset-service-password - diff --git a/snippets/changes/_not-supported-for-azure.mdx b/snippets/changes/_not-supported-for-azure.mdx new file mode 100644 index 0000000..95b5c2d --- /dev/null +++ b/snippets/changes/_not-supported-for-azure.mdx @@ -0,0 +1,5 @@ +import { CLOUD_LONG } from '/snippets/vars.mdx'; + + +This feature is currently not supported for {CLOUD_LONG} on Microsoft Azure. 
+ \ No newline at end of file diff --git a/snippets/coding/_start-coding-golang.mdx b/snippets/coding/_start-coding-golang.mdx index ea646fc..219343b 100644 --- a/snippets/coding/_start-coding-golang.mdx +++ b/snippets/coding/_start-coding-golang.mdx @@ -1,4 +1,4 @@ -import { SERVICE_SHORT, COMPANY, CONSOLE, OPS_MODE, READ_REPLICA, COLUMNSTORE, DATA_MODE, ACCOUNT_LONG, CLOUD_LONG, SERVICE_LONG, TIMESCALE_DB, PG, HYPERCORE } from '/snippets/vars.mdx'; +import { CLOUD_LONG, TIMESCALE_DB, PG } from '/snippets/vars.mdx'; import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; ## Prerequisites diff --git a/snippets/coding/_start-coding-java.mdx b/snippets/coding/_start-coding-java.mdx index adb3c6c..b2327e7 100644 --- a/snippets/coding/_start-coding-java.mdx +++ b/snippets/coding/_start-coding-java.mdx @@ -1,3 +1,4 @@ +import { SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; ## Prerequisites diff --git a/snippets/coding/_start-coding-node.mdx b/snippets/coding/_start-coding-node.mdx index c52e1d9..2ea4125 100644 --- a/snippets/coding/_start-coding-node.mdx +++ b/snippets/coding/_start-coding-node.mdx @@ -1,3 +1,4 @@ +import { TIMESCALE_DB } from '/snippets/vars.mdx'; import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; ## Prerequisites diff --git a/snippets/coding/_start-coding-python.mdx b/snippets/coding/_start-coding-python.mdx index 1b8aebd..04ce49a 100644 --- a/snippets/coding/_start-coding-python.mdx +++ b/snippets/coding/_start-coding-python.mdx @@ -1,4 +1,4 @@ - +import { PG, TIMESCALE_DB } from '/snippets/vars.mdx'; import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; ## Prerequisites diff --git a/snippets/coding/_start-coding-ruby.mdx b/snippets/coding/_start-coding-ruby.mdx index 0ec34fa..78c0d43 100644 --- a/snippets/coding/_start-coding-ruby.mdx +++ b/snippets/coding/_start-coding-ruby.mdx @@ -1,4 +1,4 @@ - +import { CLOUD_LONG, COMPANY, PG, SELF_LONG_CAP, TIMESCALE_DB } from '/snippets/vars.mdx'; import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; ## Prerequisites diff --git a/snippets/integrations/_integration-prereqs-cloud-only.mdx b/snippets/integrations/_integration-prereqs-cloud-only.mdx new file mode 100644 index 0000000..56b2e62 --- /dev/null +++ b/snippets/integrations/_integration-prereqs-cloud-only.mdx @@ -0,0 +1,7 @@ +import { SERVICE_LONG } from '/snippets/vars.mdx'; + +To follow the steps on this page: + +* Create a target {SERVICE_LONG} with the Real-time analytics capability. + + You need your [connection details](/integrations/find-connection-details). \ No newline at end of file diff --git a/snippets/integrations/_livesync-limitations.mdx b/snippets/integrations/_livesync-limitations.mdx new file mode 100644 index 0000000..aa022fc --- /dev/null +++ b/snippets/integrations/_livesync-limitations.mdx @@ -0,0 +1,42 @@ +import { PG, TIMESCALE_DB, SERVICE_LONG, PG_CONNECTOR } from '/snippets/vars.mdx'; + +* This works for {PG} databases only as source. {TIMESCALE_DB} is not yet supported. + +* The source must be running {PG} 13 or later. + +* Schema changes must be co-ordinated. + + Make compatible changes to the schema in your {SERVICE_LONG} first, then make + the same changes to the source {PG} instance. + +* Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. 
+ + The {PG_CONNECTOR} does not create extensions on the target. If the table uses + column types from an extension, first create the extension on the + target {SERVICE_LONG} before syncing the table. + +* There is WAL volume growth on the source {PG} instance during large table copy. + +* Continuous aggregate invalidation + + The connector uses `session_replication_role=replica` during data replication, + which prevents table triggers from firing. This includes the internal + triggers that mark continuous aggregates as invalid when underlying data + changes. + + If you have continuous aggregates on your target database, they do not + automatically refresh for data inserted during the migration. This limitation + only applies to data below the continuous aggregate's materialization + watermark. For example, backfilled data. New rows synced above the continuous + aggregate watermark are used correctly when refreshing. + + This can lead to: + + - Missing data in continuous aggregates for the migration period. + - Stale aggregate data. + - Queries returning incomplete results. + + If the continuous aggregate exists in the source database, best + practice is to add it to the {PG} connector publication. If it only exists on the + target database, manually refresh the continuous aggregate using the `force` + option of refresh_continuous_aggregate. \ No newline at end of file diff --git a/snippets/integrations/_livesync-prereqs-cloud.mdx b/snippets/integrations/_livesync-prereqs-cloud.mdx new file mode 100644 index 0000000..dcd55c7 --- /dev/null +++ b/snippets/integrations/_livesync-prereqs-cloud.mdx @@ -0,0 +1,14 @@ +import { SERVICE_LONG, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; + +To follow the steps on this page: + +* Create a target {SERVICE_LONG} with real-time analytics enabled. + + You need your [connection details](/integrations/find-connection-details). + +- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. + +- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. + + The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, + first create the extension on the target {SERVICE_LONG} before syncing the table. \ No newline at end of file diff --git a/snippets/integrations/_livesync-prereqs-terminal.mdx b/snippets/integrations/_livesync-prereqs-terminal.mdx new file mode 100644 index 0000000..898f035 --- /dev/null +++ b/snippets/integrations/_livesync-prereqs-terminal.mdx @@ -0,0 +1,30 @@ +import { SERVICE_LONG, SERVICE_SHORT, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; + +Best practice is to use an [Ubuntu EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html#ec2-launch-instance) hosted in the same region as your +{SERVICE_LONG} to move data. That is, the machine you run the commands on to move your +data from your source database to your target {SERVICE_LONG}. + +Before you move your data: + +- Create a target {SERVICE_LONG}. + + Each {SERVICE_LONG} has a single {PG} instance that supports the + most popular extensions. {SERVICE_LONG}s do not support tablespaces, + and there is no superuser associated with a {SERVICE_SHORT}. + Best practice is to create a {SERVICE_LONG} with at least 8 CPUs for a smoother experience. A higher-spec instance + can significantly reduce the overall migration window. 
+ +- To ensure that maintenance does not run while migration is in progress, best practice is to adjust the maintenance window. + +- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. + + The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, + first create the extension on the target {SERVICE_LONG} before syncing the table. + +- [Install Docker](https://docs.docker.com/engine/install/) on your sync machine. + + For a better experience, use a 4 CPU/16GB EC2 instance or greater to run the {PG_CONNECTOR}. + +- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. + + This includes `psql`, `pg_dump`, `pg_dumpall`, and `vacuumdb` commands. \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-golang.mdx b/snippets/integrations/code/_start-coding-golang.mdx new file mode 100644 index 0000000..596d89c --- /dev/null +++ b/snippets/integrations/code/_start-coding-golang.mdx @@ -0,0 +1,840 @@ +import { CLOUD_LONG, TIMESCALE_DB, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; + +## Prerequisites + + + +- Install [Go](https://golang.org/doc/install). +- Install the [PGX driver for Go](https://github.com/jackc/pgx). + +## Connect to your {SERVICE_LONG} + +In this section, you create a connection to {CLOUD_LONG} using the PGX driver. +PGX is a toolkit designed to help Go developers work directly with {PG}. +You can use it to help your Go application interact directly with TimescaleDB. + +1. Locate your {TIMESCALE_DB} credentials and use them to compose a connection + string for PGX. + + You'll need: + + * password + * username + * host URL + * port number + * database name + +2. Compose your connection string variable as a + [libpq connection string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), using this format: + + ```go + connStr := "postgres://username:password@host:port/dbname" + ``` + + If you're using a hosted version of TimescaleDB, or if you need an SSL + connection, use this format instead: + + ```go + connStr := "postgres://username:password@host:port/dbname?sslmode=require" + ``` + +3. You can check that you're connected to your database with this + hello world program: + + ```go + package main + + import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5" + ) + + //connect to database using a single connection + func main() { + /***********************************************/ + /* Single Connection to TimescaleDB/ PostgreSQL */ + /***********************************************/ + ctx := context.Background() + connStr := "yourConnectionStringHere" + conn, err := pgx.Connect(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer conn.Close(ctx) + + //run a simple query to check our connection + var greeting string + err = conn.QueryRow(ctx, "select 'Hello, Timescale!'").Scan(&greeting) + if err != nil { + fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) + os.Exit(1) + } + fmt.Println(greeting) + } + + ``` + + If you'd like to specify your connection string as an environment variable, + you can use this syntax to access it in place of the `connStr` variable: + + ```go + os.Getenv("DATABASE_CONNECTION_STRING") + ``` + +Alternatively, you can connect to {TIMESCALE_DB} using a connection pool. 
+Connection pooling is useful to conserve computing resources, and can also +result in faster database queries: + +1. To create a connection pool that can be used for concurrent connections to + your database, use the `pgxpool.New()` function instead of + `pgx.Connect()`. Also note that this script imports + `github.com/jackc/pgx/v5/pgxpool`, instead of `pgx/v5` which was used to + create a single connection: + + ```go + package main + + import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + //run a simple query to check our connection + var greeting string + err = dbpool.QueryRow(ctx, "select 'Hello, Tiger Data (but concurrently)'").Scan(&greeting) + if err != nil { + fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) + os.Exit(1) + } + fmt.Println(greeting) + } + ``` + +## Create a relational table + +In this section, you create a table called `sensors` which holds the ID, type, +and location of your fictional sensors. Additionally, you create a hypertable +called `sensor_data` which holds the measurements of those sensors. The +measurements contain the time, sensor_id, temperature reading, and CPU +percentage of the sensors. + +1. Compose a string that contains the SQL statement to create a relational + table. This example creates a table called `sensors`, with columns for ID, + type, and location: + + ```go + queryCreateTable := `CREATE TABLE sensors (id SERIAL PRIMARY KEY, type VARCHAR(50), location VARCHAR(50));` + ``` + +2. Execute the `CREATE TABLE` statement with the `Exec()` function on the + `dbpool` object, using the arguments of the current context and the + statement string you created: + + ```go + package main + + import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + /********************************************/ + /* Create relational table */ + /********************************************/ + + //Create relational table called sensors + queryCreateTable := `CREATE TABLE sensors (id SERIAL PRIMARY KEY, type VARCHAR(50), location VARCHAR(50));` + _, err = dbpool.Exec(ctx, queryCreateTable) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to create SENSORS table: %v\n", err) + os.Exit(1) + } + fmt.Println("Successfully created relational table SENSORS") + } + ``` + +## Generate a hypertable + +When you have created the relational table, you can create a hypertable. +Creating tables and indexes, altering tables, inserting data, selecting data, +and most other tasks are executed on the hypertable. + +1. Create a variable for the `CREATE TABLE SQL` statement for your hypertable. + Notice how the hypertable has the compulsory time column: + + ```go + queryCreateTable := `CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER, + temperature DOUBLE PRECISION, + cpu DOUBLE PRECISION, + FOREIGN KEY (sensor_id) REFERENCES sensors (id)); + ` + ``` + +2. Formulate the `SELECT` statement to convert the table into a hypertable. 
You + must specify the table name to convert to a hypertable, and its time column + name as the second argument. For more information, see the + [`create_hypertable` docs](/api/hypertable/create_hypertable): + + ```go + queryCreateHypertable := `SELECT create_hypertable('sensor_data', by_range('time'));` + ``` + + + The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. + + +3. Execute the `CREATE TABLE` statement and `SELECT` statement which converts + the table into a hypertable. You can do this by calling the `Exec()` + function on the `dbpool` object, using the arguments of the current context, + and the `queryCreateTable` and `queryCreateHypertable` statement strings: + + ```go + package main + + import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + /********************************************/ + /* Create Hypertable */ + /********************************************/ + // Create hypertable of time-series data called sensor_data + queryCreateTable := `CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER, + temperature DOUBLE PRECISION, + cpu DOUBLE PRECISION, + FOREIGN KEY (sensor_id) REFERENCES sensors (id)); + ` + + queryCreateHypertable := `SELECT create_hypertable('sensor_data', by_range('time'));` + + //execute statement + _, err = dbpool.Exec(ctx, queryCreateTable+queryCreateHypertable) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to create the `sensor_data` hypertable: %v\n", err) + os.Exit(1) + } + fmt.Println("Successfully created hypertable `sensor_data`") + } + ``` + +## Insert rows of data + +You can insert rows into your database in a couple of different +ways. Each of these example inserts the data from the two arrays, `sensorTypes` and +`sensorLocations`, into the relational table named `sensors`. + +The first example inserts a single row of data at a time. The second example +inserts multiple rows of data. The third example uses batch inserts to speed up +the process. + +1. 
Open a connection pool to the database, then use the prepared statements to + formulate an `INSERT` SQL statement, and execute it: + + ```go + package main + + import ( + "context" + "fmt" + "os" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + /********************************************/ + /* INSERT into relational table */ + /********************************************/ + //Insert data into relational table + + // Slices of sample data to insert + // observation i has type sensorTypes[i] and location sensorLocations[i] + sensorTypes := []string{"a", "a", "b", "b"} + sensorLocations := []string{"floor", "ceiling", "floor", "ceiling"} + + for i := range sensorTypes { + //INSERT statement in SQL + queryInsertMetadata := `INSERT INTO sensors (type, location) VALUES ($1, $2);` + + //Execute INSERT command + _, err := dbpool.Exec(ctx, queryInsertMetadata, sensorTypes[i], sensorLocations[i]) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to insert data into database: %v\n", err) + os.Exit(1) + } + fmt.Printf("Inserted sensor (%s, %s) into database \n", sensorTypes[i], sensorLocations[i]) + } + fmt.Println("Successfully inserted all sensors into database") + } + ``` + +Instead of inserting a single row of data at a time, you can use this procedure +to insert multiple rows of data, instead: + +1. This example uses {PG} to generate some sample time-series to insert + into the `sensor_data` hypertable. Define the SQL statement to generate the + data, called `queryDataGeneration`. Then use the `.Query()` function to + execute the statement and return the sample data. 
The data returned by the + query is stored in `results`, a slice of structs, which is then used as a + source to insert data into the hypertable: + + ```go + package main + + import ( + "context" + "fmt" + "os" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + // Generate data to insert + + //SQL query to generate sample data + queryDataGeneration := ` + SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, + floor(random() * (3) + 1)::int as sensor_id, + random()*100 AS temperature, + random() AS cpu + ` + //Execute query to generate samples for sensor_data hypertable + rows, err := dbpool.Query(ctx, queryDataGeneration) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) + os.Exit(1) + } + defer rows.Close() + + fmt.Println("Successfully generated sensor data") + + //Store data generated in slice results + type result struct { + Time time.Time + SensorId int + Temperature float64 + CPU float64 + } + + var results []result + for rows.Next() { + var r result + err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) + os.Exit(1) + } + results = append(results, r) + } + + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } + + // Check contents of results slice + fmt.Println("Contents of RESULTS slice") + for i := range results { + var r result + r = results[i] + fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) + } + } + ``` + +2. Formulate an SQL insert statement for the `sensor_data` hypertable: + + ```go + //SQL query to generate sample data + queryInsertTimeseriesData := ` + INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); + ` + ``` + +3. Execute the SQL statement for each sample in the results slice: + + ```go + //Insert contents of results slice into TimescaleDB + for i := range results { + var r result + r = results[i] + _, err := dbpool.Exec(ctx, queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to insert sample into TimescaleDB %v\n", err) + os.Exit(1) + } + defer rows.Close() + } + fmt.Println("Successfully inserted samples into sensor_data hypertable") + ``` + +4. 
This example `main.go` generates sample data and inserts it into + the `sensor_data` hypertable: + + ```go + package main + + import ( + "context" + "fmt" + "os" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + /********************************************/ + /* Connect using Connection Pool */ + /********************************************/ + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + /********************************************/ + /* Insert data into hypertable */ + /********************************************/ + // Generate data to insert + + //SQL query to generate sample data + queryDataGeneration := ` + SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, + floor(random() * (3) + 1)::int as sensor_id, + random()*100 AS temperature, + random() AS cpu + ` + //Execute query to generate samples for sensor_data hypertable + rows, err := dbpool.Query(ctx, queryDataGeneration) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) + os.Exit(1) + } + defer rows.Close() + + fmt.Println("Successfully generated sensor data") + + //Store data generated in slice results + type result struct { + Time time.Time + SensorId int + Temperature float64 + CPU float64 + } + var results []result + for rows.Next() { + var r result + err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) + os.Exit(1) + } + results = append(results, r) + } + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } + + // Check contents of results slice + fmt.Println("Contents of RESULTS slice") + for i := range results { + var r result + r = results[i] + fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) + } + + //Insert contents of results slice into TimescaleDB + //SQL query to generate sample data + queryInsertTimeseriesData := ` + INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); + ` + + //Insert contents of results slice into TimescaleDB + for i := range results { + var r result + r = results[i] + _, err := dbpool.Exec(ctx, queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to insert sample into TimescaleDB %v\n", err) + os.Exit(1) + } + defer rows.Close() + } + fmt.Println("Successfully inserted samples into sensor_data hypertable") + } + ``` + +Inserting multiple rows of data using this method executes as many `insert` +statements as there are samples to be inserted. This can make ingestion of data +slow. To speed up ingestion, you can batch insert data instead. + +Here's a sample pattern for how to do so, using the sample data you generated in +the previous procedure. It uses the pgx `Batch` object: + +1. 
This example batch inserts data into the database: + + ```go + package main + + import ( + "context" + "fmt" + "os" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + /********************************************/ + /* Connect using Connection Pool */ + /********************************************/ + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + // Generate data to insert + + //SQL query to generate sample data + queryDataGeneration := ` + SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, + floor(random() * (3) + 1)::int as sensor_id, + random()*100 AS temperature, + random() AS cpu + ` + + //Execute query to generate samples for sensor_data hypertable + rows, err := dbpool.Query(ctx, queryDataGeneration) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) + os.Exit(1) + } + defer rows.Close() + + fmt.Println("Successfully generated sensor data") + + //Store data generated in slice results + type result struct { + Time time.Time + SensorId int + Temperature float64 + CPU float64 + } + var results []result + for rows.Next() { + var r result + err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) + os.Exit(1) + } + results = append(results, r) + } + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } + + // Check contents of results slice + /*fmt.Println("Contents of RESULTS slice") + for i := range results { + var r result + r = results[i] + fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) + }*/ + + //Insert contents of results slice into TimescaleDB + //SQL query to generate sample data + queryInsertTimeseriesData := ` + INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); + ` + + /********************************************/ + /* Batch Insert into TimescaleDB */ + /********************************************/ + //create batch + batch := &pgx.Batch{} + //load insert statements into batch queue + for i := range results { + var r result + r = results[i] + batch.Queue(queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) + } + batch.Queue("select count(*) from sensor_data") + + //send batch to connection pool + br := dbpool.SendBatch(ctx, batch) + //execute statements in batch queue + _, err = br.Exec() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to execute statement in batch queue %v\n", err) + os.Exit(1) + } + fmt.Println("Successfully batch inserted data") + + //Compare length of results slice to size of table + fmt.Printf("size of results: %d\n", len(results)) + //check size of table for number of rows inserted + // result of last SELECT statement + var rowsInserted int + err = br.QueryRow().Scan(&rowsInserted) + fmt.Printf("size of table: %d\n", rowsInserted) + + err = br.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to closer batch %v\n", err) + os.Exit(1) + } + } + ``` + +## Execute a query + +This section covers how to execute queries against your database. + +1. Define the SQL query you'd like to run on the database. 
This example uses a + SQL query that combines time-series and relational data. It returns the + average CPU values for every 5 minute interval, for sensors located on + location `ceiling` and of type `a`: + + ```go + // Formulate query in SQL + // Note the use of prepared statement placeholders $1 and $2 + queryTimebucketFiveMin := ` + SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu) + FROM sensor_data + JOIN sensors ON sensors.id = sensor_data.sensor_id + WHERE sensors.location = $1 AND sensors.type = $2 + GROUP BY five_min + ORDER BY five_min DESC; + ` + ``` + +2. Use the `.Query()` function to execute the query string. Make sure you + specify the relevant placeholders: + + ```go + //Execute query on TimescaleDB + rows, err := dbpool.Query(ctx, queryTimebucketFiveMin, "ceiling", "a") + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to execute query %v\n", err) + os.Exit(1) + } + defer rows.Close() + + fmt.Println("Successfully executed query") + ``` + +3. Access the rows returned by `.Query()`. Create a struct with fields + representing the columns that you expect to be returned, then use the + `rows.Next()` function to iterate through the rows returned and fill + `results` with the array of structs. This uses the `rows.Scan()` function, + passing in pointers to the fields that you want to scan for results. + + This example prints out the results returned from the query, but you might + want to use those results for some other purpose. Once you've scanned + through all the rows returned you can then use the results array however you + like. + + ```go + //Do something with the results of query + // Struct for results + type result2 struct { + Bucket time.Time + Avg float64 + } + + // Print rows returned and fill up results slice for later use + var results []result2 + for rows.Next() { + var r result2 + err = rows.Scan(&r.Bucket, &r.Avg) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) + os.Exit(1) + } + results = append(results, r) + fmt.Printf("Time bucket: %s | Avg: %f\n", &r.Bucket, r.Avg) + } + + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } + + // use results here… + ``` + +4. 
This example program runs a query, and accesses the results of + that query: + + ```go + package main + + import ( + "context" + "fmt" + "os" + "time" + + "github.com/jackc/pgx/v5/pgxpool" + ) + + func main() { + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() + + /********************************************/ + /* Execute a query */ + /********************************************/ + + // Formulate query in SQL + // Note the use of prepared statement placeholders $1 and $2 + queryTimebucketFiveMin := ` + SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu) + FROM sensor_data + JOIN sensors ON sensors.id = sensor_data.sensor_id + WHERE sensors.location = $1 AND sensors.type = $2 + GROUP BY five_min + ORDER BY five_min DESC; + ` + + //Execute query on TimescaleDB + rows, err := dbpool.Query(ctx, queryTimebucketFiveMin, "ceiling", "a") + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to execute query %v\n", err) + os.Exit(1) + } + defer rows.Close() + + fmt.Println("Successfully executed query") + + //Do something with the results of query + // Struct for results + type result2 struct { + Bucket time.Time + Avg float64 + } + + // Print rows returned and fill up results slice for later use + var results []result2 + for rows.Next() { + var r result2 + err = rows.Scan(&r.Bucket, &r.Avg) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) + os.Exit(1) + } + results = append(results, r) + fmt.Printf("Time bucket: %s | Avg: %f\n", &r.Bucket, r.Avg) + } + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } + } + ``` + +## Next steps + +Now that you're able to connect, read, and write to a {TIMESCALE_DB} instance from +your Go application, be sure to check out these advanced {TIMESCALE_DB} tutorials: + +* Refer to the [pgx documentation](https://pkg.go.dev/github.com/jackc/pgx) for more information about pgx. +* Get up and running with {TIMESCALE_DB} with the [Getting Started](/getting-started/latest/) + tutorial. +* Want fast inserts on CSV data? Check out + [{TIMESCALE_DB} parallel copy](https://github.com/timescale/timescaledb-parallel-copy), a tool for fast inserts, + written in Go. \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-java.mdx b/snippets/integrations/code/_start-coding-java.mdx new file mode 100644 index 0000000..f6f403b --- /dev/null +++ b/snippets/integrations/code/_start-coding-java.mdx @@ -0,0 +1,574 @@ +import { SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; + +## Prerequisites + + + +* Install the [Java Development Kit (JDK)](https://openjdk.java.net). +* Install the [PostgreSQL JDBC driver](https://jdbc.postgresql.org). + +All code in this quick start is for Java 16 and later. If you are working +with older JDK versions, use legacy coding techniques. + +## Connect to your {SERVICE_LONG} + +In this section, you create a connection to your {SERVICE_SHORT} using an application in +a single file. You can use any of your favorite build tools, including `gradle` +or `maven`. + +1. 
Create a directory containing a text file called `Main.java`, with this content: + + ```java + package com.timescale.java; + + public class Main { + + public static void main(String... args) { + System.out.println("Hello, World!"); + } + } + ``` + +2. From the command line in the current directory, run the application: + + ```bash + java Main.java + ``` + + If the command is successful, `Hello, World!` line output is printed + to your console. + +3. Import the PostgreSQL JDBC driver. If you are using a dependency manager, + include the [PostgreSQL JDBC Driver](https://mvnrepository.com/artifact/org.postgresql/postgresql) as a + dependency. + +4. Download the [JAR artifact of the JDBC Driver](https://jdbc.postgresql.org/download/) and + save it with the `Main.java` file. + +5. Import the `JDBC Driver` into the Java application and display a list of + available drivers for the check: + + ```java + package com.timescale.java; + + import java.sql.DriverManager; + + public class Main { + + public static void main(String... args) { + DriverManager.drivers().forEach(System.out::println); + } + } + ``` + +6. Run all the examples: + + ```bash + java -cp *.jar Main.java + ``` + + If the command is successful, a string similar to + `org.postgresql.Driver@7f77e91b` is printed to your console. This means that you + are ready to connect to {TIMESCALE_DB} from Java. + +7. Locate your {TIMESCALE_DB} credentials and use them to compose a connection + string for JDBC. + + You'll need: + + * password + * username + * host URL + * port + * database name + +8. Compose your connection string variable, using this format: + + ```java + var connUrl = "jdbc:postgresql://:/?user=&password="; + ``` + + For more information about creating connection strings, see the [JDBC documentation](https://jdbc.postgresql.org/documentation/datasource/). + + + This method of composing a connection string is for test or development + purposes only. For production, use environment variables for sensitive + details like your password, hostname, and port number. + + + ```java + package com.timescale.java; + + import java.sql.DriverManager; + import java.sql.SQLException; + + public class Main { + + public static void main(String... args) throws SQLException { + var connUrl = "jdbc:postgresql://:/?user=&password="; + var conn = DriverManager.getConnection(connUrl); + System.out.println(conn.getClientInfo()); + } + } + ``` + +9. Run the code: + + ```bash + java -cp *.jar Main.java + ``` + + If the command is successful, a string similar to + `{ApplicationName=PostgreSQL JDBC Driver}` is printed to your console. + +## Create a relational table + +In this section, you create a table called `sensors` which holds the ID, type, +and location of your fictional sensors. Additionally, you create a hypertable +called `sensor_data` which holds the measurements of those sensors. The +measurements contain the time, sensor_id, temperature reading, and CPU +percentage of the sensors. + +1. Compose a string which contains the SQL statement to create a relational + table. This example creates a table called `sensors`, with columns `id`, + `type` and `location`: + + ```sql + CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type TEXT NOT NULL, + location TEXT NOT NULL + ); + ``` + +2. 
Create a statement, execute the query you created in the previous step, and + check that the table was created successfully: + + ```java + package com.timescale.java; + + import java.sql.DriverManager; + import java.sql.SQLException; + + public class Main { + + public static void main(String... args) throws SQLException { + var connUrl = "jdbc:postgresql://:/?user=&password="; + var conn = DriverManager.getConnection(connUrl); + + var createSensorTableQuery = """ + CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type TEXT NOT NULL, + location TEXT NOT NULL + ) + """; + try (var stmt = conn.createStatement()) { + stmt.execute(createSensorTableQuery); + } + + var showAllTablesQuery = "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = 'public'"; + try (var stmt = conn.createStatement(); + var rs = stmt.executeQuery(showAllTablesQuery)) { + System.out.println("Tables in the current database: "); + while (rs.next()) { + System.out.println(rs.getString("tablename")); + } + } + } + } + ``` + +## Create a hypertable + +When you have created the relational table, you can create a hypertable. +Creating tables and indexes, altering tables, inserting data, selecting data, +and most other tasks are executed on the hypertable. + +1. Create a `CREATE TABLE` SQL statement for + your hypertable. Notice how the hypertable has the compulsory time column: + + ```sql + CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER REFERENCES sensors (id), + value DOUBLE PRECISION + ); + ``` + +2. Create a statement, execute the query you created in the previous step: + + ```sql + SELECT create_hypertable('sensor_data', by_range('time')); + ``` + + + The `by_range` and `by_hash` dimension builder is an addition to {TIMESCALE_DB} 2.13. + + +3. Execute the two statements you created, and commit your changes to the + database: + + ```java + package com.timescale.java; + + import java.sql.Connection; + import java.sql.DriverManager; + import java.sql.SQLException; + import java.util.List; + + public class Main { + + public static void main(String... args) { + final var connUrl = "jdbc:postgresql://:/?user=&password="; + try (var conn = DriverManager.getConnection(connUrl)) { + createSchema(conn); + insertData(conn); + } catch (SQLException ex) { + System.err.println(ex.getMessage()); + } + } + + private static void createSchema(final Connection conn) throws SQLException { + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type TEXT NOT NULL, + location TEXT NOT NULL + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER REFERENCES sensors (id), + value DOUBLE PRECISION + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); + } + } + } + ``` + +## Insert data + +You can insert data into your hypertables in several different ways. In this +section, you can insert single rows, or insert by batches of rows. + +1. 
Open a connection to the database, use prepared statements to formulate the
+   `INSERT` SQL statement, then execute the statement:
+
+    ```java
+    final List sensors = List.of(
+            new Sensor("temperature", "bedroom"),
+            new Sensor("temperature", "living room"),
+            new Sensor("temperature", "outside"),
+            new Sensor("humidity", "kitchen"),
+            new Sensor("humidity", "outside"));
+    for (final var sensor : sensors) {
+        try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) {
+            stmt.setString(1, sensor.type());
+            stmt.setString(2, sensor.location());
+            stmt.executeUpdate();
+        }
+    }
+    ```
+
+To insert a batch of rows using a batching mechanism, generate some sample
+time-series data and insert it into the `sensor_data` hypertable:
+
+1. Insert batches of rows:
+
+    ```java
+    final var sensorDataCount = 100;
+    final var insertBatchSize = 10;
+    try (var stmt = conn.prepareStatement("""
+            INSERT INTO sensor_data (time, sensor_id, value)
+            VALUES (
+                generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'),
+                floor(random() * 4 + 1)::INTEGER,
+                random()
+            )
+            """)) {
+        for (int i = 0; i < sensorDataCount; i++) {
+            stmt.addBatch();
+
+            if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) {
+                stmt.executeBatch();
+            }
+        }
+    }
+    ```
+
+## Execute a query
+
+This section covers how to execute queries against your database.
+
+1. Define the SQL query you'd like to run on the database. This example
+   combines time-series and relational data. It returns the average values for
+   every 15-minute interval for sensors with a specific type and location.
+
+    ```sql
+    SELECT time_bucket('15 minutes', time) AS bucket, avg(value)
+    FROM sensor_data
+    JOIN sensors ON sensors.id = sensor_data.sensor_id
+    WHERE sensors.type = ? AND sensors.location = ?
+    GROUP BY bucket
+    ORDER BY bucket DESC;
+    ```
+
+2. Execute the query with the prepared statement and read out the result set
+   for all temperature sensors located in the living room:
+
+    ```java
+    try (var stmt = conn.prepareStatement("""
+            SELECT time_bucket('15 minutes', time) AS bucket, avg(value)
+            FROM sensor_data
+            JOIN sensors ON sensors.id = sensor_data.sensor_id
+            WHERE sensors.type = ? AND sensors.location = ?
+            GROUP BY bucket
+            ORDER BY bucket DESC
+            """)) {
+        stmt.setString(1, "temperature");
+        stmt.setString(2, "living room");
+
+        try (var rs = stmt.executeQuery()) {
+            while (rs.next()) {
+                System.out.printf("%s: %f%n", rs.getTimestamp(1), rs.getDouble(2));
+            }
+        }
+    }
+    ```
+
+    If the command is successful, you'll see output like this:
+
+    ```bash
+    2021-05-12 23:30:00.0: 0,508649
+    2021-05-12 23:15:00.0: 0,477852
+    2021-05-12 23:00:00.0: 0,462298
+    2021-05-12 22:45:00.0: 0,457006
+    2021-05-12 22:30:00.0: 0,568744
+    ...
+    ```
+
+## Next steps
+
+Now that you're able to connect, read, and write to a {TIMESCALE_DB} instance from
+your Java application, and generate the scaffolding necessary to build a new
+application from an existing {TIMESCALE_DB} instance, be sure to check out these
+advanced {TIMESCALE_DB} tutorials:
+
+* [Continuous Aggregates](/use-timescale/continuous-aggregates)
+* [Migrate Your own Data](/migrate)
+
+## Complete code samples
+
+This section contains complete code samples.
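+
+Assuming the PostgreSQL JDBC Driver JAR is saved next to `Main.java` as described
+above, and that the placeholder connection string is replaced with your own
+connection details, you can compile and run each of the samples below with the
+same command used earlier:
+
+```bash
+java -cp *.jar Main.java
+```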
+ +### Complete code sample + +```java +package com.timescale.java; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.List; + +public class Main { + + public static void main(String... args) { + final var connUrl = "jdbc:postgresql://:/?user=&password="; + try (var conn = DriverManager.getConnection(connUrl)) { + createSchema(conn); + insertData(conn); + } catch (SQLException ex) { + System.err.println(ex.getMessage()); + } + } + + private static void createSchema(final Connection conn) throws SQLException { + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type TEXT NOT NULL, + location TEXT NOT NULL + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER REFERENCES sensors (id), + value DOUBLE PRECISION + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); + } + } + + private static void insertData(final Connection conn) throws SQLException { + final List sensors = List.of( + new Sensor("temperature", "bedroom"), + new Sensor("temperature", "living room"), + new Sensor("temperature", "outside"), + new Sensor("humidity", "kitchen"), + new Sensor("humidity", "outside")); + for (final var sensor : sensors) { + try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) { + stmt.setString(1, sensor.type()); + stmt.setString(2, sensor.location()); + stmt.executeUpdate(); + } + } + + final var sensorDataCount = 100; + final var insertBatchSize = 10; + try (var stmt = conn.prepareStatement(""" + INSERT INTO sensor_data (time, sensor_id, value) + VALUES ( + generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'), + floor(random() * 4 + 1)::INTEGER, + random() + ) + """)) { + for (int i = 0; i < sensorDataCount; i++) { + stmt.addBatch(); + + if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) { + stmt.executeBatch(); + } + } + } + } + + private record Sensor(String type, String location) { + } +} +``` + +### Execute more complex queries + +```java +package com.timescale.java; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.List; + +public class Main { + + public static void main(String... 
args) { + final var connUrl = "jdbc:postgresql://:/?user=&password="; + try (var conn = DriverManager.getConnection(connUrl)) { + createSchema(conn); + insertData(conn); + executeQueries(conn); + } catch (SQLException ex) { + System.err.println(ex.getMessage()); + } + } + + private static void createSchema(final Connection conn) throws SQLException { + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type TEXT NOT NULL, + location TEXT NOT NULL + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute(""" + CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER REFERENCES sensors (id), + value DOUBLE PRECISION + ) + """); + } + + try (var stmt = conn.createStatement()) { + stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); + } + } + + private static void insertData(final Connection conn) throws SQLException { + final List sensors = List.of( + new Sensor("temperature", "bedroom"), + new Sensor("temperature", "living room"), + new Sensor("temperature", "outside"), + new Sensor("humidity", "kitchen"), + new Sensor("humidity", "outside")); + for (final var sensor : sensors) { + try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) { + stmt.setString(1, sensor.type()); + stmt.setString(2, sensor.location()); + stmt.executeUpdate(); + } + } + + final var sensorDataCount = 100; + final var insertBatchSize = 10; + try (var stmt = conn.prepareStatement(""" + INSERT INTO sensor_data (time, sensor_id, value) + VALUES ( + generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'), + floor(random() * 4 + 1)::INTEGER, + random() + ) + """)) { + for (int i = 0; i < sensorDataCount; i++) { + stmt.addBatch(); + + if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) { + stmt.executeBatch(); + } + } + } + } + + private static void executeQueries(final Connection conn) throws SQLException { + try (var stmt = conn.prepareStatement(""" + SELECT time_bucket('15 minutes', time) AS bucket, avg(value) + FROM sensor_data + JOIN sensors ON sensors.id = sensor_data.sensor_id + WHERE sensors.type = ? AND sensors.location = ? + GROUP BY bucket + ORDER BY bucket DESC + """)) { + stmt.setString(1, "temperature"); + stmt.setString(2, "living room"); + + try (var rs = stmt.executeQuery()) { + while (rs.next()) { + System.out.printf("%s: %f%n", rs.getTimestamp(1), rs.getDouble(2)); + } + } + } + } + + private record Sensor(String type, String location) { + } +} +``` \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-node.mdx b/snippets/integrations/code/_start-coding-node.mdx new file mode 100644 index 0000000..e95132e --- /dev/null +++ b/snippets/integrations/code/_start-coding-node.mdx @@ -0,0 +1,329 @@ +import { TIMESCALE_DB, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; + +## Prerequisites + + + +* Install [Node.js](https://nodejs.org). +* Install the Node.js package manager [npm](https://docs.npmjs.com/getting-started). + +## Connect to {TIMESCALE_DB} + +In this section, you create a connection to {TIMESCALE_DB} with a common Node.js +ORM (object relational mapper) called [Sequelize](https://sequelize.org). + +1. At the command prompt, initialize a new Node.js app: + + ```bash + npm init -y + ``` + + This creates a `package.json` file in your directory, which contains all + of the dependencies for your project. 
It looks something like this: + + ```json + { + "name": "node-sample", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC" + } + ``` + +2. Install Express.js: + + ```bash + npm install express + ``` + +3. Create a simple web page to check the connection. Create a new file called + `index.js`, with this content: + + ```javascript + const express = require('express') + const app = express() + const port = 3000; + + app.use(express.json()); + app.get('/', (req, res) => res.send('Hello World!')) + app.listen(port, () => console.log(`Example app listening at http://localhost:${port}`)) + ``` + +4. Test your connection by starting the application: + + ```bash + node index.js + ``` + + In your web browser, navigate to `http://localhost:3000`. If the connection + is successful, it shows "Hello World!" + +5. Add Sequelize to your project: + + ```bash + npm install sequelize sequelize-cli pg pg-hstore + ``` + +6. Locate your {TIMESCALE_DB} credentials and use them to compose a connection + string for Sequelize. + + You'll need: + + * password + * username + * host URL + * port + * database name + +7. Compose your connection string variable, using this format: + + ```javascript + 'postgres://:@:/' + ``` + +8. Open the `index.js` file you created. Require Sequelize in the application, + and declare the connection string: + + ```javascript + const Sequelize = require('sequelize') + const sequelize = new Sequelize('postgres://:@:/', + { + dialect: 'postgres', + protocol: 'postgres', + dialectOptions: { + ssl: { + require: true, + rejectUnauthorized: false + } + } + }) + ``` + + Make sure you add the SSL settings in the `dialectOptions` sections. You + can't connect to {TIMESCALE_DB} using SSL without them. + +9. You can test the connection by adding these lines to `index.js` after the + `app.get` statement: + + ```javascript + sequelize.authenticate().then(() => { + console.log('Connection has been established successfully.'); + }).catch(err => { + console.error('Unable to connect to the database:', err); + }); + ``` + + Start the application on the command line: + + ```bash + node index.js + ``` + + If the connection is successful, you'll get output like this: + + ```bash + Example app listening at http://localhost:3000 + Executing (default): SELECT 1+1 AS result + Connection has been established successfully. + ``` + +## Create a relational table + +In this section, you create a relational table called `page_loads`. + +1. Use the Sequelize command line tool to create a table and model called `page_loads`: + + ```bash + npx sequelize model:generate --name page_loads \ + --attributes userAgent:string,time:date + ``` + + The output looks similar to this: + + ```bash + Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] + + New model was created at . + New migration was created at . + ``` + +2. Edit the migration file so that it sets up a migration key: + + ```javascript + 'use strict'; + module.exports = { + up: async (queryInterface, Sequelize) => { + await queryInterface.createTable('page_loads', { + userAgent: { + primaryKey: true, + type: Sequelize.STRING + }, + time: { + primaryKey: true, + type: Sequelize.DATE + } + }); + }, + down: async (queryInterface, Sequelize) => { + await queryInterface.dropTable('page_loads'); + } + }; + ``` + +3. 
Migrate the change and make sure that it is reflected in the database: + + ```bash + npx sequelize db:migrate + ``` + + The output looks similar to this: + + ```bash + Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] + + Loaded configuration file "config/config.json". + Using environment "development". + == 20200528195725-create-page-loads: migrating ======= + == 20200528195725-create-page-loads: migrated (0.443s) + ``` + +4. Create the `PageLoads` model in your code. In the `index.js` file, above the + `app.use` statement, add these lines: + + ```javascript + let PageLoads = sequelize.define('page_loads', { + userAgent: {type: Sequelize.STRING, primaryKey: true }, + time: {type: Sequelize.DATE, primaryKey: true } + }, { timestamps: false }); + ``` + +5. Instantiate a `PageLoads` object and save it to the database. + +## Create a hypertable + +When you have created the relational table, you can create a hypertable. +Creating tables and indexes, altering tables, inserting data, selecting data, +and most other tasks are executed on the hypertable. + +1. Create a migration to modify the `page_loads` relational table, and change + it to a hypertable by first running the following command: + + ```bash + npx sequelize migration:generate --name add_hypertable + ``` + + The output looks similar to this: + + ```bash + Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] + + migrations folder at already exists. + New migration was created at /20200601202912-add_hypertable.js . + ``` + +2. In the `migrations` folder, there is now a new file. Open the + file, and add this content: + + ```javascript + 'use strict'; + + module.exports = { + up: (queryInterface, Sequelize) => { + return queryInterface.sequelize.query("SELECT create_hypertable('page_loads', by_range('time'));"); + }, + + down: (queryInterface, Sequelize) => { + } + }; + ``` + + + The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. + + +3. At the command prompt, run the migration command: + + ```bash + npx sequelize db:migrate + ``` + + The output looks similar to this: + + ```bash + Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] + + Loaded configuration file "config/config.json". + Using environment "development". + == 20200601202912-add_hypertable: migrating ======= + == 20200601202912-add_hypertable: migrated (0.426s) + ``` + +## Insert rows of data + +This section covers how to insert data into your hypertables. + +1. In the `index.js` file, modify the `/` route to get the `user-agent` from + the request object (`req`) and the current timestamp. Then, call the + `create` method on `PageLoads` model, supplying the user agent and timestamp + parameters. The `create` call executes an `INSERT` on the database: + + ```javascript + app.get('/', async (req, res) => { + // get the user agent and current time + const userAgent = req.get('user-agent'); + const time = new Date().getTime(); + + try { + // insert the record + await PageLoads.create({ + userAgent, time + }); + + // send response + res.send('Inserted!'); + } catch (e) { + console.log('Error inserting data', e) + } + }) + ``` + +## Execute a query + +This section covers how to execute queries against your database. In this +example, every time the page is reloaded, all information currently in the table +is displayed. + +1. 
Modify the `/` route in the `index.js` file to call the Sequelize `findAll`
+   function and retrieve all data from the `page_loads` table using the
+   `PageLoads` model:
+
+    ```javascript
+    app.get('/', async (req, res) => {
+      // get the user agent and current time
+      const userAgent = req.get('user-agent');
+      const time = new Date().getTime();
+
+      try {
+        // insert the record
+        await PageLoads.create({
+          userAgent, time
+        });
+
+        // now display everything in the table
+        const messages = await PageLoads.findAll();
+        res.send(messages);
+      } catch (e) {
+        console.log('Error inserting data', e)
+      }
+    })
+    ```
+
+Now, when you reload the page, you should see all of the rows currently in the
+`page_loads` table.
\ No newline at end of file
diff --git a/snippets/integrations/code/_start-coding-python.mdx b/snippets/integrations/code/_start-coding-python.mdx
new file mode 100644
index 0000000..69913ca
--- /dev/null
+++ b/snippets/integrations/code/_start-coding-python.mdx
@@ -0,0 +1,378 @@
+import { TIMESCALE_DB, PG } from '/snippets/vars.mdx';
+import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx';
+
+## Prerequisites
+
+
+
+* Install the `psycopg2` library.
+
+  For more information, see the [psycopg2 documentation](https://pypi.org/project/psycopg2/).
+* Create a [Python virtual environment](https://docs.python.org/3/library/venv.html). (Optional)
+
+## Connect to {TIMESCALE_DB}
+
+In this section, you create a connection to {TIMESCALE_DB} using the `psycopg2`
+library. This library is one of the most popular {PG} libraries for
+Python. It allows you to execute raw SQL queries efficiently and safely, and
+prevents common attacks such as SQL injection.
+
+1. Import the `psycopg2` library:
+
+    ```python
+    import psycopg2
+    ```
+
+2. Locate your {TIMESCALE_DB} credentials and use them to compose a connection
+   string for `psycopg2`.
+
+   You'll need:
+
+   * password
+   * username
+   * host URL
+   * port
+   * database name
+
+3. Compose your connection string variable as a
+   [libpq connection string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), using this format:
+
+    ```python
+    CONNECTION = "postgres://username:password@host:port/dbname"
+    ```
+
+   If you're using a hosted version of {TIMESCALE_DB}, or generally require an SSL
+   connection, use this version instead:
+
+    ```python
+    CONNECTION = "postgres://username:password@host:port/dbname?sslmode=require"
+    ```
+
+   Alternatively, you can specify each parameter in the connection string as follows:
+
+    ```python
+    CONNECTION = "dbname=tsdb user=tsdbadmin password=secret host=host.com port=5432 sslmode=require"
+    ```
+
+
+   This method of composing a connection string is for test or development
+   purposes only. For production, use environment variables for sensitive
+   details like your password, hostname, and port number.
+
+
+4. Use the `psycopg2` [connect function](https://www.psycopg.org/docs/module.html?highlight=connect#psycopg2.connect) to create a new
+   database session and create a new [cursor object](https://www.psycopg.org/docs/connection.html?highlight=cursor#connection.cursor) to
+   interact with the database.
+ + In your `main` function, add these lines: + + ```python + CONNECTION = "postgres://username:password@host:port/dbname" + with psycopg2.connect(CONNECTION) as conn: + cursor = conn.cursor() + # use the cursor to interact with your database + # cursor.execute("SELECT * FROM table") + ``` + + Alternatively, you can create a connection object and pass the object + around as needed, like opening a cursor to perform database operations: + + ```python + CONNECTION = "postgres://username:password@host:port/dbname" + conn = psycopg2.connect(CONNECTION) + cursor = conn.cursor() + # use the cursor to interact with your database + cursor.execute("SELECT 'hello world'") + print(cursor.fetchone()) + ``` + +## Create a relational table + +In this section, you create a table called `sensors` which holds the ID, type, +and location of your fictional sensors. Additionally, you create a hypertable +called `sensor_data` which holds the measurements of those sensors. The +measurements contain the time, sensor_id, temperature reading, and CPU +percentage of the sensors. + +1. Compose a string which contains the SQL statement to create a relational + table. This example creates a table called `sensors`, with columns `id`, + `type` and `location`: + + ```python + query_create_sensors_table = """CREATE TABLE sensors ( + id SERIAL PRIMARY KEY, + type VARCHAR(50), + location VARCHAR(50) + ); + """ + ``` + +2. Open a cursor, execute the query you created in the previous step, and + commit the query to make the changes persistent. Afterward, close the cursor + to clean up: + + ```python + cursor = conn.cursor() + # see definition in Step 1 + cursor.execute(query_create_sensors_table) + conn.commit() + cursor.close() + ``` + +## Create a hypertable + +When you have created the relational table, you can create a hypertable. +Creating tables and indexes, altering tables, inserting data, selecting data, +and most other tasks are executed on the hypertable. + +1. Create a string variable that contains the `CREATE TABLE` SQL statement for + your hypertable. Notice how the hypertable has the compulsory time column: + + ```python + # create sensor data hypertable + query_create_sensordata_table = """CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id INTEGER, + temperature DOUBLE PRECISION, + cpu DOUBLE PRECISION, + FOREIGN KEY (sensor_id) REFERENCES sensors (id) + ); + """ + ``` + +2. Formulate a `SELECT` statement that converts the `sensor_data` table to a + hypertable. You must specify the table name to convert to a hypertable, and + the name of the time column as the two arguments. For more information, see + the [`create_hypertable` docs](/api/hypertable/create_hypertable): + + ```python + query_create_sensordata_hypertable = "SELECT create_hypertable('sensor_data', by_range('time'));" + ``` + + + The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. + + +3. Open a cursor with the connection, execute the statements from the previous + steps, commit your changes, and close the cursor: + + ```python + cursor = conn.cursor() + cursor.execute(query_create_sensordata_table) + cursor.execute(query_create_sensordata_hypertable) + # commit changes to the database to make changes persistent + conn.commit() + cursor.close() + ``` + +## Insert rows of data + +You can insert data into your hypertables in several different ways. In this +section, you can use `psycopg2` with prepared statements, or you can use +`pgcopy` for a faster insert. + +1. 
This example inserts a list of tuples, or relational data, called `sensors`,
+   into the relational table named `sensors`. Open a cursor with a connection
+   to the database, use prepared statements to formulate the `INSERT` SQL
+   statement, and then execute that statement:
+
+    ```python
+    sensors = [('a', 'floor'), ('a', 'ceiling'), ('b', 'floor'), ('b', 'ceiling')]
+    cursor = conn.cursor()
+    for sensor in sensors:
+        try:
+            cursor.execute("INSERT INTO sensors (type, location) VALUES (%s, %s);",
+                           (sensor[0], sensor[1]))
+        except (Exception, psycopg2.Error) as error:
+            print(error.pgerror)
+    conn.commit()
+    ```
+
+2. (Optional) Alternatively, you can pass variables to the `cursor.execute`
+   function and separate the formulation of the SQL statement, `SQL`, from the
+   data being passed with it into the prepared statement, `data`:
+
+    ```python
+    SQL = "INSERT INTO sensors (type, location) VALUES (%s, %s);"
+    sensors = [('a', 'floor'), ('a', 'ceiling'), ('b', 'floor'), ('b', 'ceiling')]
+    cursor = conn.cursor()
+    for sensor in sensors:
+        try:
+            data = (sensor[0], sensor[1])
+            cursor.execute(SQL, data)
+        except (Exception, psycopg2.Error) as error:
+            print(error.pgerror)
+    conn.commit()
+    ```
+
+If you choose to use `pgcopy` instead, install the `pgcopy` package
+[using pip](https://pypi.org/project/pgcopy/), and then add this line to your list of
+`import` statements:
+
+```python
+from pgcopy import CopyManager
+```
+
+1. Generate some random sensor data using the `generate_series` function
+   provided by {PG}. This example inserts 24 hours of data, with one reading
+   every 5 minutes, for each of the four sensors. In your application, this
+   would be the query that saves your time-series data into the hypertable:
+
+    ```python
+    # for sensors with ids 1-4
+    for id in range(1, 5, 1):
+        data = (id,)
+        # create random data
+        simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time,
+                            %s as sensor_id,
+                            random()*100 AS temperature,
+                            random() AS cpu;
+                            """
+        cursor.execute(simulate_query, data)
+        values = cursor.fetchall()
+    ```
+
+2. Define the column names of the table you want to insert data into. This
+   example uses the `sensor_data` hypertable created earlier. This hypertable
+   consists of columns named `time`, `sensor_id`, `temperature` and `cpu`. The
+   column names are defined in a list of strings called `cols`:
+
+    ```python
+    cols = ['time', 'sensor_id', 'temperature', 'cpu']
+    ```
+
+3. Create an instance of the `pgcopy` CopyManager, `mgr`, and pass the
+   connection variable, hypertable name, and list of column names. Then use the
+   `copy` function of the CopyManager to insert the data into the database
+   quickly using `pgcopy`.
+
+    ```python
+    mgr = CopyManager(conn, 'sensor_data', cols)
+    mgr.copy(values)
+    ```
+
+4. Commit to persist changes:
+
+    ```python
+    conn.commit()
+    ```
+
+5.
(Optional) The full sample code to insert data into {TIMESCALE_DB} using
+   `pgcopy`, with sensor data from four sensors as an example:
+
+    ```python
+    # insert using pgcopy
+    def fast_insert(conn):
+        cursor = conn.cursor()
+
+        # for sensors with ids 1-4
+        for id in range(1, 5, 1):
+            data = (id,)
+            # create random data
+            simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time,
+                                %s as sensor_id,
+                                random()*100 AS temperature,
+                                random() AS cpu;
+                                """
+            cursor.execute(simulate_query, data)
+            values = cursor.fetchall()
+
+            # column names of the table you're inserting into
+            cols = ['time', 'sensor_id', 'temperature', 'cpu']
+
+            # create copy manager with the target table and insert
+            mgr = CopyManager(conn, 'sensor_data', cols)
+            mgr.copy(values)
+
+        # commit after all sensor data is inserted
+        # could also commit after each sensor insert is done
+        conn.commit()
+    ```
+
+6. (Optional) You can also check if the insertion worked:
+
+    ```python
+    cursor.execute("SELECT * FROM sensor_data LIMIT 5;")
+    print(cursor.fetchall())
+    ```
+
+## Execute a query
+
+This section covers how to execute queries against your database.
+
+The first procedure shows a simple `SELECT *` query. For more complex queries,
+you can use prepared statements to ensure queries are executed safely against
+the database.
+
+For more information about properly using placeholders and executing more
+complex queries in `psycopg2`, see the
+[basic module usage document](https://www.psycopg.org/docs/usage.html).
+
+### Execute a query
+
+1. Define the SQL query you'd like to run on the database. This example is a
+   simple `SELECT` statement querying each row from the previously created
+   `sensor_data` table.
+
+    ```python
+    query = "SELECT * FROM sensor_data;"
+    ```
+
+2. Open a cursor from the existing database connection, `conn`, and then execute
+   the query you defined:
+
+    ```python
+    cursor = conn.cursor()
+    query = "SELECT * FROM sensor_data;"
+    cursor.execute(query)
+    ```
+
+3. To access all resulting rows returned by your query, use one of `psycopg2`'s
+   [results retrieval methods](https://www.psycopg.org/docs/cursor.html),
+   such as `fetchall()` or `fetchmany()`. This example prints the results of
+   the query, row by row. Note that the result of `fetchall()` is a list of
+   tuples, so you can handle them accordingly:
+
+    ```python
+    cursor = conn.cursor()
+    query = "SELECT * FROM sensor_data;"
+    cursor.execute(query)
+    for row in cursor.fetchall():
+        print(row)
+    cursor.close()
+    ```
+
+4. (Optional) If you want a list of dictionaries instead, you can define the
+   cursor using [`DictCursor`](https://www.psycopg.org/docs/extras.html#dictionary-like-cursor):
+
+    ```python
+    cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
+    ```
+
+    Using this cursor, `cursor.fetchall()` returns a list of dictionary-like objects.
+
+For more complex queries, you can use prepared statements to ensure queries are
+executed safely against the database.
+
+### Execute queries using prepared statements
+
+1.
Write the query using prepared statements:
+
+    ```python
+    # query with placeholders
+    cursor = conn.cursor()
+    query = """
+        SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu)
+        FROM sensor_data
+        JOIN sensors ON sensors.id = sensor_data.sensor_id
+        WHERE sensors.location = %s AND sensors.type = %s
+        GROUP BY five_min
+        ORDER BY five_min DESC;
+        """
+    location = "floor"
+    sensor_type = "a"
+    data = (location, sensor_type)
+    cursor.execute(query, data)
+    results = cursor.fetchall()
+    ```
\ No newline at end of file
diff --git a/snippets/integrations/code/_start-coding-ruby.mdx b/snippets/integrations/code/_start-coding-ruby.mdx
new file mode 100644
index 0000000..2cbe7b6
--- /dev/null
+++ b/snippets/integrations/code/_start-coding-ruby.mdx
@@ -0,0 +1,394 @@
+import { PG, CLOUD_LONG, COMPANY, TIMESCALE_DB, SELF_LONG_CAP } from '/snippets/vars.mdx';
+import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx';
+
+## Prerequisites
+
+
+
+* Install [Rails](https://guides.rubyonrails.org/install_ruby_on_rails.html#installing-rails).
+
+## Connect a Rails app to your service
+
+Every {SERVICE_LONG} is a 100% {PG} database hosted in {CLOUD_LONG} with
+{COMPANY} extensions such as {TIMESCALE_DB}. You connect to your {SERVICE_LONG}
+from a standard Rails app configured for {PG}.
+
+1. **Create a new Rails app configured for {PG}**
+
+   Rails creates and bundles your app, then installs the standard {PG} Gems.
+
+    ```bash
+    rails new my_app -d=postgresql
+    cd my_app
+    ```
+
+2. **Install the {TIMESCALE_DB} gem**
+
+   1. Open `Gemfile`, add the following line, then save your changes:
+
+      ```ruby
+      gem 'timescaledb'
+      ```
+
+   2. In Terminal, run the following command:
+
+      ```bash
+      bundle install
+      ```
+
+3. **Connect your app to your {SERVICE_LONG}**
+
+   1. In `/config/database.yml`, update the configuration to securely connect to your {SERVICE_LONG}
+      by adding `url: <%= ENV['DATABASE_URL'] %>` to the default configuration:
+
+      ```yaml
+      default: &default
+        adapter: postgresql
+        encoding: unicode
+        pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %>
+        url: <%= ENV['DATABASE_URL'] %>
+      ```
+
+   2. Set the environment variable for `DATABASE_URL` to the value of `Service URL` from
+      your [connection details](/integrations/find-connection-details):
+      ```bash
+      export DATABASE_URL="value of Service URL"
+      ```
+
+   3. Create the database:
+      - **{CLOUD_LONG}**: nothing to do. The database is part of your {SERVICE_LONG}.
+      - **{SELF_LONG_CAP}**: create the database for the project:
+
+        ```bash
+        rails db:create
+        ```
+
+   4. Run migrations:
+
+      ```bash
+      rails db:migrate
+      ```
+
+   5.
Verify the connection from your app to your {SERVICE_LONG}:
+
+      ```bash
+      echo "\dx" | rails dbconsole
+      ```
+
+      The result shows the list of extensions in your {SERVICE_LONG}:
+
+      | Name | Version | Schema | Description |
+      | -- | -- | -- | -- |
+      | pg_buffercache | 1.5 | public | examine the shared buffer cache|
+      | pg_stat_statements | 1.11 | public | track planning and execution statistics of all SQL statements executed|
+      | plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language|
+      | postgres_fdw | 1.1 | public | foreign-data wrapper for remote {PG} servers|
+      | timescaledb | 2.18.1 | public | Enables scalable inserts and complex queries for time-series data (Community Edition)|
+      | timescaledb_toolkit | 1.19.0 | public | Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities|
+
+## Optimize time-series data in hypertables
+
+Hypertables are {PG} tables designed to simplify and accelerate data analysis. Anything
+you can do with regular {PG} tables, you can do with hypertables - but much faster and more conveniently.
+
+In this section, you use the helpers in the {TIMESCALE_DB} gem to create and manage a [hypertable](/use-timescale/hypertables).
+
+1. **Generate a migration to create the page loads table**
+
+    ```bash
+    rails generate migration create_page_loads
+    ```
+
+   This creates the `/db/migrate/_create_page_loads.rb` migration file.
+
+2. **Add hypertable options**
+
+   Replace the contents of `/db/migrate/_create_page_loads.rb`
+   with the following:
+
+    ```ruby
+    class CreatePageLoads < ActiveRecord::Migration[8.0]
+      def change
+        hypertable_options = {
+          time_column: 'created_at',
+          chunk_time_interval: '1 day',
+          compress_segmentby: 'path',
+          compress_orderby: 'created_at',
+          compress_after: '7 days',
+          drop_after: '30 days'
+        }
+
+        create_table :page_loads, id: false, primary_key: [:created_at, :user_agent, :path], hypertable: hypertable_options do |t|
+          t.timestamptz :created_at, null: false
+          t.string :user_agent
+          t.string :path
+          t.float :performance
+        end
+      end
+    end
+    ```
+
+   The `id` column is not included in the table. This is because {TIMESCALE_DB} requires that any `UNIQUE` or `PRIMARY KEY`
+   indexes on the table include all partitioning columns. In this case, this is the time column. A new
+   Rails model includes a `PRIMARY KEY` index for id by default: either remove the column or make sure that the index
+   includes time as part of a "composite key."
+
+   For more information, check the Rails docs on [composite primary keys](https://guides.rubyonrails.org/active_record_composite_primary_keys.html).
+
+3.
**Create a `PageLoad` model**
+
+   Create a new file called `/app/models/page_load.rb` and add the following code:
+
+    ```ruby
+    class PageLoad < ApplicationRecord
+      extend Timescaledb::ActsAsHypertable
+      include Timescaledb::ContinuousAggregatesHelper
+
+      acts_as_hypertable time_column: "created_at",
+        segment_by: "path",
+        value_column: "performance"
+
+      # Basic scopes for filtering by browser
+      scope :chrome_users, -> { where("user_agent LIKE ?", "%Chrome%") }
+      scope :firefox_users, -> { where("user_agent LIKE ?", "%Firefox%") }
+      scope :safari_users, -> { where("user_agent LIKE ?", "%Safari%") }
+
+      # Performance analysis scopes
+      scope :performance_stats, -> {
+        select("stats_agg(#{value_column}) as stats_agg")
+      }
+
+      scope :slow_requests, -> { where("performance > ?", 1.0) }
+      scope :fast_requests, -> { where("performance < ?", 0.1) }
+
+      # Set up continuous aggregates for different timeframes
+      continuous_aggregates scopes: [:performance_stats],
+        timeframes: [:minute, :hour, :day],
+        refresh_policy: {
+          minute: {
+            start_offset: '3 minute',
+            end_offset: '1 minute',
+            schedule_interval: '1 minute'
+          },
+          hour: {
+            start_offset: '3 hours',
+            end_offset: '1 hour',
+            schedule_interval: '1 minute'
+          },
+          day: {
+            start_offset: '3 day',
+            end_offset: '1 day',
+            schedule_interval: '1 minute'
+          }
+        }
+    end
+    ```
+
+4. **Run the migration**
+
+    ```bash
+    rails db:migrate
+    ```
+
+## Insert data into your service
+
+The {TIMESCALE_DB} gem provides efficient ways to insert data into hypertables. This section
+shows you how to ingest test data into your hypertable.
+
+1. **Create a controller to handle page loads**
+
+   Create a new file called `/app/controllers/application_controller.rb` and add the following code:
+
+    ```ruby
+    class ApplicationController < ActionController::Base
+      around_action :track_page_load
+
+      private
+
+      def track_page_load
+        start_time = Time.current
+        yield
+        end_time = Time.current
+
+        PageLoad.create(
+          path: request.path,
+          user_agent: request.user_agent,
+          performance: (end_time - start_time)
+        )
+      end
+    end
+    ```
+
+2. **Generate some test data**
+
+   Use `bin/console` to start a Rails console session and run the following code
+   to define some random page load access data:
+
+    ```ruby
+    def generate_sample_page_loads(total: 1000)
+      time = 1.month.ago
+      paths = %w[/ /about /contact /products /blog]
+      browsers = [
+        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
+        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0",
+        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15"
+      ]
+
+      total.times.map do
+        time = time + rand(60).seconds
+        {
+          path: paths.sample,
+          user_agent: browsers.sample,
+          performance: rand(0.1..2.0),
+          created_at: time,
+          updated_at: time
+        }
+      end
+    end
+    ```
+
+3. **Insert the generated data into your {SERVICE_LONG}**
+
+    ```ruby
+    # Insert the data in batches
+    PageLoad.insert_all(generate_sample_page_loads, returning: false)
+    ```
+
+4. **Validate the test data in your {SERVICE_LONG}**
+
+    ```ruby
+    PageLoad.count
+    PageLoad.first
+    ```
+
+## Reference
+
+This section lists the most common tasks you might perform with the {TIMESCALE_DB} gem.
+
+### Query scopes
+
+The {TIMESCALE_DB} gem provides several convenient scopes for querying your time-series data.
+ + +- Built-in time-based scopes: + + ```ruby + PageLoad.last_hour.count + PageLoad.today.count + PageLoad.this_week.count + PageLoad.this_month.count + ``` + +- Browser-specific scopes: + + ```ruby + # Count requests by browser + PageLoad.chrome_users.last_hour.count + PageLoad.firefox_users.last_hour.count + PageLoad.safari_users.last_hour.count + + # Performance analysis + PageLoad.slow_requests.last_hour.count + PageLoad.fast_requests.last_hour.count + ``` + +- Query continuous aggregates: + + This query fetches the average and standard deviation from the performance stats for the `/products` path over the last day. + + ```ruby + # Access aggregated performance stats through generated classes + PageLoad::PerformanceStatsPerMinute.last_hour + PageLoad::PerformanceStatsPerHour.last_day + PageLoad::PerformanceStatsPerDay.last_month + + # Get statistics for a specific path + stats = PageLoad::PerformanceStatsPerHour.last_day.where(path: '/products').select("average(stats_agg) as average, stddev(stats_agg) as stddev").first + puts "Average: #{stats.average}" + puts "Standard Deviation: #{stats.stddev}" + ``` + +### {TIMESCALE_DB} features + +The {TIMESCALE_DB} gem provides utility methods to access hypertable and chunk information. Every model that uses +the `acts_as_hypertable` method has access to these methods. + + +#### Access hypertable and chunk information + +- View chunk or hypertable information: + + ```ruby + PageLoad.chunks.count + PageLoad.hypertable.detailed_size + ``` + +- Compress/Decompress chunks: + + ```ruby + PageLoad.chunks.uncompressed.first.compress! # Compress the first uncompressed chunk + PageLoad.chunks.compressed.first.decompress! # Decompress the oldest chunk + PageLoad.hypertable.compression_stats # View compression stats + + ``` + +#### Access hypertable stats + +You collect hypertable stats using methods that provide insights into your hypertable's structure, size, and compression +status: + +- Get basic hypertable information: + + ```ruby + hypertable = PageLoad.hypertable + hypertable.hypertable_name # The name of your hypertable + hypertable.schema_name # The schema where the hypertable is located + ``` + +- Get detailed size information: + + ```ruby + hypertable.detailed_size # Get detailed size information for the hypertable + hypertable.compression_stats # Get compression statistics + hypertable.chunks_detailed_size # Get chunk information + hypertable.approximate_row_count # Get approximate row count + hypertable.dimensions.map(&:column_name) # Get dimension information + hypertable.continuous_aggregates.map(&:view_name) # Get continuous aggregate view names + ``` + +#### Continuous aggregates + +The `continuous_aggregates` method generates a class for each continuous aggregate. + +- Get all the continuous aggregate classes: + + ```ruby + PageLoad.descendants # Get all continuous aggregate classes + ``` + +- Manually refresh a continuous aggregate: + + ```ruby + PageLoad.refresh_aggregates + ``` + +- Create or drop a continuous aggregate: + + Create or drop all the continuous aggregates in the proper order to build them hierarchically. See more about how it + works in this [blog post](https://www.timescale.com/blog/building-a-better-ruby-orm-for-time-series-and-analytics). + + ```ruby + PageLoad.create_continuous_aggregates + PageLoad.drop_continuous_aggregates + ``` + + + + +## Next steps + +Now that you have integrated the ruby gem into your app: + +* Learn more about the [{TIMESCALE_DB} gem](https://github.com/timescale/timescaledb-ruby). 
+* Check out the [official docs](https://timescale.github.io/timescaledb-ruby/). +* Follow the [LTTB](https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/), [Open AI long-term storage](https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/), and [candlesticks](https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/) tutorials. \ No newline at end of file diff --git a/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx b/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx new file mode 100644 index 0000000..4cc3433 --- /dev/null +++ b/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx @@ -0,0 +1,20 @@ +import { HYPERTABLE, TIMESCALE_DB, COLUMNSTORE } from '/snippets/vars.mdx'; + +When you create a {HYPERTABLE} using [CREATE TABLE ... WITH ...][hypertable-create-table], the default partitioning +column is automatically the first column with a timestamp data type. Also, {TIMESCALE_DB} creates a +[columnstore policy][add_columnstore_policy] that automatically converts your data to the {COLUMNSTORE}, after an interval equal to the value of the [chunk_interval][create_table_arguments], defined through `compress_after` in the policy. This columnar format enables fast scanning and +aggregation, optimizing performance for analytical workloads while also saving significant storage space. In the +{COLUMNSTORE} conversion, {HYPERTABLE} chunks are compressed by up to 98%, and organized for efficient, large-scale queries. + +You can customize this policy later using [alter_job][alter_job_samples]. However, to change `after` or +`created_before`, the compression settings, or the {HYPERTABLE} the policy is acting on, you must +[remove the columnstore policy][remove_columnstore_policy] and [add a new one][add_columnstore_policy]. + +You can also manually [convert chunks][convert_to_columnstore] in a {HYPERTABLE} to the {COLUMNSTORE}. + +[add_columnstore_policy]: /api/hypercore/add_columnstore_policy/ +[remove_columnstore_policy]: /api/hypercore/remove_columnstore_policy/ +[create_table_arguments]: /api/hypertable/create_table/#arguments +[alter_job_samples]: /api/jobs-automation/alter_job/#samples +[convert_to_columnstore]: /api/hypercore/convert_to_columnstore/ +[hypertable-create-table]: /api/hypertable/create_table/ \ No newline at end of file diff --git a/snippets/procedures/_kubernetes-install-self-hosted.mdx b/snippets/procedures/_kubernetes-install-self-hosted.mdx new file mode 100644 index 0000000..ea42000 --- /dev/null +++ b/snippets/procedures/_kubernetes-install-self-hosted.mdx @@ -0,0 +1,169 @@ +import { TIMESCALE_DB, PG, SELF_LONG, COMPANY } from '/snippets/vars.mdx'; + +Running {TIMESCALE_DB} on Kubernetes is similar to running {PG}. This procedure outlines the steps for a non-distributed system. + +To connect your Kubernetes cluster to {SELF_LONG} running in the cluster: + +1. **Create a default namespace for {COMPANY} components** + + 1. Create the {COMPANY} namespace: + + ```shell + kubectl create namespace timescale + ``` + + 2. Set this namespace as the default for your session: + + ```shell + kubectl config set-context --current --namespace=timescale + ``` + + For more information, see [Kubernetes Namespaces][kubernetes-namespace]. + +2. 
**Set up a persistent volume claim (PVC) storage** + + To manually set up a persistent volume and claim for self-hosted Kubernetes, run the following command: + + ```yaml + kubectl apply -f - < Date: Thu, 6 Nov 2025 10:14:12 +0200 Subject: [PATCH 02/13] draft --- claude.md | 115 ++++++++ .../connectors/destination/tigerlake.mdx | 4 +- .../connectors/source/stream-from-kafka.mdx | 4 +- .../connectors/source/sync-from-postgres.mdx | 6 +- .../connectors/source/sync-from-s3.mdx | 4 +- integrations/integrate/apache-airflow.mdx | 132 ++++++++- integrations/integrate/apache-kafka.mdx | 168 +++++++++++- integrations/integrate/aws-lambda.mdx | 190 ++++++++++++- integrations/integrate/aws.mdx | 33 ++- integrations/integrate/azure-data-studio.mdx | 45 ++- integrations/integrate/cloudwatch.mdx | 38 ++- .../integrate/corporate-data-center.mdx | 35 ++- integrations/integrate/datadog.mdx | 151 ++++++++++- integrations/integrate/dbeaver.mdx | 46 +++- integrations/integrate/debezium.mdx | 70 ++++- integrations/integrate/decodable.mdx | 67 ++++- integrations/integrate/fivetran.mdx | 78 +++++- integrations/integrate/google-cloud.mdx | 38 ++- integrations/integrate/grafana.mdx | 173 +++++++++++- integrations/integrate/microsoft-azure.mdx | 38 ++- integrations/integrate/pgadmin.mdx | 40 ++- integrations/integrate/postgresql.mdx | 9 +- integrations/integrate/power-bi.mdx | 2 +- integrations/integrate/prometheus.mdx | 12 +- integrations/integrate/psql.mdx | 238 +++++++++++++++- integrations/integrate/qstudio.mdx | 47 +++- integrations/integrate/supabase.mdx | 256 +++++++++++++++++- integrations/integrate/tableau.mdx | 2 +- integrations/integrate/telegraf.mdx | 151 ++++++++++- integrations/troubleshooting.mdx | 6 +- .../_cloudwatch-data-exporter.mdx | 66 ++--- .../integrations/_datadog-data-exporter.mdx | 6 +- .../integrations/_foreign-data-wrappers.mdx | 31 +-- snippets/integrations/_grafana-connect.mdx | 22 +- .../_integration-debezium-docker.mdx | 15 +- ...n-debezium-self-hosted-config-database.mdx | 28 +- .../_integration-prereqs-cloud-only.mdx | 7 - .../integrations/_manage-a-data-exporter.mdx | 52 +--- .../integrations/_prometheus-integrate.mdx | 40 +-- snippets/integrations/_transit-gateway.mdx | 22 +- .../code/_start-coding-golang.mdx | 2 +- .../integrations/code/_start-coding-java.mdx | 2 +- .../integrations/code/_start-coding-node.mdx | 2 +- .../code/_start-coding-python.mdx | 2 +- .../integrations/code/_start-coding-ruby.mdx | 2 +- .../_jdbc-authentication-not-supported.mdx | 16 ++ .../_integration-prereqs-cloud-only.mdx | 2 +- .../_integration-prereqs-self-only.mdx | 3 +- .../prerequisites/_integration-prereqs.mdx | 2 +- .../_livesync-prereqs-cloud.mdx | 0 .../_livesync-prereqs-terminal.mdx | 0 .../_migrate-import-prerequisites.mdx | 22 ++ ...igrate-import-setup-connection-strings.mdx | 12 + 53 files changed, 2307 insertions(+), 247 deletions(-) delete mode 100644 snippets/integrations/_integration-prereqs-cloud-only.mdx create mode 100644 snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx rename snippets/{integrations => prerequisites}/_livesync-prereqs-cloud.mdx (100%) rename snippets/{integrations => prerequisites}/_livesync-prereqs-terminal.mdx (100%) create mode 100644 snippets/prerequisites/_migrate-import-prerequisites.mdx create mode 100644 snippets/procedures/_migrate-import-setup-connection-strings.mdx diff --git a/claude.md b/claude.md index 2fe62fd..7d8a59a 100644 --- a/claude.md +++ b/claude.md @@ -81,6 +81,110 @@ - Keep meta descriptions under 200 characters - 
Keep meta titles under 60 characters +## Variables and snippets + +### Variable import hierarchy +Variables follow a bottom-up import hierarchy: +- Snippets import their own variables directly +- Parent files MUST NOT duplicate variable imports that come from their snippets +- Only import variables in the main file if they are used directly in that file's content + +### Checking for duplicate imports +Before finalizing any file migration, systematically check for duplicate variable imports: + +1. **Read all imported snippets** to see what variables they import +2. **Compare with main file imports** to identify duplicates +3. **Remove duplicates from main file** - the snippet's variables are automatically available +4. **Verify all text** in the main file uses variables (not plain text) + +Example workflow: +``` +Main file uses: {CLOUD_LONG}, {SERVICE_LONG} +Snippet A imports: SERVICE_LONG, CONSOLE +Snippet B imports: CLOUD_LONG, VPC + +Result: Main file should import NOTHING - all variables come from snippets +``` + +### Variable application checklist +For EVERY file migration, systematically check the vars.mdx file and apply ALL relevant variables: + +**Core product variables:** +- CLOUD_LONG, SERVICE_LONG, SERVICE_SHORT, SELF_LONG, SELF_LONG_CAP +- CONSOLE, TIMESCALE_DB, PG, COMPANY + +**Feature variables:** +- HYPERTABLE, HYPERTABLE_CAP (for "hypertable(s)" / "Hypertable(s)") +- HYPERCORE, HYPERCORE_CAP (for "hypercore" / "Hypercore") +- COLUMNSTORE, COLUMNSTORE_CAP (for "columnstore" / "Columnstore") +- ROWSTORE, ROWSTORE_CAP (for "rowstore" / "Rowstore") +- CAGG, CAGG_CAP (for "continuous aggregate(s)" / "Continuous aggregate(s)") +- MAT_HYPERTABLE, MAT_HYPERTABLE_CAP (for "materialized hypertable(s)") +- VPC (for "VPC") + +**Pricing variables:** +- PRICING_PLAN, SCALE, ENTERPRISE + +**Process:** +1. Read snippets/vars.mdx to see all available variables +2. Search the file content for terms that match variable values +3. Replace ALL occurrences with variables +4. Check that variables aren't imported twice (main file + snippets) + +### Common patterns + +**Pattern 1: Integration files** +```mdx +--- +title: Integrate [Tool] with Tiger Cloud +sidebarTitle: [Tool] +description: [Tool description] +--- + +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import OtherSnippet from '/snippets/path/_snippet.mdx'; + +[Tool][tool-link] does something with {SERVICE_LONG}. + +## Prerequisites + + + +## Connect + +Instructions with {CLOUD_LONG} and {SERVICE_SHORT} variables... + +[tool-link]: https://example.com +``` + +Main file imports: NONE (if all variables come from snippets) or only those used directly in content + +**Pattern 2: Snippet files** +```mdx +import { SERVICE_LONG, CONSOLE, VPC } from '/snippets/vars.mdx'; + +Content using {SERVICE_LONG}, {CONSOLE}, and {VPC}... 
+``` + +Snippets import only what they use directly + +**Pattern 3: Nested snippets** +If snippet A imports snippet B: +- Snippet B imports its own variables +- Snippet A only imports variables it uses directly (not from B) +- Main file that imports snippet A gets variables from both A and B automatically + +### Template literal syntax for Tab titles +When using variables in component props (like Tab titles), use template literal syntax: +```mdx + +``` + +NOT: +```mdx + +``` + ## Git workflow - NEVER use --no-verify when committing - Ask how to handle uncommitted changes before starting @@ -101,8 +205,19 @@ # Migration +## Critical migration requirements + +**ALWAYS follow these steps for EVERY file migration:** + +1. **Apply ALL relevant variables** - Systematically check snippets/vars.mdx and apply every applicable variable (see "Variables and snippets" section above) +2. **Check for duplicate imports** - Read all imported snippets to see what variables they import, then ensure the main file doesn't duplicate those imports +3. **Verify variable usage** - Ensure all content uses variables, not plain text for product names and features + +## Migration steps + - Check the directory that the files are to move into - Update all ${VARIABLES} to use the mintlify variables (reference snippets/vars.mdx for mappings) +- **CRITICAL**: After migration, check that variables are not imported twice (see "Variables and snippets" section) - replace references to import since`` with `` Since `` on its own line after the frontmatter, followed by a newline before content begins - replace references to import deprecated`` with `` Deprecated `` on its own line after the frontmatter, followed by a newline before content begins - replace references to import DeprecationNotice with `` Deprecated on its own line after the frontmatter, followed by a newline before content begins diff --git a/integrations/connectors/destination/tigerlake.mdx b/integrations/connectors/destination/tigerlake.mdx index 6d1af12..326d7c9 100644 --- a/integrations/connectors/destination/tigerlake.mdx +++ b/integrations/connectors/destination/tigerlake.mdx @@ -1,10 +1,10 @@ --- title: Integrate with data lakes -description: Unify your Tiger Cloud operational architecture with data lakes +description: Unifies the Tiger Cloud operational architecture with data lake architectures. This enables real-time application building alongside efficient data pipeline management within a single system. 
--- import { LAKE_LONG, LAKE_SHORT, SERVICE_SHORT, HYPERTABLE, HYPERTABLE_CAP, CONSOLE, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; {LAKE_LONG} enables you to build real-time applications alongside efficient data pipeline management within a single diff --git a/integrations/connectors/source/stream-from-kafka.mdx b/integrations/connectors/source/stream-from-kafka.mdx index 208d74d..34b21b4 100644 --- a/integrations/connectors/source/stream-from-kafka.mdx +++ b/integrations/connectors/source/stream-from-kafka.mdx @@ -1,10 +1,10 @@ --- title: Stream from Kafka -description: Stream data from Kafka into a Tiger Cloud service +description: Stream data from Kafka into a Tiger Cloud service in order to store, query, and analyze your Kafka events efficiently --- import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; -import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; Early access diff --git a/integrations/connectors/source/sync-from-postgres.mdx b/integrations/connectors/source/sync-from-postgres.mdx index 0b8ac48..c770645 100644 --- a/integrations/connectors/source/sync-from-postgres.mdx +++ b/integrations/connectors/source/sync-from-postgres.mdx @@ -1,11 +1,11 @@ --- title: Sync from Postgres -description: Sync updates to your primary Postgres database with your Tiger Cloud service in real time +description: Synchronize updates to your primary Postgres database with the corresponding Tiger Cloud service in real time --- import { HYPERTABLE, CONSOLE } from '/snippets/vars.mdx'; -import LivesyncPrereqsCloud from '/snippets/integrations/_livesync-prereqs-cloud.mdx'; -import LivesyncPrereqsTerminal from '/snippets/integrations/_livesync-prereqs-terminal.mdx'; +import LivesyncPrereqsCloud from '/snippets/prerequisites/_livesync-prereqs-cloud.mdx'; +import LivesyncPrereqsTerminal from '/snippets/prerequisites/_livesync-prereqs-terminal.mdx'; import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; Early access diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index a991ba1..6e1cf85 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -1,10 +1,10 @@ --- title: Sync from S3 -description: Sync data from S3 to your Tiger Cloud service in real time +description: Synchronize data from S3 to Tiger Cloud service in real time --- import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; -import IntegrationPrereqsCloud from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; Early access diff --git a/integrations/integrate/apache-airflow.mdx b/integrations/integrate/apache-airflow.mdx index b1bc6bf..8652190 100644 --- a/integrations/integrate/apache-airflow.mdx +++ b/integrations/integrate/apache-airflow.mdx @@ -1,4 +1,132 @@ 
--- -title: Apache Airflow -description: TBD +title: Integrate Apache Airflow with Tiger +sidebarTitle: Apache Airflow +description: Apache Airflow is a platform to programmatically author, schedule, and monitor workflows. Integrate Apache Airflow with Tiger Cloud and create a data pipeline --- + +import { CLOUD_LONG, PG, CONSOLE } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +Apache Airflow® is a platform created by the community to programmatically author, schedule, and monitor workflows. + +A [DAG (Directed Acyclic Graph)][airflow-dag] is the core concept of Airflow, collecting [Tasks][airflow-task] together, +organized with dependencies and relationships to say how they should run. You declare a DAG in a Python file +in the `$AIRFLOW_HOME/dags` folder of your Airflow instance. + +This page shows you how to use a Python connector in a DAG to integrate Apache Airflow with a {SERVICE_LONG}. + +## Prerequisites + + + +* Install [Python3 and pip3][install-python-pip] +* Install [Apache Airflow][install-apache-airflow] + + Ensure that your Airflow instance has network access to {CLOUD_LONG}. + +This example DAG uses the `company` table you create in [Optimize time-series data in hypertables][create-a-table-in-timescale] + +## Install python connectivity libraries + +To install the Python libraries required to connect to {CLOUD_LONG}: + +1. **Enable {PG} connections between Airflow and {CLOUD_LONG}** + + ```bash + pip install psycopg2-binary + ``` + +2. **Enable {PG} connection types in the Airflow UI** + + ```bash + pip install apache-airflow-providers-postgres + ``` + +## Create a connection between Airflow and your {SERVICE_LONG} + +In your Airflow instance, securely connect to your {SERVICE_LONG}: + +1. **Run Airflow** + + On your development machine, run the following command: + + ```bash + airflow standalone + ``` + + The username and password for Airflow UI are displayed in the `standalone | Login with username` + line in the output. + +2. **Add a connection from Airflow to your {SERVICE_LONG}** + + 1. In your browser, navigate to `localhost:8080`, then select `Admin` > `Connections`. + 2. Click `+` (Add a new record), then use your [connection info][connection-info] to fill in + the form. The `Connection Type` is `Postgres`. + +## Exchange data between Airflow and your {SERVICE_LONG} + +To exchange data between Airflow and your {SERVICE_LONG}: + +1. **Create and execute a DAG** + + To insert data in your {SERVICE_LONG} from Airflow: + 1. In `$AIRFLOW_HOME/dags/timescale_dag.py`, add the following code: + + ```python + from airflow import DAG + from airflow.operators.python_operator import PythonOperator + from airflow.hooks.postgres_hook import PostgresHook + from datetime import datetime + + def insert_data_to_timescale(): + hook = PostgresHook(postgres_conn_id='the ID of the connenction you created') + conn = hook.get_conn() + cursor = conn.cursor() + """ + This could be any query. 
This example inserts data into the table + you create in: + + https://docs.tigerdata.com/getting-started/latest/try-key-features-timescale-products/#optimize-time-series-data-in-hypertables + """ + cursor.execute("INSERT INTO crypto_assets (symbol, name) VALUES (%s, %s)", + ('NEW/Asset','New Asset Name')) + conn.commit() + cursor.close() + conn.close() + + default_args = { + 'owner': 'airflow', + 'start_date': datetime(2023, 1, 1), + 'retries': 1, + } + + dag = DAG('timescale_dag', default_args=default_args, schedule_interval='@daily') + + insert_task = PythonOperator( + task_id='insert_data', + python_callable=insert_data_to_timescale, + dag=dag, + ) + ``` + This DAG uses the `company` table created in [Create regular {PG} tables for relational data][create-a-table-in-timescale]. + + 2. In your browser, refresh the Airflow UI. + 3. In `Search DAGS`, type `timescale_dag` and press ENTER. + 4. Press the play icon and trigger the DAG: + ![daily eth volume of assets](https://assets.timescale.com/docs/images/integrations-apache-airflow.png) +2. **Verify that the data appears in {CLOUD_LONG}** + + 1. In [{CONSOLE}][console], navigate to your service and click `SQL editor`. + 2. Run a query to view your data. For example: `SELECT symbol, name FROM company;`. + + You see the new rows inserted in the table. + +You have successfully integrated Apache Airflow with {CLOUD_LONG} and created a data pipeline. + +[airflow-dag]: https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/dags.html#dags +[airflow-task]: https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/tasks.html +[connection-info]: /integrations/find-connection-details +[console]: https://console.cloud.timescale.com/dashboard/services +[create-a-table-in-timescale]: /getting-started/try-key-features-timescale-products/#optimize-time-series-data-in-hypertables +[install-apache-airflow]: https://airflow.apache.org/docs/apache-airflow/stable/start.html +[install-python-pip]: https://docs.python.org/3/using/index.html diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index 748043f..efc67b1 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -1,4 +1,168 @@ --- -title: Apache Kafka -description: TBD +title: Integrate Apache Kafka with Tiger Cloud +sidebarTitle: Apache Kafka +description: Apache Kafka is a distributed event streaming platform used for high-performance data pipelines. Learn how to integrate Apache Kafka with Tiger Cloud to manage and analyze streaming data --- + +import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import IntegrationApacheKafka from '/snippets/integrations/_integration-apache-kafka-install.mdx'; +import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; + +[Apache Kafka][apache-kafka] is a distributed event streaming platform used for high-performance data pipelines, +streaming analytics, and data integration. [Apache Kafka Connect][kafka-connect] is a tool to scalably and reliably +stream data between Apache Kafka® and other data systems. Kafka Connect is an ecosystem of pre-written and maintained +Kafka Producers (source connectors) and Kafka Consumers (sink connectors) for data products and platforms like +databases and message brokers. + +This guide explains how to set up Kafka and Kafka Connect to stream data from a Kafka topic into your {SERVICE_LONG}. 
+ +## Prerequisites + + + +- Install Java8 or higher][java-installers] to run Apache Kafka + +## Install and configure Apache Kafka + +To install and configure Apache Kafka: + + + +Keep these terminals open, you use them to test the integration later. + +## Install the sink connector to communicate with {CLOUD_LONG} + +To set up Kafka Connect server, plugins, drivers, and connectors: + +1. **Install the {PG} connector** + + In another Terminal window, navigate to ``, then download and configure the {PG} sink and driver. + ```bash + mkdir -p "plugins/camel-postgresql-sink-kafka-connector" + curl https://repo.maven.apache.org/maven2/org/apache/camel/kafkaconnector/camel-postgresql-sink-kafka-connector/3.21.0/camel-postgresql-sink-kafka-connector-3.21.0-package.tar.gz \ + | tar -xzf - -C "plugins/camel-postgresql-sink-kafka-connector" --strip-components=1 + curl -H "Accept: application/zip" https://jdbc.postgresql.org/download/postgresql-42.7.5.jar -o "plugins/camel-postgresql-sink-kafka-connector/postgresql-42.7.5.jar" + echo "plugin.path=`pwd`/plugins/camel-postgresql-sink-kafka-connector" >> "config/connect-distributed.properties" + echo "plugin.path=`pwd`/plugins/camel-postgresql-sink-kafka-connector" >> "config/connect-standalone.properties" + ``` + +2. **Start Kafka Connect** + + ```bash + export CLASSPATH=`pwd`/plugins/camel-postgresql-sink-kafka-connector/* + ./bin/connect-standalone.sh config/connect-standalone.properties + ``` + + Use the `-daemon` flag to run this process in the background. + +3. **Verify Kafka Connect is running** + + In yet another another Terminal window, run the following command: + ```bash + curl http://localhost:8083 + ``` + You see something like: + ```bash + {"version":"3.9.0","commit":"a60e31147e6b01ee","kafka_cluster_id":"J-iy4IGXTbmiALHwPZEZ-A"} + ``` + +## Create a table in your {SERVICE_LONG} to ingest Kafka events + +To prepare your {SERVICE_LONG} for Kafka integration: + +1. **[Connect][connect] to your {SERVICE_LONG}** + +2. **Create a hypertable to ingest Kafka events** + + ```sql + CREATE TABLE accounts ( + created_at TIMESTAMPTZ DEFAULT NOW(), + name TEXT, + city TEXT + ) WITH ( + tsdb.hypertable + ); + ``` + + +## Create the {CLOUD_LONG} sink + +To create a {CLOUD_LONG} sink in Apache Kafka: + +1. **Create the connection configuration** + + 1. In the terminal running Kafka Connect, stop the process by pressing `Ctrl+C`. + + 2. Write the following configuration to `/config/timescale-standalone-sink.properties`, then update the `properties` with your [connection details][connection-info]. + + ```properties + name=timescale-standalone-sink + connector.class=org.apache.camel.kafkaconnector.postgresqlsink.CamelPostgresqlsinkSinkConnector + errors.tolerance=all + errors.deadletterqueue.topic.name=deadletter + tasks.max=10 + value.converter=org.apache.kafka.connect.storage.StringConverter + key.converter=org.apache.kafka.connect.storage.StringConverter + topics=accounts + camel.kamelet.postgresql-sink.databaseName= + camel.kamelet.postgresql-sink.username= + camel.kamelet.postgresql-sink.password= + camel.kamelet.postgresql-sink.serverName= + camel.kamelet.postgresql-sink.serverPort= + camel.kamelet.postgresql-sink.query=INSERT INTO accounts (name,city) VALUES (:#name,:#city) + ``` + 3. Restart Kafka Connect with the new configuration: + ```bash + export CLASSPATH=`pwd`/plugins/camel-postgresql-sink-kafka-connector/* + ./bin/connect-standalone.sh config/connect-standalone.properties config/timescale-standalone-sink.properties + ``` + +2. 
**Test the connection** + + To see your sink, query the `/connectors` route in a GET request: + + ```bash + curl -X GET http://localhost:8083/connectors + ``` + You see: + + ```bash + #["timescale-standalone-sink"] + ``` + +## Test the integration with {CLOUD_LONG} + +To test this integration, send some messages onto the `accounts` topic. You can do this using the kafkacat or kcat utility. + +1. **In the terminal running `kafka-console-producer.sh` enter the following json strings** + + ```bash + {"name":"Lola","city":"Copacabana"} + {"name":"Holly","city":"Miami"} + {"name":"Jolene","city":"Tennessee"} + {"name":"Barbara Ann ","city":"California"} + ``` + Look in your terminal running `kafka-console-consumer` to see the messages being processed. + +2. **Query your {SERVICE_LONG} for all rows in the `accounts` table** + + ```sql + SELECT * FROM accounts; + ``` + You see something like: + + | created_at | name | city | + | -- | --| -- | + |2025-02-18 13:55:05.147261+00 | Lola | Copacabana | + |2025-02-18 13:55:05.216673+00 | Holly | Miami | + |2025-02-18 13:55:05.283549+00 | Jolene | Tennessee | + |2025-02-18 13:55:05.35226+00 | Barbara Ann | California | + +You have successfully integrated Apache Kafka with {CLOUD_LONG}. + +[apache-kafka]: https://kafka.apache.org/documentation/ +[connect]: /getting-started/run-queries-from-console +[connection-info]: /integrations/find-connection-details +[java-installers]: https://www.oracle.com/java/technologies/downloads/ +[kafka-connect]: https://docs.confluent.io/platform/current/connect/index.html \ No newline at end of file diff --git a/integrations/integrate/aws-lambda.mdx b/integrations/integrate/aws-lambda.mdx index 5555027..d0e8d00 100644 --- a/integrations/integrate/aws-lambda.mdx +++ b/integrations/integrate/aws-lambda.mdx @@ -1,4 +1,190 @@ --- -title: AWS Lambda -description: TBD +title: Integrate AWS Lambda with Tiger Cloud +sidebarTitle: AWS Lambda +description: With AWS Lambda, you can run code without provisioning or managing servers, and scale automatically. Integrate AWS Lambda with Tiger Cloud and inject data into your service --- + +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; + +[AWS Lambda][aws-lambda] is a serverless computing service provided by Amazon Web Services (AWS) that allows you to run +code without provisioning or managing servers, scaling automatically as needed. + +This page shows you how to integrate AWS Lambda with {SERVICE_LONG} to process and store time-series data efficiently. + +## Prerequisites + + + +* Set up an [AWS Account][aws-sign-up]. +* Install and configure [AWS CLI][install-aws-cli]. +* Install [NodeJS v18.x or later][install-nodejs]. + + +## Prepare your {SERVICE_LONG} to ingest data from AWS Lambda + +Create a table in {SERVICE_LONG} to store time-series data. + +1. **Connect to your {SERVICE_LONG}** + + For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][console]. For {SELF_LONG}, use [`psql`][psql]. + +2. **Create a {HYPERTABLE} to store sensor data** + + [{HYPERTABLE_CAP}s][about-hypertables] are {PG} tables that automatically partition your data by time. You interact + with {HYPERTABLE}s in the same way as regular {PG} tables, but with extra features that make managing your + time-series data much easier. 
+ + ```sql + CREATE TABLE sensor_data ( + time TIMESTAMPTZ NOT NULL, + sensor_id TEXT NOT NULL, + value DOUBLE PRECISION NOT NULL + ) WITH ( + tsdb.hypertable + ); + ``` + + +## Create the code to inject data into a {SERVICE_LONG} + +Write an AWS Lambda function in a Node.js project that processes and inserts time-series data into a {SERVICE_LONG}. + +1. **Initialize a new Node.js project to hold your Lambda function** + + ```shell + mkdir lambda-timescale && cd lambda-timescale + npm init -y + ``` + +2. **Install the {PG} client library in your project** + + ```shell + npm install pg + ``` + +3. **Write a Lambda Function that inserts data into your {SERVICE_LONG}** + + Create a file named `index.js`, then add the following code: + + ```javascript + const { + Client + } = require('pg'); + + exports.handler = async (event) => { + const client = new Client({ + host: process.env.TIMESCALE_HOST, + port: process.env.TIMESCALE_PORT, + user: process.env.TIMESCALE_USER, + password: process.env.TIMESCALE_PASSWORD, + database: process.env.TIMESCALE_DB, + }); + + try { + await client.connect(); + // + const query = ` + INSERT INTO sensor_data (time, sensor_id, value) + VALUES ($1, $2, $3); + `; + + const data = JSON.parse(event.body); + const values = [new Date(), data.sensor_id, data.value]; + + await client.query(query, values); + + return { + statusCode: 200, + body: JSON.stringify({ + message: 'Data inserted successfully!' + }), + }; + } catch (error) { + console.error('Error inserting data:', error); + return { + statusCode: 500, + body: JSON.stringify({ + error: 'Failed to insert data.' + }), + }; + } finally { + await client.end(); + } + + }; + ``` + +## Deploy your Node project to AWS Lambda + +To create an AWS Lambda function that injects data into your {SERVICE_LONG}: + +1. **Compress your code into a `.zip`** + + ```shell + zip -r lambda-timescale.zip . + ``` + +2. **Deploy to AWS Lambda** + + In the following example, replace `` with your [AWS IAM credentials][aws-iam-role], then use + AWS CLI to create a Lambda function for your project: + + ```shell + aws lambda create-function \ + --function-name TimescaleIntegration \ + --runtime nodejs14.x \ + --role \ + --handler index.handler \ + --zip-file fileb://lambda-timescale.zip + ``` + +3. **Set up environment variables** + + In the following example, use your [connection details][connection-info] to add your {SERVICE_LONG} connection settings to your Lambda function: + ```shell + aws lambda update-function-configuration \ + --function-name TimescaleIntegration \ + --environment "Variables={TIMESCALE_HOST=,TIMESCALE_PORT=, \ + TIMESCALE_USER=,TIMESCALE_PASSWORD=, \ + TIMESCALE_DB=}" + ``` + +4. **Test your AWS Lambda function** + + 1. Invoke the Lambda function and send some data to your {SERVICE_LONG}: + + ```shell + aws lambda invoke \ + --function-name TimescaleIntegration \ + --payload '{"body": "{\"sensor_id\": \"sensor-123\", \"value\": 42.5}"}' \ + --cli-binary-format raw-in-base64-out \ + response.json + ``` + + 2. Verify that the data is in your {SERVICE_SHORT}. + + Open an [SQL editor][run-queries] and check the `sensor_data` table: + + ```sql + SELECT * FROM sensor_data; + ``` + You see something like: + + | time | sensor_id | value | + |-- |-- |--------| + | 2025-02-10 10:58:45.134912+00 | sensor-123 | 42.5 | + +You can now seamlessly ingest time-series data from AWS Lambda into {CLOUD_LONG}. 
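+
+With events arriving through the Lambda function, you can aggregate them directly in SQL. For example, here is a minimal sketch that buckets the ingested readings into 15-minute averages per sensor; adjust the bucket width and time range to match your workload:
+
+```sql
+-- Average value per sensor in 15-minute buckets over the last day
+SELECT time_bucket('15 minutes', time) AS bucket,
+       sensor_id,
+       avg(value) AS avg_value
+FROM sensor_data
+WHERE time > now() - INTERVAL '1 day'
+GROUP BY bucket, sensor_id
+ORDER BY bucket, sensor_id;
+```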
+ +[about-hypertables]: /use-timescale/hypertables +[aws-iam-role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access-keys-admin-managed.html#admin-list-access-key +[aws-lambda]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html +[aws-sign-up]: https://signin.aws.amazon.com/signup?request_type=register +[connection-info]: /integrations/find-connection-details +[console]: https://console.cloud.timescale.com/dashboard/services +[install-aws-cli]: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html +[install-nodejs]: https://nodejs.org/en/download +[psql]: /integrations/integrate/psql +[run-queries]: /getting-started/run-queries-from-console \ No newline at end of file diff --git a/integrations/integrate/aws.mdx b/integrations/integrate/aws.mdx index e8e3618..966324e 100644 --- a/integrations/integrate/aws.mdx +++ b/integrations/integrate/aws.mdx @@ -1,4 +1,33 @@ --- -title: AWS -description: TBD +title: Integrate Amazon Web Services with Tiger Cloud +sidebarTitle: AWS +description: AWS enables you to build, run, and manage applications across cloud, hybrid, and edge environments with AI, analytics, security, and scalable infrastructure. Integrate AWS with Tiger Cloud using AWS Transit Gateway --- + +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +[Amazon Web Services (AWS)][aws] is a comprehensive cloud computing platform that provides on-demand infrastructure, storage, databases, AI, analytics, and security services to help businesses build, deploy, and scale applications in the cloud. + +This page explains how to integrate your AWS infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. + +## Prerequisites + + + +- Set up [AWS Transit Gateway][gtw-setup]. + + + +## Connect your AWS infrastructure to your {SERVICE_LONG}s + +To connect to {CLOUD_LONG}: + + + +You have successfully integrated your AWS infrastructure with {CLOUD_LONG}. + +[aws]: https://aws.amazon.com/ +[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ +[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html \ No newline at end of file diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index 5f034b5..3ecf44b 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -1,4 +1,45 @@ --- -title: Azure Data Studio -description: TBD +title: Integrate Azure Data Studio with Tiger Cloud +sidebarTitle: Azure Data Studio +description: Azure Data Studio is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. Integrate Azure Data Studio with Tiger Cloud --- + +import { PG, CLOUD_LONG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[Azure Data Studio][azure-data-studio] is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. + +This page explains how to integrate Azure Data Studio with {CLOUD_LONG}. + +## Prerequisites + + + +* Download and install [Azure Data Studio][ms-azure-data-studio]. +* Install the [{PG} extension for Azure Data Studio][postgresql-azure-data-studio]. + +## Connect to your {SERVICE_LONG} with Azure Data Studio + +To connect to {CLOUD_LONG}: + +1. 
**Start `Azure Data Studio`** +1. **In the `SERVERS` page, click `New Connection`** +1. **Configure the connection** + 1. Select `PostgreSQL` for `Connection type`. + 1. Configure the server name, database, username, port, and password using your [connection details][connection-info]. + 1. Click `Advanced`. + + If you configured your {SERVICE_LONG} to connect using [stricter SSL mode][ssl-mode], set `SSL mode` to the + configured mode, then type the location of your SSL root CA certificate in `SSL root certificate filename`. + + 1. In the `Port` field, type the port number and click `OK`. + +1. **Click `Connect`** + +You have successfully integrated Azure Data Studio with {CLOUD_LONG}. + +[ms-azure-data-studio]: https://learn.microsoft.com/en-us/azure-data-studio/download-azure-data-studio?view=sql-server-ver16#install-azure-data-studio +[postgresql-azure-data-studio]: https://learn.microsoft.com/en-us/azure-data-studio/extensions/postgres-extension?view=sql-server-ver16 +[connection-info]: /integrations/find-connection-details +[azure-data-studio]: https://azure.microsoft.com/en-us/products/data-studio +[ssl-mode]: /use-timescale/security/strict-ssl \ No newline at end of file diff --git a/integrations/integrate/cloudwatch.mdx b/integrations/integrate/cloudwatch.mdx index c6de9f8..5958aa9 100644 --- a/integrations/integrate/cloudwatch.mdx +++ b/integrations/integrate/cloudwatch.mdx @@ -1,4 +1,38 @@ --- -title: Amazon CloudWatch -description: TBD +title: Integrate Amazon CloudWatch with Tiger Cloud +sidebarTitle: Amazon CloudWatch +description: Amazon CloudWatch is a monitoring and observability service. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Amazon CloudWatch --- + +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; +import CloudWatchExporter from '/snippets/integrations/_cloudwatch-data-exporter.mdx'; +import ManageDataExporter from '/snippets/integrations/_manage-a-data-exporter.mdx'; + +[Amazon CloudWatch][cloudwatch] is a monitoring and observability service designed to help collect, analyze, and act on data from applications, infrastructure, and services running in AWS and on-premises environments. + +You can export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to CloudWatch. The available metrics include CPU usage, RAM usage, and storage. This integration is available for [Scale and Enterprise][pricing-plan-features] pricing tiers. + +This pages explains how to export telemetry data from your {SERVICE_LONG} into CloudWatch by creating a {CLOUD_LONG} data exporter, then attaching it to the {SERVICE_SHORT}. + +## Prerequisites + + + +- Sign up for [Amazon CloudWatch][cloudwatch-signup]. + + + +## Create a data exporter + +A {CLOUD_LONG} data exporter sends telemetry data from a {SERVICE_LONG} to a third-party monitoring +tool. 
You create an exporter on the [project level][projects], in the same AWS region as your {SERVICE_SHORT}: + + + + + +[cloudwatch]: https://aws.amazon.com/cloudwatch/ +[cloudwatch-signup]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/GettingSetup.html +[pricing-plan-features]: /about/pricing-and-account-management/#features-included-in-each-pricing-plan +[projects]: /use-timescale/security/members \ No newline at end of file diff --git a/integrations/integrate/corporate-data-center.mdx b/integrations/integrate/corporate-data-center.mdx index d41f6e8..e102946 100644 --- a/integrations/integrate/corporate-data-center.mdx +++ b/integrations/integrate/corporate-data-center.mdx @@ -1,4 +1,35 @@ --- -title: Corporate data center -description: TBD +title: Integrate your data center with Tiger Cloud +sidebarTitle: Corporate data center +description: Integrate your on-premise data center with Tiger Cloud using AWS Transit Gateway --- + +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +This page explains how to integrate your corporate on-premise infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. + +## Prerequisites + + + +- Set up [AWS Transit Gateway][gtw-setup]. + + + +## Connect your on-premise infrastructure to your {SERVICE_LONG}s + +To connect to {CLOUD_LONG}: + +1. **Connect your infrastructure to AWS Transit Gateway** + + Establish connectivity between your on-premise infrastructure and AWS. See the [Centralize network connectivity using AWS Transit Gateway][aws-onprem]. + + + +You have successfully integrated your corporate data center with {CLOUD_LONG}. + +[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ +[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html +[aws-onprem]: https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/centralize-network-connectivity-using-aws-transit-gateway.html \ No newline at end of file diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index 4b2004f..ee9ccd5 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -1,4 +1,151 @@ --- -title: Datadog -description: TBD +title: Integrate Datadog with Tiger Cloud +sidebarTitle: Datadog +description: Datadog is a cloud-based monitoring and analytics platform. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Datadog --- + +import { PG, SELF_LONG, SCALE, ENTERPRISE, PRICING_PLAN } from '/snippets/vars.mdx'; +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import DataDogExporter from '/snippets/integrations/_datadog-data-exporter.mdx'; +import ManageDataExporter from '/snippets/integrations/_manage-a-data-exporter.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +[Datadog][datadog] is a cloud-based monitoring and analytics platform that provides comprehensive visibility into +applications, infrastructure, and systems through real-time monitoring, logging, and analytics. + +This page explains how to: + +- [Monitor {SERVICE_LONG} metrics with Datadog][datadog-monitor-cloud] + + This integration is available for [{SCALE} and {ENTERPRISE}][pricing-plan-features] {PRICING_PLAN}s. 
+ +- Configure Datadog Agent to collect metrics for your {SERVICE_LONG} + + This integration is available for all {PRICING_PLAN}s. + + +## Prerequisites + + + +- Sign up for [Datadog][datadog-signup]. + + You need your [Datadog API key][datadog-api-key] to follow this procedure. + +- Install [Datadog Agent][datadog-agent-install]. + + + +## Monitor {SERVICE_LONG} metrics with Datadog + +Export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to +Datadog using a {CLOUD_LONG} data exporter. The available metrics include CPU usage, RAM usage, and storage. + +### Create a data exporter + +A {CLOUD_LONG} data exporter sends telemetry data from a {SERVICE_LONG} to a third-party monitoring +tool. You create an exporter on the [project level][projects], in the same AWS region as your {SERVICE_SHORT}: + + + +### Manage a data exporter + +This section shows you how to attach, monitor, edit, and delete a data exporter. + + + +## Configure Datadog Agent to collect metrics for your {SERVICE_LONG}s + +Datadog Agent includes a [{PG} integration][datadog-postgres] that you use to collect detailed {PG} database +metrics about your {SERVICE_LONG}s. + +1. **Connect to your {SERVICE_LONG}** + + For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][open-console]. For {SELF_LONG}, use [`psql`][psql]. + +1. **Add the `datadog` user to your {SERVICE_LONG}** + + ```sql + create user datadog with password ''; + ``` + + ```sql + grant pg_monitor to datadog; + ``` + + ```sql + grant SELECT ON pg_stat_database to datadog; + ``` + +1. **Test the connection and rights for the datadog user** + + Update the following command with your [connection details][connection-info], then run it from the command line: + + ```bash + psql "postgres://datadog:@:/tsdb?sslmode=require" -c \ + "select * from pg_stat_database LIMIT(1);" \ + && echo -e "\e[0;32mPostgres connection - OK\e[0m" || echo -e "\e[0;31mCannot connect to Postgres\e[0m" + ``` + You see the output from the `pg_stat_database` table, which means you have given the correct rights to `datadog`. + +1. **Connect Datadog to your {SERVICE_LONG}** + + 1. Configure the [Datadog Agent {PG} configuration file][datadog-config]; it is usually located on the Datadog Agent host at: + - **Linux**: `/etc/datadog-agent/conf.d/postgres.d/conf.yaml` + - **MacOS**: `/opt/datadog-agent/etc/conf.d/postgres.d/conf.yaml` + - **Windows**: `C:\ProgramData\Datadog\conf.d\postgres.d\conf.yaml` + + 1. Integrate Datadog Agent with your {SERVICE_LONG}: + + Use your [connection details][connection-info] to update the following and add it to the Datadog Agent {PG} + configuration file: + + ```yaml + init_config: + + instances: + - host: + port: + username: datadog + password: > + dbname: tsdb + disable_generic_tags: true + ``` + +1. **Add {CLOUD_LONG} metrics** + + Tags to make it easier for build Datadog dashboards that combine metrics from the {CLOUD_LONG} data exporter and + Datadog Agent. Use your [connection details][connection-info] to update the following and add it to + `/datadog.yaml`: + + ```yaml + tags: + - project-id: + - service-id: + - region: + ``` + +1. **Restart Datadog Agent** + + See how to [Start, stop, and restart Datadog Agent][datadog-agent-restart]. + +Metrics for your {SERVICE_LONG} are now visible in Datadog. Check the Datadog {PG} integration documentation for a +comprehensive list of [metrics][datadog-postgres-metrics] collected. 
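+
+If the {PG} metrics do not show up, you can confirm that Datadog Agent has loaded the check from the command line. The following is a quick sanity check, assuming a Linux or macOS host where the `datadog-agent` binary is on your path; on Windows, run the equivalent commands with `agent.exe`:
+
+```bash
+# Show overall Agent health, including whether the postgres check is scheduled
+sudo datadog-agent status
+
+# Run the postgres check once and print the metrics it would submit
+sudo datadog-agent check postgres
+```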
+ +[datadog]: https://www.datadoghq.com/ +[datadog-agent-install]: https://docs.datadoghq.com/getting_started/agent/#installation +[datadog-postgres]: https://docs.datadoghq.com/integrations/postgres/ +[datadog-postgres-metrics]:https://docs.datadoghq.com/integrations/postgres/?tab=host#metrics +[datadog-postgres-setup]: https://docs.datadoghq.com/integrations/postgres/?tab=host#configuration +[datadog-signup]: https://www.datadoghq.com/ +[datadog-monitor-cloud]: #monitor-tiger-cloud-service-metrics-with-datadog +[datadog-agent]: #configure-datadog-agent-to-collect-metrics-for-your-tiger-cloud-services +[datadog-agent-restart]: https://docs.datadoghq.com/agent/configuration/agent-commands/#start-stop-and-restart-the-agent +[projects]: /use-timescale/security/members +[datadog-api-key]: https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token +[pricing-plan-features]: /about/pricing-and-account-management/#features-included-in-each-pricing-plan +[run-queries]: /getting-started/run-queries-from-console +[open-console]: https://console.cloud.timescale.com/dashboard/services +[psql]: /integrations/psql +[connection-info]: /integrations/find-connection-details +[datadog-config]: https://docs.datadoghq.com/database_monitoring/setup_postgres/selfhosted?tab=postgres15 \ No newline at end of file diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index 7984641..1c669b6 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -1,4 +1,46 @@ --- -title: DBeaver -description: TBD +title: Integrate DBeaver with Tiger Cloud +sidebarTitle: DBeaver +description: DBeaver is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. Integrate DBeaver with Tiger Cloud --- + +import { CLOUD_LONG, SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[DBeaver][dbeaver] is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. DBeaver provides an SQL editor, administration features, data and schema migration, and the ability to monitor database connection sessions. + +This page explains how to integrate DBeaver with your {SERVICE_LONG}. + +## Prerequisites + + + +* Download and install [DBeaver][dbeaver-downloads]. + +## Connect DBeaver to your {SERVICE_LONG} + +To connect to {CLOUD_LONG}: + +1. **Start `DBeaver`** +1. **In the toolbar, click the plug+ icon** +1. **In `Connect to a database` search for `{TIMESCALE_DB}`** +1. **Select `{TIMESCALE_DB}`, then click `Next`** +1. **Configure the connection** + + Use your [connection details][connection-info] to add your connection settings. + ![DBeaver integration](https://assets.timescale.com/docs/images/integrations-dbeaver.png) + + If you configured your {SERVICE_SHORT} to connect using a [stricter SSL mode][ssl-mode], in the `SSL` tab check + `Use SSL` and set `SSL mode` to the configured mode. Then, in the `CA Certificate` field type the location of the SSL + root CA certificate. + +1. **Click `Test Connection`. When the connection is successful, click `Finish`** + + Your connection is listed in the `Database Navigator`. + +You have successfully integrated DBeaver with {CLOUD_LONG}. 
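+
+To double-check the connection, open an SQL editor on it (`SQL Editor` > `New SQL script`) and run a short query. For example, the following shows the server version and whether the {TIMESCALE_DB} extension is installed:
+
+```sql
+-- Server version reported by the database
+SELECT version();
+
+-- TimescaleDB extension version, if installed
+SELECT extname, extversion FROM pg_extension WHERE extname = 'timescaledb';
+```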
+ +[dbeaver]: https://dbeaver.io/ +[dbeaver-downloads]: https://dbeaver.io/download/ +[connection-info]: /integrations/find-connection-details +[ssl-mode]: /use-timescale/security/strict-ssl \ No newline at end of file diff --git a/integrations/integrate/debezium.mdx b/integrations/integrate/debezium.mdx index 6e00eae..e89b63f 100644 --- a/integrations/integrate/debezium.mdx +++ b/integrations/integrate/debezium.mdx @@ -1,4 +1,70 @@ --- -title: Debezium -description: TBD +title: Integrate Debezium with Tiger Cloud +sidebarTitle: Debezium +description: Integrate Debezium with Tiger Cloud to enable change data capture in your Tiger Cloud service and streaming to Redis Streams --- + +import { CLOUD_LONG, TIMESCALE_DB, SELF_LONG_CAP, SERVICE_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, COLUMNSTORE, ROWSTORE } from '/snippets/vars.mdx'; +import IntegrationPrereqsSelfOnly from '/snippets/prerequisites/_integration-prereqs-self-only.mdx'; +import IntegrationDebeziumDocker from '/snippets/integrations/_integration-debezium-docker.mdx'; +import IntegrationDebeziumSelfHostedConfig from '/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx'; + +[Debezium][debezium] is an open-source distributed platform for change data capture (CDC). +It enables you to capture changes in a {SELF_LONG} instance and stream them to other systems in real time. + +Debezium can capture events about: + +- [{HYPERTABLE_CAP}s][hypertables]: captured events are rerouted from their chunk-specific topics to a single logical topic + named according to the following pattern: `..` +- [{CAGG_CAP}s][caggs]: captured events are rerouted from their chunk-specific topics to a single logical topic + named according to the following pattern: `..` +- [{HYPERCORE_CAP}][hypercore]: if you enable {HYPERCORE_CAP}, the Debezium {TIMESCALE_DB} connector does not apply any special + processing to data in the {COLUMNSTORE}. Compressed chunks are forwarded unchanged to the next downstream job in the + pipeline for further processing as needed. Typically, messages with compressed chunks are dropped, and are not + processed by subsequent jobs in the pipeline. + + This limitation only affects changes to chunks in the {COLUMNSTORE}. Changes to data in the {ROWSTORE} work correctly. + + +This page explains how to capture changes in your database and stream them using Debezium on Apache Kafka. + +## Prerequisites + + + +- [Install Docker][install-docker] on your development machine. + +## Configure your database to work with Debezium + + + + + +To set up {SELF_LONG} to communicate with Debezium: + + + +## Configure Debezium to work with your database + +Set up Kafka Connect server, plugins, drivers, and connectors: + + + +And that is it, you have configured Debezium to interact with {TIMESCALE_DB}. + + + + + +Debezium requires logical replication to be enabled. Currently, this is not enabled by default on {SERVICE_LONG}s. +We are working on enabling this feature as you read. As soon as it is live, these docs will be updated. 
+ + + + + +[caggs]: /use-timescale/continuous-aggregates +[debezium]: https://debezium.io/ +[hypercore]: /use-timescale/hypercore +[hypertables]: /use-timescale/hypertables +[install-docker]: https://docs.docker.com/engine/install/ \ No newline at end of file diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 69beb26..b286840 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -1,4 +1,67 @@ --- -title: Decodable -description: TBD +title: Integrate Decodable with Tiger Cloud +sidebarTitle: Decodable +description: Decodable enables you to build, run, and manage data pipelines effortlessly. Seamlessly integrate Decodable with Tiger Cloud to unlock real-time data processing capabilities --- + +import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[Decodable][decodable] is a real-time data platform that allows you to build, run, and manage data pipelines effortlessly. + +![Decodable workflow](https://assets.timescale.com/docs/images/integrations-decodable-configuration.png) + +This page explains how to integrate Decodable with your {SERVICE_LONG} to enable efficient real-time streaming and analytics. + +## Prerequisites + + + +- Sign up for [Decodable][sign-up-decodable]. + + This page uses the pipeline you create using the [Decodable Quickstart Guide][decodable-quickstart]. + +## Connect Decodable to your {SERVICE_LONG} + +To stream data gathered in Decodable to a {SERVICE_LONG}: + +1. **Create the sync to pipe a Decodable data stream into your {SERVICE_LONG}** + + 1. Log in to your [Decodable account][decodable-app]. + 2. Click `Connections`, then click `New Connection`. + 3. Select a `{PG} sink` connection type, then click `Connect`. + 4. Using your [connection details][connection-info], fill in the connection information. + + Leave `schema` and `JDBC options` empty. + 5. Select the `http_events` source stream, then click `Next`. + + Decodable creates the table in your {SERVICE_LONG} and starts streaming data. + + + +2. **Test the connection** + + 1. Connect to your {SERVICE_LONG}. + + For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][console]. For {SELF_LONG}, use [`psql`][psql]. + + 2. Check the data from Decodable is streaming into your {SERVICE_LONG}. + + ```sql + SELECT * FROM http_events; + ``` + You see something like: + + ![Decodable workflow](https://assets.timescale.com/docs/images/integrations-decodable-data-in-service.png) + + +You have successfully integrated Decodable with {CLOUD_LONG}. 
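+
+As the pipeline continues to run, new events keep landing in the `http_events` table. The following checks make no assumptions about the column layout Decodable creates, so they are safe to run as-is:
+
+```sql
+-- Total rows streamed from Decodable so far
+SELECT count(*) FROM http_events;
+
+-- Sample a few rows to inspect the schema Decodable created
+SELECT * FROM http_events LIMIT 10;
+```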
+ +[connection-info]: /integrations/find-connection-details +[console]: https://console.cloud.timescale.com/dashboard/services +[decodable]: https://www.decodable.co/ +[decodable-app]: https://app.decodable.co/-/accounts +[decodable-quickstart]: https://docs.decodable.co/get-started/quickstart.html +[psql]: /integrations/integrate/psql +[run-queries]: /getting-started/run-queries-from-console +[sign-up-decodable]: https://auth.decodable.co/u/signup/ \ No newline at end of file diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index f13c87e..fc3a864 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -1,4 +1,78 @@ --- -title: Fivetran -description: TBD +title: Integrate Fivetran with Tiger Cloud +sidebarTitle: Fivetran +description: Fivetran is a fully managed data pipeline platform that simplifies extract, transform, and load processes. Integrate Fivetran with Tiger Cloud for seamless data synchronization --- + +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[Fivetran][fivetran] is a fully managed data pipeline platform that simplifies ETL (Extract, Transform, Load) processes +by automatically syncing data from multiple sources to your data warehouse. + +![Fivetran data in a service](https://assets.timescale.com/docs/images/integrations-fivetran-sync-data.png) + +This page shows you how to inject data from data sources managed by Fivetran into a {SERVICE_LONG}. + +## Prerequisites + + + +* Sign up for [Fivetran][sign-up-fivetran] + +## Set your {SERVICE_LONG} as a destination in Fivetran + +To be able to inject data into your {SERVICE_LONG}, set it as a destination in Fivetran: + +![Fivetran data destination](https://assets.timescale.com/docs/images/integrations-fivetran-destination-timescal-cloud.png) + +1. In [Fivetran Dashboard > Destinations][fivetran-dashboard-destinations], click `Add destination`. +2. Search for the `{PG}` connector and click `Select`. Add the destination name and click `Add`. +3. In the `{PG}` setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. + + Fivetran validates the connection settings and sets up any security configurations. +4. Click `View Destination`. + + The `Destination Connection Details` page opens. + +## Set up a Fivetran connection as your data source + +In a real world scenario, you can select any of the over 600 connectors available in Fivetran to sync data with your +{SERVICE_LONG}. This section shows you how to inject the logs for your Fivetran connections into your {SERVICE_LONG}. + +![Fivetran data source](https://assets.timescale.com/docs/images/integrations-fivetran-data-source.png) + +1. In [Fivetran Dashboard > Connections][fivetran-dashboard-connectors], click `Add connector`. +2. Search for the `Fivetran Platform` connector, then click `Setup`. +3. Leave the default schema name, then click `Save & Test`. + + You see `All connection tests passed!` +4. Click `Continue`, enable `Add Quickstart Data Model` and click `Continue`. + + Your Fivetran connection is connected to your {SERVICE_LONG} destination. +5. Click `Start Initial Sync`. + + Fivetran creates the log schema in your {SERVICE_SHORT} and syncs the data to your {SERVICE_SHORT}. + +## View Fivetran data in your {SERVICE_LONG} + +To see data injected by Fivetran into your {SERVICE_LONG}: + +1. 
In [data mode][portal-data-mode] in {CONSOLE}, select your {SERVICE_SHORT}, then run the following query: + ```sql + SELECT * + FROM fivetran_log.account + LIMIT 10; + ``` + You see something like the following: + + ![Fivetran data in a service](https://assets.timescale.com/docs/images/integrations-fivetran-view-data-in-service.png) + +You have successfully integrated Fivetran with {CLOUD_LONG}. + +[connection-info]: /integrations/find-connection-details +[fivetran]: https://fivetran.com/docs/getting-started +[fivetran-dashboard-connectors]: https://fivetran.com/dashboard/connections +[fivetran-dashboard-destinations]: https://fivetran.com/dashboard/destinations +[portal-data-mode]: https://console.cloud.timescale.com/dashboard/services?popsql +[sign-up-fivetran]: https://www.fivetran.com/ \ No newline at end of file diff --git a/integrations/integrate/google-cloud.mdx b/integrations/integrate/google-cloud.mdx index ab6cbd6..70b015f 100644 --- a/integrations/integrate/google-cloud.mdx +++ b/integrations/integrate/google-cloud.mdx @@ -1,4 +1,38 @@ --- -title: Google Cloud -description: TBD +title: Integrate Google Cloud with Tiger Cloud +sidebarTitle: Google Cloud +description: Google Cloud enables you to deploy, manage, and scale cloud-based applications, databases, and data processing workflows. Integrate Google Cloud with Tiger Cloud using AWS Transit Gateway --- + +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +[Google Cloud][google-cloud] is a suite of cloud computing services, offering scalable infrastructure, AI, analytics, databases, security, and developer tools to help businesses build, deploy, and manage applications. + +This page explains how to integrate your Google Cloud infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. + +## Prerequisites + + + +- Set up [AWS Transit Gateway][gtw-setup]. + + + +## Connect your Google Cloud infrastructure to your {SERVICE_LONG}s + +To connect to {CLOUD_LONG}: + +1. **Connect your infrastructure to AWS Transit Gateway** + + Establish connectivity between Google Cloud and AWS. See [Connect HA VPN to AWS peer gateways][gcp-aws]. + + + +You have successfully integrated your Google Cloud infrastructure with {CLOUD_LONG}. + +[google-cloud]: https://cloud.google.com/?hl=en +[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ +[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html +[gcp-aws]: https://cloud.google.com/network-connectivity/docs/vpn/how-to/connect-ha-vpn-aws-peer-gateway \ No newline at end of file diff --git a/integrations/integrate/grafana.mdx b/integrations/integrate/grafana.mdx index 89826e3..92810e7 100644 --- a/integrations/integrate/grafana.mdx +++ b/integrations/integrate/grafana.mdx @@ -1,4 +1,173 @@ --- -title: Grafana -description: TBD +title: Integrate Grafana with Tiger Cloud +sidebarTitle: Grafana +description: Grafana enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. 
Integrate Grafana with Tiger --- + +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import GrafanaConnect from '/snippets/integrations/_grafana-connect.mdx'; + +[Grafana][grafana-docs] enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. + +This page shows you how to integrate Grafana with a {SERVICE_LONG}, create a dashboard and panel, then visualize geospatial data. + +## Prerequisites + + + +* Install [self-managed Grafana][grafana-self-managed] or sign up for [Grafana Cloud][grafana-cloud]. + + + +## Create a Grafana dashboard and panel + +Grafana is organized into dashboards and panels. A dashboard represents a +view into the performance of a system, and each dashboard consists of one or +more panels, which represent information about a specific metric related to +that system. + +To create a new dashboard: + +1. **On the `Dashboards` page, click `New` and select `New dashboard`** + +1. **Click `Add visualization`** + +1. **Select the data source** + + Select your {SERVICE_SHORT} from the list of pre-configured data sources or configure a new one. + +1. **Configure your panel** + + Select the visualization type. The type defines specific fields to configure in addition to standard ones, such as the panel name. + +1. **Run your queries** + + You can edit the queries directly or use the built-in query editor. If you are visualizing time-series data, select `Time series` in the `Format` drop-down. + +1. **Click `Save dashboard`** + + You now have a dashboard with one panel. Add more panels to a dashboard by clicking `Add` at the top right and selecting `Visualization` from the drop-down. + +## Use the time filter function + +Grafana time-series panels include a time filter: + +1. **Call `$__timefilter()` to link the user interface construct in a Grafana panel with the query** + + For example, to set the `pickup_datetime` column as the filtering range for your visualizations: + + ```sql + SELECT + --1-- + time_bucket('1 day', pickup_datetime) AS "time", + --2-- + COUNT(*) + FROM rides + WHERE $__timeFilter(pickup_datetime) + ``` + +1. **Group your visualizations and order the results by [time buckets][time-buckets]** + + In this case, the `GROUP BY` and `ORDER BY` statements reference `time`. + + For example: + + ```sql + SELECT + --1-- + time_bucket('1 day', pickup_datetime) AS time, + --2-- + COUNT(*) + FROM rides + WHERE $__timeFilter(pickup_datetime) + GROUP BY time + ORDER BY time + ``` + + When you visualize this query in Grafana, you see this: + + ![{SERVICE_LONG} and Grafana query results](https://assets.timescale.com/docs/images/grafana_query_results.png) + + You can adjust the `time_bucket` function and compare the graphs: + + ```sql + SELECT + --1-- + time_bucket('5m', pickup_datetime) AS time, + --2-- + COUNT(*) + FROM rides + WHERE $__timeFilter(pickup_datetime) + GROUP BY time + ORDER BY time + ``` + + When you visualize this query, it looks like this: + + ![{SERVICE_LONG} and Grafana query results in time buckets](https://assets.timescale.com/docs/images/grafana_query_results_5m.png) + +## Visualize geospatial data + +Grafana includes a Geomap panel so you can see geospatial data +overlaid on a map. This can be helpful to understand how data +changes based on its location. + +This section visualizes taxi rides in Manhattan, where the distance traveled +was greater than 5 miles. It uses the same query as the [NYC Taxi Cab][nyc-taxi] +tutorial as a starting point. + +1. 
**Add a geospatial visualization** + + 1. In your Grafana dashboard, click `Add` > `Visualization`. + + 1. Select `Geomap` in the visualization type drop-down at the top right. + +1. **Configure the data format** + + 1. In the `Queries` tab below, select your data source. + + 1. In the `Format` drop-down, select `Table`. + + 1. In the mode switcher, toggle `Code` and enter the query, then click `Run`. + + For example: + + ```sql + SELECT time_bucket('5m', rides.pickup_datetime) AS time, + rides.trip_distance AS value, + rides.pickup_latitude AS latitude, + rides.pickup_longitude AS longitude + FROM rides + WHERE rides.trip_distance > 5 + GROUP BY time, + rides.trip_distance, + rides.pickup_latitude, + rides.pickup_longitude + ORDER BY time + LIMIT 500; + ``` + +1. **Customize the Geomap settings** + + With default settings, the visualization uses green circles of the fixed size. Configure at least the following for a more representative view: + + - `Map layers` > `Styles` > `Size` > `value`. + + This changes the size of the circle depending on the value, with bigger circles representing bigger values. + + - `Map layers` > `Styles` > `Color` > `value`. + + - `Thresholds` > Add `threshold`. + + Add thresholds for 7 and 10, to mark rides over 7 and 10 miles in different colors, respectively. + + You now have a visualization that looks like this: + + ![{SERVICE_LONG} and Grafana integration](https://assets.timescale.com/docs/images/timescale-grafana-integration.png) + +[grafana-docs]: https://grafana.com/docs/ +[nyc-taxi]: /tutorials/real-time-analytics-transport +[grafana-website]: https://www.grafana.com +[time-buckets]: /use-timescale/time-buckets +[grafana-self-managed]: https://grafana.com/get/?tab=self-managed +[grafana-cloud]: https://grafana.com/get/ \ No newline at end of file diff --git a/integrations/integrate/microsoft-azure.mdx b/integrations/integrate/microsoft-azure.mdx index 7bf8212..0778005 100644 --- a/integrations/integrate/microsoft-azure.mdx +++ b/integrations/integrate/microsoft-azure.mdx @@ -1,4 +1,38 @@ --- -title: Microsoft Azure -description: TBD +title: Integrate Microsoft Azure with Tiger Cloud +sidebarTitle: Microsoft Azure +description: Microsoft Azure enables you to build, deploy, and manage applications across cloud, hybrid, and edge environments. Integrate Microsoft Azure with Tiger Cloud using AWS Transit Gateway --- + +import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + +[Microsoft Azure][azure] is a cloud computing platform and services suite, offering infrastructure, AI, analytics, security, and developer tools to help businesses build, deploy, and manage applications. + +This page explains how to integrate your Microsoft Azure infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. + +## Prerequisites + + + +- Set up [AWS Transit Gateway][gtw-setup]. + + + +## Connect your Microsoft Azure infrastructure to your {SERVICE_LONG}s + +To connect to {CLOUD_LONG}: + +1. **Connect your infrastructure to AWS Transit Gateway** + + Establish connectivity between Azure and AWS. See the [AWS architectural documentation][azure-aws] for details. + + + +You have successfully integrated your Microsoft Azure infrastructure with {CLOUD_LONG}. 
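+
+Optionally, verify the network path end to end: from a virtual machine in the connected Azure VNet, connect to your {SERVICE_LONG} with `psql` using your connection details and run a simple query. This is a minimal sketch that does not depend on any particular schema:
+
+```sql
+-- Returns the server time and the address your session reached over the Transit Gateway route
+SELECT now() AS server_time, inet_server_addr() AS server_address;
+```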
+ +[aws-transit-gateway]: https://aws.amazon.com/transit-gateway/ +[gtw-setup]: https://docs.aws.amazon.com/vpc/latest/tgw/tgw-getting-started.html +[azure]: https://azure.microsoft.com/en-gb/ +[azure-aws]: https://aws.amazon.com/blogs/modernizing-with-aws/designing-private-network-connectivity-aws-azure/ \ No newline at end of file diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index fd7188f..425fc80 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -1,4 +1,40 @@ --- -title: pgAdmin -description: TBD +title: Integrate pgAdmin with Tiger Cloud +sidebarTitle: pgAdmin +description: pgAdmin is a feature-rich open-source administration and development platform for PostgreSQL. Integrate pgadmin with Tiger Cloud --- + +import { PG, CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[pgAdmin][pgadmin] is a feature-rich open-source administration and development platform for {PG}. It is available for Chrome, Firefox, Edge, and +Safari browsers, or can be installed on Microsoft Windows, Apple macOS, or various Linux flavors. + +![{CLOUD_LONG} pgadmin](https://assets.timescale.com/docs/images/timescale-cloud-pgadmin.png) + +This page explains how to integrate pgAdmin with your {SERVICE_LONG}. + +## Prerequisites + + + +- [Download][download-pgadmin] and install pgAdmin. + +## Connect pgAdmin to your {SERVICE_LONG} + +To connect to {CLOUD_LONG}: + +1. **Start pgAdmin** +1. **In the `Quick Links` section of the `Dashboard` tab, click `Add New Server`** +1. **In `Register - Server` > `General`, fill in the `Name` and `Comments` fields with the server name and description, respectively** +1. **Configure the connection** + 1. In the `Connection` tab, configure the connection using your [connection details][connection-info]. + 1. If you configured your {SERVICE_SHORT} to connect using a [stricter SSL mode][ssl-mode], then in the `SSL` tab check `Use SSL`, set `SSL mode` to the configured mode, and in the `CA Certificate` field type the location of the SSL root CA certificate to use. +1. **Click `Save`** + +You have successfully integrated pgAdmin with {CLOUD_LONG}. 
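+
+To confirm the connection works, open the Query Tool for the new server and run a quick check. This is a minimal sketch that assumes only the `timescaledb` extension installed by default on your {SERVICE_SHORT}:
+
+```sql
+-- Show the database and user you are connected as
+SELECT current_database(), current_user;
+
+-- Show the installed TimescaleDB version
+SELECT extname, extversion
+FROM pg_extension
+WHERE extname = 'timescaledb';
+```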
+ +[pgadmin]: https://www.pgadmin.org/ +[download-pgadmin]: https://www.pgadmin.org/download/ +[connection-info]: /integrations/find-connection-details +[ssl-mode]: /use-timescale/security/strict-ssl \ No newline at end of file diff --git a/integrations/integrate/postgresql.mdx b/integrations/integrate/postgresql.mdx index f90f693..3ea0816 100644 --- a/integrations/integrate/postgresql.mdx +++ b/integrations/integrate/postgresql.mdx @@ -1,4 +1,9 @@ --- -title: PostgreSQL -description: TBD +title: Integrate with PostgreSQL +sidebarTitle: PostgreSQL +description: Query any other Postgres database or another Tiger Cloud service from your service by using Postgres foreign data wrappers --- + +import FDW from '/snippets/integrations/_foreign-data-wrappers.mdx'; + + \ No newline at end of file diff --git a/integrations/integrate/power-bi.mdx b/integrations/integrate/power-bi.mdx index 54d7eaf..f930609 100644 --- a/integrations/integrate/power-bi.mdx +++ b/integrations/integrate/power-bi.mdx @@ -5,7 +5,7 @@ description: Integrate Power BI with Tiger Cloud --- import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; [Power BI][power-bi] is a business analytics tool for visualizing data, creating interactive reports, and sharing insights across an organization. diff --git a/integrations/integrate/prometheus.mdx b/integrations/integrate/prometheus.mdx index 4956d66..93a50bc 100644 --- a/integrations/integrate/prometheus.mdx +++ b/integrations/integrate/prometheus.mdx @@ -1,4 +1,12 @@ --- -title: Prometheus -description: TBD +title: Integrate Prometheus with Tiger Cloud +sidebarTitle: Prometheus +description: Prometheus is an open-source monitoring system with a modern alerting approach. Export telemetry metrics from your Tiger Cloud service to Prometheus --- + +import PrometheusIntegrate from '/snippets/integrations/_prometheus-integrate.mdx'; +import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; + + + + \ No newline at end of file diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx index a68be2b..0c3d6cc 100644 --- a/integrations/integrate/psql.mdx +++ b/integrations/integrate/psql.mdx @@ -1,4 +1,238 @@ --- -title: psql -description: TBD +title: Connect to a Tiger Cloud service with psql +sidebarTitle: psql +description: psql enables you to type in queries interactively, issue them to Postgres, and see the query results. Connect to your Tiger Cloud service using psql --- + +import { PG, SERVICE_SHORT, COMPANY } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[`psql`][psql-docs] is a terminal-based frontend to {PG} that enables you to type in queries interactively, issue them to Postgres, and see the query results. + +This page shows you how to use the `psql` command line tool to interact with your {SERVICE_LONG}. + +## Prerequisites + + + +## Check for an existing installation + +On many operating systems, `psql` is installed by default. To use the functionality described in this page, best practice is to use the latest version of `psql`. 
To check the version running on your system: + + + +```bash Linux/MacOS +psql --version +``` + +```powershell Windows +wmic +/output:C:\list.txt product get name, version +``` + + + +If you already have the latest version of `psql` installed, proceed to the [Connect to your {SERVICE_SHORT}][connect-database] section. + +## Install psql + +If there is no existing installation, take the following steps to install `psql`: + + + + + +Install using Homebrew. `libpqxx` is the official C++ client API for {PG}. + +1. Install Homebrew, if you don't already have it: + + ```bash + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + ``` + + For more information about Homebrew, including installation instructions, see the [Homebrew documentation][homebrew]. + +1. Make sure your Homebrew repository is up to date: + + ```bash + brew doctor + brew update + ``` + +1. Install `psql`: + + ```bash + brew install libpq + ``` + +1. Update your path to include the `psql` tool: + + ```bash + brew link --force libpq + ``` + +On Intel chips, the symbolic link is added to `/usr/local/bin`. On Apple Silicon, the symbolic link is added to `/opt/homebrew/bin`. + + + + + +Install using MacPorts. `libpqxx` is the official C++ client API for {PG}. + +1. [Install MacPorts][macports] by downloading and running the package installer. + +1. Make sure MacPorts is up to date: + + ```bash + sudo port selfupdate + ``` + +1. Install the latest version of `libpqxx`: + + ```bash + sudo port install libpqxx + ``` + +1. View the files that were installed by `libpqxx`: + + ```bash + port contents libpqxx + ``` + + + + + +Install `psql` on Debian and Ubuntu with the `apt` package manager. + +1. Make sure your `apt` repository is up to date: + + ```bash + sudo apt-get update + ``` + +1. Install the `postgresql-client` package: + + ```bash + sudo apt-get install postgresql-client + ``` + + + + + +`psql` is installed by default when you install {PG}. This procedure uses the interactive installer provided by {PG} and EnterpriseDB. + +1. Download and run the {PG} installer from [www.enterprisedb.com][windows-installer]. + +1. In the `Select Components` dialog, check `Command Line Tools`, along with any other components you want to install, and click `Next`. + +1. Complete the installation wizard to install the package. + + + + + +## Connect to your {SERVICE_SHORT} + +To use `psql` to connect to your {SERVICE_SHORT}, you need the connection details. See [Find your connection details][connection-info]. + +Connect to your {SERVICE_SHORT} with either: + +- The parameter flags: + + ```bash + psql -h -p -U -W -d + ``` + +- The {SERVICE_SHORT} URL: + + ```bash + psql "postgres://@:/?sslmode=require" + ``` + + You are prompted to provide the password. 
+ +- The {SERVICE_SHORT} URL with the password already included and [a stricter SSL mode][ssl-mode] enabled: + + ```bash + psql "postgres://:@:/?sslmode=verify-full" + ``` + +## Useful psql commands + +When you start using `psql`, these are the commands you are likely to use most frequently: + +|Command|Description| +|-|-| +|`\c `|Connect to a new database| +|`\d `|Show the details of a table| +|`\df`|List functions in the current database| +|`\df+`|List all functions with more details| +|`\di`|List all indexes from all tables| +|`\dn`|List all schemas in the current database| +|`\dt`|List available tables| +|`\du`|List {PG} database roles| +|`\dv`|List views in current schema| +|`\dv+`|List all views with more details| +|`\dx`|Show all installed extensions| +|`ef `|Edit a function| +|`\h`|Show help on syntax of SQL commands| +|`\l`|List available databases| +|`\password `|Change the password for the user| +|`\q`|Quit `psql`| +|`\set`|Show system variables list| +|`\timing`|Show how long a query took to execute| +|`\x`|Show expanded query results| +|`\?`|List all `psql` slash commands| + +For more on `psql` commands, see the [{COMPANY} psql cheat sheet][psql-cheat-sheet] and [psql documentation][psql-docs]. + +## Save query results to a file + +When you run queries in `psql`, the results are shown in the terminal by default. +If you are running queries that have a lot of results, you might like to save +the results into a comma-separated `.csv` file instead. You can do this using +the `COPY` command. For example: + +```sql +\copy (SELECT * FROM ...) TO '/tmp/output.csv' (format CSV); +``` + +This command sends the results of the query to a new file called `output.csv` in +the `/tmp/` directory. You can open the file using any spreadsheet program. + +## Run long queries + +To run multi-line queries in `psql`, use the `EOF` delimiter. For example: + +```sql +psql -d $TARGET -f -v hypertable= - <<'EOF' +SELECT public.alter_job(j.id, scheduled=>true) +FROM _timescaledb_config.bgw_job j +JOIN _timescaledb_catalog.hypertable h ON h.id = j.hypertable_id +WHERE j.proc_schema IN ('_timescaledb_internal', '_timescaledb_functions') +AND j.proc_name = 'policy_columnstore' +AND j.id >= 1000 +AND format('%I.%I', h.schema_name, h.table_name)::text::regclass = :'hypertable'::text::regclass; +EOF +``` + +## Edit queries in a text editor + +Sometimes, queries can get very long, and you might make a mistake when you try +typing it the first time around. If you have made a mistake in a long query, +instead of retyping it, you can use a built-in text editor, which is based on +`Vim`. Launch the query editor with the `\e` command. Your previous query is +loaded into the editor. When you have made your changes, press `Esc`, then type +`:`+`w`+`q` to save the changes, and return to the command prompt. Access the +edited query by pressing `↑`, and press `Enter` to run it. 
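+
+As a worked example that ties these commands together, the following exports an hourly aggregate to a CSV file with `\copy`. This is a sketch only: the `conditions` hypertable and its `time` and `temperature` columns are hypothetical, so replace them with your own table and columns:
+
+```sql
+\copy (SELECT time_bucket('1 hour', time) AS hour, avg(temperature) AS avg_temp FROM conditions GROUP BY hour ORDER BY hour) TO '/tmp/hourly_avg.csv' (FORMAT CSV, HEADER);
+```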
+ +[psql-cheat-sheet]: https://www.timescale.com/learn/postgres-cheat-sheet +[psql-docs]: https://www.postgresql.org/docs/current/app-psql.html +[ssl-mode]: /use-timescale/security/strict-ssl +[homebrew]: https://docs.brew.sh/Installation +[macports]: https://guide.macports.org/#installing.macports +[windows-installer]: https://www.postgresql.org/download/windows/ +[connect-database]: /integrations/psql/#connect-to-your-service +[connection-info]: /integrations/find-connection-details \ No newline at end of file diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx index d91fecd..3ef8781 100644 --- a/integrations/integrate/qstudio.mdx +++ b/integrations/integrate/qstudio.mdx @@ -1,4 +1,47 @@ --- -title: qStudio -description: TBD +title: Integrate qStudio with Tiger Cloud +sidebarTitle: qStudio +description: qStudio is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. Integrate qStudio with Tiger Cloud --- + +import { CLOUD_LONG } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; + +[qStudio][qstudio] is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. You can use it to run queries, browse tables, and create charts for your {SERVICE_LONG}. + +This page explains how to integrate qStudio with {CLOUD_LONG}. + +## Prerequisites + + + +* [Download][qstudio-downloads] and install qStudio. + +## Connect qStudio to your {SERVICE_LONG} + +To connect to {CLOUD_LONG}: + +1. **Start qStudio** +1. **Click `Server` > `Add Server`** +1. **Configure the connection** + + * For `Server Type`, select `Postgres`. + * For `Connect By`, select `Host`. + * For `Host`, `Port`, `Database`, `Username`, and `Password`, use + your [connection details][connection-info]. + + ![qStudio integration](https://assets.timescale.com/docs/images/integrations-qstudio.png) + +1. **Click `Test`** + + qStudio indicates whether the connection works. + +1. **Click `Add`** + + The server is listed in the `Server Tree`. + +You have successfully integrated qStudio with {CLOUD_LONG}. + +[qstudio]: https://www.timestored.com/qstudio/ +[qstudio-downloads]: https://www.timestored.com/qstudio/download +[connection-info]: /integrations/find-connection-details \ No newline at end of file diff --git a/integrations/integrate/supabase.mdx b/integrations/integrate/supabase.mdx index 9ecdeca..6bd06c4 100644 --- a/integrations/integrate/supabase.mdx +++ b/integrations/integrate/supabase.mdx @@ -1,4 +1,256 @@ --- -title: Supabase -description: TBD +title: Integrate Supabase with Tiger Cloud +sidebarTitle: Supabase +description: Supabase is an open source Firebase alternative. Integrate Supabase with Tiger Cloud --- + +import { PG, CLOUD_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, HYPERCORE, CAGG_CAP } from '/snippets/vars.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; + +[Supabase][supabase] is an open source Firebase alternative. This page shows how to run real-time analytical queries +against a {SERVICE_LONG} through Supabase using a foreign data wrapper (fdw) to bring aggregated data from your +{SERVICE_LONG}. 
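+
+As a preview of where this page ends up, the query below is a sketch of what you can run from Supabase once the `signs_per_minute` foreign table defined later on this page is in place. It reads the per-minute aggregate that your {SERVICE_LONG} maintains:
+
+```sql
+SELECT ts, name, total
+FROM signs_per_minute
+ORDER BY ts DESC
+LIMIT 10;
+```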
+ +## Prerequisites + + + +- Create a [Supabase project][supabase-new-project] + +## Set up your {SERVICE_LONG} + +To set up a {SERVICE_LONG} optimized for analytics to receive data from Supabase: + +1. **Optimize time-series data in hypertables** + + Time-series data represents how a system, process, or behavior changes over time. [{HYPERTABLE_CAP}][hypertables-section] + are {PG} tables that help you improve insert and query performance by automatically partitioning your data by + time. + + 1. [Connect to your {SERVICE_LONG}][connect] and create a table that will point to a Supabase database: + + ```sql + CREATE TABLE signs ( + time timestamptz NOT NULL DEFAULT now(), + origin_time timestamptz NOT NULL, + name TEXT + ) WITH ( + tsdb.hypertable + ); + ``` + + +1. **Optimize cooling data for analytics** + + {HYPERCORE_CAP} is the hybrid row-columnar storage engine in {TIMESCALE_DB}, designed specifically for real-time analytics + and powered by time-series data. The advantage of {HYPERCORE} is its ability to seamlessly switch between row-oriented + and column-oriented storage. This flexibility enables {TIMESCALE_DB} to deliver the best of both worlds, solving the + key challenges in real-time analytics. + + ```sql + ALTER TABLE signs SET ( + timescaledb.enable_columnstore = true, + timescaledb.segmentby = 'name'); + ``` + +1. **Create optimized analytical queries** + + {CAGG_CAP} are designed to make queries on very large datasets run + faster. {CAGG_CAP} in {CLOUD_LONG} use {PG} [materialized views][postgres-materialized-views] to + continuously, and incrementally refresh a query in the background, so that when you run the query, + only the data that has changed needs to be computed, not the entire dataset. + + 1. Create a continuous aggregate pointing to the Supabase database. + + ```sql + CREATE MATERIALIZED VIEW IF NOT EXISTS signs_per_minute + WITH (timescaledb.continuous) + AS + SELECT time_bucket('1 minute', time) as ts, + name, + count(*) as total + FROM signs + GROUP BY 1, 2 + WITH NO DATA; + ``` + + 1. Setup a delay stats comparing `origin_time` to `time`. + + ```sql + CREATE MATERIALIZED VIEW IF NOT EXISTS _signs_per_minute_delay + WITH (timescaledb.continuous) + AS + SELECT time_bucket('1 minute', time) as ts, + stats_agg(extract(epoch from origin_time - time)::float8) as delay_agg, + candlestick_agg(time, extract(epoch from origin_time - time)::float8, 1) as delay_candlestick + FROM signs GROUP BY 1 + WITH NO DATA; + ``` + + 1. Setup a view to recieve the data from Supabase. + + ```sql + CREATE VIEW signs_per_minute_delay + AS + SELECT ts, + average(delay_agg) as avg_delay, + stddev(delay_agg) as stddev_delay, + open(delay_candlestick) as open, + high(delay_candlestick) as high, + low(delay_candlestick) as low, + close(delay_candlestick) as close + FROM _signs_per_minute_delay + ``` + +1. **Add refresh policies for your analytical queries** + + You use `start_offset` and `end_offset` to define the time range that the continuous aggregate will cover. Assuming + that the data is being inserted without any delay, set the `start_offset` to `5 minutes` and the `end_offset` to + `1 minute`. This means that the continuous aggregate is refreshed every minute, and the refresh covers the last 5 + minutes. + You set `schedule_interval` to `INTERVAL '1 minute'` so the continuous aggregate refreshes on your {SERVICE_LONG} + every minute. The data is accessed from Supabase, and the continuous aggregate is refreshed every minute in + the other side. 
+ + ```sql + SELECT add_continuous_aggregate_policy('signs_per_minute', + start_offset => INTERVAL '5 minutes', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); + ``` + Do the same thing for data inserted with a delay: + ```sql + SELECT add_continuous_aggregate_policy('_signs_per_minute_delay', + start_offset => INTERVAL '5 minutes', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); + ``` + + +## Set up a Supabase database + +To set up a Supabase database that injects data into your {SERVICE_LONG}: + +1. **Connect a foreign server in Supabase to your {SERVICE_LONG}** + + 1. Connect to your Supabase project using Supabase dashboard or psql. + 1. Enable the `postgres_fdw` extension. + + ```sql + CREATE EXTENSION postgres_fdw; + ``` + 1. Create a foreign server that points to your {SERVICE_LONG}. + + Update the following command with your [connection details][connection-info], then run it + in the Supabase database: + + ```sql + CREATE SERVER timescale + FOREIGN DATA WRAPPER postgres_fdw + OPTIONS ( + host '', + port '', + dbname '', + sslmode 'require', + extensions 'timescaledb' + ); + ``` + +1. **Create the user mapping for the foreign server** + + Update the following command with your [connection details][connection-info], the run it + in the Supabase database: + + ```sql + CREATE USER MAPPING FOR CURRENT_USER + SERVER timescale + OPTIONS ( + user '', + password '' + ); + ``` + +1. **Create a foreign table that points to a table in your {SERVICE_LONG}.** + + This query introduced the following columns: + - `time`: with a default value of `now()`. This is because the `time` column is used by {CLOUD_LONG} to optimize data + in the {COLUMNSTORE}. + - `origin_time`: store the original timestamp of the data. + + Using both columns, you understand the delay between Supabase (`origin_time`) and the time the data is + inserted into your {SERVICE_LONG} (`time`). + + ```sql + CREATE FOREIGN TABLE signs ( + TIME timestamptz NOT NULL DEFAULT now(), + origin_time timestamptz NOT NULL, + NAME TEXT) + SERVER timescale OPTIONS ( + schema_name 'public', + table_name 'signs' + ); + ``` + +1. **Create a foreign table in Supabase** + + 1. Create a foreign table that matches the `signs_per_minute` view in your {SERVICE_LONG}. It represents a top level + view of the data. + + ```sql + CREATE FOREIGN TABLE signs_per_minute ( + ts timestamptz, + name text, + total int + ) + SERVER timescale OPTIONS (schema_name 'public', table_name 'signs_per_minute'); + ``` + + 1. Create a foreign table that matches the `signs_per_minute_delay` view in your {SERVICE_LONG}. + + ```sql + CREATE FOREIGN TABLE signs_per_minute_delay ( + ts timestamptz, + avg_delay float8, + stddev_delay float8, + open float8, + high float8, + low float8, + close float8 + ) SERVER timescale OPTIONS (schema_name 'public', table_name 'signs_per_minute_delay'); + ``` + +## Test the integration + +To inject data into your {SERVICE_LONG} from a Supabase database using a foreign table: + +1. **Insert data into your Supabase database** + + Connect to Supabase and run the following query: + + ```sql + INSERT INTO signs (origin_time, name) VALUES (now(), 'test') + ``` + +1. 
**Check the data in your {SERVICE_LONG}** + + [Connect to your {SERVICE_LONG}][connect] and run the following query: + + ```sql + SELECT * from signs; + ``` + You see something like: + + | origin_time | time | name | + |-------------|------|------| + | 2025-02-27 16:30:04.682391+00 | 2025-02-27 16:30:04.682391+00 | test | + +You have successfully integrated Supabase with your {SERVICE_LONG}. + +[supabase]: https://supabase.com/ +[supabase-new-project]: https://supabase.com/dashboard/new +[hypertables-section]: /use-timescale/hypertables +[connect]: /getting-started/run-queries-from-console +[hypercore]: /use-timescale/hypercore +[postgres-materialized-views]: https://www.postgresql.org/docs/current/rules-materializedviews.html +[connection-info]: /integrations/find-connection-details \ No newline at end of file diff --git a/integrations/integrate/tableau.mdx b/integrations/integrate/tableau.mdx index 0b9dfeb..97e6341 100644 --- a/integrations/integrate/tableau.mdx +++ b/integrations/integrate/tableau.mdx @@ -5,7 +5,7 @@ description: Integrate Tableau with Tiger Cloud --- import { CLOUD_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; [Tableau][tableau] is a popular analytics platform that helps you gain greater intelligence about your business. You can use it to visualize data stored in {CLOUD_LONG}. diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx index b0d7541..1cd7457 100644 --- a/integrations/integrate/telegraf.mdx +++ b/integrations/integrate/telegraf.mdx @@ -1,4 +1,151 @@ --- -title: Telegraf -description: TBD +title: Ingest data using Telegraf +sidebarTitle: Telegraf +description: Ingest data into a Tiger Cloud service using using the Telegraf plugin --- + +import { PG, HYPERTABLE, TIMESCALE_DB } from '/snippets/vars.mdx'; +import ImportPrerequisites from '/snippets/prerequisites/_migrate-import-prerequisites.mdx'; +import SetupConnectionString from '/snippets/procedures/_migrate-import-setup-connection-strings.mdx'; + +Telegraf is a server-based agent that collects and sends metrics and events from databases, +systems, and IoT sensors. Telegraf is an open source, plugin-driven tool for the collection +and output of data. + +To view metrics gathered by Telegraf and stored in a [{HYPERTABLE}][about-hypertables] in a +{SERVICE_LONG}. + +- [Link Telegraf to your {SERVICE_LONG}](#link-telegraf-to-your-service): create a Telegraf configuration +- [View the metrics collected by Telegraf](#view-the-metrics-collected-by-telegraf): connect to your {SERVICE_SHORT} and + query the metrics table + +## Prerequisites + + + +- [Install Telegraf][install-telegraf] + + +## Link Telegraf to your {SERVICE_SHORT} + +To create a Telegraf configuration that exports data to a {HYPERTABLE} in your {SERVICE_SHORT}: + +1. **Set up your {SERVICE_SHORT} connection string** + + + +1. **Generate a Telegraf configuration file** + + In Terminal, run the following: + + ```bash + telegraf --input-filter=cpu --output-filter=postgresql config > telegraf.conf + ``` + + `telegraf.conf` configures a CPU input plugin that samples + various metrics about CPU usage, and the {PG} output plugin. `telegraf.conf` + also includes all available input, output, processor, and aggregator + plugins. These are commented out by default. + +1. 
**Test the configuration** + + ```bash + telegraf --config telegraf.conf --test + ``` + + You see an output similar to the following: + + ```bash + 2022-11-28T12:53:44Z I! Starting Telegraf 1.24.3 + 2022-11-28T12:53:44Z I! Available plugins: 208 inputs, 9 aggregators, 26 processors, 20 parsers, 57 outputs + 2022-11-28T12:53:44Z I! Loaded inputs: cpu + 2022-11-28T12:53:44Z I! Loaded aggregators: + 2022-11-28T12:53:44Z I! Loaded processors: + 2022-11-28T12:53:44Z W! Outputs are not used in testing mode! + 2022-11-28T12:53:44Z I! Tags enabled: host=localhost + > cpu,cpu=cpu0,host=localhost usage_guest=0,usage_guest_nice=0,usage_idle=90.00000000087311,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=6.000000000040018,usage_user=3.999999999996362 1669640025000000000 + > cpu,cpu=cpu1,host=localhost usage_guest=0,usage_guest_nice=0,usage_idle=92.15686274495818,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=5.882352941192206,usage_user=1.9607843136712912 1669640025000000000 + > cpu,cpu=cpu2,host=localhost usage_guest=0,usage_guest_nice=0,usage_idle=91.99999999982538,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.999999999996362,usage_user=3.999999999996362 1669640025000000000 + ``` + +1. **Configure the {PG} output plugin** + + 1. In `telegraf.conf`, in the `[[outputs.postgresql]]` section, set `connection` to + the value of $TARGET. + + ```bash + connection = "" + ``` + + 1. Use {HYPERTABLE}s when Telegraf creates a new table: + + In the section that begins with the comment `## Templated statements to execute + when creating a new table`, add the following template: + + ```bash + ## Templated statements to execute when creating a new table. + # create_templates = [ + # '''CREATE TABLE {{ .table }} ({{ .columns }})''', + # ] + # table_template=`CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},by_range('time', INTERVAL '1 week'),if_not_exists := true);` + + ``` + + The `by_range` dimension builder was added to {TIMESCALE_DB} 2.13. + + +## View the metrics collected by Telegraf + +This section shows you how to generate system metrics using Telegraf, then connect to your +{SERVICE_SHORT} and query the metrics [{HYPERTABLE}][about-hypertables]. + +1. **Collect system metrics using Telegraf** + + Run the following command for a 30 seconds: + + ```bash + telegraf --config telegraf.conf + ``` + + Telegraf uses loaded inputs `cpu` and outputs `postgresql` along with + `global tags`, the intervals when the agent collects data from the inputs, and + flushes to the outputs. + +1. **View the metrics** + + 1. Connect to your {SERVICE_LONG}: + + ```bash + psql $TARGET + ``` + + 1. 
View the metrics collected in the `cpu` table in `tsdb`: + + ```sql + SELECT*FROM cpu; + ``` + + You see something like: + + ```sql + time | cpu | host | usage_guest | usage_guest_nice | usage_idle | usage_iowait | usage_irq | usage_nice | usage_softirq | usage_steal | usage_system | usage_user + ---------------------+-----------+----------------------------------+-------------+------------------+-------------------+--------------+-----------+------------+---------------+-------------+---------------------+--------------------- + 2022-12-05 12:25:20 | cpu0 | hostname | 0 | 0 | 83.08605341237833 | 0 | 0 | 0 | 0 | 0 | 6.824925815961274 | 10.089020771444481 + 2022-12-05 12:25:20 | cpu1 | hostname | 0 | 0 | 84.27299703278959 | 0 | 0 | 0 | 0 | 0 | 5.934718100814769 | 9.792284866395647 + 2022-12-05 12:25:20 | cpu2 | hostname | 0 | 0 | 87.53709198848934 | 0 | 0 | 0 | 0 | 0 | 4.747774480755411 | 7.715133531241037 + 2022-12-05 12:25:20 | cpu3 | hostname| 0 | 0 | 86.68639053296472 | 0 | 0 | 0 | 0 | 0 | 4.43786982253345 | 8.875739645039992 + 2022-12-05 12:25:20 | cpu4 | hostname | 0 | 0 | 96.15384615371369 | 0 | 0 | 0 | 0 | 0 | 1.1834319526667423 | 2.6627218934917614 + ``` + + To view the average usage per CPU core, use `SELECT cpu, avg(usage_user) FROM cpu GROUP BY cpu;`. + +For more information about the options that you can configure in Telegraf, +see the [PostgreSQL output plugin][output-plugin]. + + +[output-plugin]: https://github.com/influxdata/telegraf/blob/release-1.24/plugins/outputs/postgresql/README.md +[install-telegraf]: https://docs.influxdata.com/telegraf/v1/introduction/installation/ +[create-service]: /getting-started/ +[connect-timescaledb]: /integrations/find-connection-details +[grafana]: /integrations/grafana +[about-hypertables]: /use-timescale/hypertables \ No newline at end of file diff --git a/integrations/troubleshooting.mdx b/integrations/troubleshooting.mdx index 5efb347..878fb0b 100644 --- a/integrations/troubleshooting.mdx +++ b/integrations/troubleshooting.mdx @@ -1,4 +1,8 @@ --- title: Troubleshooting -description: TBD +description: Troubleshoot common problems that occur when integrating Tiger Cloud services with third-party solutions --- + +import JdbcAuthenticationNotSupported from '/snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx'; + + \ No newline at end of file diff --git a/snippets/integrations/_cloudwatch-data-exporter.mdx b/snippets/integrations/_cloudwatch-data-exporter.mdx index 1ce3609..b52cae6 100644 --- a/snippets/integrations/_cloudwatch-data-exporter.mdx +++ b/snippets/integrations/_cloudwatch-data-exporter.mdx @@ -1,14 +1,12 @@ import { CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; - - 1. **In {CONSOLE}, open [Exporters][console-integrations]** -1. **Click `New exporter`** -1. **Select the data type and specify `AWS CloudWatch` for provider** +2. **Click `New exporter`** +3. **Select the data type and specify `AWS CloudWatch` for provider** - ![Add CloudWatch data exporter](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-integrations-cloudwatch.png) + ![Add CloudWatch data exporter](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-integrations-cloudwatch.png) -1. **Provide your AWS CloudWatch configuration** +4. **Provide your AWS CloudWatch configuration** - The AWS region must be the same for your {CLOUD_LONG} exporter and AWS CloudWatch Log group. - The exporter name appears in {CONSOLE}, best practice is to make this name easily understandable. 
@@ -16,36 +14,34 @@ import { CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; or [create a new one][console-cloudwatch-create-group]. If you're uncertain, use the default values. For more information, see [Working with log groups and log streams][cloudwatch-log-naming]. -1. **Choose the authentication method to use for the exporter** +5. **Choose the authentication method to use for the exporter** ![Add CloudWatch authentication](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-integrations-cloudwatch-authentication.png) - + - - 1. In AWS, navigate to [IAM > Identity providers][create-an-iam-id-provider], then click `Add provider`. - 1. Update the new identity provider with your details: + 2. Update the new identity provider with your details: Set `Provider URL` to the [region where you are creating your exporter][reference]. ![oidc provider creation](https://assets.timescale.com/docs/images/aws-create-iam-oicd-provider.png) - 1. Click `Add provider`. + 3. Click `Add provider`. - 1. In AWS, navigate to [IAM > Roles][add-id-provider-as-wi-role], then click `Create role`. + 4. In AWS, navigate to [IAM > Roles][add-id-provider-as-wi-role], then click `Create role`. - 1. Add your identity provider as a Web identity role and click `Next`. + 5. Add your identity provider as a Web identity role and click `Next`. ![web identity role creation](https://assets.timescale.com/docs/images/aws-create-role-web-identity.png) - 1. Set the following permission and trust policies: + 6. Set the following permission and trust policies: - Permission policy: - + ```json { "Version": "2012-10-17", @@ -69,10 +65,10 @@ import { CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; "Resource": "*" } ] - } + } ``` - Role with a Trust Policy: - + ```json { "Version": "2012-10-17", @@ -98,18 +94,14 @@ import { CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; "Action": "sts:AssumeRole" } ] - } - ``` - 1. Click `Add role`. - - + } + ``` + 7. Click `Add role`. - - When you use CloudWatch credentials, you link an Identity and Access Management (IAM) user with access to CloudWatch only with your {SERVICE_LONG}: @@ -119,27 +111,23 @@ import { CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; [create one][create-an-iam-user]. For more information, see [Creating IAM users (console)][aws-access-keys]. - 1. Enter the credentials for the AWS IAM user. + 2. Enter the credentials for the AWS IAM user. AWS keys give access to your AWS services. To keep your AWS account secure, restrict users to the minimum required permissions. Always store your keys in a safe location. To avoid this issue, use the IAM role authentication method. - - - - -1. Select the AWS Region your CloudWatch services run in, then click `Create exporter`. + - +6. Select the AWS Region your CloudWatch services run in, then click `Create exporter`. 
-[console-integrations]: https://console.cloud.timescale.com/dashboard/integrations +[add-id-provider-as-wi-role]: https://console.aws.amazon.com/iam/home#/roles +[aws-access-keys]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html#id_users_create_console +[cloudwatch-log-naming]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html [console-cloudwatch-configuration]: https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups [console-cloudwatch-create-group]: https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups/create-log-group -[cloudwatch-log-naming]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html -[reference]: #reference -[list-iam-users]: https://console.aws.amazon.com/iam/home#/users -[create-an-iam-user]: https://console.aws.amazon.com/iam/home#/users/create +[console-integrations]: https://console.cloud.timescale.com/dashboard/integrations [create-an-iam-id-provider]: https://console.aws.amazon.com/iam/home#/identity_providers -[add-id-provider-as-wi-role]: https://console.aws.amazon.com/iam/home#/roles -[aws-access-keys]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html#id_users_create_console +[create-an-iam-user]: https://console.aws.amazon.com/iam/home#/users/create +[list-iam-users]: https://console.aws.amazon.com/iam/home#/users +[reference]: #reference diff --git a/snippets/integrations/_datadog-data-exporter.mdx b/snippets/integrations/_datadog-data-exporter.mdx index 931b192..d2ef728 100644 --- a/snippets/integrations/_datadog-data-exporter.mdx +++ b/snippets/integrations/_datadog-data-exporter.mdx @@ -1,12 +1,10 @@ import { CONSOLE, CLOUD_LONG } from '/snippets/vars.mdx'; - - 1. **In {CONSOLE}, open [Exporters][console-integrations]** 1. **Click `New exporter`** 1. **Select `Metrics` for `Data type` and `Datadog` for provider** - ![Add Datadog exporter](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-cloud-integrations-datadog.png) + ![Add Datadog exporter](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-integrations-datadog.png) 1. **Choose your AWS region and provide the API key** @@ -14,6 +12,4 @@ import { CONSOLE, CLOUD_LONG } from '/snippets/vars.mdx'; 1. **Set `Site` to your Datadog region, then click `Create exporter`** - - [console-integrations]: https://console.cloud.timescale.com/dashboard/integrations \ No newline at end of file diff --git a/snippets/integrations/_foreign-data-wrappers.mdx b/snippets/integrations/_foreign-data-wrappers.mdx index 8068673..554dd69 100644 --- a/snippets/integrations/_foreign-data-wrappers.mdx +++ b/snippets/integrations/_foreign-data-wrappers.mdx @@ -1,5 +1,5 @@ import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import { PG, SERVICE_LONG, CLOUD_LONG, VPC, SERVICE_SHORT } from '/snippets/vars.mdx'; +import { PG, SERVICE_LONG, CLOUD_LONG, VPC, SERVICE_SHORT, SELF_LONG_CAP } from '/snippets/vars.mdx'; You use {PG} foreign data wrappers (FDWs) to query external data sources from a {SERVICE_LONG}. These external data sources can be one of the following: @@ -16,16 +16,14 @@ If you are using {VPC} peering, you can create FDWs in your Customer VPC to quer ## Query another data source -To query another data source: +To query another data source: - + - + You create {PG} FDWs with the `postgres_fdw` extension, which is enabled by default in {CLOUD_LONG}. - - 1. 
**Connect to your service** See [how to connect][connect]. @@ -87,9 +85,6 @@ You create {PG} FDWs with the `postgres_fdw` extension, which is enabled by defa SERVER film_server; ``` - - - A user with the `tsdbadmin` role assigned already has the required `USAGE` permission to create {PG} FDWs. You can enable another user, without the `tsdbadmin` role assigned, to query foreign data. To do so, explicitly grant the permission. For example, for a new `grafana` user: ```sql @@ -116,11 +111,9 @@ IMPORT FOREIGN SCHEMA public - + -You create {PG} FDWs with the `postgres_fdw` extension. See [documenation][enable-fdw-docs] on how to enable it. - - +You create {PG} FDWs with the `postgres_fdw` extension. See [documenation][enable-fdw-docs] on how to enable it. 1. **Connect to your database** @@ -183,15 +176,13 @@ You create {PG} FDWs with the `postgres_fdw` extension. See [documenation][enabl SERVER film_server; ``` - - -[vpc-peering]: /use-timescale/:currentVersion:/security/vpc/ -[sql-editor]: /getting-started/:currentVersion:/run-queries-from-console/#ops-mode-sql-editor/ -[connect]: /getting-started/:currentVersion:/run-queries-from-console/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ +[vpc-peering]: /use-timescale/security/vpc +[sql-editor]: /getting-started/run-queries-from-console/#ops-mode-sql-editor +[connect]: /getting-started/run-queries-from-console +[connection-info]: /integrations/find-connection-details [enable-fdw-docs]: https://www.postgresql.org/docs/current/postgres-fdw.html -[psql]: /integrations/:currentVersion:/psql/ +[psql]: /integrations/psql diff --git a/snippets/integrations/_grafana-connect.mdx b/snippets/integrations/_grafana-connect.mdx index 66a382b..aec3a8b 100644 --- a/snippets/integrations/_grafana-connect.mdx +++ b/snippets/integrations/_grafana-connect.mdx @@ -1,35 +1,31 @@ -import { SERVICE_SHORT } from '/snippets/vars.mdx'; +import { CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; -## Connect Grafana to Tiger Cloud +## Connect Grafana to {CLOUD_LONG} To visualize the results of your queries, enable Grafana to read the data in your {SERVICE_SHORT}: - - -1. **Log in to Grafana** +1. **Log in to Grafana** In your browser, log in to either: - Self-hosted Grafana: at `http://localhost:3000/`. The default credentials are `admin`, `admin`. - Grafana Cloud: use the URL and credentials you set when you created your account. 1. **Add your {SERVICE_SHORT} as a data source** 1. Open `Connections` > `Data sources`, then click `Add new data source`. - 1. Select `PostgreSQL` from the list. + 1. Select `PostgreSQL` from the list. 1. Configure the connection: - - `Host URL`, `Database name`, `Username`, and `Password` - + - `Host URL`, `Database name`, `Username`, and `Password` + Configure using your [connection details][connection-info]. `Host URL` is in the format `:`. - `TLS/SSL Mode`: select `require`. - `PostgreSQL options`: enable `TimescaleDB`. - Leave the default setting for all other fields. - 1. Click `Save & test`. - - Grafana checks that your details are set correctly. + 1. Click `Save & test`. - + Grafana checks that your details are set correctly. 
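+
+1. **Optionally, run a test query**
+
+   To confirm Grafana can read from your {SERVICE_SHORT}, open `Explore`, select the new data source, and run a query. This is a sketch only: the `metrics` hypertable and its `time` and `value` columns are hypothetical, so replace them with one of your own tables:
+
+   ```sql
+   SELECT
+     time_bucket('1 hour', time) AS "time",
+     avg(value) AS avg_value
+   FROM metrics
+   WHERE $__timeFilter(time)
+   GROUP BY 1
+   ORDER BY 1;
+   ```
+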
[grafana-self-managed]: https://grafana.com/get/?tab=self-managed [grafana-cloud]: https://grafana.com/get/ [cloud-login]: https://console.cloud.timescale.com/ [create-service]: /cloud/get-started/create-services -[connection-info]: /integrations/:currentVersion:/find-connection-details/ +[connection-info]: /integrations/find-connection-details diff --git a/snippets/integrations/_integration-debezium-docker.mdx b/snippets/integrations/_integration-debezium-docker.mdx index 2736b93..a8927ef 100644 --- a/snippets/integrations/_integration-debezium-docker.mdx +++ b/snippets/integrations/_integration-debezium-docker.mdx @@ -1,3 +1,4 @@ +import { PG, SELF_LONG } from '/snippets/vars.mdx'; 1. **Run Zookeeper in Docker** @@ -7,7 +8,7 @@ ``` Check the output log to see that zookeeper is running. -1. **Run Kafka in Docker** +2. **Run Kafka in Docker** In another Terminal window, run the following command: ```bash @@ -16,7 +17,7 @@ Check the output log to see that Kafka is running. -1. **Run Kafka Connect in Docker** +3. **Run Kafka Connect in Docker** In another Terminal window, run the following command: ```bash @@ -33,9 +34,9 @@ Check the output log to see that Kafka Connect is running. -1. **Register the Debezium {PG} source connector** +4. **Register the Debezium {PG} source connector** - Update the `` for the `` you created in your {SELF_LONG} instance in the following command. + Update the `properties` for the `debezium-user` you created in your {SELF_LONG} instance in the following command. Then run the command in another Terminal window: ```bash curl -X POST http://localhost:8083/connectors \ @@ -63,7 +64,7 @@ }' ``` -1. **Verify `timescaledb-source-connector` is included in the connector list** +5. **Verify `timescaledb-source-connector` is included in the connector list** 1. Check the tasks associated with `timescaledb-connector`: ```bash @@ -84,7 +85,7 @@ "name":"timescaledb-connector"},"tasks":[{"connector":"timescaledb-connector","task":0}],"type":"source"} ``` -1. **Verify `timescaledb-connector` is running** +6. **Verify `timescaledb-connector` is running** 1. Open the Terminal window running Kafka Connect. When the connector is active, you see something like the following: @@ -109,7 +110,7 @@ 2025-04-30 10:40:15,219 INFO Postgres|accounts|streaming Processing messages [io.debezium.connector.postgresql.PostgresStreamingChangeEventSource] ``` - 1. Watch the events in the accounts topic on your {SELF_LONG} instance. + 2. Watch the events in the accounts topic on your {SELF_LONG} instance. In another Terminal instance, run the following command: diff --git a/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx b/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx index c265898..22922bd 100644 --- a/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx +++ b/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx @@ -1,4 +1,4 @@ -import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; +import { PG, SELF_LONG, CAGG_CAP } from '/snippets/vars.mdx'; 1. **Configure your self-hosted {PG} deployment** @@ -11,7 +11,7 @@ import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; - MacOS: `/opt/homebrew/var/postgresql@/` - Windows: `C:\Program Files\PostgreSQL\\data\` - 1. Enable logical replication. + 2. Enable logical replication. Modify the following settings in `postgresql.conf`: @@ -21,7 +21,7 @@ import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; max_wal_senders = 10 ``` - 1. 
Open `pg_hba.conf` and enable host replication. + 3. Open `pg_hba.conf` and enable host replication. To allow replication connections, add the following: @@ -31,14 +31,14 @@ import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; This permission is for the `debezium` {PG} user running on a local or Docker deployment. For more about replication permissions, see [Configuring {PG} to allow replication with the Debezium connector host][debezium-replication-permissions]. - 1. Restart {PG}. + 4. Restart {PG}. -1. **Connect to your {SELF_LONG} instance** +2. **Connect to your {SELF_LONG} instance** Use [`psql`][psql-connect]. -1. **Create a Debezium user in {PG}** +3. **Create a Debezium user in {PG}** Create a user with the `LOGIN` and `REPLICATION` permissions: @@ -46,7 +46,7 @@ import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; CREATE ROLE debezium WITH LOGIN REPLICATION PASSWORD ''; ``` -1. **Enable a replication spot for Debezium** +4. **Enable a replication spot for Debezium** 1. Create a table for Debezium to listen to: @@ -56,22 +56,20 @@ import { PG, SELF_LONG, CAGG } from '/snippets/vars.mdx'; city TEXT); ``` - 1. Turn the table into a hypertable: + 2. Turn the table into a hypertable: ```sql SELECT create_hypertable('accounts', 'created_at'); ``` - Debezium also works with [{CAGG}s][caggs]. + Debezium also works with [{CAGG_CAP}s][caggs]. - 1. Create a publication and enable a replication slot: + 3. Create a publication and enable a replication slot: ```sql CREATE PUBLICATION dbz_publication FOR ALL TABLES WITH (publish = 'insert, update'); ``` -[caggs]: /use-timescale/:currentVersion:/continuous-aggregates/ -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[open-console]: https://console.cloud.timescale.com/dashboard/services -[psql-connect]: /integrations/:currentVersion:/psql/#connect-to-your-service -[debezium-replication-permissions]: https://debezium.io/documentation/reference/3.1/connectors/postgresql.html#postgresql-host-replication-permissions +[caggs]: /use-timescale/continuous-aggregates +[debezium-replication-permissions]: https://debezium.io/documentation/reference/3.2/connectors/postgresql.html#postgresql-host-replication-permissions +[psql-connect]: /integrations/integrate/psql/#connect-to-your-service diff --git a/snippets/integrations/_integration-prereqs-cloud-only.mdx b/snippets/integrations/_integration-prereqs-cloud-only.mdx deleted file mode 100644 index 56b2e62..0000000 --- a/snippets/integrations/_integration-prereqs-cloud-only.mdx +++ /dev/null @@ -1,7 +0,0 @@ -import { SERVICE_LONG } from '/snippets/vars.mdx'; - -To follow the steps on this page: - -* Create a target {SERVICE_LONG} with the Real-time analytics capability. - - You need your [connection details](/integrations/find-connection-details). \ No newline at end of file diff --git a/snippets/integrations/_manage-a-data-exporter.mdx b/snippets/integrations/_manage-a-data-exporter.mdx index adff76c..76bd796 100644 --- a/snippets/integrations/_manage-a-data-exporter.mdx +++ b/snippets/integrations/_manage-a-data-exporter.mdx @@ -5,16 +5,12 @@ import { SERVICE_LONG, SERVICE_SHORT, CONSOLE } from '/snippets/vars.mdx'; To send telemetry data to an external monitoring tool, you attach a data exporter to your {SERVICE_LONG}. You can attach only one exporter to a {SERVICE_SHORT}. -To attach an exporter: - - +To attach an exporter: 1. **In [{CONSOLE}][console-services], choose the {SERVICE_SHORT}** -1. **Click `Operations` > `Exporters`** -1. 
**Select the exporter, then click `Attach exporter`** -1. **If you are attaching a first `Logs` data type exporter, restart the {SERVICE_SHORT}** - - +2. **Click `Operations` > `Exporters`** +3. **Select the exporter, then click `Attach exporter`** +4. **If you are attaching a first `Logs` data type exporter, restart the {SERVICE_SHORT}** ### Monitor {SERVICE_LONG} metrics @@ -42,38 +38,30 @@ Additionally, use the following tags to filter your results. To update a data exporter: - - 1. **In {CONSOLE}, open [Exporters][console-integrations]** -1. **Next to the exporter you want to edit, click the menu > `Edit`** -1. **Edit the exporter fields and save your changes** +2. **Next to the exporter you want to edit, click the menu > `Edit`** +3. **Edit the exporter fields and save your changes** You cannot change fields such as the provider or the AWS region. - - ### Delete a data exporter To remove a data exporter that you no longer need: - - 1. **Disconnect the data exporter from your {SERVICE_LONG}s** 1. In [{CONSOLE}][console-services], choose the {SERVICE_SHORT}. - 1. Click `Operations` > `Exporters`. - 1. Click the trash can icon. - 1. Repeat for every {SERVICE_SHORT} attached to the exporter you want to remove. + 2. Click `Operations` > `Exporters`. + 3. Click the trash can icon. + 4. Repeat for every {SERVICE_SHORT} attached to the exporter you want to remove. The data exporter is now unattached from all {SERVICE_SHORT}s. However, it still exists in your project. -1. **Delete the exporter on the project level** +2. **Delete the exporter on the project level** 1. In {CONSOLE}, open [Exporters][console-integrations] - 1. Next to the exporter you want to edit, click menu > `Delete` - 1. Confirm that you want to delete the data exporter. - - + 2. Next to the exporter you want to edit, click menu > `Delete` + 3. Confirm that you want to delete the data exporter. 
### Reference @@ -94,21 +82,5 @@ It must be one of the following: | `us-east-2` | United States | Ohio | `irsa-oidc-discovery-prod-us-east-2.s3.us-east-2.amazonaws.com` | `us-west-2` | United States | Oregon | `irsa-oidc-discovery-prod-us-west-2.s3.us-west-2.amazonaws.com` -[aws-access-keys]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html#id_users_create_console -[irsa]: https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/ -[cross-account-iam-roles]: https://aws.amazon.com/blogs/containers/cross-account-iam-roles-for-kubernetes-service-accounts/ -[cloudwatch]: https://aws.amazon.com/cloudwatch/ -[cloudwatch-docs]: https://docs.aws.amazon.com/cloudwatch/index.html -[cloudwatch-log-naming]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html -[datadog]: https://www.datadoghq.com -[datadog-api-key]: https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token -[datadog-docs]: https://docs.datadoghq.com/ -[datadog-metrics-explorer]: https://app.datadoghq.com/metric/explorer [console-integrations]: https://console.cloud.timescale.com/dashboard/integrations [console-services]: https://console.cloud.timescale.com/dashboard/services -[list-iam-users]: https://console.aws.amazon.com/iam/home#/users -[create-an-iam-user]: https://console.aws.amazon.com/iam/home#/users/create -[console-cloudwatch-configuration]: https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups -[console-cloudwatch-create-group]: https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups/create-log-group -[services-portal]: https://console.cloud.timescale.com/dashboard/services -[pricing-plan-features]: /about/:currentVersion:/pricing-and-account-management/#features-included-in-each-plan diff --git a/snippets/integrations/_prometheus-integrate.mdx b/snippets/integrations/_prometheus-integrate.mdx index 7535853..e4601d8 100644 --- a/snippets/integrations/_prometheus-integrate.mdx +++ b/snippets/integrations/_prometheus-integrate.mdx @@ -1,12 +1,11 @@ -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; -import { SERVICE_SHORT, CLOUD_LONG, CONSOLE, SELF_LONG, SERVICE_LONG } from "/snippets/vars.mdx"; +import { SERVICE_SHORT, CLOUD_LONG, CONSOLE, SELF_LONG, SERVICE_LONG, SCALE, ENTERPRISE, PRICING_PLAN, SELF_LONG_CAP } from "/snippets/vars.mdx"; [Prometheus][prometheus] is an open-source monitoring system with a dimensional data model, flexible query language, and a modern alerting approach. This page shows you how to export your {SERVICE_SHORT} telemetry to Prometheus: -- For {CLOUD_LONG}, using a dedicated Prometheus exporter in {CONSOLE}. -- For {SELF_LONG}, using [Postgres Exporter][postgresql-exporter]. +- For {CLOUD_LONG}, using a dedicated Prometheus exporter in {CONSOLE}. +- For {SELF_LONG}, using [Postgres Exporter][postgresql-exporter]. ## Prerequisites @@ -14,7 +13,7 @@ To follow the steps on this page: - [Download and run Prometheus][install-prometheus]. - For {CLOUD_LONG}: - + Create a target [{SERVICE_LONG}][create-service] with the time-series and analytics capability enabled. - For {SELF_LONG}: - Create a target [{SELF_LONG}][enable-timescaledb] instance. You need your [connection details][connection-info]. 
@@ -25,13 +24,11 @@ To follow the steps on this page: To export your data, do the following: - - - + - + -To export metrics from a {SERVICE_LONG}, you create a dedicated Prometheus exporter in {CONSOLE}, attach it to your {SERVICE_SHORT}, then configure Prometheus to scrape metrics using the exposed URL. The Prometheus exporter exposes the metrics related to the {SERVICE_LONG} like CPU, memory, and storage. To scrape other metrics, use Postgres Exporter as described for {SELF_LONG}. The Prometheus exporter is available for [Scale and Enterprise][pricing-plan-features] pricing plans. +To export metrics from a {SERVICE_LONG}, you create a dedicated Prometheus exporter in {CONSOLE}, attach it to your {SERVICE_SHORT}, then configure Prometheus to scrape metrics using the exposed URL. The Prometheus exporter exposes the metrics related to the {SERVICE_LONG} like CPU, memory, and storage. To scrape other metrics, use Postgres Exporter as described for {SELF_LONG}. The Prometheus exporter is available for [{SCALE} and {ENTERPRISE}][pricing-plan-features] {PRICING_PLAN}s. 1. **Create a Prometheus exporter** @@ -102,16 +99,10 @@ To export metrics from a {SERVICE_LONG}, you create a dedicated Prometheus expor |`service-id`|| | |`region`|`us-east-1`| AWS region | |`role`|`replica` or `primary`| For {SERVICE_SHORT} with replicas | - - - - - - - + To export metrics from {SELF_LONG}, you import telemetry data about your database to Postgres Exporter, then configure Prometheus to scrape metrics from it. Postgres Exporter exposes metrics that you define, excluding the system metrics. @@ -187,8 +178,6 @@ To export metrics from {SELF_LONG}, you import telemetry data about your databas You see the Postgres Exporter target and the metrics scraped from it. - - @@ -199,17 +188,16 @@ You can further [visualize your data][grafana-prometheus] with Grafana. 
Use the [install-exporter]: https://grafana.com/oss/prometheus/exporters/postgres-exporter/?tab=installation [postgresql-exporter-dashboard]: https://grafana.com/oss/prometheus/exporters/postgres-exporter/?tab=dashboards [install-prometheus]: https://prometheus.io/docs/prometheus/latest/installation/ -[grafana]: /integrations/:currentVersion:/grafana/ +[grafana]: /integrations/grafana [grafana-prometheus]: https://grafana.com/docs/grafana-cloud/send-data/metrics/metrics-prometheus/ [prometheus]: https://prometheus.io/docs/introduction/overview/ -[run-queries]: /getting-started/:currentVersion:/run-queries-from-console/ -[psql]: /integrations/:currentVersion:/psql/ -[connection-info]: /integrations/:currentVersion:/find-connection-details/ +[run-queries]: /getting-started/run-queries-from-console +[psql]: /integrations/psql +[connection-info]: /integrations/find-connection-details [postgresql-exporter]: https://grafana.com/oss/prometheus/exporters/postgres-exporter/ [open-console]: https://console.cloud.timescale.com/dashboard/services -[connection-info]: /integrations/:currentVersion:/find-connection-details/ [create-service]: /cloud/get-started/create-services -[enable-timescaledb]: /self-hosted/:currentVersion:/install/ +[enable-timescaledb]: /open-source/timescaledb/install-and-update/install-self-hosted [prometheus-authentication]: https://prometheus.io/docs/guides/basic-auth/ [scrape-targets]: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config -[pricing-plan-features]: /about/:currentVersion:/pricing-and-account-management/#features-included-in-each-plan +[pricing-plan-features]: /about/pricing-and-account-management/#features-included-in-each-pricing-plan diff --git a/snippets/integrations/_transit-gateway.mdx b/snippets/integrations/_transit-gateway.mdx index 68212ce..1118ba2 100644 --- a/snippets/integrations/_transit-gateway.mdx +++ b/snippets/integrations/_transit-gateway.mdx @@ -1,23 +1,23 @@ -import { VPC, CONSOLE, CLOUD_LONG, SERVICE_SHORT, SERVICE_LONG } from '/snippets/vars.mdx'; +import { VPC, CONSOLE, CLOUD_LONG, SERVICE_SHORT, SERVICE_LONG, PRICING_PLAN } from '/snippets/vars.mdx'; 1. **Create a Peering {VPC} in [{CONSOLE}][console-login]** 1. In `Security` > `VPC`, click `Create a VPC`: - ![{CLOUD_LONG} new {VPC}](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-vpc-tiger-cloud.png) + ![{CLOUD_LONG} new {VPC}](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-vpc-tiger-console.png) 1. Choose your region and IP range, name your VPC, then click `Create VPC`: - ![Create a new VPC in {CLOUD_LONG}](https://assets.timescale.com/docs/images/tiger-cloud-console/configure-peering-vpc-tiger-cloud.png) + ![Create a new VPC in {CLOUD_LONG}](https://assets.timescale.com/docs/images/tiger-cloud-console/configure-peering-vpc-tiger-console.png) - Your {SERVICE_SHORT} and Peering {VPC} must be in the same AWS region. The number of Peering {VPC}s you can create in your project depends on your [pricing plan][pricing-plans]. If you need another Peering {VPC}, either contact [support@tigerdata.com](mailto:support@tigerdata.com) or change your plan in [{CONSOLE}][console-login]. + Your {SERVICE_SHORT} and Peering {VPC} must be in the same AWS region. The number of Peering {VPC}s you can create in your project depends on your [{PRICING_PLAN}][pricing-plans]. If you need another Peering {VPC}, either contact [support@tigerdata.com](mailto:support@tigerdata.com) or change your plan in [{CONSOLE}][console-login]. 
1. Add a peering connection: 1. In the `VPC Peering` column, click `Add`. 1. Provide your AWS account ID, Transit Gateway ID, CIDR ranges, and AWS region. {CLOUD_LONG} creates a new isolated connection for every unique Transit Gateway ID. - ![Add peering](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-tiger-cloud.png) + ![Add peering](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-tiger-console.png) 1. Click `Add connection`. @@ -29,18 +29,18 @@ import { VPC, CONSOLE, CLOUD_LONG, SERVICE_SHORT, SERVICE_LONG } from '/snippets 1. Configure at least the following in your AWS account networking: - - Your subnet route table to route traffic to your Transit Gateway for the Peering VPC CIDRs. - - Your Transit Gateway route table to route traffic to the newly created Transit Gateway peering attachment for the Peering VPC CIDRs. + - Your subnet route table to route traffic to your Transit Gateway for the Peering {VPC} CIDRs. + - Your Transit Gateway route table to route traffic to the newly created Transit Gateway peering attachment for the Peering {VPC} CIDRs. - Security groups to allow outbound TCP 5432. -1. **Attach a {CLOUD_LONG} service to the Peering VPC In [{CONSOLE}][console-services]** +1. **Attach a {CLOUD_LONG} service to the Peering {VPC} In [{CONSOLE}][console-services]** - 1. Select the {SERVICE_SHORT} you want to connect to the Peering VPC. + 1. Select the {SERVICE_SHORT} you want to connect to the Peering {VPC}. 1. Click `Operations` > `Security` > `VPC`. - 1. Select the VPC, then click `Attach VPC`. + 1. Select the {VPC}, then click `Attach VPC`. You cannot attach a {SERVICE_LONG} to multiple {CLOUD_LONG} {VPC}s at the same time. [console-login]: https://console.cloud.timescale.com/ [console-services]: https://console.cloud.timescale.com/dashboard/services -[pricing-plans]: /about/:currentVersion:/pricing-and-account-management/ +[pricing-plans]: /about/pricing-and-account-management/ diff --git a/snippets/integrations/code/_start-coding-golang.mdx b/snippets/integrations/code/_start-coding-golang.mdx index 596d89c..c978132 100644 --- a/snippets/integrations/code/_start-coding-golang.mdx +++ b/snippets/integrations/code/_start-coding-golang.mdx @@ -1,5 +1,5 @@ import { CLOUD_LONG, TIMESCALE_DB, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; ## Prerequisites diff --git a/snippets/integrations/code/_start-coding-java.mdx b/snippets/integrations/code/_start-coding-java.mdx index f6f403b..b46ce9b 100644 --- a/snippets/integrations/code/_start-coding-java.mdx +++ b/snippets/integrations/code/_start-coding-java.mdx @@ -1,5 +1,5 @@ import { SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; ## Prerequisites diff --git a/snippets/integrations/code/_start-coding-node.mdx b/snippets/integrations/code/_start-coding-node.mdx index e95132e..594bdc7 100644 --- a/snippets/integrations/code/_start-coding-node.mdx +++ b/snippets/integrations/code/_start-coding-node.mdx @@ -1,5 +1,5 @@ import { TIMESCALE_DB, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from 
'/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; ## Prerequisites diff --git a/snippets/integrations/code/_start-coding-python.mdx b/snippets/integrations/code/_start-coding-python.mdx index 69913ca..b4afd62 100644 --- a/snippets/integrations/code/_start-coding-python.mdx +++ b/snippets/integrations/code/_start-coding-python.mdx @@ -1,5 +1,5 @@ import { TIMESCALE_DB, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; ## Prerequisites diff --git a/snippets/integrations/code/_start-coding-ruby.mdx b/snippets/integrations/code/_start-coding-ruby.mdx index 2cbe7b6..5047ee6 100644 --- a/snippets/integrations/code/_start-coding-ruby.mdx +++ b/snippets/integrations/code/_start-coding-ruby.mdx @@ -1,5 +1,5 @@ import { PG, CLOUD_LONG, COMPANY, TIMESCALE_DB, SELF_LONG_CAP } from '/snippets/vars.mdx'; -import IntegrationPrereqs from '/snippets/integrations/_integration-prereqs-cloud-only.mdx'; +import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; ## Prerequisites diff --git a/snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx b/snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx new file mode 100644 index 0000000..9447382 --- /dev/null +++ b/snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx @@ -0,0 +1,16 @@ +import { SERVICE_LONG, CLOUD_LONG } from '/snippets/vars.mdx'; + +## JDBC authentication type is not supported + +When connecting to {SERVICE_LONG} with a Java Database Connectivity (JDBC) driver, you might get this error message: + +```text +Check that your connection definition references your JDBC database with correct URL syntax, +username, and password. The authentication type 10 is not supported. +``` + +Your {CLOUD_LONG} authentication type doesn't match your JDBC driver's supported authentication types. The recommended approach is to upgrade your JDBC driver to a version that supports `scram-sha-256` encryption. If that isn't an option, you can change the authentication type for your {SERVICE_LONG} to `md5`. Note that `md5` is less secure, and is provided solely for compatibility with older clients. + +For information on changing your authentication type, see the documentation on [resetting your service password][password-reset]. + +[password-reset]: /use-timescale/services/service-management/#reset-service-password \ No newline at end of file diff --git a/snippets/prerequisites/_integration-prereqs-cloud-only.mdx b/snippets/prerequisites/_integration-prereqs-cloud-only.mdx index 75116f4..08ed617 100644 --- a/snippets/prerequisites/_integration-prereqs-cloud-only.mdx +++ b/snippets/prerequisites/_integration-prereqs-cloud-only.mdx @@ -3,7 +3,7 @@ import { SERVICE_LONG } from '/snippets/vars.mdx'; To follow the steps on this page: -* Create a target [{SERVICE_LONG}][create-service] with time-series and analytics enabled. +* Create a target [{SERVICE_LONG}][create-service] with the Real-time analytics capability enabled. You need your [connection details][connection-info]. 
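Relating to the JDBC troubleshooting entry above, this is a minimal connection sketch using a current PostgreSQL JDBC driver, which supports `scram-sha-256` in recent releases. The host, port, and password are placeholders, not values from this page:

```java
// Minimal sketch: connect with a recent PostgreSQL JDBC driver.
// Host, port, and password are placeholders.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ConnectionCheck {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://<host>:<port>/tsdb?sslmode=require";
        try (Connection conn = DriverManager.getConnection(url, "tsdbadmin", "<password>");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT version()")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```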
diff --git a/snippets/prerequisites/_integration-prereqs-self-only.mdx b/snippets/prerequisites/_integration-prereqs-self-only.mdx index c4f2b0d..af18a61 100644 --- a/snippets/prerequisites/_integration-prereqs-self-only.mdx +++ b/snippets/prerequisites/_integration-prereqs-self-only.mdx @@ -1,7 +1,8 @@ +import { SELF_LONG } from '/snippets/vars.mdx'; To follow the steps on this page: * Create a target [{SELF_LONG}][enable-timescaledb] instance. -[enable-timescaledb]: /self-hosted//install/ +[enable-timescaledb]: /self-hosted/install diff --git a/snippets/prerequisites/_integration-prereqs.mdx b/snippets/prerequisites/_integration-prereqs.mdx index f821921..5d9505f 100644 --- a/snippets/prerequisites/_integration-prereqs.mdx +++ b/snippets/prerequisites/_integration-prereqs.mdx @@ -3,7 +3,7 @@ import { SELF_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; To follow the steps on this page: -* Create a target [{SERVICE_LONG}][create-service] with time-series and analytics enabled.

+* Create a target [{SERVICE_LONG}][create-service] with the Real-time analytics capability enabled.

You need [your connection details][connection-info]. This procedure also works for [{SELF_LONG}][enable-timescaledb]. diff --git a/snippets/integrations/_livesync-prereqs-cloud.mdx b/snippets/prerequisites/_livesync-prereqs-cloud.mdx similarity index 100% rename from snippets/integrations/_livesync-prereqs-cloud.mdx rename to snippets/prerequisites/_livesync-prereqs-cloud.mdx diff --git a/snippets/integrations/_livesync-prereqs-terminal.mdx b/snippets/prerequisites/_livesync-prereqs-terminal.mdx similarity index 100% rename from snippets/integrations/_livesync-prereqs-terminal.mdx rename to snippets/prerequisites/_livesync-prereqs-terminal.mdx diff --git a/snippets/prerequisites/_migrate-import-prerequisites.mdx b/snippets/prerequisites/_migrate-import-prerequisites.mdx new file mode 100644 index 0000000..60b5b0e --- /dev/null +++ b/snippets/prerequisites/_migrate-import-prerequisites.mdx @@ -0,0 +1,22 @@ +import { SERVICE_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; + +Best practice is to use an [Ubuntu EC2 instance][create-ec2-instance] hosted in the same region as your +{SERVICE_LONG} as a migration machine. That is, the machine you run the commands on to move your +data from your source database to your target {SERVICE_LONG}. + +Before you migrate your data: + +- Create a target [{SERVICE_LONG}][created-a-database-service-in-timescale]. + + Each {SERVICE_LONG} has a single database that supports the + [most popular extensions][all-available-extensions]. {SERVICE_LONG}s do not support tablespaces, + and there is no superuser associated with a {SERVICE_SHORT}. + Best practice is to create a {SERVICE_LONG} with at least 8 CPUs for a smoother experience. A higher-spec instance + can significantly reduce the overall migration window. + +- To ensure that maintenance does not run during the process, [adjust the maintenance window][adjust-maintenance-window]. + +[created-a-database-service-in-timescale]: /cloud/get-started/create-services +[all-available-extensions]: /use-timescale/extensions +[create-ec2-instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html#ec2-launch-instance +[adjust-maintenance-window]: /use-timescale/upgrades/#adjusting-your-maintenance-window \ No newline at end of file diff --git a/snippets/procedures/_migrate-import-setup-connection-strings.mdx b/snippets/procedures/_migrate-import-setup-connection-strings.mdx new file mode 100644 index 0000000..5253a1e --- /dev/null +++ b/snippets/procedures/_migrate-import-setup-connection-strings.mdx @@ -0,0 +1,12 @@ +import { SERVICE_LONG } from '/snippets/vars.mdx'; + +This variable holds the connection information for the target {SERVICE_LONG}. + +In the terminal on the source machine, set the following: + +```bash +export TARGET=postgres://tsdbadmin:@:/tsdb?sslmode=require +``` +See where to [find your connection details][connection-info]. 
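As an optional sanity check before running any migration commands, confirm that the `$TARGET` string connects. This assumes `psql` is installed on the migration machine:

```bash
# Optional: verify the target connection string works (assumes psql is installed).
psql "$TARGET" -c "SELECT version();"
```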
+ +[connection-info]: /integrations/find-connection-details \ No newline at end of file From af92b5b83699b6f5c0d24bd76e50e8bf297a607b Mon Sep 17 00:00:00 2001 From: atovpeko Date: Mon, 10 Nov 2025 10:20:17 +0200 Subject: [PATCH 03/13] add supabase logo --- integrations/integrations.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integrations/integrations.mdx b/integrations/integrations.mdx index e5154db..970e658 100644 --- a/integrations/integrations.mdx +++ b/integrations/integrations.mdx @@ -10,7 +10,7 @@ import { SERVICE_LONG, PG, COMPANY, CLOUD_LONG } from '/snippets/vars.mdx'; - A {SERVICE_LONG} is a {PG} database instance extended by {COMPANY} with custom capabilities. This means that any third-party solution that you can integrate with {PG}, you can also integrate with {CLOUD_LONG}. See the full list of {PG} integrations here. + A {SERVICE_LONG} is a {PG} database instance extended by {COMPANY} with custom capabilities. This means that any third-party solution that you can integrate with {PG}, you can also integrate with {CLOUD_LONG}. See the full list of available {PG} integrations. @@ -674,7 +674,7 @@ import { SERVICE_LONG, PG, COMPANY, CLOUD_LONG } from '/snippets/vars.mdx';
Build applications with an open-source Firebase alternative powered by Postgres. From 764ef043126e134971e06e1a9a29a05ac8ff1f35 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Mon, 10 Nov 2025 10:58:12 +0200 Subject: [PATCH 04/13] add keywords --- integrations/code/start-coding-with-tigerdata.mdx | 1 + integrations/connectors/destination/tigerlake.mdx | 1 + integrations/connectors/source/stream-from-kafka.mdx | 1 + integrations/connectors/source/sync-from-postgres.mdx | 1 + integrations/connectors/source/sync-from-s3.mdx | 1 + integrations/find-connection-details.mdx | 1 + integrations/integrate/amazon-sagemaker.mdx | 1 + integrations/integrate/apache-airflow.mdx | 1 + integrations/integrate/apache-kafka.mdx | 1 + integrations/integrate/aws-lambda.mdx | 1 + integrations/integrate/aws.mdx | 1 + integrations/integrate/azure-data-studio.mdx | 1 + integrations/integrate/cloudwatch.mdx | 1 + integrations/integrate/corporate-data-center.mdx | 1 + integrations/integrate/datadog.mdx | 1 + integrations/integrate/dbeaver.mdx | 1 + integrations/integrate/debezium.mdx | 1 + integrations/integrate/decodable.mdx | 1 + integrations/integrate/fivetran.mdx | 1 + integrations/integrate/google-cloud.mdx | 1 + integrations/integrate/grafana.mdx | 1 + integrations/integrate/kubernetes.mdx | 1 + integrations/integrate/microsoft-azure.mdx | 1 + integrations/integrate/pgadmin.mdx | 1 + integrations/integrate/postgresql.mdx | 1 + integrations/integrate/power-bi.mdx | 1 + integrations/integrate/prometheus.mdx | 1 + integrations/integrate/psql.mdx | 1 + integrations/integrate/qstudio.mdx | 1 + integrations/integrate/supabase.mdx | 1 + integrations/integrate/tableau.mdx | 1 + integrations/integrate/telegraf.mdx | 1 + integrations/integrate/terraform.mdx | 1 + integrations/integrations.mdx | 3 +-- integrations/troubleshooting.mdx | 1 + 35 files changed, 35 insertions(+), 2 deletions(-) diff --git a/integrations/code/start-coding-with-tigerdata.mdx b/integrations/code/start-coding-with-tigerdata.mdx index 013f93a..613ed90 100644 --- a/integrations/code/start-coding-with-tigerdata.mdx +++ b/integrations/code/start-coding-with-tigerdata.mdx @@ -1,6 +1,7 @@ --- title: Start coding with Tiger Data description: Integrate Tiger Cloud with your app using your preferred programming language. Connect to a service, create and manage hypertables, then ingest and query data +keywords: [coding, programming, SDKs, client libraries, Python, Node.js, Java, Ruby, Golang, database drivers, application integration] --- import StartCodingRuby from '/snippets/integrations/code/_start-coding-ruby.mdx'; diff --git a/integrations/connectors/destination/tigerlake.mdx b/integrations/connectors/destination/tigerlake.mdx index 326d7c9..d3a21fc 100644 --- a/integrations/connectors/destination/tigerlake.mdx +++ b/integrations/connectors/destination/tigerlake.mdx @@ -1,6 +1,7 @@ --- title: Integrate with data lakes description: Unifies the Tiger Cloud operational architecture with data lake architectures. This enables real-time application building alongside efficient data pipeline management within a single system. 
+keywords: [Tiger Lake, data lake, destination connector, Iceberg, S3 Tables, Amazon S3, synchronization, hypertables, real-time, ETL] --- import { LAKE_LONG, LAKE_SHORT, SERVICE_SHORT, HYPERTABLE, HYPERTABLE_CAP, CONSOLE, PG } from '/snippets/vars.mdx'; diff --git a/integrations/connectors/source/stream-from-kafka.mdx b/integrations/connectors/source/stream-from-kafka.mdx index 34b21b4..303eb25 100644 --- a/integrations/connectors/source/stream-from-kafka.mdx +++ b/integrations/connectors/source/stream-from-kafka.mdx @@ -1,6 +1,7 @@ --- title: Stream from Kafka description: Stream data from Kafka into a Tiger Cloud service in order to store, query, and analyze your Kafka events efficiently +keywords: [Kafka, source connector, streaming, Confluent Cloud, SASL/SCRAM, Avro, Schema Registry, event streaming, real-time ingestion] --- import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/connectors/source/sync-from-postgres.mdx b/integrations/connectors/source/sync-from-postgres.mdx index c770645..db689dc 100644 --- a/integrations/connectors/source/sync-from-postgres.mdx +++ b/integrations/connectors/source/sync-from-postgres.mdx @@ -1,6 +1,7 @@ --- title: Sync from Postgres description: Synchronize updates to your primary Postgres database with the corresponding Tiger Cloud service in real time +keywords: [PostgreSQL, source connector, logical replication, synchronization, real-time replication, data sync, migration, CDC, change data capture] --- import { HYPERTABLE, CONSOLE } from '/snippets/vars.mdx'; diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index 6e1cf85..18d7584 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -1,6 +1,7 @@ --- title: Sync from S3 description: Synchronize data from S3 to Tiger Cloud service in real time +keywords: [S3, source connector, Amazon S3, CSV, Parquet, file sync, data ingestion, real-time sync, ETL, batch import] --- import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/find-connection-details.mdx b/integrations/find-connection-details.mdx index 477c1b4..8b62a10 100644 --- a/integrations/find-connection-details.mdx +++ b/integrations/find-connection-details.mdx @@ -1,6 +1,7 @@ --- title: Find your connection details description: You connect to Tiger Cloud, self-hosted TimescaleDB, or MST using your connection details +keywords: [connection, credentials, hostname, port, username, password, database, authentication, connection string, service ID] --- import { SERVICE_LONG, SELF_LONG, SELF_LONG_CAP, SERVICE_SHORT, CONSOLE, PROJECT_LONG, PROJECT_SHORT, PROJECT_SHORT_CAP, CLOUD_LONG, PG, MST_CONSOLE_LONG, MST_LONG, MST_SERVICE_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index 73f9cfd..47382ce 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -2,6 +2,7 @@ title: Integrate Amazon Sagemaker with Tiger sidebarTitle: Amazon SageMaker description: Amazon SageMaker is a fully managed machine learning service. 
Integrate Amazon SageMaker with Tiger Cloud to store and analyze ML model data +keywords: [Amazon SageMaker, machine learning, ML, AI, model training, model deployment, data science, AWS, ML models, predictions] --- import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/apache-airflow.mdx b/integrations/integrate/apache-airflow.mdx index 8652190..79a9c16 100644 --- a/integrations/integrate/apache-airflow.mdx +++ b/integrations/integrate/apache-airflow.mdx @@ -2,6 +2,7 @@ title: Integrate Apache Airflow with Tiger sidebarTitle: Apache Airflow description: Apache Airflow is a platform to programmatically author, schedule, and monitor workflows. Integrate Apache Airflow with Tiger Cloud and create a data pipeline +keywords: [Apache Airflow, workflow orchestration, DAG, data pipeline, scheduling, task automation, ETL, Python, workflow management] --- import { CLOUD_LONG, PG, CONSOLE } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index efc67b1..c458cc1 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -2,6 +2,7 @@ title: Integrate Apache Kafka with Tiger Cloud sidebarTitle: Apache Kafka description: Apache Kafka is a distributed event streaming platform used for high-performance data pipelines. Learn how to integrate Apache Kafka with Tiger Cloud to manage and analyze streaming data +keywords: [Apache Kafka, Kafka Connect, event streaming, data pipelines, real-time streaming, distributed systems, message broker, producers, consumers] --- import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/aws-lambda.mdx b/integrations/integrate/aws-lambda.mdx index d0e8d00..44eeb4c 100644 --- a/integrations/integrate/aws-lambda.mdx +++ b/integrations/integrate/aws-lambda.mdx @@ -2,6 +2,7 @@ title: Integrate AWS Lambda with Tiger Cloud sidebarTitle: AWS Lambda description: With AWS Lambda, you can run code without provisioning or managing servers, and scale automatically. Integrate AWS Lambda with Tiger Cloud and inject data into your service +keywords: [AWS Lambda, serverless, function as a service, FaaS, AWS, event-driven, auto-scaling, cloud functions, serverless computing] --- import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/aws.mdx b/integrations/integrate/aws.mdx index 966324e..783b5ba 100644 --- a/integrations/integrate/aws.mdx +++ b/integrations/integrate/aws.mdx @@ -2,6 +2,7 @@ title: Integrate Amazon Web Services with Tiger Cloud sidebarTitle: AWS description: AWS enables you to build, run, and manage applications across cloud, hybrid, and edge environments with AI, analytics, security, and scalable infrastructure. 
Integrate AWS with Tiger Cloud using AWS Transit Gateway +keywords: [AWS, Amazon Web Services, Transit Gateway, VPC, cloud connectivity, network integration, hybrid cloud, secure connection] --- import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index 3ecf44b..5e75259 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -2,6 +2,7 @@ title: Integrate Azure Data Studio with Tiger Cloud sidebarTitle: Azure Data Studio description: Azure Data Studio is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. Integrate Azure Data Studio with Tiger Cloud +keywords: [Azure Data Studio, SQL editor, database management, query tool, cross-platform, Microsoft, data analytics, PostgreSQL client] --- import { PG, CLOUD_LONG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/cloudwatch.mdx b/integrations/integrate/cloudwatch.mdx index 5958aa9..e155d08 100644 --- a/integrations/integrate/cloudwatch.mdx +++ b/integrations/integrate/cloudwatch.mdx @@ -2,6 +2,7 @@ title: Integrate Amazon CloudWatch with Tiger Cloud sidebarTitle: Amazon CloudWatch description: Amazon CloudWatch is a monitoring and observability service. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Amazon CloudWatch +keywords: [Amazon CloudWatch, monitoring, observability, metrics, AWS, telemetry, logs, alarms, dashboards, resource monitoring] --- import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; diff --git a/integrations/integrate/corporate-data-center.mdx b/integrations/integrate/corporate-data-center.mdx index e102946..c568bb9 100644 --- a/integrations/integrate/corporate-data-center.mdx +++ b/integrations/integrate/corporate-data-center.mdx @@ -2,6 +2,7 @@ title: Integrate your data center with Tiger Cloud sidebarTitle: Corporate data center description: Integrate your on-premise data center with Tiger Cloud using AWS Transit Gateway +keywords: [corporate data center, on-premise, hybrid cloud, Transit Gateway, VPN, secure connection, network integration, enterprise connectivity] --- import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index ee9ccd5..cfa2a37 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -2,6 +2,7 @@ title: Integrate Datadog with Tiger Cloud sidebarTitle: Datadog description: Datadog is a cloud-based monitoring and analytics platform. Export telemetry data from your Tiger Cloud service with time-series and analytics capability to Datadog +keywords: [Datadog, monitoring, observability, APM, metrics, logs, traces, dashboards, alerting, infrastructure monitoring] --- import { PG, SELF_LONG, SCALE, ENTERPRISE, PRICING_PLAN } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index 1c669b6..a38cdf4 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -2,6 +2,7 @@ title: Integrate DBeaver with Tiger Cloud sidebarTitle: DBeaver description: DBeaver is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. 
Integrate DBeaver with Tiger Cloud +keywords: [DBeaver, database tool, SQL editor, database administration, cross-platform, query tool, data migration, PostgreSQL client] --- import { CLOUD_LONG, SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/debezium.mdx b/integrations/integrate/debezium.mdx index e89b63f..65b3f03 100644 --- a/integrations/integrate/debezium.mdx +++ b/integrations/integrate/debezium.mdx @@ -2,6 +2,7 @@ title: Integrate Debezium with Tiger Cloud sidebarTitle: Debezium description: Integrate Debezium with Tiger Cloud to enable change data capture in your Tiger Cloud service and streaming to Redis Streams +keywords: [Debezium, change data capture, CDC, data replication, streaming, real-time sync, database events, Kafka, log-based replication] --- import { CLOUD_LONG, TIMESCALE_DB, SELF_LONG_CAP, SERVICE_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, COLUMNSTORE, ROWSTORE } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index b286840..5f43fd1 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -2,6 +2,7 @@ title: Integrate Decodable with Tiger Cloud sidebarTitle: Decodable description: Decodable enables you to build, run, and manage data pipelines effortlessly. Seamlessly integrate Decodable with Tiger Cloud to unlock real-time data processing capabilities +keywords: [Decodable, data pipelines, real-time processing, stream processing, ETL, data transformation, Apache Flink, event-driven] --- import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index fc3a864..b5a574a 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -2,6 +2,7 @@ title: Integrate Fivetran with Tiger Cloud sidebarTitle: Fivetran description: Fivetran is a fully managed data pipeline platform that simplifies extract, transform, and load processes. Integrate Fivetran with Tiger Cloud for seamless data synchronization +keywords: [Fivetran, ETL, ELT, data pipeline, data integration, data synchronization, managed service, data ingestion, connectors] --- import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/google-cloud.mdx b/integrations/integrate/google-cloud.mdx index 70b015f..60c5060 100644 --- a/integrations/integrate/google-cloud.mdx +++ b/integrations/integrate/google-cloud.mdx @@ -2,6 +2,7 @@ title: Integrate Google Cloud with Tiger Cloud sidebarTitle: Google Cloud description: Google Cloud enables you to deploy, manage, and scale cloud-based applications, databases, and data processing workflows. Integrate Google Cloud with Tiger Cloud using AWS Transit Gateway +keywords: [Google Cloud, GCP, Transit Gateway, VPN, cloud connectivity, network integration, hybrid cloud, multi-cloud, secure connection] --- import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; diff --git a/integrations/integrate/grafana.mdx b/integrations/integrate/grafana.mdx index 92810e7..7acae2f 100644 --- a/integrations/integrate/grafana.mdx +++ b/integrations/integrate/grafana.mdx @@ -2,6 +2,7 @@ title: Integrate Grafana with Tiger Cloud sidebarTitle: Grafana description: Grafana enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. 
Integrate Grafana with Tiger +keywords: [Grafana, visualization, dashboards, metrics, monitoring, time-series, charts, alerting, data exploration, BI] --- import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; diff --git a/integrations/integrate/kubernetes.mdx b/integrations/integrate/kubernetes.mdx index 270cb6b..cac2673 100644 --- a/integrations/integrate/kubernetes.mdx +++ b/integrations/integrate/kubernetes.mdx @@ -2,6 +2,7 @@ title: Integrate Kubernetes with Tiger sidebarTitle: Kubernetes description: Learn how to integrate Kubernetes with Tiger Cloud to enable seamless deployment and scaling of your Postgres workloads +keywords: [Kubernetes, K8s, container orchestration, deployment, scaling, cloud native, microservices, containerization, pods] --- import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/microsoft-azure.mdx b/integrations/integrate/microsoft-azure.mdx index 0778005..d87ea83 100644 --- a/integrations/integrate/microsoft-azure.mdx +++ b/integrations/integrate/microsoft-azure.mdx @@ -2,6 +2,7 @@ title: Integrate Microsoft Azure with Tiger Cloud sidebarTitle: Microsoft Azure description: Microsoft Azure enables you to build, deploy, and manage applications across cloud, hybrid, and edge environments. Integrate Microsoft Azure with Tiger Cloud using AWS Transit Gateway +keywords: [Microsoft Azure, Azure, Transit Gateway, VPN, cloud connectivity, network integration, hybrid cloud, secure connection] --- import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index 425fc80..b88ee7b 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -2,6 +2,7 @@ title: Integrate pgAdmin with Tiger Cloud sidebarTitle: pgAdmin description: pgAdmin is a feature-rich open-source administration and development platform for PostgreSQL. 
Integrate pgadmin with Tiger Cloud +keywords: [pgAdmin, PostgreSQL, database administration, open-source, query tool, database management, GUI, SQL editor] --- import { PG, CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/postgresql.mdx b/integrations/integrate/postgresql.mdx index 3ea0816..c46e29b 100644 --- a/integrations/integrate/postgresql.mdx +++ b/integrations/integrate/postgresql.mdx @@ -2,6 +2,7 @@ title: Integrate with PostgreSQL sidebarTitle: PostgreSQL description: Query any other Postgres database or another Tiger Cloud service from your service by using Postgres foreign data wrappers +keywords: [PostgreSQL, foreign data wrappers, FDW, cross-database queries, federated queries, external data, postgres_fdw, data integration] --- import FDW from '/snippets/integrations/_foreign-data-wrappers.mdx'; diff --git a/integrations/integrate/power-bi.mdx b/integrations/integrate/power-bi.mdx index f930609..e71cc47 100644 --- a/integrations/integrate/power-bi.mdx +++ b/integrations/integrate/power-bi.mdx @@ -2,6 +2,7 @@ title: Integrate Power BI with Tiger Cloud sidebarTitle: Power BI description: Integrate Power BI with Tiger Cloud +keywords: [Power BI, business intelligence, BI, data visualization, dashboards, reports, analytics, Microsoft, data analysis] --- import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/prometheus.mdx b/integrations/integrate/prometheus.mdx index 93a50bc..e306e75 100644 --- a/integrations/integrate/prometheus.mdx +++ b/integrations/integrate/prometheus.mdx @@ -2,6 +2,7 @@ title: Integrate Prometheus with Tiger Cloud sidebarTitle: Prometheus description: Prometheus is an open-source monitoring system with a modern alerting approach. Export telemetry metrics from your Tiger Cloud service to Prometheus +keywords: [Prometheus, monitoring, metrics, time-series, alerting, open-source, PromQL, exporters, service discovery, pull-based monitoring] --- import PrometheusIntegrate from '/snippets/integrations/_prometheus-integrate.mdx'; diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx index 0c3d6cc..f867fd6 100644 --- a/integrations/integrate/psql.mdx +++ b/integrations/integrate/psql.mdx @@ -2,6 +2,7 @@ title: Connect to a Tiger Cloud service with psql sidebarTitle: psql description: psql enables you to type in queries interactively, issue them to Postgres, and see the query results. Connect to your Tiger Cloud service using psql +keywords: [psql, command line, terminal, PostgreSQL client, CLI, interactive shell, SQL queries, database connection] --- import { PG, SERVICE_SHORT, COMPANY } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx index 3ef8781..a450387 100644 --- a/integrations/integrate/qstudio.mdx +++ b/integrations/integrate/qstudio.mdx @@ -2,6 +2,7 @@ title: Integrate qStudio with Tiger Cloud sidebarTitle: qStudio description: qStudio is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. 
Integrate qStudio with Tiger Cloud +keywords: [qStudio, SQL editor, query tool, syntax highlighting, code completion, charting, data visualization, database client] --- import { CLOUD_LONG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/supabase.mdx index 6bd06c4..f09c383 100644 --- a/integrations/integrate/supabase.mdx +++ b/integrations/integrate/supabase.mdx @@ -2,6 +2,7 @@ title: Integrate Supabase with Tiger Cloud sidebarTitle: Supabase description: Supabase is an open source Firebase alternative. Integrate Supabase with Tiger Cloud +keywords: [Supabase, Firebase alternative, backend as a service, BaaS, PostgreSQL, real-time, authentication, open-source, database management] --- import { PG, CLOUD_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, HYPERCORE, CAGG_CAP } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/tableau.mdx index 97e6341..de62e71 100644 --- a/integrations/integrate/tableau.mdx +++ b/integrations/integrate/tableau.mdx @@ -2,6 +2,7 @@ title: Integrate Tableau and Tiger Cloud sidebarTitle: Tableau description: Integrate Tableau with Tiger Cloud +keywords: [Tableau, business intelligence, BI, data visualization, dashboards, analytics, data exploration, interactive reports] --- import { CLOUD_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/telegraf.mdx index 1cd7457..4ce295d 100644 --- a/integrations/integrate/telegraf.mdx +++ b/integrations/integrate/telegraf.mdx @@ -2,6 +2,7 @@ title: Ingest data using Telegraf sidebarTitle: Telegraf description: Ingest data into a Tiger Cloud service using the Telegraf plugin +keywords: [Telegraf, data ingestion, metrics collection, InfluxData, plugins, IoT, systems monitoring, agent, time-series data] --- import { PG, HYPERTABLE, TIMESCALE_DB } from '/snippets/vars.mdx'; diff --git a/integrations/integrate/terraform.mdx index 3e664b3..4c61ad7 100644 --- a/integrations/integrate/terraform.mdx +++ b/integrations/integrate/terraform.mdx @@ -2,6 +2,7 @@ title: Integrate Terraform with Tiger Cloud sidebarTitle: Terraform description: Manage your Tiger Cloud services with a Terraform provider +keywords: [Terraform, infrastructure as code, IaC, provisioning, automation, deployment, configuration management, HashiCorp] --- import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG } from '/snippets/vars.mdx'; diff --git a/integrations/integrations.mdx index 970e658..12308ca 100644 --- a/integrations/integrations.mdx +++ b/integrations/integrations.mdx @@ -1,8 +1,7 @@ --- title: Integrations description: You can integrate your {SERVICE_LONG} with third-party solutions to expand and extend what you can do with your data. 
-products: [cloud, mst, self_hosted] -keywords: [IoT, simulate] +keywords: [integrations, connectors, third-party, PostgreSQL, tools, data engineering, ETL, observability, BI, visualization] mode: "wide" --- diff --git a/integrations/troubleshooting.mdx b/integrations/troubleshooting.mdx index 878fb0b..40c2806 100644 --- a/integrations/troubleshooting.mdx +++ b/integrations/troubleshooting.mdx @@ -1,6 +1,7 @@ --- title: Troubleshooting description: Troubleshoot common problems that occur when integrating Tiger Cloud services with third-party solutions +keywords: [troubleshooting, errors, problems, issues, solutions, debugging, integration issues, connection problems] --- import JdbcAuthenticationNotSupported from '/snippets/integrations/troubleshooting/_jdbc-authentication-not-supported.mdx'; From f5364daf7815dcc9d88dfe0f1f292dff4d7e90b5 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Tue, 11 Nov 2025 12:23:44 +0200 Subject: [PATCH 05/13] draft --- README.md | 3 +- integrations/integrate/amazon-sagemaker.mdx | 8 ++-- integrations/integrate/apache-kafka.mdx | 2 +- snippets/vars.mdx | 52 ++++++++++++++------- 4 files changed, 41 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 5e06fe4..5588741 100644 --- a/README.md +++ b/README.md @@ -61,5 +61,4 @@ Tiger Data is Postgres made powerful. To learn more about the company and its pr [docs-issues]: https://github.com/timescale/docs/issues [github-fork]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo [github-clone]: https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository -[gatsby]: https://www.gatsbyjs.com/ - \ No newline at end of file +[gatsby]: https://www.gatsbyjs.com/ \ No newline at end of file diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index 47382ce..e4a42ad 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -5,7 +5,7 @@ description: Amazon SageMaker is a fully managed machine learning service. Integ keywords: [Amazon SageMaker, machine learning, ML, AI, model training, model deployment, data science, AWS, ML models, predictions] --- -import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; @@ -29,10 +29,10 @@ Create a table in {SERVICE_LONG} to store model predictions generated by SageMak For {CLOUD_LONG}, open an [SQL editor][run-queries] in [{CONSOLE}][console]. For {SELF_LONG}, use [`psql`][psql]. -2. **For better performance and easier real-time analytics, create a hypertable** +2. **For better performance and easier real-time analytics, create a {HYPERTABLE}** - [Hypertables][about-hypertables] are {PG} tables that automatically partition your data by time. You interact - with hypertables in the same way as regular {PG} tables, but with extra features that makes managing your + [{HYPERTABLE_CAP}s][about-hypertables] are {PG} tables that automatically partition your data by time. You interact + with {HYPERTABLE}s in the same way as regular {PG} tables, but with extra features that makes managing your time-series data much easier. 
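Purely as an illustrative sketch, and not this page's actual example (the table and column names here are hypothetical), a {HYPERTABLE} for prediction results could be created like this:

```sql
-- Hypothetical example: a table of model predictions partitioned by time.
CREATE TABLE predictions (
    time        TIMESTAMPTZ NOT NULL,
    model_name  TEXT        NOT NULL,
    prediction  DOUBLE PRECISION
);

-- Convert the regular table into a hypertable on the time column.
SELECT create_hypertable('predictions', 'time');
```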
```sql diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index c458cc1..b6018c5 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -74,7 +74,7 @@ To prepare your {SERVICE_LONG} for Kafka integration: 1. **[Connect][connect] to your {SERVICE_LONG}** -2. **Create a hypertable to ingest Kafka events** +2. **Create a {HYPERTABLE} to ingest Kafka events** ```sql CREATE TABLE accounts ( diff --git a/snippets/vars.mdx b/snippets/vars.mdx index fe0b6d7..028445b 100644 --- a/snippets/vars.mdx +++ b/snippets/vars.mdx @@ -1,46 +1,57 @@ +// General export const PRODUCT_PREFIX = 'Tiger'; -export const COMPANY = 'Tiger Data '; +export const COMPANY = 'Tiger Data'; export const COMPANY_URL = 'https://www.tigerdata.com'; export const PG = 'Postgres'; -export const products = { CLOUD: 'Tiger Cloud', TSDB: 'TimescaleDB', PG: 'Postgres', TGPG: 'Tiger Postgres', MST: 'MST', SELF: 'self-hosted TimescaleDB' }; -export const TIGER_POSTGRES = 'Tiger Postgres'; -export const SERVICE_LONG = 'Tiger Cloud service'; - +// Pricing export const PRICING_PLAN_CAP = 'Pricing plan'; export const PRICING_PLAN = 'pricing plan'; export const SCALE = 'Scale'; export const PERFORMANCE = 'Performance'; export const ENTERPRISE = 'Enterprise'; +export const FREE = 'Free'; +// Products export const CLOUD_LONG = 'Tiger Cloud'; export const LAKE_LONG = 'Tiger Lake'; export const LAKE_SHORT = 'Tiger Lake'; export const TIMESCALE_DB = 'TimescaleDB'; -export const PRODUCTS_ALL = 'Tiger Data products' -export const PRODUCTS_CL_DB = 'Tiger Cloud and TimescaleDB' +export const PRODUCTS_ALL = 'Tiger Data products'; +export const PRODUCTS_CL_DB = 'Tiger Cloud and TimescaleDB'; export const TDB_APACHE = 'TimescaleDB Apache 2 Edition'; export const TDB_COMMUNITY = 'TimescaleDB Community Edition'; export const SELF_SHORT_CAP = 'Self-hosted'; export const SELF_SHORT = 'self-hosted'; export const SELF_LONG_CAP = 'Self-hosted TimescaleDB'; export const SELF_LONG = 'self-hosted TimescaleDB'; -export const CONSOLE = 'Tiger Console'; -export const CONSOLE_LONG = 'Tiger Console'; +export const CONSOLE = 'Tiger Cloud Console'; +export const CONSOLE_LONG = 'Tiger Cloud Console'; export const CONSOLE_SHORT = 'Console'; +export const CLI_LONG = 'Tiger CLI'; +export const CLI_SHORT = 'CLI'; +export const REST_LONG = 'Tiger REST API'; +export const REST_SHORT = 'REST API'; +export const MCP_LONG = 'Tiger MCP'; +export const MCP_SHORT = 'Tiger MCP'; +export const AGENTS_LONG = 'Tiger Agents for Work'; +export const AGENTS_SHORT = 'Tiger Agent'; +export const AGENTS_CLI = 'Tiger Agent CLI'; +export const EON_SHORT = 'Eon'; +export const EON_LONG = 'Tiger Eon'; export const CONSOLE_URL = 'https://console.cloud.timescale.com/'; export const MST_LONG = 'Managed Service for TimescaleDB'; export const MST_SHORT = 'MST'; -export const MST_CONSOLE_LONG = 'MST Console'; +export const MST_CONSOLE_LONG = 'Managed Service for TimescaleDB Console'; export const MST_CONSOLE_SHORT = 'MST Console'; export const MST_CONSOLE_URL = 'https://portal.managed.timescale.com/'; export const POPSQL = 'PopSQL'; export const SQL_EDITOR = 'SQL editor'; -export const CLOUD_EDITOR = 'Cloud SQL editor'; +export const CLOUD_EDITOR = 'Tiger Cloud SQL editor'; export const SQL_ASSISTANT_LONG = 'Tiger Cloud SQL assistant'; export const SQL_ASSISTANT_SHORT = 'SQL assistant'; export const POPSQL_URL = 'https://popsql.com/'; @@ -48,11 +59,12 @@ export const SKIPSCAN_LONG = 'Tiger Data SkipScan'; export 
const SKIPSCAN_SHORT = 'SkipScan'; export const TOOLKIT_LONG = 'TimescaleDB Toolkit'; export const TOOLKIT_SHORT = 'Toolkit'; -export const PGAI_LONG = 'pgai on Tiger Cloud'; +export const PGAI_LONG = 'pgai on Tiger Data'; export const PGAI_SHORT = 'pgai'; export const PGVECTORSCALE = 'pgvectorscale'; export const PG_SPOT = 'pgspot'; +// Projects export const PROJECT_LONG = 'Tiger Cloud project'; export const PROJECT_SHORT = 'project'; @@ -60,13 +72,16 @@ export const PROJECT_SHORT_CAP = 'Project'; export const ACCOUNT_LONG = 'Tiger Cloud account'; export const ACCOUNT_SHORT = 'account'; +// Services - +export const TIGER_POSTGRES = 'Tiger Cloud'; +export const SERVICE_LONG = 'Tiger Cloud service'; export const SERVICE_SHORT_CAP = 'Service'; export const SERVICE_SHORT = 'service'; -export const MST_SERVICE_LONG = 'MST service'; +export const MST_SERVICE_LONG = 'Managed Service for TimescaleDB service'; export const MST_SERVICE_SHORT = 'MST service'; +// Features export const HYPERTABLE_CAP = 'Hypertable'; export const HYPERTABLE = 'hypertable'; @@ -104,11 +119,14 @@ export const DATA_MODE_CAP = 'Data mode'; export const DATA_MODE = 'data mode'; export const VPC = 'VPC'; export const IO_BOOST = 'I/O boost'; -export const LIVESYNC = 'livesync'; -export const LIVESYNC_CAP = 'Livesync'; +export const PG_CONNECTOR = 'source Postgres connector'; +export const PG_CONNECTOR_CAP = 'Source Postgres connector'; +export const S3_CONNECTOR = 'source S3 connector'; +export const S3_CONNECTOR_CAP = 'Source S3 connector'; + +// URLS export const WEBSITE_MARKETING = 'www.tigerdata.com'; export const WEBSITE_DOCS = 'docs.tigerdata.com/'; export const CONTACT_SALES = 'sales@tigerdata.com'; export const CONTACT_COMPANY = 'https://www.tigerdata.com/contact/'; - From 0757ff837f5266b92326fa049bc3dcae555cc741 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Tue, 11 Nov 2025 13:17:37 +0200 Subject: [PATCH 06/13] last updates --- integrations/connectors/source/stream-from-kafka.mdx | 3 ++- integrations/connectors/source/sync-from-postgres.mdx | 9 +++++---- integrations/connectors/source/sync-from-s3.mdx | 3 ++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/integrations/connectors/source/stream-from-kafka.mdx b/integrations/connectors/source/stream-from-kafka.mdx index 303eb25..d7d7673 100644 --- a/integrations/connectors/source/stream-from-kafka.mdx +++ b/integrations/connectors/source/stream-from-kafka.mdx @@ -4,9 +4,10 @@ description: Stream data from Kafka into a Tiger Cloud service in order to store keywords: [Kafka, source connector, streaming, Confluent Cloud, SASL/SCRAM, Avro, Schema Registry, event streaming, real-time ingestion] --- -import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; + import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; +import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; Early access diff --git a/integrations/connectors/source/sync-from-postgres.mdx b/integrations/connectors/source/sync-from-postgres.mdx index db689dc..430bcba 100644 --- a/integrations/connectors/source/sync-from-postgres.mdx +++ b/integrations/connectors/source/sync-from-postgres.mdx @@ -4,25 +4,26 @@ description: Synchronize updates to your primary Postgres database with the corr keywords: [PostgreSQL, source connector, logical replication, synchronization, real-time replication, data sync, migration, CDC, change data capture] --- 
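As a usage sketch of how the `vars.mdx` exports above are consumed (the prose here is hypothetical), an MDX page imports the constants it needs and interpolates them in the body:

```mdx
import { CLOUD_LONG, SERVICE_LONG, PG, PG_CONNECTOR } from '/snippets/vars.mdx';

Use the {PG_CONNECTOR} to replicate a {PG} database into your {SERVICE_LONG} on {CLOUD_LONG}.
```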
-import { HYPERTABLE, CONSOLE } from '/snippets/vars.mdx'; + import LivesyncPrereqsCloud from '/snippets/prerequisites/_livesync-prereqs-cloud.mdx'; import LivesyncPrereqsTerminal from '/snippets/prerequisites/_livesync-prereqs-terminal.mdx'; import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; +import { HYPERTABLE, CONSOLE, PG_CONNECTOR, SERVICE_SHORT } from '/snippets/vars.mdx'; Early access -You use the {PG} connector to synchronize all data or specific tables from a {PG} database instance to your +You use the {PG_CONNECTOR} to synchronize all data or specific tables from a {PG} database instance to your {SERVICE_SHORT}, in real time. You run the connector continuously, turning {PG} into a primary database with your {SERVICE_SHORT} as a logical replica. This enables you to leverage real-time analytics capabilities on your replica data. ![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) -The {PG} connector leverages the well-established {PG} logical replication protocol. By relying on this protocol, +The {PG_CONNECTOR} leverages the well-established {PG} logical replication protocol. By relying on this protocol, it ensures compatibility, familiarity, and a broader knowledge base—making it easier for you to adopt the connector and integrate your data. -You use the {PG} connector for data synchronization, rather than migration. This includes: +You use the {PG_CONNECTOR} for data synchronization, rather than migration. This includes: * Copy existing data from a {PG} instance: - Copy data at up to 150 GB/hr. diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index 18d7584..52f14ee 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -4,9 +4,10 @@ description: Synchronize data from S3 to Tiger Cloud service in real time keywords: [S3, source connector, Amazon S3, CSV, Parquet, file sync, data ingestion, real-time sync, ETL, batch import] --- -import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; + import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; +import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; Early access From 4bb232880758190653d35a092507d98e9b1543b8 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Tue, 11 Nov 2025 13:33:01 +0200 Subject: [PATCH 07/13] last updates --- integrations/connectors/destination/tigerlake.mdx | 3 ++- integrations/connectors/source/sync-from-s3.mdx | 4 ++-- integrations/integrate/amazon-sagemaker.mdx | 6 ++++-- integrations/integrate/apache-airflow.mdx | 5 +++-- integrations/integrate/apache-kafka.mdx | 3 ++- integrations/integrate/aws-lambda.mdx | 3 ++- integrations/integrate/azure-data-studio.mdx | 3 ++- integrations/integrate/datadog.mdx | 3 ++- integrations/integrate/dbeaver.mdx | 3 ++- integrations/integrate/debezium.mdx | 3 ++- integrations/integrate/decodable.mdx | 3 ++- integrations/integrate/fivetran.mdx | 3 ++- integrations/integrate/kubernetes.mdx | 5 +++-- integrations/integrate/pgadmin.mdx | 3 ++- integrations/integrate/power-bi.mdx | 3 ++- integrations/integrate/psql.mdx | 3 ++- integrations/integrate/qstudio.mdx | 3 ++- integrations/integrate/supabase.mdx | 3 ++- integrations/integrate/tableau.mdx | 5 +++-- 
integrations/integrate/telegraf.mdx | 3 ++- integrations/integrate/terraform.mdx | 3 ++- 21 files changed, 47 insertions(+), 26 deletions(-) diff --git a/integrations/connectors/destination/tigerlake.mdx b/integrations/connectors/destination/tigerlake.mdx index d3a21fc..b7b5b2e 100644 --- a/integrations/connectors/destination/tigerlake.mdx +++ b/integrations/connectors/destination/tigerlake.mdx @@ -4,9 +4,10 @@ description: Unifies the Tiger Cloud operational architecture with data lake arc keywords: [Tiger Lake, data lake, destination connector, Iceberg, S3 Tables, Amazon S3, synchronization, hypertables, real-time, ETL] --- -import { LAKE_LONG, LAKE_SHORT, SERVICE_SHORT, HYPERTABLE, HYPERTABLE_CAP, CONSOLE, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; +import { LAKE_LONG, LAKE_SHORT, SERVICE_SHORT, HYPERTABLE, HYPERTABLE_CAP, CONSOLE, PG } from '/snippets/vars.mdx'; {LAKE_LONG} enables you to build real-time applications alongside efficient data pipeline management within a single system. {LAKE_LONG} unifies the {CLOUD_LONG} operational architecture with data lake architectures. diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index 52f14ee..59d8433 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -151,11 +151,11 @@ To sync data from your S3 bucket using {CONSOLE}: 1. To pause the connector, click `Connectors` > `Source connectors`. Open the three-dot menu next to your connector in the table, then click `Pause`. - ![Edit S3 connector](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-s3-connector-pause.png) + ![Edit S3 connector](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-s3-connector-pause.png) 2. To edit the connector, click `Connectors` > `Source connectors`. Open the three-dot menu next to your connector in the table, then click `Edit` and scroll down to `Modify your Connector`. You must pause the connector before editing it. - ![S3 connector change config](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-s3-connector-edit.png) + ![S3 connector change config](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-s3-connector-edit.png) 3. To pause or delete the connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select an option. You must pause the connector before deleting it. diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index e4a42ad..c6674ed 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -1,13 +1,15 @@ --- -title: Integrate Amazon Sagemaker with Tiger +title: Integrate Amazon Sagemaker with Tiger Cloud sidebarTitle: Amazon SageMaker description: Amazon SageMaker is a fully managed machine learning service. 
Integrate Amazon SageMaker with Tiger Cloud to store and analyze ML model data keywords: [Amazon SageMaker, machine learning, ML, AI, model training, model deployment, data science, AWS, ML models, predictions] --- -import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; + [Amazon SageMaker AI][amazon-sagemaker] is a fully managed machine learning (ML) service. With SageMaker AI, data scientists and developers can quickly and confidently build, train, and deploy ML models into a production-ready diff --git a/integrations/integrate/apache-airflow.mdx b/integrations/integrate/apache-airflow.mdx index 79a9c16..83c7bc6 100644 --- a/integrations/integrate/apache-airflow.mdx +++ b/integrations/integrate/apache-airflow.mdx @@ -1,12 +1,13 @@ --- -title: Integrate Apache Airflow with Tiger +title: Integrate Apache Airflow with Tiger Cloud sidebarTitle: Apache Airflow description: Apache Airflow is a platform to programmatically author, schedule, and monitor workflows. Integrate Apache Airflow with Tiger Cloud and create a data pipeline keywords: [Apache Airflow, workflow orchestration, DAG, data pipeline, scheduling, task automation, ETL, Python, workflow management] --- -import { CLOUD_LONG, PG, CONSOLE } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG, PG, CONSOLE } from '/snippets/vars.mdx'; Apache Airflow® is a platform created by the community to programmatically author, schedule, and monitor workflows. diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index b6018c5..08b03a8 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -5,10 +5,11 @@ description: Apache Kafka is a distributed event streaming platform used for hig keywords: [Apache Kafka, Kafka Connect, event streaming, data pipelines, real-time streaming, distributed systems, message broker, producers, consumers] --- -import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import IntegrationApacheKafka from '/snippets/integrations/_integration-apache-kafka-install.mdx'; import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; +import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; [Apache Kafka][apache-kafka] is a distributed event streaming platform used for high-performance data pipelines, streaming analytics, and data integration. 
[Apache Kafka Connect][kafka-connect] is a tool to scalably and reliably diff --git a/integrations/integrate/aws-lambda.mdx b/integrations/integrate/aws-lambda.mdx index 44eeb4c..1146389 100644 --- a/integrations/integrate/aws-lambda.mdx +++ b/integrations/integrate/aws-lambda.mdx @@ -5,9 +5,10 @@ description: With AWS Lambda, you can run code without provisioning or managing keywords: [AWS Lambda, serverless, function as a service, FaaS, AWS, event-driven, auto-scaling, cloud functions, serverless computing] --- -import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; [AWS Lambda][aws-lambda] is a serverless computing service provided by Amazon Web Services (AWS) that allows you to run code without provisioning or managing servers, scaling automatically as needed. diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index 5e75259..3e0fe27 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -5,8 +5,9 @@ description: Azure Data Studio is an open-source, cross-platform hybrid data ana keywords: [Azure Data Studio, SQL editor, database management, query tool, cross-platform, Microsoft, data analytics, PostgreSQL client] --- -import { PG, CLOUD_LONG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { PG, CLOUD_LONG } from '/snippets/vars.mdx'; [Azure Data Studio][azure-data-studio] is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index cfa2a37..444d212 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -5,11 +5,12 @@ description: Datadog is a cloud-based monitoring and analytics platform. Export keywords: [Datadog, monitoring, observability, APM, metrics, logs, traces, dashboards, alerting, infrastructure monitoring] --- -import { PG, SELF_LONG, SCALE, ENTERPRISE, PRICING_PLAN } from '/snippets/vars.mdx'; + import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; import DataDogExporter from '/snippets/integrations/_datadog-data-exporter.mdx'; import ManageDataExporter from '/snippets/integrations/_manage-a-data-exporter.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; +import { PG, SELF_LONG, SCALE, ENTERPRISE, PRICING_PLAN } from '/snippets/vars.mdx'; [Datadog][datadog] is a cloud-based monitoring and analytics platform that provides comprehensive visibility into applications, infrastructure, and systems through real-time monitoring, logging, and analytics. 
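Several of the integration pages touched in this hunk (Amazon SageMaker, Apache Kafka, AWS Lambda) import the `CreateHypertablePolicyNote` snippet, so the data those integrations write typically lands in a hypertable. As a minimal sketch of that setup, assuming a hypothetical `model_predictions` table and using the long-established compression API (newer TimescaleDB releases expose the same capability under columnstore-flavored settings):

```sql
-- Hypothetical table for rows written by an integration such as SageMaker or Lambda
CREATE TABLE model_predictions (
    time       TIMESTAMPTZ      NOT NULL,
    model_name TEXT             NOT NULL,
    prediction DOUBLE PRECISION
);

-- Partition by time so ingest and time-based queries stay fast
SELECT create_hypertable('model_predictions', by_range('time'));

-- Compress chunks older than a week, segmented by model name
ALTER TABLE model_predictions SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'model_name'
);
SELECT add_compression_policy('model_predictions', INTERVAL '7 days');
```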
diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index a38cdf4..e65bc01 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -5,8 +5,9 @@ description: DBeaver is a free cross-platform database tool for developers, data keywords: [DBeaver, database tool, SQL editor, database administration, cross-platform, query tool, data migration, PostgreSQL client] --- -import { CLOUD_LONG, SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG, SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; [DBeaver][dbeaver] is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. DBeaver provides an SQL editor, administration features, data and schema migration, and the ability to monitor database connection sessions. diff --git a/integrations/integrate/debezium.mdx b/integrations/integrate/debezium.mdx index 65b3f03..48191d3 100644 --- a/integrations/integrate/debezium.mdx +++ b/integrations/integrate/debezium.mdx @@ -5,10 +5,11 @@ description: Integrate Debezium with Tiger Cloud to enable change data capture i keywords: [Debezium, change data capture, CDC, data replication, streaming, real-time sync, database events, Kafka, log-based replication] --- -import { CLOUD_LONG, TIMESCALE_DB, SELF_LONG_CAP, SERVICE_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, COLUMNSTORE, ROWSTORE } from '/snippets/vars.mdx'; + import IntegrationPrereqsSelfOnly from '/snippets/prerequisites/_integration-prereqs-self-only.mdx'; import IntegrationDebeziumDocker from '/snippets/integrations/_integration-debezium-docker.mdx'; import IntegrationDebeziumSelfHostedConfig from '/snippets/integrations/_integration-debezium-self-hosted-config-database.mdx'; +import { CLOUD_LONG, TIMESCALE_DB, SELF_LONG_CAP, SERVICE_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, COLUMNSTORE, ROWSTORE } from '/snippets/vars.mdx'; [Debezium][debezium] is an open-source distributed platform for change data capture (CDC). It enables you to capture changes in a {SELF_LONG} instance and stream them to other systems in real time. diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 5f43fd1..46c598b 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -5,8 +5,9 @@ description: Decodable enables you to build, run, and manage data pipelines effo keywords: [Decodable, data pipelines, real-time processing, stream processing, ETL, data transformation, Apache Flink, event-driven] --- -import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; [Decodable][decodable] is a real-time data platform that allows you to build, run, and manage data pipelines effortlessly. 
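The Debezium page above captures changes from a self-hosted instance and streams them to other systems in real time. A hedged sketch of the {PG}-side prerequisites for that kind of change data capture follows; the publication name matches Debezium's default, but your connector configuration ultimately controls it:

```sql
-- Logical decoding, which CDC tools such as Debezium rely on, requires wal_level = logical
SHOW wal_level;
ALTER SYSTEM SET wal_level = 'logical';  -- takes effect after a restart

-- A publication scopes the tables exposed to the connector
CREATE PUBLICATION dbz_publication FOR ALL TABLES;

-- Once the connector is running, its replication slot is visible here
SELECT slot_name, plugin, active FROM pg_replication_slots;
```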
diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index b5a574a..b91baa0 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -5,8 +5,9 @@ description: Fivetran is a fully managed data pipeline platform that simplifies keywords: [Fivetran, ETL, ELT, data pipeline, data integration, data synchronization, managed service, data ingestion, connectors] --- -import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; [Fivetran][fivetran] is a fully managed data pipeline platform that simplifies ETL (Extract, Transform, Load) processes by automatically syncing data from multiple sources to your data warehouse. diff --git a/integrations/integrate/kubernetes.mdx b/integrations/integrate/kubernetes.mdx index cac2673..1632127 100644 --- a/integrations/integrate/kubernetes.mdx +++ b/integrations/integrate/kubernetes.mdx @@ -1,13 +1,14 @@ --- -title: Integrate Kubernetes with Tiger +title: Integrate Kubernetes with Tiger Cloud sidebarTitle: Kubernetes description: Learn how to integrate Kubernetes with Tiger Cloud to enable seamless deployment and scaling of your Postgres workloads keywords: [Kubernetes, K8s, container orchestration, deployment, scaling, cloud native, microservices, containerization, pods] --- -import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; + import KubernetesPrereqs from '/snippets/prerequisites/_kubernetes-prereqs.mdx'; import KubernetesInstallSelf from '/snippets/procedures/_kubernetes-install-self-hosted.mdx'; +import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; [Kubernetes][kubernetes] is an open-source container orchestration system that automates the deployment, scaling, and management of containerized applications. You can connect Kubernetes to {CLOUD_LONG}, and deploy {TIMESCALE_DB} within your Kubernetes clusters. diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index b88ee7b..ea77448 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -5,8 +5,9 @@ description: pgAdmin is a feature-rich open-source administration and developmen keywords: [pgAdmin, PostgreSQL, database administration, open-source, query tool, database management, GUI, SQL editor] --- -import { PG, CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { PG, CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; [pgAdmin][pgadmin] is a feature-rich open-source administration and development platform for {PG}. It is available for Chrome, Firefox, Edge, and Safari browsers, or can be installed on Microsoft Windows, Apple macOS, or various Linux flavors. 
diff --git a/integrations/integrate/power-bi.mdx b/integrations/integrate/power-bi.mdx index e71cc47..4940498 100644 --- a/integrations/integrate/power-bi.mdx +++ b/integrations/integrate/power-bi.mdx @@ -5,8 +5,9 @@ description: Integrate Power BI with Tiger Cloud keywords: [Power BI, business intelligence, BI, data visualization, dashboards, reports, analytics, Microsoft, data analysis] --- -import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import { CLOUD_LONG, PG } from '/snippets/vars.mdx'; [Power BI][power-bi] is a business analytics tool for visualizing data, creating interactive reports, and sharing insights across an organization. diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx index f867fd6..7223a7c 100644 --- a/integrations/integrate/psql.mdx +++ b/integrations/integrate/psql.mdx @@ -5,8 +5,9 @@ description: psql enables you to type in queries interactively, issue them to Po keywords: [psql, command line, terminal, PostgreSQL client, CLI, interactive shell, SQL queries, database connection] --- -import { PG, SERVICE_SHORT, COMPANY } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { PG, SERVICE_SHORT, COMPANY } from '/snippets/vars.mdx'; [`psql`][psql-docs] is a terminal-based frontend to {PG} that enables you to type in queries interactively, issue them to Postgres, and see the query results. diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx index a450387..70e074a 100644 --- a/integrations/integrate/qstudio.mdx +++ b/integrations/integrate/qstudio.mdx @@ -5,8 +5,9 @@ description: qStudio is a modern free SQL editor that provides syntax highlighti keywords: [qStudio, SQL editor, query tool, syntax highlighting, code completion, charting, data visualization, database client] --- -import { CLOUD_LONG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG } from '/snippets/vars.mdx'; [qStudio][qstudio] is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. You can use it to run queries, browse tables, and create charts for your {SERVICE_LONG}. diff --git a/integrations/integrate/supabase.mdx b/integrations/integrate/supabase.mdx index f09c383..de7a096 100644 --- a/integrations/integrate/supabase.mdx +++ b/integrations/integrate/supabase.mdx @@ -5,9 +5,10 @@ description: Supabase is an open source Firebase alternative. Integrate Supabase keywords: [Supabase, Firebase alternative, backend as a service, BaaS, PostgreSQL, real-time, authentication, open-source, database management] --- -import { PG, CLOUD_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, HYPERCORE, CAGG_CAP } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx'; +import { PG, CLOUD_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, HYPERCORE, CAGG_CAP } from '/snippets/vars.mdx'; [Supabase][supabase] is an open source Firebase alternative. 
This page shows how to run real-time analytical queries against a {SERVICE_LONG} through Supabase using a foreign data wrapper (fdw) to bring aggregated data from your diff --git a/integrations/integrate/tableau.mdx b/integrations/integrate/tableau.mdx index de62e71..c999e37 100644 --- a/integrations/integrate/tableau.mdx +++ b/integrations/integrate/tableau.mdx @@ -1,12 +1,13 @@ --- -title: Integrate Tableau and Tiger Cloud +title: Integrate Tableau with Tiger Cloud sidebarTitle: Tableau description: Integrate Tableau with Tiger Cloud keywords: [Tableau, business intelligence, BI, data visualization, dashboards, analytics, data exploration, interactive reports] --- -import { CLOUD_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-cloud-only.mdx'; +import { CLOUD_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; [Tableau][tableau] is a popular analytics platform that helps you gain greater intelligence about your business. You can use it to visualize data stored in {CLOUD_LONG}. diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx index 4ce295d..ae97b17 100644 --- a/integrations/integrate/telegraf.mdx +++ b/integrations/integrate/telegraf.mdx @@ -5,9 +5,10 @@ description: Ingest data into a Tiger Cloud service using the Telegraf plugin keywords: [Telegraf, data ingestion, metrics collection, InfluxData, plugins, IoT, systems monitoring, agent, time-series data] --- -import { PG, HYPERTABLE, TIMESCALE_DB } from '/snippets/vars.mdx'; + import ImportPrerequisites from '/snippets/prerequisites/_migrate-import-prerequisites.mdx'; import SetupConnectionString from '/snippets/procedures/_migrate-import-setup-connection-strings.mdx'; +import { PG, HYPERTABLE, TIMESCALE_DB } from '/snippets/vars.mdx'; Telegraf is a server-based agent that collects and sends metrics and events from databases, systems, and IoT sensors. Telegraf is an open source, plugin-driven tool for the collection diff --git a/integrations/integrate/terraform.mdx b/integrations/integrate/terraform.mdx index 4c61ad7..70f7792 100644 --- a/integrations/integrate/terraform.mdx +++ b/integrations/integrate/terraform.mdx @@ -5,8 +5,9 @@ description: Manage your Tiger Cloud services with a Terraform provider keywords: [Terraform, infrastructure as code, IaC, provisioning, automation, deployment, configuration management, HashiCorp] --- -import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG } from '/snippets/vars.mdx'; + import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; +import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG } from '/snippets/vars.mdx'; [Terraform][terraform] is an infrastructure-as-code tool that enables you to safely and predictably provision and manage infrastructure.
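The Supabase page above reads aggregated data from a {SERVICE_LONG} through a foreign data wrapper. A minimal `postgres_fdw` sketch of that idea, run on the Supabase side, follows; the host, credentials, and the `metrics_daily` relation are placeholders rather than values from these docs:

```sql
-- Run against the Supabase database; connection details below are placeholders
CREATE EXTENSION IF NOT EXISTS postgres_fdw;

CREATE SERVER tiger_cloud
    FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host 'your-service.tsdb.cloud.timescale.com', port '5432', dbname 'tsdb');

CREATE USER MAPPING FOR CURRENT_USER
    SERVER tiger_cloud
    OPTIONS (user 'tsdbadmin', password 'your-password');

-- Expose only the aggregated relation you want to query from Supabase
IMPORT FOREIGN SCHEMA public
    LIMIT TO (metrics_daily)
    FROM SERVER tiger_cloud
    INTO public;

SELECT * FROM metrics_daily LIMIT 10;
```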
From a9f40e4e41eaeffd495a9d280a1e14c299b7f1c5 Mon Sep 17 00:00:00 2001 From: atovpeko Date: Wed, 12 Nov 2025 13:36:33 +0200 Subject: [PATCH 08/13] redirects --- docs.json | 150 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) diff --git a/docs.json b/docs.json index 7d3b3c0..7175cdd 100644 --- a/docs.json +++ b/docs.json @@ -546,6 +546,156 @@ } ] }, + "redirects": [ + { + "source": "/tutorials/latest/real-time-analytics-energy-consumption/:slug*", + "destination": "/tutorials/time-series/analyze-energy-consumption" + }, + { + "source": "/tutorials/latest/blockchain-analyze/:slug*", + "destination": "/tutorials/time-series/analyze-blockchain" + }, + { + "source": "/tutorials/latest/blockchain-query/:slug*", + "destination": "/tutorials/time-series/query-blockchain" + }, + { + "source": "/tutorials/latest/simulate-iot-sensor-data/:slug*", + "destination": "/tutorials/time-series/simulate-iot-sensor-data" + }, + { + "source": "/tutorials/latest/real-time-analytics-transport/:slug*", + "destination": "/tutorials/time-series/analyze-transport-geospatial-data" + }, + { + "source": "/tutorials/latest/financial-tick-data/:slug*", + "destination": "/tutorials/time-series/analyze-financial-tick-data" + }, + { + "source": "/tutorials/latest/financial-ingest-real-time/:slug*", + "destination": "/tutorials/time-series/ingest-real-time-financial-data" + }, + { + "source": "/tutorials/latest/:slug*", + "destination": "/tutorials/tutorials" + }, + { + "source": "/integrations/latest/amazon-sagemaker/:slug*", + "destination": "/integrations/integrate/amazon-sagemaker" + }, + { + "source": "/integrations/latest/apache-airflow/:slug*", + "destination": "/integrations/integrate/apache-airflow" + }, + { + "source": "/integrations/latest/apache-kafka/:slug*", + "destination": "/integrations/integrate/apache-kafka" + }, + { + "source": "/integrations/latest/aws/:slug*", + "destination": "/integrations/integrate/aws" + }, + { + "source": "/integrations/latest/aws-lambda/:slug*", + "destination": "/integrations/integrate/aws-lambda" + }, + { + "source": "/integrations/latest/azure-data-studio/:slug*", + "destination": "/integrations/integrate/azure-data-studio" + }, + { + "source": "/integrations/latest/cloudwatch/:slug*", + "destination": "/integrations/integrate/cloudwatch" + }, + { + "source": "/integrations/latest/corporate-data-center/:slug*", + "destination": "/integrations/integrate/corporate-data-center" + }, + { + "source": "/integrations/latest/datadog/:slug*", + "destination": "/integrations/integrate/datadog" + }, + { + "source": "/integrations/latest/dbeaver/:slug*", + "destination": "/integrations/integrate/dbeaver" + }, + { + "source": "/integrations/latest/debezium/:slug*", + "destination": "/integrations/integrate/debezium" + }, + { + "source": "/integrations/latest/decodable/:slug*", + "destination": "/integrations/integrate/decodable" + }, + { + "source": "/integrations/latest/fivetran/:slug*", + "destination": "/integrations/integrate/fivetran" + }, + { + "source": "/integrations/latest/google-cloud/:slug*", + "destination": "/integrations/integrate/google-cloud" + }, + { + "source": "/integrations/latest/grafana/:slug*", + "destination": "/integrations/integrate/grafana" + }, + { + "source": "/integrations/latest/kubernetes/:slug*", + "destination": "/integrations/integrate/kubernetes" + }, + { + "source": "/integrations/latest/microsoft-azure/:slug*", + "destination": "/integrations/integrate/microsoft-azure" + }, + { + "source": 
"/integrations/latest/pgadmin/:slug*", + "destination": "/integrations/integrate/pgadmin" + }, + { + "source": "/integrations/latest/postgresql/:slug*", + "destination": "/integrations/integrate/postgresql" + }, + { + "source": "/integrations/latest/power-bi/:slug*", + "destination": "/integrations/integrate/power-bi" + }, + { + "source": "/integrations/latest/prometheus/:slug*", + "destination": "/integrations/integrate/prometheus" + }, + { + "source": "/integrations/latest/psql/:slug*", + "destination": "/integrations/integrate/psql" + }, + { + "source": "/integrations/latest/qstudio/:slug*", + "destination": "/integrations/integrate/qstudio" + }, + { + "source": "/integrations/latest/supabase/:slug*", + "destination": "/integrations/integrate/supabase" + }, + { + "source": "/integrations/latest/tableau/:slug*", + "destination": "/integrations/integrate/tableau" + }, + { + "source": "/integrations/latest/telegraf/:slug*", + "destination": "/integrations/integrate/telegraf" + }, + { + "source": "/integrations/latest/terraform/:slug*", + "destination": "/integrations/integrate/terraform" + }, + { + "source": "/integrations/latest/find-connection-details/:slug*", + "destination": "/integrations/find-connection-details" + }, + { + "source": "/integrations/latest/:slug*", + "destination": "/integrations/integrations" + } + ], "contextual": { "options": ["claude", "chatgpt"] }, From 1e852c3b3bbe99cee431b2556ff5b3fb516a6b6a Mon Sep 17 00:00:00 2001 From: billy-the-fish Date: Fri, 14 Nov 2025 17:44:22 +0100 Subject: [PATCH 09/13] chore: review source connectors. --- cloud/analyse-your-data-using-pgai.mdx | 4 +- .../get-started-with-tiger-cloud.mdx | 2 +- .../get-started/run-queries-from-console.mdx | 12 +- .../pricing-and-account-management.mdx | 6 +- docs.json | 4 + .../code/start-coding-with-tigerdata.mdx | 4 +- .../connectors/destination/tigerlake.mdx | 35 +- .../connectors/source/stream-from-kafka.mdx | 21 +- .../connectors/source/sync-from-postgres.mdx | 523 +----------------- .../connectors/source/sync-from-s3.mdx | 42 +- integrations/find-connection-details.mdx | 8 +- integrations/integrate/amazon-sagemaker.mdx | 4 +- integrations/integrate/apache-airflow.mdx | 4 +- integrations/integrate/apache-kafka.mdx | 8 +- integrations/integrate/aws-lambda.mdx | 4 +- integrations/integrate/aws.mdx | 2 +- integrations/integrate/azure-data-studio.mdx | 2 +- .../integrate/corporate-data-center.mdx | 2 +- integrations/integrate/datadog.mdx | 4 +- integrations/integrate/dbeaver.mdx | 6 +- integrations/integrate/decodable.mdx | 4 +- integrations/integrate/fivetran.mdx | 8 +- integrations/integrate/google-cloud.mdx | 2 +- integrations/integrate/kubernetes.mdx | 2 +- integrations/integrate/microsoft-azure.mdx | 2 +- integrations/integrate/pgadmin.mdx | 2 +- integrations/integrate/power-bi.mdx | 4 +- integrations/integrate/psql.mdx | 2 +- integrations/integrate/qstudio.mdx | 2 +- integrations/integrate/supabase.mdx | 2 +- integrations/integrate/tableau.mdx | 2 +- integrations/integrate/telegraf.mdx | 2 +- integrations/integrate/terraform.mdx | 2 +- integrations/integrations.mdx | 2 +- .../hypertables/hypertable-crud.mdx | 2 +- .../get-started-with-timescaledb.mdx | 2 +- .../understand/timescaledb-architecture.mdx | 4 +- .../get-started-with-timescaledb.mdx | 2 +- .../understand/timescaledb-architecture.mdx | 4 +- snippets/cloud/_cloud-create-service.mdx | 4 +- snippets/cloud/_cloud-installation.mdx | 2 +- snippets/cloud/_mst-create-service.mdx | 6 +- snippets/coding/_start-coding-golang.mdx | 2 +- 
snippets/coding/_start-coding-java.mdx | 4 +- snippets/coding/_start-coding-node.mdx | 2 +- snippets/coding/_start-coding-ruby.mdx | 8 +- .../_configure-source-database-awsrds.mdx | 85 +++ .../_configure-source-database-postgres.mdx | 73 +++ snippets/integrations/_enable-replication.mdx | 25 + snippets/integrations/_grafana-connect.mdx | 2 +- .../integrations/_livesync-console-pg.mdx | 113 ++++ .../integrations/_livesync-limitations.mdx | 2 +- .../integrations/_livesync-terminal-pg.mdx | 313 +++++++++++ .../integrations/_manage-a-data-exporter.mdx | 6 +- .../integrations/_prometheus-integrate.mdx | 2 +- .../_setup-connection-strings-livesync.mdx | 15 + ..._tune-source-database-awsrds-migration.mdx | 37 ++ .../code/_start-coding-golang.mdx | 20 +- .../integrations/code/_start-coding-java.mdx | 28 +- .../integrations/code/_start-coding-node.mdx | 14 +- .../code/_start-coding-python.mdx | 32 +- .../integrations/code/_start-coding-ruby.mdx | 20 +- .../prerequisites/_livesync-prereqs-cloud.mdx | 14 - .../_livesync-prereqs-terminal.mdx | 30 - .../prerequisites/_migrate-prerequisites.mdx | 19 + .../prerequisites/_prereqs-cloud-only.mdx | 7 +- snippets/vars.mdx | 4 +- styles.css | 11 + 68 files changed, 931 insertions(+), 718 deletions(-) create mode 100644 snippets/integrations/_configure-source-database-awsrds.mdx create mode 100644 snippets/integrations/_configure-source-database-postgres.mdx create mode 100644 snippets/integrations/_enable-replication.mdx create mode 100644 snippets/integrations/_livesync-console-pg.mdx create mode 100644 snippets/integrations/_livesync-terminal-pg.mdx create mode 100644 snippets/integrations/_setup-connection-strings-livesync.mdx create mode 100644 snippets/integrations/_tune-source-database-awsrds-migration.mdx delete mode 100644 snippets/prerequisites/_livesync-prereqs-cloud.mdx delete mode 100644 snippets/prerequisites/_livesync-prereqs-terminal.mdx create mode 100644 snippets/prerequisites/_migrate-prerequisites.mdx diff --git a/cloud/analyse-your-data-using-pgai.mdx b/cloud/analyse-your-data-using-pgai.mdx index d47f340..66227b8 100644 --- a/cloud/analyse-your-data-using-pgai.mdx +++ b/cloud/analyse-your-data-using-pgai.mdx @@ -21,7 +21,7 @@ To start using {CLOUD_LONG} for your data: -## Create a {SERVICE_LONG} +## Create a Tiger Cloud service Now that you have an active {ACCOUNT_LONG}, you create and manage your {SERVICE_SHORT}s in {CONSOLE}. When you create a {SERVICE_SHORT}, you effectively create a blank {PG} database with additional {CLOUD_LONG} features available under your {PRICING_PLAN}. You then add or migrate your data into this database. @@ -45,7 +45,7 @@ shows you how to connect. -## Connect to your {SERVICE_SHORT} +## Connect to your service To run queries and perform other operations, connect to your {SERVICE_SHORT}: diff --git a/cloud/tiger/get-started/get-started-with-tiger-cloud.mdx b/cloud/tiger/get-started/get-started-with-tiger-cloud.mdx index c2d0ee4..be353f0 100644 --- a/cloud/tiger/get-started/get-started-with-tiger-cloud.mdx +++ b/cloud/tiger/get-started/get-started-with-tiger-cloud.mdx @@ -37,7 +37,7 @@ ingest and query data faster while keeping the costs low. -## Optimize time-series data in {HYPERTABLE}s with {HYPERCORE} +## Optimize time-series data in hypertables with hypercore Time-series data represents the way a system, process, or behavior changes over time. {HYPERTABLE}_CAPs are {PG} tables that help you improve insert and query performance by automatically partitioning your data by time. 
Each {HYPERTABLE} diff --git a/cloud/tiger/get-started/run-queries-from-console.mdx b/cloud/tiger/get-started/run-queries-from-console.mdx index 7c43ff6..4dfe079 100644 --- a/cloud/tiger/get-started/run-queries-from-console.mdx +++ b/cloud/tiger/get-started/run-queries-from-console.mdx @@ -40,7 +40,7 @@ Available features are: - **Cross-platform support**: work from [{CONSOLE}][portal-data-mode] or download the [desktop app][popsql-desktop] for macOS, Windows, and Linux. - **Easy connection**: connect to {CLOUD_LONG}, {PG}, Redshift, Snowflake, BigQuery, MySQL, SQL Server, [and more][popsql-connections]. -### Connect to your {SERVICE_LONG} in the data mode +### Connect to your Tiger Cloud service in the data mode @@ -89,11 +89,11 @@ If your {SERVICE_LONG} runs inside a {VPC}, do one of the following to enable ac - Use an SSH tunnel: when you configure the connection in {POPSQL}, under `Advanced Options` enable `Connect over SSH`. - Add {POPSQL}'s static IPs (`23.20.131.72, 54.211.234.135`) to your allowlist. -#### What happens if another member of my {PROJECT_LONG} uses the {DATA_MODE}? +#### What happens if another member of my Tiger Cloud project uses the data mode? The number of {DATA_MODE} seats you are allocated depends on your [{PRICING_PLAN}][pricing-plan-features]. -#### Will using the {DATA_MODE} affect the performance of my {SERVICE_LONG}? +#### Will using the data mode affect the performance of my Tiger Cloud service? There are a few factors to consider: @@ -111,7 +111,7 @@ If you'd like to prevent write operations such as insert or update, instead of using the `tsdbadmin` user, create a read-only user for your {SERVICE_SHORT} and use that in the {DATA_MODE}. -## {SQL_ASSISTANT_SHORT} +## SQL assistant {SQL_ASSISTANT_SHORT} in [{CONSOLE}][portal-data-mode] is a chat-like interface that harnesses the power of AI to help you write, fix, and organize SQL faster and more accurately. Ask {SQL_ASSISTANT_SHORT} to change existing queries, write new ones from scratch, debug error messages, optimize for query performance, add comments, improve readability—and really, get answers to any questions you can think of. @@ -192,7 +192,7 @@ manage {SQL_ASSISTANT_SHORT} settings under [`User name` > `Settings` > `SQL Ass * **Sample data**: to give the LLM more context so you have better SQL suggestions, enable sample data sharing in the {SQL_ASSISTANT_SHORT} preferences. * **Telemetry**: to improve {SQL_ASSISTANT_SHORT}, {COMPANY} collects telemetry and usage data, including prompts, responses, and query metadata. -## {OPS_MODE_CAP} {SQL_EDITOR} +## Ops mode SQL editor {SQL_EDITOR} is an integrated secure UI that you use to run queries and see the results for a {SERVICE_LONG}. @@ -220,7 +220,7 @@ To use {SQL_EDITOR}: -## Cloud {SQL_EDITOR} licenses +## Cloud SQL editor licenses * **{SQL_EDITOR} in the {OPS_MODE}**: free for anyone with a [{ACCOUNT_LONG}][create-cloud-account]. * **Data mode**: the number of seats you are allocated depends on your [{PRICING_PLAN}][pricing-plan-features]. diff --git a/cloud/tiger/understand/pricing-and-account-management.mdx b/cloud/tiger/understand/pricing-and-account-management.mdx index 1a45c25..86326d0 100644 --- a/cloud/tiger/understand/pricing-and-account-management.mdx +++ b/cloud/tiger/understand/pricing-and-account-management.mdx @@ -57,7 +57,7 @@ from initial development through to mission-critical enterprise applications. or egress. There are no per-query fees, nor additional costs to read or write data. 
It's all completely transparent, easily understood, and up to you. -### {CLOUD_LONG} free trial for the different price plans +### Tiger Cloud free trial for the different price plans We offer new users a free, 30-day trial period of our {PERFORMANCE} plan with no credit card required. During your trial, you can contact {CONTACT_SALES} to request information about, and access @@ -84,7 +84,7 @@ resource usage and dashboards with performance insights. This allows you to clos {CONSOLE}_SHORT also shows your month-to-date accrued charges, as well as a forecast of your expected month-end bill. Your previous invoices are also available as PDFs for download. -### {COMPANY} support +### Tiger Data support {COMPANY} runs a global support organization with Customer Satisfaction (CSAT) scores above 99%. Support covers all timezones, and is fully staffed at weekend hours. @@ -199,7 +199,7 @@ Your monthly price for compute and storage is computed similarly. For example, o Some add-ons such as Elastic storage, Tiered storage, and Connection pooling may incur additional charges. These charges are clearly marked in your billing snapshot in {CONSOLE}. -## Manage your {CLOUD_LONG} {PRICING_PLAN} +## Manage your Tiger Cloud pricing plan You handle all details about your {CLOUD_LONG} project including updates to your {PRICING_PLAN}, payment methods, and add-ons in the [billing section in {CONSOLE}][cloud-billing]: diff --git a/docs.json b/docs.json index 7175cdd..52df6bd 100644 --- a/docs.json +++ b/docs.json @@ -7,6 +7,9 @@ "light": "#6447FB", "dark": "#000" }, + "styling": { + "codeblocks": "dark" + }, "seo": { "metatags": { "robots": "noindex" @@ -696,6 +699,7 @@ "destination": "/integrations/integrations" } ], + "customCss": ["styles.css"], "contextual": { "options": ["claude", "chatgpt"] }, diff --git a/integrations/code/start-coding-with-tigerdata.mdx b/integrations/code/start-coding-with-tigerdata.mdx index 613ed90..1169b87 100644 --- a/integrations/code/start-coding-with-tigerdata.mdx +++ b/integrations/code/start-coding-with-tigerdata.mdx @@ -48,4 +48,6 @@ service, create and manage hypertables, then ingest and query data.
You are not limited to these languages. Tiger Cloud is based on {PG}, you can interface -with TimescaleDB and Tiger Cloud using any [{PG} client driver](https://wiki.postgresql.org/wiki/List_of_drivers). \ No newline at end of file +with TimescaleDB and Tiger Cloud using any [{PG} client driver][pg-drivers]. + +[pg-drivers]: https://wiki.postgresql.org/wiki/List_of_drivers \ No newline at end of file diff --git a/integrations/connectors/destination/tigerlake.mdx b/integrations/connectors/destination/tigerlake.mdx index b7b5b2e..a0d276f 100644 --- a/integrations/connectors/destination/tigerlake.mdx +++ b/integrations/connectors/destination/tigerlake.mdx @@ -15,7 +15,7 @@ system. {LAKE_LONG} unifies the {CLOUD_LONG} operational architecture with data ![Tiger Lake architecture](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-lake-integration-tiger.svg) {LAKE_LONG} is a native integration enabling synchronization between {HYPERTABLE}s and relational tables -running in {SERVICE_LONG}s to Iceberg tables running in [Amazon S3 Tables](https://aws.amazon.com/s3/features/tables/) in your AWS account. +running in {SERVICE_LONG}s to Iceberg tables running in [Amazon S3 Tables][s3-tables] in your AWS account. Tiger Lake is currently in private beta. Please contact us to request access. @@ -27,7 +27,7 @@ Tiger Lake is currently in private beta. Please contact us to request access. -## Integrate a data lake with your {SERVICE_LONG} +## Integrate a data lake with your Tiger Cloud service To connect a {SERVICE_LONG} to your data lake: @@ -36,7 +36,7 @@ To connect a {SERVICE_LONG} to your data lake: 1. **Set the AWS region to host your table bucket** - 1. In [AWS CloudFormation](https://console.aws.amazon.com/cloudformation/), select the current AWS region at the top-right of the page. + 1. In [AWS CloudFormation][aws-cloudformation], select the current AWS region at the top-right of the page. 1. Set it to the Region you want to create your table bucket in. **This must match the region your {SERVICE_LONG} is running in**: if the regions do not match AWS charges you for @@ -62,7 +62,7 @@ To connect a {SERVICE_LONG} to your data lake: 3. **Connect your {SERVICE_SHORT} to the data lake** - 1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click + 1. In [{CONSOLE}][console-services], select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click `Connectors`. 1. Select the Apache Iceberg connector and supply the: @@ -102,7 +102,7 @@ To connect a {SERVICE_LONG} to your data lake: 2. **Connect your {SERVICE_SHORT} to the data lake** - 1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click + 1. In [{CONSOLE}][console-services], select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click `Connectors`. 1. Select the Apache Iceberg connector and supply the: @@ -118,7 +118,7 @@ To connect a {SERVICE_LONG} to your data lake: 1. **Create a S3 Bucket** 1. Set the AWS region to host your table bucket - 1. In [Amazon S3 console](https://console.aws.amazon.com/s3/), select the current AWS region at the top-right of the page. + 1. In [Amazon S3 console][aws-s3-console], select the current AWS region at the top-right of the page. 2. Set it to the Region your you want to create your table bucket in. 
**This must match the region your {SERVICE_LONG} is running in**: if the regions do not match AWS charges you for @@ -128,7 +128,7 @@ To connect a {SERVICE_LONG} to your data lake: 1. Copy the `Amazon Resource Name (ARN)` for your table bucket. 2. **Create an ARN role** - 1. In [IAM Dashboard](https://console.aws.amazon.com/iamv2/home), click `Roles` then click `Create role` + 1. In [IAM Dashboard][aws-iam-dashboard], click `Roles` then click `Create role` 1. In `Select trusted entity`, click `Custom trust policy`, replace the **Custom trust policy** code block with the following: @@ -191,7 +191,7 @@ To connect a {SERVICE_LONG} to your data lake: 3. **Connect your {SERVICE_SHORT} to the data lake** - 1. In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click + 1. In [{CONSOLE}][console-services], select the {SERVICE_SHORT} you want to integrate with AWS S3 Tables, then click `Connectors`. 1. Select the Apache Iceberg connector and supply the: @@ -204,7 +204,7 @@ To connect a {SERVICE_LONG} to your data lake: -## Stream data from your {SERVICE_LONG} to your data lake +## Stream data from your Tiger Cloud service to your data lake When you start streaming, all data in the table is synchronized to Iceberg. Records are imported in time order, from oldest to youngest. The write throughput is approximately 40.000 records / second. For larger tables, a full import can @@ -245,9 +245,9 @@ By default, the partition interval for an Iceberg table is one day(time-column) | `day` | Extract a date or timestamp day, as days from epoch. | `date`, `timestamp`, `timestamptz` | | `month` | Extract a date or timestamp day, as days from epoch. | `date`, `timestamp`, `timestamptz` | | `year` | Extract a date or timestamp day, as days from epoch. | `date`, `timestamp`, `timestamptz` | -| `truncate[W]` | Value truncated to width W, see [options](https://iceberg.apache.org/spec/#truncate-transform-details) | +| `truncate[W]` | Value truncated to width W, see [options][iceberg-truncate] | -These partitions define the behavior using the [Iceberg partition specification](https://iceberg.apache.org/spec/#partition-transforms). +These partitions define the behavior using the [Iceberg partition specification][iceberg-partitions]. ### Sample code @@ -339,10 +339,19 @@ data lake: * Service requires {PG} 17.6 and above is supported. * Consistent ingestion rates of over 30000 records / second can lead to a lost replication slot. Burst can be feathered out over time. -* [Amazon S3 Tables Iceberg REST](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-open-source.html) catalog only is supported. +* [Amazon S3 Tables Iceberg REST][s3-iceberg-rest] catalog only is supported. * In order to collect deletes made to data in the columstore, certain columnstore optimizations are disabled for {HYPERTABLE}s. * Direct Compress is not supported. * The `TRUNCATE` statement is not supported, and does not truncate data in the corresponding Iceberg table. * Data in a {HYPERTABLE} that has been moved to the low-cost object storage tier is not synced. * Writing to the same S3 table bucket from multiple services is not supported, bucket-to-service mapping is one-to-one. -* Iceberg snapshots are pruned automatically if the amount exceeds 2500. \ No newline at end of file +* Iceberg snapshots are pruned automatically if the amount exceeds 2500. 
+ +[s3-tables]: https://aws.amazon.com/s3/features/tables/ +[aws-cloudformation]: https://console.aws.amazon.com/cloudformation/ +[console-services]: https://console.cloud.timescale.com/dashboard/services +[aws-s3-console]: https://console.aws.amazon.com/s3/ +[aws-iam-dashboard]: https://console.aws.amazon.com/iamv2/home +[iceberg-truncate]: https://iceberg.apache.org/spec/#truncate-transform-details +[iceberg-partitions]: https://iceberg.apache.org/spec/#partition-transforms +[s3-iceberg-rest]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-open-source.html \ No newline at end of file diff --git a/integrations/connectors/source/stream-from-kafka.mdx b/integrations/connectors/source/stream-from-kafka.mdx index d7d7673..303d360 100644 --- a/integrations/connectors/source/stream-from-kafka.mdx +++ b/integrations/connectors/source/stream-from-kafka.mdx @@ -9,22 +9,18 @@ import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereq import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; import { SERVICE_SHORT, CONSOLE, PROJECT_SHORT } from '/snippets/vars.mdx'; - Early access - You use the Kafka source connector to stream events from Kafka into your {SERVICE_SHORT}. The connector connects to your Confluent Cloud Kafka cluster and Schema Registry using SASL/SCRAM authentication and service account–based API keys. Only the Avro format is currently supported [with some limitations](#known-limitations-and-unsupported-types). This page explains how to connect to your Confluent Cloud Kafka cluster. - -The Kafka source connector is not yet supported for production use. - + Early access: the Kafka source connector is not yet supported for production use. ## Prerequisites -- [Sign up](https://www.confluent.io/get-started/) for Confluent Cloud. -- [Create](https://docs.confluent.io/cloud/current/clusters/create-cluster.html) a Kafka cluster in Confluent Cloud. +- [Sign up][confluent-signup] for Confluent Cloud. +- [Create][confluent-create-cluster] a Kafka cluster in Confluent Cloud. @@ -36,7 +32,7 @@ Take the following steps to prepare your Kafka cluster for connection: If you already have a service account, you can reuse it. To create a new service account: - 1. Log in to [Confluent Cloud](https://confluent.cloud/). + 1. Log in to [Confluent Cloud][confluent-cloud]. 2. Click the burger menu at the top-right of the pane, then press `Access control` > `Service accounts` >`Add service account`. 3. Enter the following details: @@ -115,7 +111,7 @@ Your Confluent Cloud Schema Registry is now accessible using the API key and sec Take the following steps to create a Kafka source connector in {CONSOLE}. -1. **In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select your {SERVICE_SHORT}** +1. **In [{CONSOLE}][console-services], select your {SERVICE_SHORT}** 2. **Go to `Connectors` > `Source connectors`. Click `New Connector`, then select `Kafka`** 3. **Click the pencil icon, then set the connector name** 4. 
**Set up Kafka authentication** @@ -222,4 +218,9 @@ Unsupported examples: "type": "bytes", "logicalType": "custom-type" } -``` \ No newline at end of file +``` + +[confluent-signup]: https://www.confluent.io/get-started/ +[confluent-create-cluster]: https://docs.confluent.io/cloud/current/clusters/create-cluster.html +[confluent-cloud]: https://confluent.cloud/ +[console-services]: https://console.cloud.timescale.com/dashboard/services \ No newline at end of file diff --git a/integrations/connectors/source/sync-from-postgres.mdx b/integrations/connectors/source/sync-from-postgres.mdx index 430bcba..caee2c6 100644 --- a/integrations/connectors/source/sync-from-postgres.mdx +++ b/integrations/connectors/source/sync-from-postgres.mdx @@ -4,13 +4,10 @@ description: Synchronize updates to your primary Postgres database with the corr keywords: [PostgreSQL, source connector, logical replication, synchronization, real-time replication, data sync, migration, CDC, change data capture] --- +import LivesyncConsole from '/snippets/integrations/_livesync-console-pg.mdx'; +import LivesyncTerminal from '/snippets/integrations/_livesync-terminal-pg.mdx'; +import { SERVICE_SHORT, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; -import LivesyncPrereqsCloud from '/snippets/prerequisites/_livesync-prereqs-cloud.mdx'; -import LivesyncPrereqsTerminal from '/snippets/prerequisites/_livesync-prereqs-terminal.mdx'; -import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; -import { HYPERTABLE, CONSOLE, PG_CONNECTOR, SERVICE_SHORT } from '/snippets/vars.mdx'; - - Early access You use the {PG_CONNECTOR} to synchronize all data or specific tables from a {PG} database instance to your {SERVICE_SHORT}, in real time. You run the connector continuously, turning {PG} into a primary database with your @@ -42,527 +39,31 @@ You use the {PG_CONNECTOR} for data synchronization, rather than migration. This {PG} exposes `COPY` progress under `pg_stat_progress_copy`. * Synchronize real-time changes from a {PG} instance. -* Add and remove tables on demand using the [{PG} PUBLICATION interface](https://www.postgresql.org/docs/current/sql-createpublication.html). +* Add and remove tables on demand using the [{PG} PUBLICATION interface][pg-publication]. * Enable features such as hypertables, columnstore, and continuous aggregates on your logical replica. - -This source Postgres connector is not yet supported for production use. If you have any questions or feedback, talk to us in [#livesync in the Tiger Community](https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88). - - - - - - -## Prerequisites - - - -## Limitations - -* The source {PG} instance must be accessible from the Internet. - - Services hosted behind a firewall or VPC are not supported. This functionality is on the roadmap. + Early access: this source {PG} connector is not yet supported for production use. If you have +any questions or feedback, talk to us in [#livesync in the Tiger Community][tiger-community-livesync]. -* Indexes, including the primary key and unique constraints, are not migrated to the target. - We recommend that, depending on your query patterns, you create only the necessary indexes on the target. - -## Set your connection string - -This variable holds the connection information for the source database. In the terminal on your migration machine, -set the following: - -```bash -export SOURCE="postgres://:@:/" -``` - - -Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. 
This tool -requires a direct connection to the database to function properly. - - -## Tune your source database - - -1. **Set the `rds.logical_replication` parameter to `1`** - - In the AWS console, navigate to your RDS instance parameter group and set `rds.logical_replication` to `1`. This enables logical replication on the RDS instance. - - After changing this parameter, restart your RDS instance for the changes to take effect. - -2. **Create a user for the connector and assign permissions** - - 1. Create ``: - - ```sql - psql $SOURCE -c "CREATE USER PASSWORD ''" - ``` - - You can use an existing user. However, you must ensure that the user has the following permissions. - - 2. Grant permissions to create a replication slot: - - ```sql - psql $SOURCE -c "ALTER ROLE REPLICATION" - ``` - - 3. Grant permissions to create a publication: - - ```sql - psql $SOURCE -c "GRANT CREATE ON DATABASE TO " - ``` - - 4. Assign the user permissions on the source database: - - ```sql - psql $SOURCE <; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; - EOF - ``` - - If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: - ```sql - psql $SOURCE < TO ; - GRANT SELECT ON ALL TABLES IN SCHEMA TO ; - ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; - EOF - ``` - - 5. On each table you want to sync, make `` the owner: - - ```sql - psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' - ``` - You can skip this step if the replicating user is already the owner of the tables. - -3. **Enable replication `DELETE` and `UPDATE` operations** - - For the connector to replicate `DELETE` and `UPDATE` operations, enable `REPLICA IDENTITY` on each table: + - ```sql - psql $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY FULL;' - ``` + - - -1. **Tune the Write Ahead Log (WAL) on the {PG} source database** - - ```sql - psql $SOURCE <`: - - ```sql - psql $SOURCE -c "CREATE USER PASSWORD ''" - ``` - - You can use an existing user. However, you must ensure that the user has the following permissions. - - 2. Grant permissions to create a replication slot: - - ```sql - psql $SOURCE -c "ALTER ROLE REPLICATION" - ``` - - 3. Grant permissions to create a publication: - - ```sql - psql $SOURCE -c "GRANT CREATE ON DATABASE TO " - ``` - - 4. Assign the user permissions on the source database: - - ```sql - psql $SOURCE <; - GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; - ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; - EOF - ``` - - If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: - ```sql - psql $SOURCE < TO ; - GRANT SELECT ON ALL TABLES IN SCHEMA TO ; - ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; - EOF - ``` - - 5. On each table you want to sync, make `` the owner: - - ```sql - psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' - ``` - You can skip this step if the replicating user is already the owner of the tables. - -3. **Enable replication `DELETE` and `UPDATE` operations** - - For the connector to replicate `DELETE` and `UPDATE` operations, enable `REPLICA IDENTITY` on each table: + - ```sql - psql $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY FULL;' - ``` + -## Synchronize data - -To sync data from your {PG} database using {CONSOLE}: - -1. **Connect to your {SERVICE_SHORT}** - - In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} to sync live data to. - -2. **Connect the source database and the target {SERVICE_SHORT}** - - ![Postgres connector wizard](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-wizard-tiger-console.png) - - 1. Click `Connectors` > `PostgreSQL`. - 2. Set the name for the new connector by clicking the pencil icon. - 3. Check the boxes for `Set wal_level to logical` and `Update your credentials`, then click `Continue`. - 4. Enter your database credentials or a {PG} connection string, then click `Connect to database`. - This is the connection string for ``. The console connects to the source database and retrieves the schema information. - -3. **Optimize the data to synchronize in hypertables** - - ![Postgres connector start](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-start-tiger-console.png) - - 1. In the `Select table` dropdown, select the tables to sync. - 2. Click `Select tables +`. - - The console checks the table schema and, if possible, suggests the column to use as the time dimension in a {HYPERTABLE}. - 3. Click `Create Connector`. - - The console starts the connector between the source database and the target {SERVICE_SHORT} and displays the progress. - -4. **Monitor synchronization** - - ![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) - - 1. To view the amount of data replicated, click `Connectors`. The diagram in `Connector data flow` gives you an overview of the connectors you have created, their status, and how much data has been replicated. - - 2. To review the syncing progress for each table, click `Connectors` > `Source connectors`, then select the name of your connector in the table. - -5. **Manage the connector** - - ![Edit a Postgres connector](https://assets.timescale.com/docs/images/tiger-on-azure/edit-pg-connector-tiger-console.png) - - 1. To edit the connector, click `Connectors` > `Source connectors`, then select the name of your connector in the table. You can rename the connector, delete or add new tables for syncing. - - 2. To pause a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Pause`. - - 3. To delete a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Delete`. You must pause the connector before deleting it. - -And that is it, you are using the connector to synchronize all the data, or specific tables, from a {PG} database -instance in real time. - - - - - -## Prerequisites - - - -## Limitations - -- The schema is not migrated by the connector, you use `pg_dump`/`pg_restore` to migrate it. - - - -## Set your connection strings - -The `` in the `SOURCE` connection must have the replication role granted in order to create a replication slot. - -These variables hold the connection information for the source database and target. In Terminal on your migration machine, set the following: - -```bash -export SOURCE="postgres://:@:/" -export TARGET="postgres://tsdbadmin:@:/tsdb?sslmode=require" -``` - -You find the connection information in the configuration file you -downloaded when you created the service. 
- - -Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. This tool requires a direct connection to the database to function properly. - - -## Tune your source database - -Follow the same tuning steps from the Console tab above. - -## Migrate the table schema - -Use `pg_dump` to: - -1. **Download the schema from the source database** - - ```bash - pg_dump $SOURCE \ - --no-privileges \ - --no-owner \ - --no-publications \ - --no-subscriptions \ - --no-table-access-method \ - --no-tablespaces \ - --schema-only \ - --file=schema.sql - ``` - -2. **Apply the schema on the target {SERVICE_SHORT}** - ```bash - psql $TARGET -f schema.sql - ``` - -## Convert partitions and tables with time-series data into hypertables - -For efficient querying and analysis, you can convert tables which contain time-series or -events data, and tables that are already partitioned using {PG} declarative partition into -hypertables. - -1. **Convert tables to hypertables** - - Run the following on each table in the target to convert it to a hypertable: - - ```bash - psql -X -d $TARGET -c "SELECT public.create_hypertable('
', by_range('', ''::interval));" - ``` - - For example, to convert the *metrics* table into a hypertable with *time* as a partition column and - *1 day* as a partition interval: - - ```bash - psql -X -d $TARGET -c "SELECT public.create_hypertable('public.metrics', by_range('time', '1 day'::interval));" - ``` - -2. **Convert {PG} partitions to hypertables** - - Rename the partition and create a new regular table with the same name as the partitioned table, then - convert to a hypertable: - - ```bash - psql $TARGET -f - <<'EOF' - BEGIN; - ALTER TABLE public.events RENAME TO events_part; - CREATE TABLE public.events(LIKE public.events_part INCLUDING ALL); - SELECT create_hypertable('public.events', by_range('time', '1 day'::interval)); - COMMIT; -EOF - ``` - -## Specify the tables to synchronize - -After the schema is migrated, you [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html) on the source database that -specifies the tables to synchronize. - -1. **Create a publication that specifies the table to synchronize** - - A `PUBLICATION` enables you to synchronize some or all the tables in the schema or database. - - ```sql - CREATE PUBLICATION FOR TABLE , ; - ``` - - To add tables after to an existing publication, use [ALTER PUBLICATION](https://www.postgresql.org/docs/current/sql-alterpublication.html) - - ```sql - ALTER PUBLICATION ADD TABLE ; - ``` - -2. **Publish the {PG} declarative partitioned table** - - ```sql - ALTER PUBLICATION SET(publish_via_partition_root=true); - ``` - -3. **Stop syncing a table in the `PUBLICATION`, use `DROP TABLE`** - - ```sql - ALTER PUBLICATION DROP TABLE ; - ``` - -## Synchronize data - -You use the connector docker image to synchronize changes in real time from a {PG} database -instance: - -1. **Start the connector** - - As you run the connector continuously, best practice is to run it as a Docker daemon. - - ```bash - docker run -d --rm --name livesync timescale/live-sync:v0.1.25 run \ - --publication --subscription \ - --source $SOURCE --target $TARGET --table-map - ``` - - `--publication`: The name of the publication as you created in the previous step. To use multiple publications, repeat the `--publication` flag. - - `--subscription`: The name that identifies the subscription on the target. - - `--source`: The connection string to the source {PG} database. - - `--target`: The connection string to the target. - - `--table-map`: (Optional) A JSON string that maps source tables to target tables. If not provided, the source and target table names are assumed to be the same. - For example, to map the source table `metrics` to the target table `metrics_data`: - - ``` - --table-map '{"source": {"schema": "public", "table": "metrics"}, "target": {"schema": "public", "table": "metrics_data"}}' - ``` - To map only the schema, use: - - ``` - --table-map '{"source": {"schema": "public"}, "target": {"schema": "analytics"}}' - ``` - This flag can be repeated for multiple table mappings. - -2. **Capture logs** - - Once the connector is running as a docker daemon, you can also capture the logs: - ```bash - docker logs -f livesync - ``` - -3. **View the progress of tables being synchronized** - - List the tables being synchronized by the connector using the `_ts_live_sync.subscription_rel` table in the target: - - ```bash - psql $TARGET -c "SELECT * FROM _ts_live_sync.subscription_rel" - ``` - - The `state` column indicates the current state of the table synchronization. 
- Possible values for `state` are: - - | state | description | - |-------|-------------| - | d | initial table data sync | - | f | initial table data sync completed | - | s | catching up with the latest changes | - | r | table is ready, syncing live changes | - - To see the replication lag, run the following against the SOURCE database: - - ```bash - psql $SOURCE -f - <<'EOF' - SELECT - slot_name, - pg_size_pretty(pg_current_wal_flush_lsn() - confirmed_flush_lsn) AS lag - FROM pg_replication_slots - WHERE slot_name LIKE 'live_sync_%' AND slot_type = 'logical' -EOF - ``` - -4. **Add or remove tables from the publication** - - To add tables, use [ALTER PUBLICATION .. ADD TABLE](https://www.postgresql.org/docs/current/sql-alterpublication.html) - - ```sql - ALTER PUBLICATION ADD TABLE ; - ``` - - To remove tables, use [ALTER PUBLICATION .. DROP TABLE](https://www.postgresql.org/docs/current/sql-alterpublication.html) - - ```sql - ALTER PUBLICATION DROP TABLE ; - ``` - -5. **Update table statistics** - - If you have a large table, you can run `ANALYZE` on the target - to update the table statistics after the initial sync is complete. - - This helps the query planner make better decisions for query execution plans. - - ```bash - vacuumdb --analyze --verbose --dbname=$TARGET - ``` - -6. **Stop the connector** - - ```bash - docker stop live-sync - ``` - -7. **(Optional) Reset sequence nextval on the target** - - The connector does not automatically reset the sequence nextval on the target. - - Run the following script to reset the sequence for all tables that have a - serial or identity column in the target: - - ```bash - psql $TARGET -f - <<'EOF' - DO $$ - DECLARE - rec RECORD; - BEGIN - FOR rec IN ( - SELECT - sr.target_schema AS table_schema, - sr.target_table AS table_name, - col.column_name, - pg_get_serial_sequence( - sr.target_schema || '.' || sr.target_table, - col.column_name - ) AS seqname - FROM _ts_live_sync.subscription_rel AS sr - JOIN information_schema.columns AS col - ON col.table_schema = sr.target_schema - AND col.table_name = sr.target_table - WHERE col.column_default LIKE 'nextval(%' -- only serial/identity columns - ) LOOP - EXECUTE format( - 'SELECT setval(%L, - COALESCE((SELECT MAX(%I) FROM %I.%I), 0) + 1, - false - );', - rec.seqname, -- the sequence identifier - rec.column_name, -- the column to MAX() - rec.table_schema, -- schema for MAX() - rec.table_name -- table for MAX() - ); - END LOOP; - END; - $$ LANGUAGE plpgsql; -EOF - ``` - -8. **Clean up** - - Use the `--drop` flag to remove the replication slots created by the connector on the source database. 
- - ```bash - docker run -it --rm --name livesync timescale/live-sync:v0.1.25 run \ - --publication --subscription \ - --source $SOURCE --target $TARGET \ - --drop - ``` - - - - \ No newline at end of file +[pg-publication]: https://www.postgresql.org/docs/current/sql-createpublication.html +[tiger-community-livesync]: https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88 diff --git a/integrations/connectors/source/sync-from-s3.mdx b/integrations/connectors/source/sync-from-s3.mdx index 59d8433..fea62d2 100644 --- a/integrations/connectors/source/sync-from-s3.mdx +++ b/integrations/connectors/source/sync-from-s3.mdx @@ -9,7 +9,6 @@ import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereq import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; import { S3_CONNECTOR, HYPERTABLE, CONSOLE, SERVICE_SHORT, PROJECT_SHORT } from '/snippets/vars.mdx'; - Early access You use the {S3_CONNECTOR} to synchronize CSV and Parquet files from an S3 bucket in real time. The connector runs continuously, enabling you to leverage real-time analytics capabilities with data constantly synced from S3. This lets you take full advantage of real-time analytics capabilities without having to develop or manage custom ETL solutions between S3 and your database. @@ -20,12 +19,12 @@ You can use the {S3_CONNECTOR} to synchronize your existing and new data. Here's * Sync data from an S3 bucket: - Use glob patterns to identify the objects to sync. - Watch an S3 bucket for new files and import them automatically. It runs on a configurable schedule and tracks processed files. - - **Important**: The connector processes files in [lexicographical order](https://en.wikipedia.org/wiki/Lexicographic_order). It uses the name of the last file processed as a marker and fetches only files later in the alphabet in subsequent queries. Files added with names earlier in the alphabet than the marker are skipped and never synced. For example, if you add the file Bob when the marker is at Elephant, Bob is never processed. + - **Important**: The connector processes files in [lexicographical order][lexicographical-order]. It uses the name of the last file processed as a marker and fetches only files later in the alphabet in subsequent queries. Files added with names earlier in the alphabet than the marker are skipped and never synced. For example, if you add the file Bob when the marker is at Elephant, Bob is never processed. - For large backlogs, check every minute until caught up. * Sync data from multiple file formats: - - CSV: check for compression in GZ and ZIP format, then process using [timescaledb-parallel-copy](https://github.com/timescale/timescaledb-parallel-copy). - - Parquet: convert to CSV, then process using [timescaledb-parallel-copy](https://github.com/timescale/timescaledb-parallel-copy). + - CSV: check for compression in GZ and ZIP format, then process using [timescaledb-parallel-copy][timescaledb-parallel-copy]. + - Parquet: convert to CSV, then process using [timescaledb-parallel-copy][timescaledb-parallel-copy]. * The {S3_CONNECTOR} offers an option to enable a {HYPERTABLE} during the file-to-table schema mapping setup. You can enable [columnstore](/use-timescale/compression/about-compression) and [continuous aggregates](/use-timescale/continuous-aggregates/about-continuous-aggregates) through the SQL editor once the connector has started running. 
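For reference, the post-setup SQL mentioned above might look like the following. This is a hedged sketch only: it assumes the connector created a hypertable named `public.metrics` with `time` and `value` columns (illustrative names), that `$TARGET` holds your service connection string (or paste the statements into the SQL editor), and it uses the long-standing compression and continuous-aggregate APIs:

```bash
# Illustrative follow-up: enable compression (columnstore) on the hypertable the connector created
psql "$TARGET" -c "ALTER TABLE public.metrics SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC');"
psql "$TARGET" -c "SELECT add_compression_policy('public.metrics', INTERVAL '7 days');"

# Illustrative follow-up: an hourly continuous aggregate over the synced data, refreshed by policy
psql "$TARGET" -c "CREATE MATERIALIZED VIEW metrics_hourly WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', time) AS bucket, avg(value) AS avg_value FROM public.metrics GROUP BY bucket WITH NO DATA;"
psql "$TARGET" -c "SELECT add_continuous_aggregate_policy('metrics_hourly', start_offset => INTERVAL '1 day', end_offset => INTERVAL '1 hour', schedule_interval => INTERVAL '1 hour');"
```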
@@ -37,9 +36,9 @@ The {S3_CONNECTOR} continuously imports data from an Amazon S3 bucket into your The connector currently only syncs existing and new files—it does not support updating or deleting records based on updates and deletes from S3 to tables. - -This source S3 connector is not supported for production use. If you have any questions or feedback, talk to us in [#livesync in the Tiger Community](https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88). - + + Early access: this source S3 connector is not yet supported for production use. If you have +any questions or feedback, talk to us in [#livesync in the Tiger Community][tiger-community-livesync]. ## Prerequisites @@ -50,7 +49,7 @@ This source S3 connector is not supported for production use. If you have any qu Directory buckets are not supported. - Configure access credentials for the S3 bucket. The following credentials are supported: - - [IAM Role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console). + - [IAM Role][aws-iam-role]. - Configure the trust policy. Set the: @@ -58,13 +57,13 @@ This source S3 connector is not supported for production use. If you have any qu - `ExternalID`: set to the [{PROJECT_SHORT} and {SERVICE_SHORT} ID](/integrations/find-connection-details#find-your-project-and-service-id) of the {SERVICE_SHORT} you are syncing to in the format `/`. - This is to avoid the [confused deputy problem](https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html). + This is to avoid the [confused deputy problem][aws-confused-deputy]. - Give the following access permissions: - `s3:GetObject`. - `s3:ListBucket`. - - [Public anonymous user](https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-anonymous-user). + - [Public anonymous user][aws-public-anonymous]. @@ -93,13 +92,13 @@ This source S3 connector is not supported for production use. If you have any qu To prevent system overload, the connector tracks up to 100 files for each sync iteration. Additional checks only fill empty queue slots. -## Synchronize data +## Synchronize data to your service To sync data from your S3 bucket using {CONSOLE}: 1. **Connect to your {SERVICE_SHORT}** - In [{CONSOLE}](https://console.cloud.timescale.com/dashboard/services), select the {SERVICE_SHORT} to sync live data to. + In [{CONSOLE}][console-services], select the {SERVICE_SHORT} to sync live data to. 2. **Connect the source S3 bucket to the target {SERVICE_SHORT}** @@ -109,7 +108,7 @@ To sync data from your S3 bucket using {CONSOLE}: 2. Click the pencil icon, then set the name for the new connector. 3. Set the `Bucket name` and `Authentication method`, then click `Continue`. - For instruction on creating the IAM role to connect your S3 bucket, click `Learn how`. The console connects to the source bucket. + For instruction on creating the IAM role to connect your S3 bucket, click `Learn how`. {CONSOLE} connects to the source bucket. 4. In `Define files to sync`, choose the `File type` and set the `Glob pattern`. 
Use the following patterns: @@ -126,15 +125,15 @@ To sync data from your S3 bucket using {CONSOLE}: ![S3 connector table selection](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-console-s3-connector-create-tables.png) - The console checks the file schema and, if possible, suggests the column to use as the time dimension in a + {CONSOLE} checks the file schema and, if possible, suggests the column to use as the time dimension in a {HYPERTABLE}. 1. Choose `Create a new table for your data` or `Ingest data to an existing table`. 2. Choose the `Data type` for each column, then click `Continue`. - 3. Choose the interval. This can be a minute, an hour, or use a [cron expression](https://en.wikipedia.org/wiki/Cron#Cron_expression). + 3. Choose the interval. This can be a minute, an hour, or use a [cron expression][cron-expression]. 4. Click `Start Connector`. - The console starts the connection between the source database and the target {SERVICE_SHORT} and displays the progress. + {CONSOLE} starts the connection between the source database and the target {SERVICE_SHORT} and displays the progress. 4. **Monitor synchronization** @@ -159,4 +158,13 @@ To sync data from your S3 bucket using {CONSOLE}: 3. To pause or delete the connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select an option. You must pause the connector before deleting it. -And that is it, you are using the {S3_CONNECTOR} to synchronize all the data, or specific files, from an S3 bucket in real time. \ No newline at end of file +And that is it, you are using the {S3_CONNECTOR} to synchronize all the data, or specific files, from an S3 bucket in real time. + +[lexicographical-order]: https://en.wikipedia.org/wiki/Lexicographic_order +[timescaledb-parallel-copy]: https://github.com/timescale/timescaledb-parallel-copy +[tiger-community-livesync]: https://app.slack.com/client/T4GT3N2JK/C086NU9EZ88 +[aws-iam-role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user.html#roles-creatingrole-user-console +[aws-confused-deputy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/confused-deputy.html +[aws-public-anonymous]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-anonymous-user +[console-services]: https://console.cloud.timescale.com/dashboard/services +[cron-expression]: https://en.wikipedia.org/wiki/Cron#Cron_expression \ No newline at end of file diff --git a/integrations/find-connection-details.mdx b/integrations/find-connection-details.mdx index 8b62a10..1de8673 100644 --- a/integrations/find-connection-details.mdx +++ b/integrations/find-connection-details.mdx @@ -20,7 +20,7 @@ Find the connection details based on your deployment type: -## Connect to your {SERVICE_SHORT} +## Connect to your service Retrieve the connection details for your {SERVICE_LONG}: @@ -34,7 +34,7 @@ Retrieve the connection details for your {SERVICE_LONG}: ![{SERVICE_LONG} connection details](https://assets.timescale.com/docs/images/tiger-cloud-console/tiger-service-connection-details.png) -## Find your {PROJECT_SHORT} and {SERVICE_SHORT} ID +## Find your project and service ID To retrieve the connection details for your {PROJECT_LONG} and {SERVICE_LONG}: @@ -46,7 +46,7 @@ To retrieve the connection details for your {PROJECT_LONG} and {SERVICE_LONG}: 2. **Retrieve your {SERVICE_SHORT} ID**: Click the dots next to the {SERVICE_SHORT}, then click `Copy` next to the {SERVICE_SHORT} ID. 
- ![Retrive the {SERVICE_SHORT} id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-service-id.png) + ![Retrieve the {SERVICE_SHORT} id in {CONSOLE}](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-service-id.png) ## Create client credentials @@ -57,7 +57,7 @@ such as Terraform or the [{CLOUD_LONG} REST API][api-reference]: 1. **Open the settings for your {PROJECT_SHORT}**: - In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click `{PROJECT_SHORT_CAP} settings`. + In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click {PROJECT_SHORT_CAP} settings. 2. **Create client credentials**: diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index c6674ed..58b9c1d 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -23,7 +23,7 @@ This page shows you how to integrate Amazon Sagemaker with a {SERVICE_LONG}. * Set up an [AWS Account][aws-sign-up] -## Prepare your {SERVICE_LONG} to ingest data from SageMaker +## Prepare your Tiger Cloud service to ingest data from SageMaker Create a table in {SERVICE_LONG} to store model predictions generated by SageMaker. @@ -48,7 +48,7 @@ Create a table in {SERVICE_LONG} to store model predictions generated by SageMak ``` -## Create the code to inject data into a {SERVICE_LONG} +## Create the code to inject data into a Tiger Cloud service 1. **Create a SageMaker Notebook instance** diff --git a/integrations/integrate/apache-airflow.mdx b/integrations/integrate/apache-airflow.mdx index 83c7bc6..2b730b7 100644 --- a/integrations/integrate/apache-airflow.mdx +++ b/integrations/integrate/apache-airflow.mdx @@ -44,7 +44,7 @@ To install the Python libraries required to connect to {CLOUD_LONG}: pip install apache-airflow-providers-postgres ``` -## Create a connection between Airflow and your {SERVICE_LONG} +## Create a connection between Airflow and your Tiger Cloud service In your Airflow instance, securely connect to your {SERVICE_LONG}: @@ -65,7 +65,7 @@ In your Airflow instance, securely connect to your {SERVICE_LONG}: 2. Click `+` (Add a new record), then use your [connection info][connection-info] to fill in the form. The `Connection Type` is `Postgres`. -## Exchange data between Airflow and your {SERVICE_LONG} +## Exchange data between Airflow and your Tiger Cloud service To exchange data between Airflow and your {SERVICE_LONG}: diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index 08b03a8..0059cda 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -33,7 +33,7 @@ To install and configure Apache Kafka: Keep these terminals open, you use them to test the integration later. 
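Before moving on to the sink connector below, you can sanity-check the broker from those terminals. The following is a hedged sketch: it assumes a local broker on `localhost:9092` and pre-creates the `accounts` topic that the rest of this guide writes to — adjust the paths, names, and the throwaway payload to your setup:

```bash
# Create the topic used later in this guide (topic name assumed: accounts)
bin/kafka-topics.sh --create --topic accounts --bootstrap-server localhost:9092

# Produce a throwaway JSON message with kcat (also packaged as kafkacat); the payload shape is illustrative only
echo '{"name":"test-account","city":"Berlin"}' | kcat -b localhost:9092 -t accounts -P

# Read the message back to confirm the round trip works, then exit at end of partition
kcat -b localhost:9092 -t accounts -C -e
```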
-## Install the sink connector to communicate with {CLOUD_LONG} +## Install the sink connector to communicate with Tiger Cloud To set up Kafka Connect server, plugins, drivers, and connectors: @@ -69,7 +69,7 @@ To set up Kafka Connect server, plugins, drivers, and connectors: {"version":"3.9.0","commit":"a60e31147e6b01ee","kafka_cluster_id":"J-iy4IGXTbmiALHwPZEZ-A"} ``` -## Create a table in your {SERVICE_LONG} to ingest Kafka events +## Create a table in your Tiger Cloud service to ingest Kafka events To prepare your {SERVICE_LONG} for Kafka integration: @@ -88,7 +88,7 @@ To prepare your {SERVICE_LONG} for Kafka integration: ``` -## Create the {CLOUD_LONG} sink +## Create the Tiger Cloud sink To create a {CLOUD_LONG} sink in Apache Kafka: @@ -133,7 +133,7 @@ To create a {CLOUD_LONG} sink in Apache Kafka: #["timescale-standalone-sink"] ``` -## Test the integration with {CLOUD_LONG} +## Test the integration with Tiger Cloud To test this integration, send some messages onto the `accounts` topic. You can do this using the kafkacat or kcat utility. diff --git a/integrations/integrate/aws-lambda.mdx b/integrations/integrate/aws-lambda.mdx index 1146389..31aadf8 100644 --- a/integrations/integrate/aws-lambda.mdx +++ b/integrations/integrate/aws-lambda.mdx @@ -24,7 +24,7 @@ This page shows you how to integrate AWS Lambda with {SERVICE_LONG} to process a * Install [NodeJS v18.x or later][install-nodejs]. -## Prepare your {SERVICE_LONG} to ingest data from AWS Lambda +## Prepare your Tiger Cloud service to ingest data from AWS Lambda Create a table in {SERVICE_LONG} to store time-series data. @@ -49,7 +49,7 @@ Create a table in {SERVICE_LONG} to store time-series data. ``` -## Create the code to inject data into a {SERVICE_LONG} +## Create the code to inject data into a Tiger Cloud service Write an AWS Lambda function in a Node.js project that processes and inserts time-series data into a {SERVICE_LONG}. diff --git a/integrations/integrate/aws.mdx b/integrations/integrate/aws.mdx index 783b5ba..47ac684 100644 --- a/integrations/integrate/aws.mdx +++ b/integrations/integrate/aws.mdx @@ -21,7 +21,7 @@ This page explains how to integrate your AWS infrastructure with {CLOUD_LONG} us -## Connect your AWS infrastructure to your {SERVICE_LONG}s +## Connect your AWS infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index 3e0fe27..03d8f94 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -20,7 +20,7 @@ This page explains how to integrate Azure Data Studio with {CLOUD_LONG}. * Download and install [Azure Data Studio][ms-azure-data-studio]. * Install the [{PG} extension for Azure Data Studio][postgresql-azure-data-studio]. 
-## Connect to your {SERVICE_LONG} with Azure Data Studio +## Connect to your Tiger Cloud service with Azure Data Studio To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/corporate-data-center.mdx b/integrations/integrate/corporate-data-center.mdx index c568bb9..ad46b4a 100644 --- a/integrations/integrate/corporate-data-center.mdx +++ b/integrations/integrate/corporate-data-center.mdx @@ -19,7 +19,7 @@ This page explains how to integrate your corporate on-premise infrastructure wit -## Connect your on-premise infrastructure to your {SERVICE_LONG}s +## Connect your on-premise infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index 444d212..1fc449d 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -38,7 +38,7 @@ This page explains how to: -## Monitor {SERVICE_LONG} metrics with Datadog +## Monitor Tiger Cloud service metrics with Datadog Export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to Datadog using a {CLOUD_LONG} data exporter. The available metrics include CPU usage, RAM usage, and storage. @@ -56,7 +56,7 @@ This section shows you how to attach, monitor, edit, and delete a data exporter. -## Configure Datadog Agent to collect metrics for your {SERVICE_LONG}s +## Configure Datadog Agent to collect metrics for your Tiger Cloud services Datadog Agent includes a [{PG} integration][datadog-postgres] that you use to collect detailed {PG} database metrics about your {SERVICE_LONG}s. diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index e65bc01..08f063c 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -19,14 +19,14 @@ This page explains how to integrate DBeaver with your {SERVICE_LONG}. * Download and install [DBeaver][dbeaver-downloads]. -## Connect DBeaver to your {SERVICE_LONG} +## Connect DBeaver to your Tiger Cloud service To connect to {CLOUD_LONG}: 1. **Start `DBeaver`** 1. **In the toolbar, click the plug+ icon** -1. **In `Connect to a database` search for `{TIMESCALE_DB}`** -1. **Select `{TIMESCALE_DB}`, then click `Next`** +1. **In `Connect to a database` search for {TIMESCALE_DB}** +1. **Select {TIMESCALE_DB}, then click `Next`** 1. **Configure the connection** Use your [connection details][connection-info] to add your connection settings. diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 46c598b..50de905 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -23,7 +23,7 @@ This page explains how to integrate Decodable with your {SERVICE_LONG} to enable This page uses the pipeline you create using the [Decodable Quickstart Guide][decodable-quickstart]. -## Connect Decodable to your {SERVICE_LONG} +## Connect Decodable to your Tiger Cloud service To stream data gathered in Decodable to a {SERVICE_LONG}: @@ -31,7 +31,7 @@ To stream data gathered in Decodable to a {SERVICE_LONG}: 1. Log in to your [Decodable account][decodable-app]. 2. Click `Connections`, then click `New Connection`. - 3. Select a `{PG} sink` connection type, then click `Connect`. + 3. Select a {PG} sink connection type, then click `Connect`. 4. Using your [connection details][connection-info], fill in the connection information. Leave `schema` and `JDBC options` empty. 
diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index b91baa0..f29b778 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -22,15 +22,15 @@ This page shows you how to inject data from data sources managed by Fivetran int * Sign up for [Fivetran][sign-up-fivetran] -## Set your {SERVICE_LONG} as a destination in Fivetran +## Set your Tiger Cloud service as a destination in Fivetran To be able to inject data into your {SERVICE_LONG}, set it as a destination in Fivetran: ![Fivetran data destination](https://assets.timescale.com/docs/images/integrations-fivetran-destination-timescal-cloud.png) 1. In [Fivetran Dashboard > Destinations][fivetran-dashboard-destinations], click `Add destination`. -2. Search for the `{PG}` connector and click `Select`. Add the destination name and click `Add`. -3. In the `{PG}` setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. +2. Search for the {PG} connector and click `Select`. Add the destination name and click `Add`. +3. In the {PG} setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. Fivetran validates the connection settings and sets up any security configurations. 4. Click `View Destination`. @@ -56,7 +56,7 @@ In a real world scenario, you can select any of the over 600 connectors availabl Fivetran creates the log schema in your {SERVICE_SHORT} and syncs the data to your {SERVICE_SHORT}. -## View Fivetran data in your {SERVICE_LONG} +## View Fivetran data in your Tiger Cloud service To see data injected by Fivetran into your {SERVICE_LONG}: diff --git a/integrations/integrate/google-cloud.mdx b/integrations/integrate/google-cloud.mdx index 60c5060..44b9426 100644 --- a/integrations/integrate/google-cloud.mdx +++ b/integrations/integrate/google-cloud.mdx @@ -21,7 +21,7 @@ This page explains how to integrate your Google Cloud infrastructure with {CLOUD -## Connect your Google Cloud infrastructure to your {SERVICE_LONG}s +## Connect your Google Cloud infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/kubernetes.mdx b/integrations/integrate/kubernetes.mdx index 1632127..44f97a0 100644 --- a/integrations/integrate/kubernetes.mdx +++ b/integrations/integrate/kubernetes.mdx @@ -20,7 +20,7 @@ To follow the steps on this page: -## Integrate {TIMESCALE_DB} in a Kubernetes cluster +## Integrate TimescaleDB in a Kubernetes cluster diff --git a/integrations/integrate/microsoft-azure.mdx b/integrations/integrate/microsoft-azure.mdx index d87ea83..5e4bed5 100644 --- a/integrations/integrate/microsoft-azure.mdx +++ b/integrations/integrate/microsoft-azure.mdx @@ -21,7 +21,7 @@ This page explains how to integrate your Microsoft Azure infrastructure with {CL -## Connect your Microsoft Azure infrastructure to your {SERVICE_LONG}s +## Connect your Microsoft Azure infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index ea77448..30c1571 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -22,7 +22,7 @@ This page explains how to integrate pgAdmin with your {SERVICE_LONG}. - [Download][download-pgadmin] and install pgAdmin. 
-## Connect pgAdmin to your {SERVICE_LONG} +## Connect pgAdmin to your Tiger Cloud service To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/power-bi.mdx b/integrations/integrate/power-bi.mdx index 4940498..9e8aed7 100644 --- a/integrations/integrate/power-bi.mdx +++ b/integrations/integrate/power-bi.mdx @@ -20,7 +20,7 @@ This page explains how to integrate Power BI with {CLOUD_LONG} using the {PG} OD - Download [Power BI Desktop][power-bi-install] on your Microsoft Windows machine. - Install the [{PG} ODBC driver][postgresql-odbc-driver]. -## Add your {SERVICE_LONG} as an ODBC data source +## Add your Tiger Cloud service as an ODBC data source Use the {PG} ODBC driver to connect Power BI to {CLOUD_LONG}. @@ -35,7 +35,7 @@ Use the {PG} ODBC driver to connect Power BI to {CLOUD_LONG}. 3. Use your [connection details][connection-info] to configure the data source. 4. Click `Test` to ensure the connection works, then click `Save`. -## Import the data from your your {SERVICE_LONG} into Power BI +## Import the data from your your Tiger Cloud service into Power BI Establish a connection and import data from your {SERVICE_LONG} into Power BI: diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx index 7223a7c..695d16e 100644 --- a/integrations/integrate/psql.mdx +++ b/integrations/integrate/psql.mdx @@ -135,7 +135,7 @@ Install `psql` on Debian and Ubuntu with the `apt` package manager. -## Connect to your {SERVICE_SHORT} +## Connect to your service To use `psql` to connect to your {SERVICE_SHORT}, you need the connection details. See [Find your connection details][connection-info]. diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx index 70e074a..999f0db 100644 --- a/integrations/integrate/qstudio.mdx +++ b/integrations/integrate/qstudio.mdx @@ -19,7 +19,7 @@ This page explains how to integrate qStudio with {CLOUD_LONG}. * [Download][qstudio-downloads] and install qStudio. -## Connect qStudio to your {SERVICE_LONG} +## Connect qStudio to your Tiger Cloud service To connect to {CLOUD_LONG}: diff --git a/integrations/integrate/supabase.mdx b/integrations/integrate/supabase.mdx index de7a096..b6d26b2 100644 --- a/integrations/integrate/supabase.mdx +++ b/integrations/integrate/supabase.mdx @@ -20,7 +20,7 @@ against a {SERVICE_LONG} through Supabase using a foreign data wrapper (fdw) to - Create a [Supabase project][supabase-new-project] -## Set up your {SERVICE_LONG} +## Set up your Tiger Cloud service To set up a {SERVICE_LONG} optimized for analytics to receive data from Supabase: diff --git a/integrations/integrate/tableau.mdx b/integrations/integrate/tableau.mdx index c999e37..65ad2c4 100644 --- a/integrations/integrate/tableau.mdx +++ b/integrations/integrate/tableau.mdx @@ -18,7 +18,7 @@ data stored in {CLOUD_LONG}. * Install [Tableau Server][tableau-server] or sign up for [Tableau Cloud][tableau-cloud]. 
-## Add your {SERVICE_LONG} as a virtual connection +## Add your Tiger Cloud service as a virtual connection To connect the data in your {SERVICE_LONG} to Tableau: diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx index ae97b17..f8b9de7 100644 --- a/integrations/integrate/telegraf.mdx +++ b/integrations/integrate/telegraf.mdx @@ -28,7 +28,7 @@ To view metrics gathered by Telegraf and stored in a [{HYPERTABLE}][about-hypert - [Install Telegraf][install-telegraf] -## Link Telegraf to your {SERVICE_SHORT} +## Link Telegraf to your service To create a Telegraf configuration that exports data to a {HYPERTABLE} in your {SERVICE_SHORT}: diff --git a/integrations/integrate/terraform.mdx b/integrations/integrate/terraform.mdx index 70f7792..8d1a750 100644 --- a/integrations/integrate/terraform.mdx +++ b/integrations/integrate/terraform.mdx @@ -49,7 +49,7 @@ You use the [{COMPANY} Terraform provider][terraform-provider] to manage {SERVIC } } - # Authenticate using client credentials generated in Tiger Cloud Console. + # Authenticate using client credentials generated in Tiger Console. # When required, these credentials will change to a short-lived JWT to do the calls. provider "timescale" { project_id = var.ts_project_id diff --git a/integrations/integrations.mdx b/integrations/integrations.mdx index 12308ca..e417173 100644 --- a/integrations/integrations.mdx +++ b/integrations/integrations.mdx @@ -687,7 +687,7 @@ import { SERVICE_LONG, PG, COMPANY, CLOUD_LONG } from '/snippets/vars.mdx'; -## Secure connectivity to {CLOUD_LONG} +## Secure connectivity to Tiger Cloud -## Optimize time-series data in {HYPERTABLE}s with {HYPERCORE} +## Optimize time-series data in hypertables with hypercore Time-series data represents the way a system, process, or behavior changes over time. {HYPERTABLE}_CAPs are {PG} tables that help you improve insert and query performance by automatically partitioning your data by time. Each {HYPERTABLE} diff --git a/manage-data/timescaledb/understand/timescaledb-architecture.mdx b/manage-data/timescaledb/understand/timescaledb-architecture.mdx index 8cb1afa..5a30375 100644 --- a/manage-data/timescaledb/understand/timescaledb-architecture.mdx +++ b/manage-data/timescaledb/understand/timescaledb-architecture.mdx @@ -50,7 +50,7 @@ To achieve this, real-time analytics systems must meet several key requirements: * **Query flexibility** provides full SQL support, allowing for complex queries with joins, filters, aggregations, and analytical functions. -### {CLOUD_LONG}: real-time analytics from {PG} +### Tiger Cloud: real-time analytics from Postgres {CLOUD_LONG} is a high-performance database that brings real-time analytics to applications. It combines fast queries, high ingest performance, and full SQL support—all while ensuring scalability and reliability. {CLOUD_LONG} extends {PG} with the {TIMESCALE_DB} extension. It enables sub-second queries on vast amounts of incoming data while providing optimizations designed for continuously updating datasets. @@ -249,7 +249,7 @@ Min/max metadata allows queries filtering on correlated dimensions (e.g., `order -#### {PG} indexes (row and columnar) +#### Postgres indexes (row and columnar) Unlike many databases, {TIMESCALE_DB} supports standard {PG} indexes on columnstore data (B-tree and hash currently, when using the hypercore table access method), allowing queries to efficiently locate specific values within both row-based and compressed columnar storage. 
These indexes enable fast lookups, range queries, and filtering operations that further reduce unnecessary data scans. diff --git a/self-host/timescaledb/get-started/get-started-with-timescaledb.mdx b/self-host/timescaledb/get-started/get-started-with-timescaledb.mdx index 4b8ced8..089fdef 100644 --- a/self-host/timescaledb/get-started/get-started-with-timescaledb.mdx +++ b/self-host/timescaledb/get-started/get-started-with-timescaledb.mdx @@ -31,7 +31,7 @@ ingest and query data faster while keeping the costs low. -## Optimize time-series data in {HYPERTABLE}s with {HYPERCORE} +## Optimize time-series data in hypertables with hypercore Time-series data represents the way a system, process, or behavior changes over time. {HYPERTABLE}_CAPs are {PG} tables that help you improve insert and query performance by automatically partitioning your data by time. Each {HYPERTABLE} diff --git a/self-host/timescaledb/understand/timescaledb-architecture.mdx b/self-host/timescaledb/understand/timescaledb-architecture.mdx index 8cb1afa..5a30375 100644 --- a/self-host/timescaledb/understand/timescaledb-architecture.mdx +++ b/self-host/timescaledb/understand/timescaledb-architecture.mdx @@ -50,7 +50,7 @@ To achieve this, real-time analytics systems must meet several key requirements: * **Query flexibility** provides full SQL support, allowing for complex queries with joins, filters, aggregations, and analytical functions. -### {CLOUD_LONG}: real-time analytics from {PG} +### Tiger Cloud: real-time analytics from Postgres {CLOUD_LONG} is a high-performance database that brings real-time analytics to applications. It combines fast queries, high ingest performance, and full SQL support—all while ensuring scalability and reliability. {CLOUD_LONG} extends {PG} with the {TIMESCALE_DB} extension. It enables sub-second queries on vast amounts of incoming data while providing optimizations designed for continuously updating datasets. @@ -249,7 +249,7 @@ Min/max metadata allows queries filtering on correlated dimensions (e.g., `order -#### {PG} indexes (row and columnar) +#### Postgres indexes (row and columnar) Unlike many databases, {TIMESCALE_DB} supports standard {PG} indexes on columnstore data (B-tree and hash currently, when using the hypercore table access method), allowing queries to efficiently locate specific values within both row-based and compressed columnar storage. These indexes enable fast lookups, range queries, and filtering operations that further reduce unnecessary data scans. diff --git a/snippets/cloud/_cloud-create-service.mdx b/snippets/cloud/_cloud-create-service.mdx index 022b7fc..4449346 100644 --- a/snippets/cloud/_cloud-create-service.mdx +++ b/snippets/cloud/_cloud-create-service.mdx @@ -16,7 +16,7 @@ To start using {CLOUD_LONG} for your data: -## Create a {SERVICE_LONG} +## Create a Tiger Cloud service Now that you have an active {ACCOUNT_LONG}, you create and manage your {SERVICE_SHORT}s in {CONSOLE}. When you create a {SERVICE_SHORT}, you effectively create a blank {PG} database with additional {CLOUD_LONG} features available under your {PRICING_PLAN}. You then add or migrate your data into this database. 
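Tying the hypertable discussion above to a freshly created service: once the blank database exists, converting a time-series table into a hypertable takes two statements. This is a minimal sketch with illustrative names — it assumes a `conditions` table keyed on a `time` column and that `$TARGET` holds your service connection string:

```bash
# Illustrative only: create a regular Postgres table for time-series data
psql "$TARGET" -c "CREATE TABLE public.conditions (time timestamptz NOT NULL, device_id text, temperature double precision);"

# Convert it to a hypertable automatically partitioned by time, as described above
psql "$TARGET" -c "SELECT create_hypertable('public.conditions', by_range('time', '1 day'::interval));"
```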
@@ -40,7 +40,7 @@ Now that you have an active {ACCOUNT_LONG}, you create and manage your {SERVICE_ -## Connect to your {SERVICE_SHORT} +## Connect to your service To run queries and perform other operations, connect to your {SERVICE_SHORT}: diff --git a/snippets/cloud/_cloud-installation.mdx b/snippets/cloud/_cloud-installation.mdx index c3ea644..88154e1 100644 --- a/snippets/cloud/_cloud-installation.mdx +++ b/snippets/cloud/_cloud-installation.mdx @@ -1,6 +1,6 @@ import { ACCOUNT_LONG, SERVICE_SHORT, CONSOLE, CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; -## Create a {ACCOUNT_LONG} +## Create a Tiger Cloud account You create a {ACCOUNT_LONG} to manage your {SERVICE_SHORT}s and data in a centralized and efficient manner in {CONSOLE}. From there, you can create and delete {SERVICE_SHORT}s, run queries, manage access and billing, integrate other services, contact support, and more. diff --git a/snippets/cloud/_mst-create-service.mdx b/snippets/cloud/_mst-create-service.mdx index a0826e1..1fba5af 100644 --- a/snippets/cloud/_mst-create-service.mdx +++ b/snippets/cloud/_mst-create-service.mdx @@ -14,7 +14,7 @@ cloud provider, which you can install your database on. - ### Creating your first {MST_SERVICE_SHORT} + ### Creating your first MST service 1. [Sign in][mst-login] to your {MST_CONSOLE_LONG}. 1. Click `Create service` and choose `TimescaleDB`, and update your preferences: @@ -39,7 +39,7 @@ cloud provider, which you can install your database on. -## Connect to your {MST_SERVICE_SHORT} from the command prompt +## Connect to your MST service from the command prompt When you have a {MST_SERVICE_SHORT} up and running, you can connect to it from your local system using the `psql` command-line utility. This is the same tool you might @@ -68,7 +68,7 @@ check out the [installing psql][install-psql] section. ``` -## Check that you have the {TIMESCALE_DB} extension +## Check that you have the TimescaleDB extension {TIMESCALE_DB} is provided as an extension to your {PG} database, and it is enabled by default when you create a new service on {MST_LONG} You can check that the {TIMESCALE_DB} extension is installed by using diff --git a/snippets/coding/_start-coding-golang.mdx b/snippets/coding/_start-coding-golang.mdx index 219343b..f1e0359 100644 --- a/snippets/coding/_start-coding-golang.mdx +++ b/snippets/coding/_start-coding-golang.mdx @@ -8,7 +8,7 @@ import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx - Install [Go][golang-install]. - Install the [PGX driver for Go][pgx-driver-github]. -## Connect to your {SERVICE_LONG} +## Connect to your Tiger Cloud service In this section, you create a connection to {CLOUD_LONG} using the PGX driver. PGX is a toolkit designed to help Go developers work directly with {PG}. diff --git a/snippets/coding/_start-coding-java.mdx b/snippets/coding/_start-coding-java.mdx index b2327e7..0e66785 100644 --- a/snippets/coding/_start-coding-java.mdx +++ b/snippets/coding/_start-coding-java.mdx @@ -11,7 +11,7 @@ import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx All code in this quick start is for Java 16 and later. If you are working with older JDK versions, use legacy coding techniques. -## Connect to your {SERVICE_LONG} +## Connect to your Tiger Cloud service In this section, you create a connection to your {SERVICE_SHORT} using an application in a single file. 
You can use any of your favorite build tools, including `gradle` @@ -339,7 +339,7 @@ This section covers how to execute queries against your database. -## Execute queries on {TIMESCALE_DB} +## Execute queries on TimescaleDB 1. Define the SQL query you'd like to run on the database. This example combines time-series and relational data. It returns the average values for diff --git a/snippets/coding/_start-coding-node.mdx b/snippets/coding/_start-coding-node.mdx index 2ea4125..1fe0d17 100644 --- a/snippets/coding/_start-coding-node.mdx +++ b/snippets/coding/_start-coding-node.mdx @@ -8,7 +8,7 @@ import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx * Install [Node.js][node-install]. * Install the Node.js package manager [npm][npm-install]. -## Connect to {TIMESCALE_DB} +## Connect to TimescaleDB In this section, you create a connection to {TIMESCALE_DB} with a common Node.js ORM (object relational mapper) called [Sequelize][sequelize-info]. diff --git a/snippets/coding/_start-coding-ruby.mdx b/snippets/coding/_start-coding-ruby.mdx index 78c0d43..d40f1e7 100644 --- a/snippets/coding/_start-coding-ruby.mdx +++ b/snippets/coding/_start-coding-ruby.mdx @@ -320,7 +320,7 @@ The {TIMESCALE_DB} gem provides several convenient scopes for querying your time puts "Standard Deviation: #{stats.stddev}" ``` -### {TIMESCALE_DB} features +### TimescaleDB features The {TIMESCALE_DB} gem provides utility methods to access hypertable and chunk information. Every model that uses the `acts_as_hypertable` method has access to these methods. @@ -401,8 +401,8 @@ The `continuous_aggregates` method generates a class for each continuous aggrega Now that you have integrated the ruby gem into your app: -* Learn more about the [{TIMESCALE_DB} gem](https://github.com/timescale/timescaledb-ruby). -* Check out the [official docs](https://timescale.github.io/timescaledb-ruby/). +* Learn more about the [{TIMESCALE_DB} gem][timescaledb-ruby-gem]. +* Check out the [official docs][timescaledb-ruby-docs]. * Follow the [LTTB][LTTB], [Open AI long-term storage][open-ai-tutorial], and [candlesticks][candlesticks] tutorials. [connect]: #connect-to-timescaledb @@ -419,6 +419,8 @@ Now that you have integrated the ruby gem into your app: [about-hypertables]: /use-timescale/hypertables/ [rails-compostite-primary-keys]: https://guides.rubyonrails.org/active_record_composite_primary_keys.html [ruby-blog-post]: https://www.timescale.com/blog/building-a-better-ruby-orm-for-time-series-and-analytics +[timescaledb-ruby-gem]: https://github.com/timescale/timescaledb-ruby +[timescaledb-ruby-docs]: https://timescale.github.io/timescaledb-ruby/ [LTTB]: https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/ [open-ai-tutorial]: https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/ [candlesticks]: https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/ diff --git a/snippets/integrations/_configure-source-database-awsrds.mdx b/snippets/integrations/_configure-source-database-awsrds.mdx new file mode 100644 index 0000000..792ac27 --- /dev/null +++ b/snippets/integrations/_configure-source-database-awsrds.mdx @@ -0,0 +1,85 @@ +import EnableReplication from '/snippets/integrations/_enable-replication.mdx'; +import { PG, PG_CONNECTOR } from '/snippets/vars.mdx'; + +Updating parameters on a {PG} instance will cause an outage. Choose a time that will cause the least issues to tune this database. + +1. **Tune the Write Ahead Log (WAL) on the RDS/Aurora {PG} source database** + + 1. 
In [https://console.aws.amazon.com/rds/home#databases:][aws-databases], + select the RDS instance to migrate. + + 2. Click `Configuration`, scroll down and note the `DB instance parameter group`, then click `Parameter Groups` + + ![Create security rule to enable RDS EC2 connection](https://assets.timescale.com/docs/images/migrate/awsrds-parameter-groups.png) + + 3. Click `Create parameter group`, fill in the form with the following values, then click `Create`. + - **Parameter group name** - whatever suits your fancy. + - **Description** - knock yourself out with this one. + - **Engine type** - `PostgreSQL` + - **Parameter group family** - the same as `DB instance parameter group` in your `Configuration`. + 4. In `Parameter groups`, select the parameter group you created, then click `Edit`. + 5. Update the following parameters, then click `Save changes`. + - `rds.logical_replication` set to `1`: record the information needed for logical decoding. + - `wal_sender_timeout` set to `0`: disable the timeout for the sender process. + + 6. In RDS, navigate back to your [databases][aws-databases], select the RDS instance to migrate, and click `Modify`. + + 7. Scroll down to `Database options`, select your new parameter group, and click `Continue`. + 8. Click `Apply immediately` or choose a maintenance window, then click `Modify DB instance`. + + Changing parameters will cause an outage. Wait for the database instance to reboot before continuing. + 9. Verify that the settings are live in your database. + +2. **Create a user for the {PG_CONNECTOR} and assign permissions** + + 1. Create ``: + + ```sql + psql $SOURCE -c "CREATE USER PASSWORD ''" + ``` + + You can use an existing user. However, you must ensure that the user has the following permissions. + + 2. Grant permissions to create a replication slot: + + ```sql + psql $SOURCE -c "GRANT rds_replication TO " + ``` + + 3. Grant permissions to create a publication: + + ```sql + psql $SOURCE -c "GRANT CREATE ON DATABASE TO " + ``` + + 4. Assign the user permissions on the source database: + + ```sql + psql $SOURCE <; + GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; + EOF + ``` + + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: + ```sql + psql $SOURCE < TO ; + GRANT SELECT ON ALL TABLES IN SCHEMA TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; + EOF + ``` + + 5. On each table you want to sync, make `` the owner: + + ```sql + psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' + ``` + You can skip this step if the replicating user is already the owner of the tables. + +3. **Enable replication `DELETE` and `UPDATE` operations** + + + +[aws-databases]: https://console.aws.amazon.com/rds/home#databases: diff --git a/snippets/integrations/_configure-source-database-postgres.mdx b/snippets/integrations/_configure-source-database-postgres.mdx new file mode 100644 index 0000000..c40f168 --- /dev/null +++ b/snippets/integrations/_configure-source-database-postgres.mdx @@ -0,0 +1,73 @@ +import EnableReplication from '/snippets/integrations/_enable-replication.mdx'; +import { PG } from '/snippets/vars.mdx'; + +1. **Tune the Write Ahead Log (WAL) on the {PG} source database** + + ```sql + psql $SOURCE <`: + + ```sql + psql $SOURCE -c "CREATE USER PASSWORD ''" + ``` + + You can use an existing user. However, you must ensure that the user has the following permissions. + + 2. Grant permissions to create a replication slot: + + ```sql + psql $SOURCE -c "ALTER ROLE REPLICATION" + ``` + + 3. Grant permissions to create a publication: + + ```sql + psql $SOURCE -c "GRANT CREATE ON DATABASE TO " + ``` + + 4. Assign the user permissions on the source database: + + ```sql + psql $SOURCE <; + GRANT SELECT ON ALL TABLES IN SCHEMA "public" TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA "public" GRANT SELECT ON TABLES TO ; + EOF + ``` + + If the tables you are syncing are not in the `public` schema, grant the user permissions for each schema you are syncing: + ```sql + psql $SOURCE < TO ; + GRANT SELECT ON ALL TABLES IN SCHEMA TO ; + ALTER DEFAULT PRIVILEGES IN SCHEMA GRANT SELECT ON TABLES TO ; + EOF + ``` + + 5. On each table you want to sync, make `` the owner: + + ```sql + psql $SOURCE -c 'ALTER TABLE
OWNER TO ;' + ``` + You can skip this step if the replicating user is already the owner of the tables. + +3. **Enable replication `DELETE` and `UPDATE` operations** + + + +[wal-level]: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-WAL-LEVEL +[max-wal-senders]: https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-MAX-WAL-SENDERS +[wal-sender-timeout]: https://www.postgresql.org/docs/current/runtime-config-replication.html#GUC-WAL-SENDER-TIMEOUT diff --git a/snippets/integrations/_enable-replication.mdx b/snippets/integrations/_enable-replication.mdx new file mode 100644 index 0000000..baf3110 --- /dev/null +++ b/snippets/integrations/_enable-replication.mdx @@ -0,0 +1,25 @@ +import { PG } from '/snippets/vars.mdx'; + +Replica identity assists data replication by identifying the rows being modified. Your options are that +each table and hypertable in the source database should either have: + +- **A primary key**: data replication defaults to the primary key of the table being replicated. + Nothing to do. +- **A viable unique index**: each table has a unique, non-partial, non-deferrable index that includes only columns + marked as `NOT NULL`. If a UNIQUE index does not exist, create one to assist the migration. You can delete if after + migration. + + For each table, set `REPLICA IDENTITY` to the viable unique index: + + ```shell + psql -X -d $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY USING INDEX ' + ``` +- **No primary key or viable unique index**: use brute force. + + For each table, set `REPLICA IDENTITY` to `FULL`: + ```shell + psql -X -d $SOURCE -c 'ALTER TABLE
REPLICA IDENTITY FULL' + ``` + For each `UPDATE` or `DELETE` statement, {PG} reads the whole table to find all matching rows. This results + in significantly slower replication. If you are expecting a large number of `UPDATE` or `DELETE` operations on the table, + best practice is to not use `FULL`. diff --git a/snippets/integrations/_grafana-connect.mdx b/snippets/integrations/_grafana-connect.mdx index aec3a8b..104251d 100644 --- a/snippets/integrations/_grafana-connect.mdx +++ b/snippets/integrations/_grafana-connect.mdx @@ -1,6 +1,6 @@ import { CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; -## Connect Grafana to {CLOUD_LONG} +## Connect Grafana to Tiger Cloud To visualize the results of your queries, enable Grafana to read the data in your {SERVICE_SHORT}: diff --git a/snippets/integrations/_livesync-console-pg.mdx b/snippets/integrations/_livesync-console-pg.mdx new file mode 100644 index 0000000..650e7a7 --- /dev/null +++ b/snippets/integrations/_livesync-console-pg.mdx @@ -0,0 +1,113 @@ +import PrereqCloud from '/snippets/prerequisites/_prereqs-cloud-only.mdx'; +import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; +import ConfigureSourceDatabasePostgres from '/snippets/integrations/_configure-source-database-postgres.mdx'; +import ConfigureSourceDatabaseAWSRDS from '/snippets/integrations/_configure-source-database-awsrds.mdx'; +import { SERVICE_SHORT, SERVICE_LONG, PG, PG_CONNECTOR, CONSOLE, HYPERTABLE } from '/snippets/vars.mdx'; + +## Prerequisites + + + +- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. + +- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. + + The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, + first create the extension on the target {SERVICE_LONG} before syncing the table. + +## Limitations + +* The source {PG} instance must be accessible from the Internet. + + Services hosted behind a firewall or VPC are not supported. This functionality is on the roadmap. + +* Indexes, including the primary key and unique constraints, are not migrated to the target. + + We recommend that, depending on your query patterns, you create only the necessary indexes on the target. + + + +## Set your connection string + +This variable holds the connection information for the source database. In the terminal on your migration machine, +set the following: + +```bash +export SOURCE="postgres://:@:/" +``` + + +Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. This tool +requires a direct connection to the database to function properly. + + +## Tune your source database + + + + + + + + + + + + + + + + + +## Synchronize data to your Tiger Cloud service + +To sync data from your {PG} database using {CONSOLE}: + +1. **Connect to your {SERVICE_SHORT}** + + In [{CONSOLE}][console-services], select the {SERVICE_SHORT} to sync live data to. + +2. **Connect the source database and the target {SERVICE_SHORT}** + + ![Postgres connector wizard](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-wizard-tiger-console.png) + + 1. Click `Connectors` > `PostgreSQL`. + 2. Set the name for the new connector by clicking the pencil icon. + 3. Check the boxes for `Set wal_level to logical` and `Update your credentials`, then click `Continue`. + 4. Enter your database credentials or a {PG} connection string, then click `Connect to database`. 
+ This is the connection string for ``. The console connects to the source database and retrieves the schema information. + +3. **Optimize the data to synchronize in hypertables** + + ![Postgres connector start](https://assets.timescale.com/docs/images/tiger-on-azure/pg-connector-start-tiger-console.png) + + 1. In the `Select table` dropdown, select the tables to sync. + 2. Click `Select tables +`. + + The console checks the table schema and, if possible, suggests the column to use as the time dimension in a {HYPERTABLE}. + 3. Click `Create Connector`. + + The console starts the connector between the source database and the target {SERVICE_SHORT} and displays the progress. + +4. **Monitor synchronization** + + ![Connectors overview](https://assets.timescale.com/docs/images/tiger-on-azure/tiger-console-connector-overview.png) + + 1. To view the amount of data replicated, click `Connectors`. The diagram in `Connector data flow` gives you an overview of the connectors you have created, their status, and how much data has been replicated. + + 2. To review the syncing progress for each table, click `Connectors` > `Source connectors`, then select the name of your connector in the table. + +5. **Manage the connector** + + ![Edit a Postgres connector](https://assets.timescale.com/docs/images/tiger-on-azure/edit-pg-connector-tiger-console.png) + + 1. To edit the connector, click `Connectors` > `Source connectors`, then select the name of your connector in the table. You can rename the connector, delete or add new tables for syncing. + + 2. To pause a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Pause`. + + 3. To delete a connector, click `Connectors` > `Source connectors`, then open the three-dot menu on the right and select `Delete`. You must pause the connector before deleting it. + +And that is it, you are using the connector to synchronize all the data, or specific tables, from a {PG} database +instance in real time. + +[console-services]: https://console.cloud.timescale.com/dashboard/services diff --git a/snippets/integrations/_livesync-limitations.mdx b/snippets/integrations/_livesync-limitations.mdx index aa022fc..5ae3786 100644 --- a/snippets/integrations/_livesync-limitations.mdx +++ b/snippets/integrations/_livesync-limitations.mdx @@ -39,4 +39,4 @@ import { PG, TIMESCALE_DB, SERVICE_LONG, PG_CONNECTOR } from '/snippets/vars.mdx If the continuous aggregate exists in the source database, best practice is to add it to the {PG} connector publication. If it only exists on the target database, manually refresh the continuous aggregate using the `force` - option of refresh_continuous_aggregate. \ No newline at end of file + option of `refresh_continuous_aggregate`. 
\ No newline at end of file diff --git a/snippets/integrations/_livesync-terminal-pg.mdx b/snippets/integrations/_livesync-terminal-pg.mdx new file mode 100644 index 0000000..0b1b301 --- /dev/null +++ b/snippets/integrations/_livesync-terminal-pg.mdx @@ -0,0 +1,313 @@ +import MigratePrerequisites from '/snippets/prerequisites/_migrate-prerequisites.mdx'; +import LivesyncLimitations from '/snippets/integrations/_livesync-limitations.mdx'; +import SetupConnectionStrings from '/snippets/integrations/_setup-connection-strings-livesync.mdx'; +import ConfigureSourceDatabasePostgres from '/snippets/integrations/_configure-source-database-postgres.mdx'; +import TuneSourceDatabaseAWSRDS from '/snippets/integrations/_tune-source-database-awsrds-migration.mdx'; +import { SERVICE_SHORT, SERVICE_LONG, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; + +## Prerequisites + + + +- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. + + The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, + first create the extension on the target {SERVICE_LONG} before syncing the table. + +- [Install Docker][install-docker] on your sync machine. + + For a better experience, use a 4 CPU/16GB EC2 instance or greater to run the {PG_CONNECTOR}. + +- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. + + This includes `psql`, `pg_dump`, `pg_dumpall`, and `vacuumdb` commands. + +## Limitations + +- The schema is not migrated by the connector, you use `pg_dump`/`pg_restore` to migrate it. + + + +## Set your connection strings + +The `` in the `SOURCE` connection must have the replication role granted in order to create a replication slot. + + + +## Tune your source database + + + + + + + + + + + + + + + + + +## Migrate the table schema + +Use `pg_dump` to: + +1. **Download the schema from the source database** + + ```bash + pg_dump $SOURCE \ + --no-privileges \ + --no-owner \ + --no-publications \ + --no-subscriptions \ + --no-table-access-method \ + --no-tablespaces \ + --schema-only \ + --file=schema.sql + ``` + +2. **Apply the schema on the target {SERVICE_SHORT}** + + ```bash + psql $TARGET -f schema.sql + ``` + +## Convert partitions and tables with time-series data into hypertables + +For efficient querying and analysis, you can convert tables which contain time-series or +events data, and tables that are already partitioned using {PG} declarative partition into +hypertables. + +1. **Convert tables to hypertables** + + Run the following on each table in the target to convert it to a hypertable: + + ```bash + psql -X -d $TARGET -c "SELECT public.create_hypertable('
', by_range('', ''::interval));" + ``` + + For example, to convert the *metrics* table into a hypertable with *time* as a partition column and + *1 day* as a partition interval: + + ```bash + psql -X -d $TARGET -c "SELECT public.create_hypertable('public.metrics', by_range('time', '1 day'::interval));" + ``` + +2. **Convert {PG} partitions to hypertables** + + Rename the partition and create a new regular table with the same name as the partitioned table, then + convert to a hypertable: + + ```bash + psql $TARGET -f - <<'EOF' + BEGIN; + ALTER TABLE public.events RENAME TO events_part; + CREATE TABLE public.events(LIKE public.events_part INCLUDING ALL); + SELECT create_hypertable('public.events', by_range('time', '1 day'::interval)); + COMMIT; + EOF + ``` + +## Specify the tables to synchronize + +After the schema is migrated, you [`CREATE PUBLICATION`][create-publication] on the source database that +specifies the tables to synchronize. + +1. **Create a publication that specifies the table to synchronize** + + A `PUBLICATION` enables you to synchronize some or all the tables in the schema or database. + + ```sql + CREATE PUBLICATION FOR TABLE , ; + ``` + + To add tables after to an existing publication, use [ALTER PUBLICATION][alter-publication] + + ```sql + ALTER PUBLICATION ADD TABLE ; + ``` + +2. **Publish the {PG} declarative partitioned table** + + ```sql + ALTER PUBLICATION SET(publish_via_partition_root=true); + ``` + +3. **Stop syncing a table in the `PUBLICATION`, use `DROP TABLE`** + + ```sql + ALTER PUBLICATION DROP TABLE ; + ``` + +## Synchronize your data + +You use the connector docker image to synchronize changes in real time from a {PG} database +instance: + +1. **Start the connector** + + As you run the connector continuously, best practice is to run it as a Docker daemon. + + ```bash + docker run -d --rm --name livesync timescale/live-sync:v0.1.25 run \ + --publication --subscription \ + --source $SOURCE --target $TARGET --table-map + ``` + + `--publication`: The name of the publication as you created in the previous step. To use multiple publications, repeat the `--publication` flag. + + `--subscription`: The name that identifies the subscription on the target. + + `--source`: The connection string to the source {PG} database. + + `--target`: The connection string to the target. + + `--table-map`: (Optional) A JSON string that maps source tables to target tables. If not provided, the source and target table names are assumed to be the same. + For example, to map the source table `metrics` to the target table `metrics_data`: + + ``` + --table-map '{"source": {"schema": "public", "table": "metrics"}, "target": {"schema": "public", "table": "metrics_data"}}' + ``` + To map only the schema, use: + + ``` + --table-map '{"source": {"schema": "public"}, "target": {"schema": "analytics"}}' + ``` + This flag can be repeated for multiple table mappings. + +2. **Capture logs** + + Once the connector is running as a docker daemon, you can also capture the logs: + ```bash + docker logs -f livesync + ``` + +3. 
**View the progress of tables being synchronized** + + List the tables being synchronized by the connector using the `_ts_live_sync.subscription_rel` table in the target: + + ```bash + psql $TARGET -c "SELECT * FROM _ts_live_sync.subscription_rel" + ``` + + You see something like the following: + + | subname | pubname | schemaname | tablename | rrelid | state | lsn | updated_at | last_error | created_at | rows_copied | approximate_rows | bytes_copied | approximate_size | target_schema | target_table | + |----------|---------|-------------|-----------|--------|-------|------------|-------------------------------|-------------------------------------------------------------------------------|-------------------------------|-------------|------------------|--------------|------------------|---------------|-------------| + |livesync | analytics | public | metrics | 20856 | r | 6/1A8CBA48 | 2025-06-24 06:16:21.434898+00 | | 2025-06-24 06:03:58.172946+00 | 18225440 | 18225440 | 1387359359 | 1387359359 | public | metrics | + + The `state` column indicates the current state of the table synchronization. + Possible values for `state` are: + + | state | description | + |-------|-------------| + | d | initial table data sync | + | f | initial table data sync completed | + | s | catching up with the latest changes | + | r | table is ready, syncing live changes | + + To see the replication lag, run the following against the SOURCE database: + + ```bash + psql $SOURCE -f - <<'EOF' + SELECT + slot_name, + pg_size_pretty(pg_current_wal_flush_lsn() - confirmed_flush_lsn) AS lag + FROM pg_replication_slots + WHERE slot_name LIKE 'live_sync_%' AND slot_type = 'logical' + EOF + ``` + +4. **Add or remove tables from the publication** + + To add tables, use [ALTER PUBLICATION .. ADD TABLE][alter-publication] + + ```sql + ALTER PUBLICATION ADD TABLE ; + ``` + + To remove tables, use [ALTER PUBLICATION .. DROP TABLE][alter-publication] + + ```sql + ALTER PUBLICATION DROP TABLE ; + ``` + +5. **Update table statistics** + + If you have a large table, you can run `ANALYZE` on the target + to update the table statistics after the initial sync is complete. + + This helps the query planner make better decisions for query execution plans. + + ```bash + vacuumdb --analyze --verbose --dbname=$TARGET + ``` + +6. **Stop the connector** + + ```bash + docker stop live-sync + ``` + +7. **(Optional) Reset sequence nextval on the target** + + The connector does not automatically reset the sequence nextval on the target. + + Run the following script to reset the sequence for all tables that have a + serial or identity column in the target: + + ```bash + psql $TARGET -f - <<'EOF' + DO $$ + DECLARE + rec RECORD; + BEGIN + FOR rec IN ( + SELECT + sr.target_schema AS table_schema, + sr.target_table AS table_name, + col.column_name, + pg_get_serial_sequence( + sr.target_schema || '.' || sr.target_table, + col.column_name + ) AS seqname + FROM _ts_live_sync.subscription_rel AS sr + JOIN information_schema.columns AS col + ON col.table_schema = sr.target_schema + AND col.table_name = sr.target_table + WHERE col.column_default LIKE 'nextval(%' -- only serial/identity columns + ) LOOP + EXECUTE format( + 'SELECT setval(%L, + COALESCE((SELECT MAX(%I) FROM %I.%I), 0) + 1, + false + );', + rec.seqname, -- the sequence identifier + rec.column_name, -- the column to MAX() + rec.table_schema, -- schema for MAX() + rec.table_name -- table for MAX() + ); + END LOOP; + END; + $$ LANGUAGE plpgsql; + EOF + ``` + +8. 
**Clean up** + + Use the `--drop` flag to remove the replication slots created by the connector on the source database. + + ```bash + docker run -it --rm --name livesync timescale/live-sync:v0.1.25 run \ + --publication --subscription \ + --source $SOURCE --target $TARGET \ + --drop + ``` + +[install-docker]: https://docs.docker.com/engine/install/ +[create-publication]: https://www.postgresql.org/docs/current/sql-createpublication.html +[alter-publication]: https://www.postgresql.org/docs/current/sql-alterpublication.html diff --git a/snippets/integrations/_manage-a-data-exporter.mdx b/snippets/integrations/_manage-a-data-exporter.mdx index 76bd796..27d6d06 100644 --- a/snippets/integrations/_manage-a-data-exporter.mdx +++ b/snippets/integrations/_manage-a-data-exporter.mdx @@ -1,6 +1,6 @@ import { SERVICE_LONG, SERVICE_SHORT, CONSOLE } from '/snippets/vars.mdx'; -### Attach a data exporter to a {SERVICE_LONG} +### Attach a data exporter to a Tiger Cloud service To send telemetry data to an external monitoring tool, you attach a data exporter to your {SERVICE_LONG}. You can attach only one exporter to a {SERVICE_SHORT}. @@ -12,7 +12,7 @@ To attach an exporter: 3. **Select the exporter, then click `Attach exporter`** 4. **If you are attaching a first `Logs` data type exporter, restart the {SERVICE_SHORT}** -### Monitor {SERVICE_LONG} metrics +### Monitor Tiger Cloud service metrics You can now monitor your {SERVICE_SHORT} metrics. Use the following metrics to check the service is running correctly: @@ -48,7 +48,7 @@ You cannot change fields such as the provider or the AWS region. To remove a data exporter that you no longer need: -1. **Disconnect the data exporter from your {SERVICE_LONG}s** +1. **Disconnect the data exporter from your Tiger Cloud services** 1. In [{CONSOLE}][console-services], choose the {SERVICE_SHORT}. 2. Click `Operations` > `Exporters`. diff --git a/snippets/integrations/_prometheus-integrate.mdx b/snippets/integrations/_prometheus-integrate.mdx index e4601d8..208d127 100644 --- a/snippets/integrations/_prometheus-integrate.mdx +++ b/snippets/integrations/_prometheus-integrate.mdx @@ -20,7 +20,7 @@ To follow the steps on this page: - [Install Postgres Exporter][install-exporter]. To reduce latency and potential data transfer costs, install Prometheus and Postgres Exporter on a machine in the same AWS region as your {SERVICE_LONG}. -## Export {SERVICE_LONG} telemetry to Prometheus +## Export Tiger Cloud service telemetry to Prometheus To export your data, do the following: diff --git a/snippets/integrations/_setup-connection-strings-livesync.mdx b/snippets/integrations/_setup-connection-strings-livesync.mdx new file mode 100644 index 0000000..e63eb7c --- /dev/null +++ b/snippets/integrations/_setup-connection-strings-livesync.mdx @@ -0,0 +1,15 @@ +import { SERVICE_LONG } from '/snippets/vars.mdx'; + +These variables hold the connection information for the source database and target {SERVICE_LONG}. +In Terminal on your migration machine, set the following: + +```bash +export SOURCE="postgres://:@:/" +export TARGET="postgres://tsdbadmin:@:/tsdb?sslmode=require" +``` +You find the connection information for your {SERVICE_LONG} in the configuration file you +downloaded when you created the service. + + +Avoid using connection strings that route through connection poolers like PgBouncer or similar tools. This tool requires a direct connection to the database to function properly. 
+ diff --git a/snippets/integrations/_tune-source-database-awsrds-migration.mdx b/snippets/integrations/_tune-source-database-awsrds-migration.mdx new file mode 100644 index 0000000..852cacd --- /dev/null +++ b/snippets/integrations/_tune-source-database-awsrds-migration.mdx @@ -0,0 +1,37 @@ +import EnableReplication from '/snippets/integrations/_enable-replication.mdx'; +import { PG } from '/snippets/vars.mdx'; + +Updating parameters on a {PG} instance will cause an outage. Choose a time that will cause the least issues to tune this database. + +1. **Update the DB instance parameter group for your source database** + + 1. In [https://console.aws.amazon.com/rds/home#databases:][aws-databases], + select the RDS instance to migrate. + + 2. Click `Configuration`, scroll down and note the `DB instance parameter group`, then click `Parameter groups` + + ![Create security rule to enable RDS EC2 connection](https://assets.timescale.com/docs/images/migrate/awsrds-parameter-groups.png) + + 3. Click `Create parameter group`, fill in the form with the following values, then click `Create`. + - **Parameter group name** - whatever suits your fancy. + - **Description** - knock yourself out with this one. + - **Engine type** - `PostgreSQL` + - **Parameter group family** - the same as `DB instance parameter group` in your `Configuration`. + 4. In `Parameter groups`, select the parameter group you created, then click `Edit`. + 5. Update the following parameters, then click `Save changes`. + - `rds.logical_replication` set to `1`: record the information needed for logical decoding. + - `wal_sender_timeout` set to `0`: disable the timeout for the sender process. + + 6. In RDS, navigate back to your [databases][aws-databases], select the RDS instance to migrate, and click `Modify`. + + 7. Scroll down to `Database options`, select your new parameter group, and click `Continue`. + 8. Click `Apply immediately` or choose a maintenance window, then click `Modify DB instance`. + + Changing parameters will cause an outage. Wait for the database instance to reboot before continuing. + 9. Verify that the settings are live in your database. + +2. **Enable replication `DELETE` and `UPDATE` operations** + + + +[aws-databases]: https://console.aws.amazon.com/rds/home#databases: diff --git a/snippets/integrations/code/_start-coding-golang.mdx b/snippets/integrations/code/_start-coding-golang.mdx index c978132..a3e0c99 100644 --- a/snippets/integrations/code/_start-coding-golang.mdx +++ b/snippets/integrations/code/_start-coding-golang.mdx @@ -5,10 +5,10 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-clo -- Install [Go](https://golang.org/doc/install). -- Install the [PGX driver for Go](https://github.com/jackc/pgx). +- Install [Go][install-go]. +- Install the [PGX driver for Go][pgx-driver]. -## Connect to your {SERVICE_LONG} +## Connect to your Tiger Cloud service In this section, you create a connection to {CLOUD_LONG} using the PGX driver. PGX is a toolkit designed to help Go developers work directly with {PG}. @@ -26,7 +26,7 @@ You can use it to help your Go application interact directly with TimescaleDB. * database name 2. 
Compose your connection string variable as a - [libpq connection string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), using this format: + [libpq connection string][libpq-connstring], using this format: ```go connStr := "postgres://username:password@host:port/dbname" @@ -832,9 +832,15 @@ This section covers how to execute queries against your database. Now that you're able to connect, read, and write to a {TIMESCALE_DB} instance from your Go application, be sure to check out these advanced {TIMESCALE_DB} tutorials: -* Refer to the [pgx documentation](https://pkg.go.dev/github.com/jackc/pgx) for more information about pgx. +* Refer to the [pgx documentation][pgx-docs] for more information about pgx. * Get up and running with {TIMESCALE_DB} with the [Getting Started](/getting-started/latest/) tutorial. * Want fast inserts on CSV data? Check out - [{TIMESCALE_DB} parallel copy](https://github.com/timescale/timescaledb-parallel-copy), a tool for fast inserts, - written in Go. \ No newline at end of file + [{TIMESCALE_DB} parallel copy][timescaledb-parallel-copy], a tool for fast inserts, + written in Go. + +[install-go]: https://golang.org/doc/install +[pgx-driver]: https://github.com/jackc/pgx +[libpq-connstring]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +[pgx-docs]: https://pkg.go.dev/github.com/jackc/pgx +[timescaledb-parallel-copy]: https://github.com/timescale/timescaledb-parallel-copy \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-java.mdx b/snippets/integrations/code/_start-coding-java.mdx index b46ce9b..fcb232a 100644 --- a/snippets/integrations/code/_start-coding-java.mdx +++ b/snippets/integrations/code/_start-coding-java.mdx @@ -5,13 +5,13 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-clo -* Install the [Java Development Kit (JDK)](https://openjdk.java.net). -* Install the [PostgreSQL JDBC driver](https://jdbc.postgresql.org). +* Install the [Java Development Kit (JDK)][install-jdk]. +* Install the [PostgreSQL JDBC driver][jdbc-driver]. All code in this quick start is for Java 16 and later. If you are working with older JDK versions, use legacy coding techniques. -## Connect to your {SERVICE_LONG} +## Connect to your Tiger Cloud service In this section, you create a connection to your {SERVICE_SHORT} using an application in a single file. You can use any of your favorite build tools, including `gradle` @@ -40,10 +40,10 @@ or `maven`. to your console. 3. Import the PostgreSQL JDBC driver. If you are using a dependency manager, - include the [PostgreSQL JDBC Driver](https://mvnrepository.com/artifact/org.postgresql/postgresql) as a + include the [PostgreSQL JDBC Driver][jdbc-maven] as a dependency. -4. Download the [JAR artifact of the JDBC Driver](https://jdbc.postgresql.org/download/) and +4. Download the [JAR artifact of the JDBC Driver][jdbc-download] and save it with the `Main.java` file. 5. Import the `JDBC Driver` into the Java application and display a list of @@ -89,7 +89,7 @@ or `maven`. var connUrl = "jdbc:postgresql://:/?user=&password="; ``` - For more information about creating connection strings, see the [JDBC documentation](https://jdbc.postgresql.org/documentation/datasource/). + For more information about creating connection strings, see the [JDBC documentation][jdbc-docs]. 
This method of composing a connection string is for test or development @@ -313,7 +313,7 @@ example, you generate some sample time-series data to insert into the This section covers how to execute queries against your database. -## Execute queries on {TIMESCALE_DB} +## Execute queries on TimescaleDB 1. Define the SQL query you'd like to run on the database. This example combines time-series and relational data. It returns the average values for @@ -464,6 +464,12 @@ public class Main { } ``` +[install-jdk]: https://openjdk.java.net +[jdbc-driver]: https://jdbc.postgresql.org +[jdbc-maven]: https://mvnrepository.com/artifact/org.postgresql/postgresql +[jdbc-download]: https://jdbc.postgresql.org/download/ +[jdbc-docs]: https://jdbc.postgresql.org/documentation/datasource/ + ### Execute more complex queries ```java @@ -571,4 +577,10 @@ public class Main { private record Sensor(String type, String location) { } } -``` \ No newline at end of file +``` + +[install-jdk]: https://openjdk.java.net +[jdbc-driver]: https://jdbc.postgresql.org +[jdbc-maven]: https://mvnrepository.com/artifact/org.postgresql/postgresql +[jdbc-download]: https://jdbc.postgresql.org/download/ +[jdbc-docs]: https://jdbc.postgresql.org/documentation/datasource/ \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-node.mdx b/snippets/integrations/code/_start-coding-node.mdx index 594bdc7..ce551cc 100644 --- a/snippets/integrations/code/_start-coding-node.mdx +++ b/snippets/integrations/code/_start-coding-node.mdx @@ -5,13 +5,13 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-clo -* Install [Node.js](https://nodejs.org). -* Install the Node.js package manager [npm](https://docs.npmjs.com/getting-started). +* Install [Node.js][install-nodejs]. +* Install the Node.js package manager [npm][install-npm]. -## Connect to {TIMESCALE_DB} +## Connect to TimescaleDB In this section, you create a connection to {TIMESCALE_DB} with a common Node.js -ORM (object relational mapper) called [Sequelize](https://sequelize.org). +ORM (object relational mapper) called [Sequelize][sequelize]. 1. At the command prompt, initialize a new Node.js app: @@ -326,4 +326,8 @@ is displayed. ``` Now, when you reload the page, you should see all of the rows currently in the -`page_loads` table. \ No newline at end of file +`page_loads` table. + +[install-nodejs]: https://nodejs.org +[install-npm]: https://docs.npmjs.com/getting-started +[sequelize]: https://sequelize.org \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-python.mdx b/snippets/integrations/code/_start-coding-python.mdx index b4afd62..99b85dc 100644 --- a/snippets/integrations/code/_start-coding-python.mdx +++ b/snippets/integrations/code/_start-coding-python.mdx @@ -7,8 +7,8 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-clo * Install the `psycopg2` library. - For more information, see the [psycopg2 documentation](https://pypi.org/project/psycopg2/). -* Create a [Python virtual environment](https://docs.python.org/3/library/venv.html). (Optional) + For more information, see the [psycopg2 documentation][psycopg2-pypi]. +* Create a [Python virtual environment][python-venv]. (Optional) ## Connect to TimescaleDB @@ -35,7 +35,7 @@ prevents common attacks such as SQL injection. * database name 3. 
Compose your connection string variable as a - [libpq connection string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING), using this format: + [libpq connection string][libpq-connstring], using this format: ```python CONNECTION = "postgres://username:password@host:port/dbname" @@ -60,8 +60,8 @@ prevents common attacks such as SQL injection. details like your password, hostname, and port number. -4. Use the `psycopg2` [connect function](https://www.psycopg.org/docs/module.html?highlight=connect#psycopg2.connect) to create a new - database session and create a new [cursor object](https://www.psycopg.org/docs/connection.html?highlight=cursor#connection.cursor) to +4. Use the `psycopg2` [connect function][psycopg2-connect] to create a new + database session and create a new [cursor object][psycopg2-cursor] to interact with the database. In your `main` function, add these lines: @@ -206,7 +206,7 @@ section, you can use `psycopg2` with prepared statements, or you can use ``` If you choose to use `pgcopy` instead, install the `pgcopy` package -[using pip](https://pypi.org/project/pgcopy/), and then add this line to your list of +[using pip][pgcopy-pypi], and then add this line to your list of `import` statements: ```python @@ -305,9 +305,9 @@ you can use prepared statements to ensure queries are executed safely against the database. For more information about properly using placeholders in `psycopg2`, see the -[basic module usage document](https://www.psycopg.org/docs/usage.html). +[basic module usage document][psycopg2-usage]. For more information about how to execute more complex queries in `psycopg2`, -see the [psycopg2 documentation](https://www.psycopg.org/docs/usage.html). +see the [psycopg2 documentation][psycopg2-usage]. ### Execute a query @@ -329,7 +329,7 @@ see the [psycopg2 documentation](https://www.psycopg.org/docs/usage.html). ``` 3. To access all resulting rows returned by your query, use one of `pyscopg2`'s - [results retrieval methods](https://www.psycopg.org/docs/cursor.html), + [results retrieval methods][psycopg2-results], such as `fetchall()` or `fetchmany()`. This example prints the results of the query, row by row. Note that the result of `fetchall()` is a list of tuples, so you can handle them accordingly: @@ -344,7 +344,7 @@ see the [psycopg2 documentation](https://www.psycopg.org/docs/usage.html). ``` 4. (Optional) If you want a list of dictionaries instead, you can define the - cursor using [`DictCursor`](https://www.psycopg.org/docs/extras.html#dictionary-like-cursor): + cursor using [`DictCursor`][psycopg2-dictcursor]: ```python cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) @@ -375,4 +375,14 @@ executed safely against the database. 
data = (location, sensor_type) cursor.execute(query, data) results = cursor.fetchall() - ``` \ No newline at end of file + ``` + +[psycopg2-pypi]: https://pypi.org/project/psycopg2/ +[python-venv]: https://docs.python.org/3/library/venv.html +[libpq-connstring]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING +[psycopg2-connect]: https://www.psycopg.org/docs/module.html?highlight=connect#psycopg2.connect +[psycopg2-cursor]: https://www.psycopg.org/docs/connection.html?highlight=cursor#connection.cursor +[pgcopy-pypi]: https://pypi.org/project/pgcopy/ +[psycopg2-usage]: https://www.psycopg.org/docs/usage.html +[psycopg2-results]: https://www.psycopg.org/docs/cursor.html +[psycopg2-dictcursor]: https://www.psycopg.org/docs/extras.html#dictionary-like-cursor \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-ruby.mdx b/snippets/integrations/code/_start-coding-ruby.mdx index 5047ee6..fdd0a57 100644 --- a/snippets/integrations/code/_start-coding-ruby.mdx +++ b/snippets/integrations/code/_start-coding-ruby.mdx @@ -5,7 +5,7 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs-clo -* Install [Rails](https://guides.rubyonrails.org/install_ruby_on_rails.html#installing-rails). +* Install [Rails][install-rails]. ## Connect a Rails app to your service @@ -133,7 +133,7 @@ In this section, you use the helpers in the {TIMESCALE_DB} gem to create and man Rails model includes a `PRIMARY KEY` index for id by default: either remove the column or make sure that the index includes time as part of a "composite key." - For more information, check the Roby docs around [composite primary keys](https://guides.rubyonrails.org/active_record_composite_primary_keys.html). + For more information, check the Roby docs around [composite primary keys][rails-composite-keys]. 3. **Create a `PageLoad` model** @@ -308,7 +308,7 @@ The {TIMESCALE_DB} gem provides several convenient scopes for querying your time puts "Standard Deviation: #{stats.stddev}" ``` -### {TIMESCALE_DB} features +### TimescaleDB features The {TIMESCALE_DB} gem provides utility methods to access hypertable and chunk information. Every model that uses the `acts_as_hypertable` method has access to these methods. @@ -375,7 +375,7 @@ The `continuous_aggregates` method generates a class for each continuous aggrega - Create or drop a continuous aggregate: Create or drop all the continuous aggregates in the proper order to build them hierarchically. See more about how it - works in this [blog post](https://www.timescale.com/blog/building-a-better-ruby-orm-for-time-series-and-analytics). + works in this [blog post][ruby-blog-post]. ```ruby PageLoad.create_continuous_aggregates @@ -389,6 +389,12 @@ The `continuous_aggregates` method generates a class for each continuous aggrega Now that you have integrated the ruby gem into your app: -* Learn more about the [{TIMESCALE_DB} gem](https://github.com/timescale/timescaledb-ruby). -* Check out the [official docs](https://timescale.github.io/timescaledb-ruby/). -* Follow the [LTTB](https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/), [Open AI long-term storage](https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/), and [candlesticks](https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/) tutorials. \ No newline at end of file +* Learn more about the [{TIMESCALE_DB} gem][timescaledb-ruby-gem]. +* Check out the [official docs][timescaledb-ruby-docs]. 
+* Follow the [LTTB][lttb-tutorial], [Open AI long-term storage][openai-tutorial], and [candlesticks][candlestick-tutorial] tutorials. + +[timescaledb-ruby-gem]: https://github.com/timescale/timescaledb-ruby +[timescaledb-ruby-docs]: https://timescale.github.io/timescaledb-ruby/ +[lttb-tutorial]: https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/ +[openai-tutorial]: https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/ +[candlestick-tutorial]: https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/ \ No newline at end of file diff --git a/snippets/prerequisites/_livesync-prereqs-cloud.mdx b/snippets/prerequisites/_livesync-prereqs-cloud.mdx deleted file mode 100644 index dcd55c7..0000000 --- a/snippets/prerequisites/_livesync-prereqs-cloud.mdx +++ /dev/null @@ -1,14 +0,0 @@ -import { SERVICE_LONG, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; - -To follow the steps on this page: - -* Create a target {SERVICE_LONG} with real-time analytics enabled. - - You need your [connection details](/integrations/find-connection-details). - -- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. - -- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. - - The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, - first create the extension on the target {SERVICE_LONG} before syncing the table. \ No newline at end of file diff --git a/snippets/prerequisites/_livesync-prereqs-terminal.mdx b/snippets/prerequisites/_livesync-prereqs-terminal.mdx deleted file mode 100644 index 898f035..0000000 --- a/snippets/prerequisites/_livesync-prereqs-terminal.mdx +++ /dev/null @@ -1,30 +0,0 @@ -import { SERVICE_LONG, SERVICE_SHORT, PG, PG_CONNECTOR } from '/snippets/vars.mdx'; - -Best practice is to use an [Ubuntu EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html#ec2-launch-instance) hosted in the same region as your -{SERVICE_LONG} to move data. That is, the machine you run the commands on to move your -data from your source database to your target {SERVICE_LONG}. - -Before you move your data: - -- Create a target {SERVICE_LONG}. - - Each {SERVICE_LONG} has a single {PG} instance that supports the - most popular extensions. {SERVICE_LONG}s do not support tablespaces, - and there is no superuser associated with a {SERVICE_SHORT}. - Best practice is to create a {SERVICE_LONG} with at least 8 CPUs for a smoother experience. A higher-spec instance - can significantly reduce the overall migration window. - -- To ensure that maintenance does not run while migration is in progress, best practice is to adjust the maintenance window. - -- Ensure that the source {PG} instance and the target {SERVICE_LONG} have the same extensions installed. - - The {PG_CONNECTOR} does not create extensions on the target. If the table uses column types from an extension, - first create the extension on the target {SERVICE_LONG} before syncing the table. - -- [Install Docker](https://docs.docker.com/engine/install/) on your sync machine. - - For a better experience, use a 4 CPU/16GB EC2 instance or greater to run the {PG_CONNECTOR}. - -- Install the [{PG} client tools](/integrations/integrate/psql) on your sync machine. - - This includes `psql`, `pg_dump`, `pg_dumpall`, and `vacuumdb` commands. 
\ No newline at end of file diff --git a/snippets/prerequisites/_migrate-prerequisites.mdx b/snippets/prerequisites/_migrate-prerequisites.mdx new file mode 100644 index 0000000..0d0e06d --- /dev/null +++ b/snippets/prerequisites/_migrate-prerequisites.mdx @@ -0,0 +1,19 @@ +import { SERVICE_LONG, SERVICE_SHORT, PG } from '/snippets/vars.mdx'; + +Best practice is to use an [Ubuntu EC2 instance][create-ec2-instance] hosted in the same region as your +{SERVICE_LONG} to move data. That is, the machine you run the commands on to move your +data from your source database to your target {SERVICE_LONG}. + +Before you move your data: + +- Create a target {SERVICE_LONG}. + + Each {SERVICE_LONG} has a single {PG} instance that supports the + most popular extensions. {SERVICE_LONG}s do not support tablespaces, + and there is no superuser associated with a {SERVICE_SHORT}. + Best practice is to create a {SERVICE_LONG} with at least 8 CPUs for a smoother experience. A higher-spec instance + can significantly reduce the overall migration window. + +- To ensure that maintenance does not run while migration is in progress, best practice is to adjust the maintenance window. + +[create-ec2-instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EC2_GetStarted.html#ec2-launch-instance diff --git a/snippets/prerequisites/_prereqs-cloud-only.mdx b/snippets/prerequisites/_prereqs-cloud-only.mdx index 5e1cac8..b33f294 100644 --- a/snippets/prerequisites/_prereqs-cloud-only.mdx +++ b/snippets/prerequisites/_prereqs-cloud-only.mdx @@ -1,9 +1,10 @@ +import { SERVICE_LONG } from '/snippets/vars.mdx'; + To follow the steps on this page: -* Create a target [{SERVICE_LONG}][create-service] with time-series and analytics enabled. +* Create a target [{SERVICE_LONG}][portal-ops-mode] with real-time analytics enabled. You need your [connection details][connection-info]. - -[create-service]: /cloud/get-started/create-services +[portal-ops-mode]: https://console.cloud.timescale.com/dashboard/services [connection-info]: /integrations/find-connection-details/ diff --git a/snippets/vars.mdx b/snippets/vars.mdx index 028445b..3cffe05 100644 --- a/snippets/vars.mdx +++ b/snippets/vars.mdx @@ -29,8 +29,8 @@ export const SELF_SHORT_CAP = 'Self-hosted'; export const SELF_SHORT = 'self-hosted'; export const SELF_LONG_CAP = 'Self-hosted TimescaleDB'; export const SELF_LONG = 'self-hosted TimescaleDB'; -export const CONSOLE = 'Tiger Cloud Console'; -export const CONSOLE_LONG = 'Tiger Cloud Console'; +export const CONSOLE = 'Tiger Console'; +export const CONSOLE_LONG = 'Tiger Console'; export const CONSOLE_SHORT = 'Console'; export const CLI_LONG = 'Tiger CLI'; export const CLI_SHORT = 'CLI'; diff --git a/styles.css b/styles.css index c52e85a..9353aac 100644 --- a/styles.css +++ b/styles.css @@ -39,6 +39,17 @@ h1 { font-size: 18px; } +/* External link indicator */ +a[href^="http"]:not([href*="tigerdata.com"])::after, +a[href^="https"]:not([href*="tigerdata.com"])::after { + content: "↗"; + display: inline-block; + margin-left: 4px; + font-size: 0.85em; + vertical-align: super; + opacity: 0.7; +} + /* dark mode styles */ .dark { .link { From 9714132d3ab578879157302f397d4498889f29e2 Mon Sep 17 00:00:00 2001 From: billy-the-fish Date: Mon, 17 Nov 2025 09:41:33 +0100 Subject: [PATCH 10/13] chore: vars. 
--- docs.json | 6 +++--- integrations/find-connection-details.mdx | 2 +- integrations/{integrations.mdx => index.mdx} | 1 + integrations/integrate/dbeaver.mdx | 4 ++-- integrations/integrate/decodable.mdx | 2 +- integrations/integrate/fivetran.mdx | 4 ++-- integrations/integrate/telegraf.mdx | 2 +- 7 files changed, 11 insertions(+), 10 deletions(-) rename integrations/{integrations.mdx => index.mdx} (99%) diff --git a/docs.json b/docs.json index 7175cdd..e3976e6 100644 --- a/docs.json +++ b/docs.json @@ -324,12 +324,12 @@ ] }, { - "tab": "Integration", + "tab": "Integrations", "groups": [ { "group": " ", "pages": [ - "integrations/integrations", + "integrations/index", "integrations/find-connection-details" ] }, @@ -697,7 +697,7 @@ } ], "contextual": { - "options": ["claude", "chatgpt"] + "options": ["claude", "chatgpt", "perplexity", "cursor"] }, "fonts": { "family": "Geist" diff --git a/integrations/find-connection-details.mdx b/integrations/find-connection-details.mdx index 8b62a10..505ec91 100644 --- a/integrations/find-connection-details.mdx +++ b/integrations/find-connection-details.mdx @@ -57,7 +57,7 @@ such as Terraform or the [{CLOUD_LONG} REST API][api-reference]: 1. **Open the settings for your {PROJECT_SHORT}**: - In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click `{PROJECT_SHORT_CAP} settings`. + In [{CONSOLE}][console-services], click your {PROJECT_SHORT} name in the upper left corner, then click {PROJECT_SHORT_CAP} settings. 2. **Create client credentials**: diff --git a/integrations/integrations.mdx b/integrations/index.mdx similarity index 99% rename from integrations/integrations.mdx rename to integrations/index.mdx index 12308ca..c5f01b5 100644 --- a/integrations/integrations.mdx +++ b/integrations/index.mdx @@ -1,5 +1,6 @@ --- title: Integrations +sidebarTitle: Overview description: You can integrate your {SERVICE_LONG} with third-party solutions to expand and extend what you can do with your data. keywords: [integrations, connectors, third-party, PostgreSQL, tools, data engineering, ETL, observability, BI, visualization] mode: "wide" diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index e65bc01..ce38de7 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -25,8 +25,8 @@ To connect to {CLOUD_LONG}: 1. **Start `DBeaver`** 1. **In the toolbar, click the plug+ icon** -1. **In `Connect to a database` search for `{TIMESCALE_DB}`** -1. **Select `{TIMESCALE_DB}`, then click `Next`** +1. **In `Connect to a database` search for {TIMESCALE_DB}** +1. **Select {TIMESCALE_DB}, then click `Next`** 1. **Configure the connection** Use your [connection details][connection-info] to add your connection settings. diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 46c598b..cbb50ba 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -31,7 +31,7 @@ To stream data gathered in Decodable to a {SERVICE_LONG}: 1. Log in to your [Decodable account][decodable-app]. 2. Click `Connections`, then click `New Connection`. - 3. Select a `{PG} sink` connection type, then click `Connect`. + 3. Select a {PG} sink connection type, then click `Connect`. 4. Using your [connection details][connection-info], fill in the connection information. Leave `schema` and `JDBC options` empty. 
diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index b91baa0..8ef47be 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -29,8 +29,8 @@ To be able to inject data into your {SERVICE_LONG}, set it as a destination in F ![Fivetran data destination](https://assets.timescale.com/docs/images/integrations-fivetran-destination-timescal-cloud.png) 1. In [Fivetran Dashboard > Destinations][fivetran-dashboard-destinations], click `Add destination`. -2. Search for the `{PG}` connector and click `Select`. Add the destination name and click `Add`. -3. In the `{PG}` setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. +2. Search for the {PG} connector and click `Select`. Add the destination name and click `Add`. +3. In the {PG} setup, add your [{SERVICE_LONG} connection details][connection-info], then click `Save & Test`. Fivetran validates the connection settings and sets up any security configurations. 4. Click `View Destination`. diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx index ae97b17..e447fbe 100644 --- a/integrations/integrate/telegraf.mdx +++ b/integrations/integrate/telegraf.mdx @@ -1,7 +1,7 @@ --- title: Ingest data using Telegraf sidebarTitle: Telegraf -description: Ingest data into a Tiger Cloud service using using the Telegraf plugin +description: Ingest data into a Tiger Cloud service using the Telegraf plugin keywords: [Telegraf, data ingestion, metrics collection, InfluxData, plugins, IoT, systems monitoring, agent, time-series data] --- From eda9d60474f1f56022e9100dc5c3b73fd29ae85e Mon Sep 17 00:00:00 2001 From: billy-the-fish Date: Mon, 17 Nov 2025 11:52:51 +0100 Subject: [PATCH 11/13] chore: coding page review. --- snippets/coding/_start-coding-golang.mdx | 891 ------------------ snippets/coding/_start-coding-java.mdx | 616 ------------ snippets/coding/_start-coding-node.mdx | 361 ------- snippets/coding/_start-coding-python.mdx | 427 --------- snippets/coding/_start-coding-ruby.mdx | 426 --------- .../code/_start-coding-golang.mdx | 332 ++++--- .../integrations/code/_start-coding-java.mdx | 54 +- .../integrations/code/_start-coding-node.mdx | 87 +- .../code/_start-coding-python.mdx | 58 +- .../integrations/code/_start-coding-ruby.mdx | 33 +- 10 files changed, 263 insertions(+), 3022 deletions(-) delete mode 100644 snippets/coding/_start-coding-golang.mdx delete mode 100644 snippets/coding/_start-coding-java.mdx delete mode 100644 snippets/coding/_start-coding-node.mdx delete mode 100644 snippets/coding/_start-coding-python.mdx delete mode 100644 snippets/coding/_start-coding-ruby.mdx diff --git a/snippets/coding/_start-coding-golang.mdx b/snippets/coding/_start-coding-golang.mdx deleted file mode 100644 index f1e0359..0000000 --- a/snippets/coding/_start-coding-golang.mdx +++ /dev/null @@ -1,891 +0,0 @@ -import { CLOUD_LONG, TIMESCALE_DB, PG } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -## Prerequisites - - - -- Install [Go][golang-install]. -- Install the [PGX driver for Go][pgx-driver-github]. - -## Connect to your Tiger Cloud service - -In this section, you create a connection to {CLOUD_LONG} using the PGX driver. -PGX is a toolkit designed to help Go developers work directly with {PG}. -You can use it to help your Go application interact directly with TimescaleDB. - - - -1. 
Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for PGX. - - You'll need: - - * password - * username - * host URL - * port number - * database name - -1. Compose your connection string variable as a - [libpq connection string][libpq-docs], using this format: - - ```go - connStr := "postgres://username:password@host:port/dbname" - ``` - - If you're using a hosted version of TimescaleDB, or if you need an SSL - connection, use this format instead: - - ```go - connStr := "postgres://username:password@host:port/dbname?sslmode=require" - ``` - -1. [](#)(optional)You can check that you're connected to your database with this - hello world program: - - ```go - package main - - import ( - "context" - "fmt" - "os" - - "github.com/jackc/pgx/v5" - ) - - //connect to database using a single connection - func main() { - /***********************************************/ - /* Single Connection to TimescaleDB/ PostgreSQL */ - /***********************************************/ - ctx := context.Background() - connStr := "yourConnectionStringHere" - conn, err := pgx.Connect(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer conn.Close(ctx) - - //run a simple query to check our connection - var greeting string - err = conn.QueryRow(ctx, "select 'Hello, Timescale!'").Scan(&greeting) - if err != nil { - fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) - os.Exit(1) - } - fmt.Println(greeting) - } - - ``` - - If you'd like to specify your connection string as an environment variable, - you can use this syntax to access it in place of the `connStr` variable: - - ```go - os.Getenv("DATABASE_CONNECTION_STRING") - ``` - - - -Alternatively, you can connect to {TIMESCALE_DB} using a connection pool. -Connection pooling is useful to conserve computing resources, and can also -result in faster database queries: - - - -1. To create a connection pool that can be used for concurrent connections to - your database, use the `pgxpool.New()` function instead of - `pgx.Connect()`. Also note that this script imports - `github.com/jackc/pgx/v5/pgxpool`, instead of `pgx/v5` which was used to - create a single connection: - - ```go - package main - - import ( - "context" - "fmt" - "os" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - //run a simple query to check our connection - var greeting string - err = dbpool.QueryRow(ctx, "select 'Hello, Tiger Data (but concurrently)'").Scan(&greeting) - if err != nil { - fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) - os.Exit(1) - } - fmt.Println(greeting) - } - ``` - - - -## Create a relational table - -In this section, you create a table called `sensors` which holds the ID, type, -and location of your fictional sensors. Additionally, you create a hypertable -called `sensor_data` which holds the measurements of those sensors. The -measurements contain the time, sensor_id, temperature reading, and CPU -percentage of the sensors. - - - -1. Compose a string that contains the SQL statement to create a relational - table. 
This example creates a table called `sensors`, with columns for ID, - type, and location: - - ```go - queryCreateTable := `CREATE TABLE sensors (id SERIAL PRIMARY KEY, type VARCHAR(50), location VARCHAR(50));` - ``` - -1. Execute the `CREATE TABLE` statement with the `Exec()` function on the - `dbpool` object, using the arguments of the current context and the - statement string you created: - - ```go - package main - - import ( - "context" - "fmt" - "os" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - /********************************************/ - /* Create relational table */ - /********************************************/ - - //Create relational table called sensors - queryCreateTable := `CREATE TABLE sensors (id SERIAL PRIMARY KEY, type VARCHAR(50), location VARCHAR(50));` - _, err = dbpool.Exec(ctx, queryCreateTable) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to create SENSORS table: %v\n", err) - os.Exit(1) - } - fmt.Println("Successfully created relational table SENSORS") - } - ``` - - - -## Generate a hypertable - -When you have created the relational table, you can create a hypertable. -Creating tables and indexes, altering tables, inserting data, selecting data, -and most other tasks are executed on the hypertable. - - - -1. Create a variable for the `CREATE TABLE SQL` statement for your hypertable. - Notice how the hypertable has the compulsory time column: - - ```go - queryCreateTable := `CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER, - temperature DOUBLE PRECISION, - cpu DOUBLE PRECISION, - FOREIGN KEY (sensor_id) REFERENCES sensors (id)); - ` - ``` - -1. Formulate the `SELECT` statement to convert the table into a hypertable. You - must specify the table name to convert to a hypertable, and its time column - name as the second argument. For more information, see the - [`create_hypertable` docs][create-hypertable-docs]: - - ```go - queryCreateHypertable := `SELECT create_hypertable('sensor_data', by_range('time'));` - ``` - - - - The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. - - - -1. Execute the `CREATE TABLE` statement and `SELECT` statement which converts - the table into a hypertable. 
You can do this by calling the `Exec()` - function on the `dbpool` object, using the arguments of the current context, - and the `queryCreateTable` and `queryCreateHypertable` statement strings: - - ```go - package main - - import ( - "context" - "fmt" - "os" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - /********************************************/ - /* Create Hypertable */ - /********************************************/ - // Create hypertable of time-series data called sensor_data - queryCreateTable := `CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER, - temperature DOUBLE PRECISION, - cpu DOUBLE PRECISION, - FOREIGN KEY (sensor_id) REFERENCES sensors (id)); - ` - - queryCreateHypertable := `SELECT create_hypertable('sensor_data', by_range('time'));` - - //execute statement - _, err = dbpool.Exec(ctx, queryCreateTable+queryCreateHypertable) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to create the `sensor_data` hypertable: %v\n", err) - os.Exit(1) - } - fmt.Println("Successfully created hypertable `sensor_data`") - } - ``` - - - -## Insert rows of data - -You can insert rows into your database in a couple of different -ways. Each of these example inserts the data from the two arrays, `sensorTypes` and -`sensorLocations`, into the relational table named `sensors`. - -The first example inserts a single row of data at a time. The second example -inserts multiple rows of data. The third example uses batch inserts to speed up -the process. - - - -1. Open a connection pool to the database, then use the prepared statements to - formulate an `INSERT` SQL statement, and execute it: - - ```go - package main - - import ( - "context" - "fmt" - "os" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - /********************************************/ - /* INSERT into relational table */ - /********************************************/ - //Insert data into relational table - - // Slices of sample data to insert - // observation i has type sensorTypes[i] and location sensorLocations[i] - sensorTypes := []string{"a", "a", "b", "b"} - sensorLocations := []string{"floor", "ceiling", "floor", "ceiling"} - - for i := range sensorTypes { - //INSERT statement in SQL - queryInsertMetadata := `INSERT INTO sensors (type, location) VALUES ($1, $2);` - - //Execute INSERT command - _, err := dbpool.Exec(ctx, queryInsertMetadata, sensorTypes[i], sensorLocations[i]) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to insert data into database: %v\n", err) - os.Exit(1) - } - fmt.Printf("Inserted sensor (%s, %s) into database \n", sensorTypes[i], sensorLocations[i]) - } - fmt.Println("Successfully inserted all sensors into database") - } - ``` - - - -Instead of inserting a single row of data at a time, you can use this procedure -to insert multiple rows of data, instead: - - - -1. This example uses {PG} to generate some sample time-series to insert - into the `sensor_data` hypertable. Define the SQL statement to generate the - data, called `queryDataGeneration`. 
Then use the `.Query()` function to - execute the statement and return the sample data. The data returned by the - query is stored in `results`, a slice of structs, which is then used as a - source to insert data into the hypertable: - - ```go - package main - - import ( - "context" - "fmt" - "os" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - // Generate data to insert - - //SQL query to generate sample data - queryDataGeneration := ` - SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - floor(random() * (3) + 1)::int as sensor_id, - random()*100 AS temperature, - random() AS cpu - ` - //Execute query to generate samples for sensor_data hypertable - rows, err := dbpool.Query(ctx, queryDataGeneration) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) - os.Exit(1) - } - defer rows.Close() - - fmt.Println("Successfully generated sensor data") - - //Store data generated in slice results - type result struct { - Time time.Time - SensorId int - Temperature float64 - CPU float64 - } - - var results []result - for rows.Next() { - var r result - err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - } - - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) - os.Exit(1) - } - - // Check contents of results slice - fmt.Println("Contents of RESULTS slice") - for i := range results { - var r result - r = results[i] - fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) - } - } - ``` - -1. Formulate an SQL insert statement for the `sensor_data` hypertable: - - ```go - //SQL query to generate sample data - queryInsertTimeseriesData := ` - INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); - ` - ``` - -1. Execute the SQL statement for each sample in the results slice: - - ```go - //Insert contents of results slice into TimescaleDB - for i := range results { - var r result - r = results[i] - _, err := dbpool.Exec(ctx, queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to insert sample into TimescaleDB %v\n", err) - os.Exit(1) - } - defer rows.Close() - } - fmt.Println("Successfully inserted samples into sensor_data hypertable") - ``` - -1. 
[](#)(optional)This example `main.go` generates sample data and inserts it into - the `sensor_data` hypertable: - - ```go - package main - - import ( - "context" - "fmt" - "os" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - /********************************************/ - /* Connect using Connection Pool */ - /********************************************/ - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - /********************************************/ - /* Insert data into hypertable */ - /********************************************/ - // Generate data to insert - - //SQL query to generate sample data - queryDataGeneration := ` - SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - floor(random() * (3) + 1)::int as sensor_id, - random()*100 AS temperature, - random() AS cpu - ` - //Execute query to generate samples for sensor_data hypertable - rows, err := dbpool.Query(ctx, queryDataGeneration) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) - os.Exit(1) - } - defer rows.Close() - - fmt.Println("Successfully generated sensor data") - - //Store data generated in slice results - type result struct { - Time time.Time - SensorId int - Temperature float64 - CPU float64 - } - var results []result - for rows.Next() { - var r result - err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - } - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) - os.Exit(1) - } - - // Check contents of results slice - fmt.Println("Contents of RESULTS slice") - for i := range results { - var r result - r = results[i] - fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) - } - - //Insert contents of results slice into TimescaleDB - //SQL query to generate sample data - queryInsertTimeseriesData := ` - INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); - ` - - //Insert contents of results slice into TimescaleDB - for i := range results { - var r result - r = results[i] - _, err := dbpool.Exec(ctx, queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to insert sample into TimescaleDB %v\n", err) - os.Exit(1) - } - defer rows.Close() - } - fmt.Println("Successfully inserted samples into sensor_data hypertable") - } - ``` - - - -Inserting multiple rows of data using this method executes as many `insert` -statements as there are samples to be inserted. This can make ingestion of data -slow. To speed up ingestion, you can batch insert data instead. - -Here's a sample pattern for how to do so, using the sample data you generated in -the previous procedure. It uses the pgx `Batch` object: - - - -1. 
This example batch inserts data into the database: - - ```go - package main - - import ( - "context" - "fmt" - "os" - "time" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - /********************************************/ - /* Connect using Connection Pool */ - /********************************************/ - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - // Generate data to insert - - //SQL query to generate sample data - queryDataGeneration := ` - SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - floor(random() * (3) + 1)::int as sensor_id, - random()*100 AS temperature, - random() AS cpu - ` - - //Execute query to generate samples for sensor_data hypertable - rows, err := dbpool.Query(ctx, queryDataGeneration) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) - os.Exit(1) - } - defer rows.Close() - - fmt.Println("Successfully generated sensor data") - - //Store data generated in slice results - type result struct { - Time time.Time - SensorId int - Temperature float64 - CPU float64 - } - var results []result - for rows.Next() { - var r result - err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - } - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) - os.Exit(1) - } - - // Check contents of results slice - /*fmt.Println("Contents of RESULTS slice") - for i := range results { - var r result - r = results[i] - fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) - }*/ - - //Insert contents of results slice into TimescaleDB - //SQL query to generate sample data - queryInsertTimeseriesData := ` - INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); - ` - - /********************************************/ - /* Batch Insert into TimescaleDB */ - /********************************************/ - //create batch - batch := &pgx.Batch{} - //load insert statements into batch queue - for i := range results { - var r result - r = results[i] - batch.Queue(queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) - } - batch.Queue("select count(*) from sensor_data") - - //send batch to connection pool - br := dbpool.SendBatch(ctx, batch) - //execute statements in batch queue - _, err = br.Exec() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to execute statement in batch queue %v\n", err) - os.Exit(1) - } - fmt.Println("Successfully batch inserted data") - - //Compare length of results slice to size of table - fmt.Printf("size of results: %d\n", len(results)) - //check size of table for number of rows inserted - // result of last SELECT statement - var rowsInserted int - err = br.QueryRow().Scan(&rowsInserted) - fmt.Printf("size of table: %d\n", rowsInserted) - - err = br.Close() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to closer batch %v\n", err) - os.Exit(1) - } - } - ``` - - - -## Execute a query - -This section covers how to execute queries against your database. - - - -1. Define the SQL query you'd like to run on the database. 
This example uses a - SQL query that combines time-series and relational data. It returns the - average CPU values for every 5 minute interval, for sensors located on - location `ceiling` and of type `a`: - - ```go - // Formulate query in SQL - // Note the use of prepared statement placeholders $1 and $2 - queryTimebucketFiveMin := ` - SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.location = $1 AND sensors.type = $2 - GROUP BY five_min - ORDER BY five_min DESC; - ` - ``` - -1. Use the `.Query()` function to execute the query string. Make sure you - specify the relevant placeholders: - - ```go - //Execute query on TimescaleDB - rows, err := dbpool.Query(ctx, queryTimebucketFiveMin, "ceiling", "a") - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to execute query %v\n", err) - os.Exit(1) - } - defer rows.Close() - - fmt.Println("Successfully executed query") - ``` - -1. Access the rows returned by `.Query()`. Create a struct with fields - representing the columns that you expect to be returned, then use the - `rows.Next()` function to iterate through the rows returned and fill - `results` with the array of structs. This uses the `rows.Scan()` function, - passing in pointers to the fields that you want to scan for results. - - This example prints out the results returned from the query, but you might - want to use those results for some other purpose. Once you've scanned - through all the rows returned you can then use the results array however you - like. - - ```go - //Do something with the results of query - // Struct for results - type result2 struct { - Bucket time.Time - Avg float64 - } - - // Print rows returned and fill up results slice for later use - var results []result2 - for rows.Next() { - var r result2 - err = rows.Scan(&r.Bucket, &r.Avg) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - fmt.Printf("Time bucket: %s | Avg: %f\n", &r.Bucket, r.Avg) - } - - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) - os.Exit(1) - } - - // use results here… - ``` - -1. 
[](#)This example program runs a query, and accesses the results of - that query: - - ```go - package main - - import ( - "context" - "fmt" - "os" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - ) - - func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() - - /********************************************/ - /* Execute a query */ - /********************************************/ - - // Formulate query in SQL - // Note the use of prepared statement placeholders $1 and $2 - queryTimebucketFiveMin := ` - SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.location = $1 AND sensors.type = $2 - GROUP BY five_min - ORDER BY five_min DESC; - ` - - //Execute query on TimescaleDB - rows, err := dbpool.Query(ctx, queryTimebucketFiveMin, "ceiling", "a") - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to execute query %v\n", err) - os.Exit(1) - } - defer rows.Close() - - fmt.Println("Successfully executed query") - - //Do something with the results of query - // Struct for results - type result2 struct { - Bucket time.Time - Avg float64 - } - - // Print rows returned and fill up results slice for later use - var results []result2 - for rows.Next() { - var r result2 - err = rows.Scan(&r.Bucket, &r.Avg) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - fmt.Printf("Time bucket: %s | Avg: %f\n", &r.Bucket, r.Avg) - } - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) - os.Exit(1) - } - } - ``` - - - -## Next steps - -Now that you're able to connect, read, and write to a {TIMESCALE_DB} instance from -your Go application, be sure to check out these advanced {TIMESCALE_DB} tutorials: - -* Refer to the [pgx documentation][pgx-docs] for more information about pgx. -* Get up and running with {TIMESCALE_DB} with the [Getting Started][getting-started] - tutorial. -* Want fast inserts on CSV data? Check out - [{TIMESCALE_DB} parallel copy][parallel-copy-tool], a tool for fast inserts, - written in Go. 
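
Before reaching for an external tool, note that pgx itself exposes PostgreSQL's COPY protocol, which streams many rows in a single round trip instead of issuing one `INSERT` per row. The following is a minimal sketch, not part of the original guide: it assumes the `sensor_data` hypertable created earlier in this guide and a `DATABASE_CONNECTION_STRING` environment variable holding your connection string, and it hard-codes two example rows where a real application would supply rows from its own ingestion pipeline:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	// Assumption: the connection string is supplied via an environment variable.
	connStr := os.Getenv("DATABASE_CONNECTION_STRING")
	dbpool, err := pgxpool.New(ctx, connStr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
		os.Exit(1)
	}
	defer dbpool.Close()

	// Rows to copy, one []any per row, in the same order as the column list below.
	// In a real application these would come from your CSV parser or data source.
	rows := [][]any{
		{time.Now(), 1, 22.5, 0.42},
		{time.Now(), 2, 21.9, 0.37},
	}

	// COPY the rows into the sensor_data hypertable in a single stream.
	copyCount, err := dbpool.CopyFrom(
		ctx,
		pgx.Identifier{"sensor_data"},
		[]string{"time", "sensor_id", "temperature", "cpu"},
		pgx.CopyFromRows(rows),
	)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Unable to copy rows: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Copied %d rows into sensor_data\n", copyCount)
}
```

Because `CopyFrom` sends all rows over one COPY stream, it generally ingests large volumes of data faster than the batched `INSERT` pattern shown above.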
- -[getting-started]: /getting-started/ -[golang-install]: https://golang.org/doc/install -[libpq-docs]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING -[parallel-copy-tool]: https://github.com/timescale/timescaledb-parallel-copy -[pgx-docs]: https://pkg.go.dev/github.com/jackc/pgx -[pgx-driver-github]: https://github.com/jackc/pgx -[install]: /getting-started/ -[connect]: /getting-started/start-coding-with-timescale/#connect-to-timescaledb -[create-table]: /getting-started/start-coding-with-timescale/#create-a-relational-table -[create-a-hypertable]: /getting-started/start-coding-with-timescale/#generate-a-hypertable -[insert]: /getting-started/start-coding-with-timescale/#insert-rows-of-data -[query]: /getting-started/start-coding-with-timescale/#execute-a-query -[create-hypertable-docs]: /use-timescale/hypertables/hypertable-crud/#create-a-hypertable -[insert]: /getting-started/start-coding-with-timescale/#insert-a-row-into-your-timescale-database -[query]: /getting-started/start-coding-with-timescale/#execute-a-query-on-your-timescale-database -[create-hypertable]: /getting-started/start-coding-with-timescale/#generate-a-hypertable diff --git a/snippets/coding/_start-coding-java.mdx b/snippets/coding/_start-coding-java.mdx deleted file mode 100644 index 0e66785..0000000 --- a/snippets/coding/_start-coding-java.mdx +++ /dev/null @@ -1,616 +0,0 @@ -import { SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -## Prerequisites - - - -* Install the [Java Development Kit (JDK)][jdk]. -* Install the [PostgreSQL JDBC driver][pg-jdbc-driver]. - -All code in this quick start is for Java 16 and later. If you are working -with older JDK versions, use legacy coding techniques. - -## Connect to your Tiger Cloud service - -In this section, you create a connection to your {SERVICE_SHORT} using an application in -a single file. You can use any of your favorite build tools, including `gradle` -or `maven`. - - - -1. Create a directory containing a text file called `Main.java`, with this content: - - ```java - package com.timescale.java; - - public class Main { - - public static void main(String... args) { - System.out.println("Hello, World!"); - } - } - ``` - -1. From the command line in the current directory, run the application: - - ```bash - java Main.java - ``` - - If the command is successful, `Hello, World!` line output is printed - to your console. - -1. Import the PostgreSQL JDBC driver. If you are using a dependency manager, - include the [PostgreSQL JDBC Driver][pg-jdbc-driver-dependency] as a - dependency. - -1. Download the [JAR artifact of the JDBC Driver][pg-jdbc-driver-artifact] and - save it with the `Main.java` file. - -1. Import the `JDBC Driver` into the Java application and display a list of - available drivers for the check: - - ```java - package com.timescale.java; - - import java.sql.DriverManager; - - public class Main { - - public static void main(String... args) { - DriverManager.drivers().forEach(System.out::println); - } - } - ``` - -1. Run all the examples: - - ```bash - java -cp *.jar Main.java - ``` - - If the command is successful, a string similar to - `org.postgresql.Driver@7f77e91b` is printed to your console. This means that you - are ready to connect to {TIMESCALE_DB} from Java. - -1. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for JDBC. 
- - You'll need: - - * password - * username - * host URL - * port - * database name - -1. Compose your connection string variable, using this format: - - ```java - var connUrl = "jdbc:postgresql://:/?user=&password="; - ``` - - For more information about creating connection strings, see the [JDBC documentation][pg-jdbc-driver-conn-docs]. - - - - This method of composing a connection string is for test or development - purposes only. For production, use environment variables for sensitive - details like your password, hostname, and port number. - - - - ```java - package com.timescale.java; - - import java.sql.DriverManager; - import java.sql.SQLException; - - public class Main { - - public static void main(String... args) throws SQLException { - var connUrl = "jdbc:postgresql://:/?user=&password="; - var conn = DriverManager.getConnection(connUrl); - System.out.println(conn.getClientInfo()); - } - } - ``` - -1. Run the code: - - ```bash - java -cp *.jar Main.java - ``` - - If the command is successful, a string similar to - `{ApplicationName=PostgreSQL JDBC Driver}` is printed to your console. - - - -## Create a relational table - -In this section, you create a table called `sensors` which holds the ID, type, -and location of your fictional sensors. Additionally, you create a hypertable -called `sensor_data` which holds the measurements of those sensors. The -measurements contain the time, sensor_id, temperature reading, and CPU -percentage of the sensors. - - - -1. Compose a string which contains the SQL statement to create a relational - table. This example creates a table called `sensors`, with columns `id`, - `type` and `location`: - - ```sql - CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type TEXT NOT NULL, - location TEXT NOT NULL - ); - ``` - -1. Create a statement, execute the query you created in the previous step, and - check that the table was created successfully: - - ```java - package com.timescale.java; - - import java.sql.DriverManager; - import java.sql.SQLException; - - public class Main { - - public static void main(String... args) throws SQLException { - var connUrl = "jdbc:postgresql://:/?user=&password="; - var conn = DriverManager.getConnection(connUrl); - - var createSensorTableQuery = """ - CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type TEXT NOT NULL, - location TEXT NOT NULL - ) - """; - try (var stmt = conn.createStatement()) { - stmt.execute(createSensorTableQuery); - } - - var showAllTablesQuery = "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = 'public'"; - try (var stmt = conn.createStatement(); - var rs = stmt.executeQuery(showAllTablesQuery)) { - System.out.println("Tables in the current database: "); - while (rs.next()) { - System.out.println(rs.getString("tablename")); - } - } - } - } - ``` - - - -## Create a hypertable - -When you have created the relational table, you can create a hypertable. -Creating tables and indexes, altering tables, inserting data, selecting data, -and most other tasks are executed on the hypertable. - - - -1. Create a `CREATE TABLE` SQL statement for - your hypertable. Notice how the hypertable has the compulsory time column: - - ```sql - CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER REFERENCES sensors (id), - value DOUBLE PRECISION - ); - ``` - -1. 
Create a statement, execute the query you created in the previous step: - - ```sql - SELECT create_hypertable('sensor_data', by_range('time')); - ``` - - - - The `by_range` and `by_hash` dimension builder is an addition to {TIMESCALE_DB} 2.13. - - - -1. Execute the two statements you created, and commit your changes to the - database: - - ```java - package com.timescale.java; - - import java.sql.Connection; - import java.sql.DriverManager; - import java.sql.SQLException; - import java.util.List; - - public class Main { - - public static void main(String... args) { - final var connUrl = "jdbc:postgresql://:/?user=&password="; - try (var conn = DriverManager.getConnection(connUrl)) { - createSchema(conn); - insertData(conn); - } catch (SQLException ex) { - System.err.println(ex.getMessage()); - } - } - - private static void createSchema(final Connection conn) throws SQLException { - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type TEXT NOT NULL, - location TEXT NOT NULL - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER REFERENCES sensors (id), - value DOUBLE PRECISION - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); - } - } - } - ``` - - - -## Insert data - -You can insert data into your hypertables in several different ways. In this -section, you can insert single rows, or insert by batches of rows. - - - -1. Open a connection to the database, use prepared statements to formulate the - `INSERT` SQL statement, then execute the statement: - - ```java - final List sensors = List.of( - new Sensor("temperature", "bedroom"), - new Sensor("temperature", "living room"), - new Sensor("temperature", "outside"), - new Sensor("humidity", "kitchen"), - new Sensor("humidity", "outside")); - for (final var sensor : sensors) { - try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) { - stmt.setString(1, sensor.type()); - stmt.setString(2, sensor.location()); - stmt.executeUpdate(); - } - } - ``` - - - -If you want to insert a batch of rows by using a batching mechanism. In this -example, you generate some sample time-series data to insert into the -`sensor_data` hypertable: - - - -1. Insert batches of rows: - - ```java - final var sensorDataCount = 100; - final var insertBatchSize = 10; - try (var stmt = conn.prepareStatement(""" - INSERT INTO sensor_data (time, sensor_id, value) - VALUES ( - generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'), - floor(random() * 4 + 1)::INTEGER, - random() - ) - """)) { - for (int i = 0; i < sensorDataCount; i++) { - stmt.addBatch(); - - if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) { - stmt.executeBatch(); - } - } - } - ``` - - - -## Execute a query - -This section covers how to execute queries against your database. - - - -## Execute queries on TimescaleDB - -1. Define the SQL query you'd like to run on the database. This example - combines time-series and relational data. It returns the average values for - every 15 minute interval for sensors with specific type and location. - - ```sql - SELECT time_bucket('15 minutes', time) AS bucket, avg(value) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.type = ? AND sensors.location = ? - GROUP BY bucket - ORDER BY bucket DESC; - ``` - -1. 
Execute the query with the prepared statement and read out the result set for - all `a`-type sensors located on the `floor`: - - ```java - try (var stmt = conn.prepareStatement(""" - SELECT time_bucket('15 minutes', time) AS bucket, avg(value) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.type = ? AND sensors.location = ? - GROUP BY bucket - ORDER BY bucket DESC - """)) { - stmt.setString(1, "temperature"); - stmt.setString(2, "living room"); - - try (var rs = stmt.executeQuery()) { - while (rs.next()) { - System.out.printf("%s: %f%n", rs.getTimestamp(1), rs.getDouble(2)); - } - } - } - ``` - - If the command is successful, you'll see output like this: - - ```bash - 2021-05-12 23:30:00.0: 0,508649 - 2021-05-12 23:15:00.0: 0,477852 - 2021-05-12 23:00:00.0: 0,462298 - 2021-05-12 22:45:00.0: 0,457006 - 2021-05-12 22:30:00.0: 0,568744 - ... - ``` - - - -## Next steps - -Now that you're able to connect, read, and write to a {TIMESCALE_DB} instance from -your Java application, and generate the scaffolding necessary to build a new -application from an existing {TIMESCALE_DB} instance, be sure to check out these -advanced {TIMESCALE_DB} tutorials: - -* [Continuous Aggregates][continuous-aggregates] -* [Migrate Your own Data][migrate] - -## Complete code samples - -This section contains complete code samples. - -### Complete code sample - -```java -package com.timescale.java; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.List; - -public class Main { - - public static void main(String... args) { - final var connUrl = "jdbc:postgresql://:/?user=&password="; - try (var conn = DriverManager.getConnection(connUrl)) { - createSchema(conn); - insertData(conn); - } catch (SQLException ex) { - System.err.println(ex.getMessage()); - } - } - - private static void createSchema(final Connection conn) throws SQLException { - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type TEXT NOT NULL, - location TEXT NOT NULL - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER REFERENCES sensors (id), - value DOUBLE PRECISION - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); - } - } - - private static void insertData(final Connection conn) throws SQLException { - final List sensors = List.of( - new Sensor("temperature", "bedroom"), - new Sensor("temperature", "living room"), - new Sensor("temperature", "outside"), - new Sensor("humidity", "kitchen"), - new Sensor("humidity", "outside")); - for (final var sensor : sensors) { - try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) { - stmt.setString(1, sensor.type()); - stmt.setString(2, sensor.location()); - stmt.executeUpdate(); - } - } - - final var sensorDataCount = 100; - final var insertBatchSize = 10; - try (var stmt = conn.prepareStatement(""" - INSERT INTO sensor_data (time, sensor_id, value) - VALUES ( - generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'), - floor(random() * 4 + 1)::INTEGER, - random() - ) - """)) { - for (int i = 0; i < sensorDataCount; i++) { - stmt.addBatch(); - - if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) { - stmt.executeBatch(); - } - } - } - } - - private record Sensor(String type, String 
location) { - } -} -``` - -### Execute more complex queries - -```java -package com.timescale.java; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.util.List; - -public class Main { - - public static void main(String... args) { - final var connUrl = "jdbc:postgresql://:/?user=&password="; - try (var conn = DriverManager.getConnection(connUrl)) { - createSchema(conn); - insertData(conn); - executeQueries(conn); - } catch (SQLException ex) { - System.err.println(ex.getMessage()); - } - } - - private static void createSchema(final Connection conn) throws SQLException { - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type TEXT NOT NULL, - location TEXT NOT NULL - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute(""" - CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER REFERENCES sensors (id), - value DOUBLE PRECISION - ) - """); - } - - try (var stmt = conn.createStatement()) { - stmt.execute("SELECT create_hypertable('sensor_data', by_range('time'))"); - } - } - - private static void insertData(final Connection conn) throws SQLException { - final List sensors = List.of( - new Sensor("temperature", "bedroom"), - new Sensor("temperature", "living room"), - new Sensor("temperature", "outside"), - new Sensor("humidity", "kitchen"), - new Sensor("humidity", "outside")); - for (final var sensor : sensors) { - try (var stmt = conn.prepareStatement("INSERT INTO sensors (type, location) VALUES (?, ?)")) { - stmt.setString(1, sensor.type()); - stmt.setString(2, sensor.location()); - stmt.executeUpdate(); - } - } - - final var sensorDataCount = 100; - final var insertBatchSize = 10; - try (var stmt = conn.prepareStatement(""" - INSERT INTO sensor_data (time, sensor_id, value) - VALUES ( - generate_series(now() - INTERVAL '24 hours', now(), INTERVAL '5 minutes'), - floor(random() * 4 + 1)::INTEGER, - random() - ) - """)) { - for (int i = 0; i < sensorDataCount; i++) { - stmt.addBatch(); - - if ((i > 0 && i % insertBatchSize == 0) || i == sensorDataCount - 1) { - stmt.executeBatch(); - } - } - } - } - - private static void executeQueries(final Connection conn) throws SQLException { - try (var stmt = conn.prepareStatement(""" - SELECT time_bucket('15 minutes', time) AS bucket, avg(value) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.type = ? AND sensors.location = ? 
- GROUP BY bucket - ORDER BY bucket DESC - """)) { - stmt.setString(1, "temperature"); - stmt.setString(2, "living room"); - - try (var rs = stmt.executeQuery()) { - while (rs.next()) { - System.out.printf("%s: %f%n", rs.getTimestamp(1), rs.getDouble(2)); - } - } - } - } - - private record Sensor(String type, String location) { - } -} -``` - -[jdk]: https://openjdk.java.net -[pg-jdbc-driver-artifact]: https://jdbc.postgresql.org/download/ -[pg-jdbc-driver-conn-docs]: https://jdbc.postgresql.org/documentation/datasource/ -[pg-jdbc-driver-dependency]: https://mvnrepository.com/artifact/org.postgresql/postgresql -[pg-jdbc-driver]: https://jdbc.postgresql.org -[connect]: #connect-java-to-timescaledb -[create-table]: #create-a-relational-table -[create-a-hypertable]: #create-a-hypertable -[insert]: #insert-a-batch-of-rows-into-timescaledb -[query]: #execute-queries-on-timescaledb -[install]: /getting-started/latest/ -[continuous-aggregates]: /use-timescale/continuous-aggregates/ -[migrate]: /migrate/ diff --git a/snippets/coding/_start-coding-node.mdx b/snippets/coding/_start-coding-node.mdx deleted file mode 100644 index 1fe0d17..0000000 --- a/snippets/coding/_start-coding-node.mdx +++ /dev/null @@ -1,361 +0,0 @@ -import { TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -## Prerequisites - - - -* Install [Node.js][node-install]. -* Install the Node.js package manager [npm][npm-install]. - -## Connect to TimescaleDB - -In this section, you create a connection to {TIMESCALE_DB} with a common Node.js -ORM (object relational mapper) called [Sequelize][sequelize-info]. - - - -1. At the command prompt, initialize a new Node.js app: - - ```bash - npm init -y - ``` - - This creates a `package.json` file in your directory, which contains all - of the dependencies for your project. It looks something like this: - - ```json - { - "name": "node-sample", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "keywords": [], - "author": "", - "license": "ISC" - } - ``` - -1. Install Express.js: - - ```bash - npm install express - ``` - -1. Create a simple web page to check the connection. Create a new file called - `index.js`, with this content: - - ```java - const express = require('express') - const app = express() - const port = 3000; - - app.use(express.json()); - app.get('/', (req, res) => res.send('Hello World!')) - app.listen(port, () => console.log(`Example app listening at http://localhost:${port}`)) - ``` - -1. Test your connection by starting the application: - - ```bash - node index.js - ``` - - In your web browser, navigate to `http://localhost:3000`. If the connection - is successful, it shows "Hello World!" - -1. Add Sequelize to your project: - - ```bash - npm install sequelize sequelize-cli pg pg-hstore - ``` - -1. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for Sequelize. - - You'll need: - - * password - * username - * host URL - * port - * database name - -1. Compose your connection string variable, using this format: - - ```java - 'postgres://:@:/' - ``` - -1. Open the `index.js` file you created. 
Require Sequelize in the application, - and declare the connection string: - - ```java - const Sequelize = require('sequelize') - const sequelize = new Sequelize('postgres://:@:/', - { - dialect: 'postgres', - protocol: 'postgres', - dialectOptions: { - ssl: { - require: true, - rejectUnauthorized: false - } - } - }) - ``` - - Make sure you add the SSL settings in the `dialectOptions` sections. You - can't connect to {TIMESCALE_DB} using SSL without them. - -1. You can test the connection by adding these lines to `index.js` after the - `app.get` statement: - - ```java - sequelize.authenticate().then(() => { - console.log('Connection has been established successfully.'); - }).catch(err => { - console.error('Unable to connect to the database:', err); - }); - ``` - - Start the application on the command line: - - ```bash - node index.js - ``` - - If the connection is successful, you'll get output like this: - - ```bash - Example app listening at http://localhost:3000 - Executing (default): SELECT 1+1 AS result - Connection has been established successfully. - ``` - - - -## Create a relational table - -In this section, you create a relational table called `page_loads`. - - - -1. Use the Sequelize command line tool to create a table and model called `page_loads`: - - ```bash - npx sequelize model:generate --name page_loads \ - --attributes userAgent:string,time:date - ``` - - The output looks similar to this: - - ```bash - Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] - - New model was created at . - New migration was created at . - ``` - -1. Edit the migration file so that it sets up a migration key: - - ```java - 'use strict'; - module.exports = { - up: async (queryInterface, Sequelize) => { - await queryInterface.createTable('page_loads', { - userAgent: { - primaryKey: true, - type: Sequelize.STRING - }, - time: { - primaryKey: true, - type: Sequelize.DATE - } - }); - }, - down: async (queryInterface, Sequelize) => { - await queryInterface.dropTable('page_loads'); - } - }; - ``` - -1. Migrate the change and make sure that it is reflected in the database: - - ```bash - npx sequelize db:migrate - ``` - - The output looks similar to this: - - ```bash - Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] - - Loaded configuration file "config/config.json". - Using environment "development". - == 20200528195725-create-page-loads: migrating ======= - == 20200528195725-create-page-loads: migrated (0.443s) - ``` - -1. Create the `PageLoads` model in your code. In the `index.js` file, above the - `app.use` statement, add these lines: - - ```java - let PageLoads = sequelize.define('page_loads', { - userAgent: {type: Sequelize.STRING, primaryKey: true }, - time: {type: Sequelize.DATE, primaryKey: true } - }, { timestamps: false }); - ``` - -1. Instantiate a `PageLoads` object and save it to the database. - - - -## Create a hypertable - -When you have created the relational table, you can create a hypertable. -Creating tables and indexes, altering tables, inserting data, selecting data, -and most other tasks are executed on the hypertable. - - - -1. Create a migration to modify the `page_loads` relational table, and change - it to a hypertable by first running the following command: - - ```bash - npx sequelize migration:generate --name add_hypertable - ``` - - The output looks similar to this: - - ```bash - Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] - - migrations folder at already exists. - New migration was created at /20200601202912-add_hypertable.js . - ``` - -1. 
In the `migrations` folder, there is now a new file. Open the - file, and add this content: - - ```js - 'use strict'; - - module.exports = { - up: (queryInterface, Sequelize) => { - return queryInterface.sequelize.query("SELECT create_hypertable('page_loads', by_range('time'));"); - }, - - down: (queryInterface, Sequelize) => { - } - }; - ``` - - - - The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. - - - -1. At the command prompt, run the migration command: - - ```bash - npx sequelize db:migrate - ``` - - The output looks similar to this: - - ```bash - Sequelize CLI [Node: 12.16.2, CLI: 5.5.1, ORM: 5.21.11] - - Loaded configuration file "config/config.json". - Using environment "development". - == 20200601202912-add_hypertable: migrating ======= - == 20200601202912-add_hypertable: migrated (0.426s) - ``` - - - -## Insert rows of data - -This section covers how to insert data into your hypertables. - - - -1. In the `index.js` file, modify the `/` route to get the `user-agent` from - the request object (`req`) and the current timestamp. Then, call the - `create` method on `PageLoads` model, supplying the user agent and timestamp - parameters. The `create` call executes an `INSERT` on the database: - - ```java - app.get('/', async (req, res) => { - // get the user agent and current time - const userAgent = req.get('user-agent'); - const time = new Date().getTime(); - - try { - // insert the record - await PageLoads.create({ - userAgent, time - }); - - // send response - res.send('Inserted!'); - } catch (e) { - console.log('Error inserting data', e) - } - }) - ``` - - - -## Execute a query - -This section covers how to execute queries against your database. In this -example, every time the page is reloaded, all information currently in the table -is displayed. - - - -1. Modify the `/` route in the `index.js` file to call the Sequelize `findAll` - function and retrieve all data from the `page_loads` table using the - `PageLoads` model: - - ```java - app.get('/', async (req, res) => { - // get the user agent and current time - const userAgent = req.get('user-agent'); - const time = new Date().getTime(); - - try { - // insert the record - await PageLoads.create({ - userAgent, time - }); - - // now display everything in the table - const messages = await PageLoads.findAll(); - res.send(messages); - } catch (e) { - console.log('Error inserting data', e) - } - }) - ``` - -Now, when you reload the page, you should see all of the rows currently in the -`page_loads` table. - - - -[node-install]: https://nodejs.org -[npm-install]: https://www.npmjs.com/get-npm -[sequelize-info]: https://sequelize.org -[connect]: #connect-to-timescaledb -[create-table]: #create-a-relational-table -[create-a-hypertable]: #create-a-hypertable -[insert]: #insert-rows-of-data -[query]: #execute-a-query -[install]: /getting-started/latest/ diff --git a/snippets/coding/_start-coding-python.mdx b/snippets/coding/_start-coding-python.mdx deleted file mode 100644 index 04ce49a..0000000 --- a/snippets/coding/_start-coding-python.mdx +++ /dev/null @@ -1,427 +0,0 @@ -import { PG, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -## Prerequisites - - - -* Install the `psycopg2` library. - - For more information, see the [psycopg2 documentation][psycopg2-docs]. -* Create a [Python virtual environment][virtual-env]. 
[](#)(optional) - -## Connect to TimescaleDB - -In this section, you create a connection to {TIMESCALE_DB} using the `psycopg2` -library. This library is one of the most popular {PG} libraries for -Python. It allows you to execute raw SQL queries efficiently and safely, and -prevents common attacks such as SQL injection. - - - -1. Import the psycogpg2 library: - - ```python - import psycopg2 - ``` - -1. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for `psycopg2`. - - You'll need: - - * password - * username - * host URL - * port - * database name - -1. Compose your connection string variable as a - [libpq connection string][pg-libpq-string], using this format: - - ```python - CONNECTION = "postgres://username:password@host:port/dbname" - ``` - - If you're using a hosted version of {TIMESCALE_DB}, or generally require an SSL - connection, use this version instead: - - ```python - CONNECTION = "postgres://username:password@host:port/dbname?sslmode=require" - ``` - - Alternatively you can specify each parameter in the connection string as follows - - ```python - CONNECTION = "dbname=tsdb user=tsdbadmin password=secret host=host.com port=5432 sslmode=require" - ``` - - - - This method of composing a connection string is for test or development - purposes only. For production, use environment variables for sensitive - details like your password, hostname, and port number. - - - -1. Use the `psycopg2` [connect function][psycopg2-connect] to create a new - database session and create a new [cursor object][psycopg2-cursor] to - interact with the database. - - In your `main` function, add these lines: - - ```python - CONNECTION = "postgres://username:password@host:port/dbname" - with psycopg2.connect(CONNECTION) as conn: - cursor = conn.cursor() - # use the cursor to interact with your database - # cursor.execute("SELECT * FROM table") - ``` - - Alternatively, you can create a connection object and pass the object - around as needed, like opening a cursor to perform database operations: - - ```python - CONNECTION = "postgres://username:password@host:port/dbname" - conn = psycopg2.connect(CONNECTION) - cursor = conn.cursor() - # use the cursor to interact with your database - cursor.execute("SELECT 'hello world'") - print(cursor.fetchone()) - ``` - - - -## Create a relational table - -In this section, you create a table called `sensors` which holds the ID, type, -and location of your fictional sensors. Additionally, you create a hypertable -called `sensor_data` which holds the measurements of those sensors. The -measurements contain the time, sensor_id, temperature reading, and CPU -percentage of the sensors. - - - -1. Compose a string which contains the SQL statement to create a relational - table. This example creates a table called `sensors`, with columns `id`, - `type` and `location`: - - ```python - query_create_sensors_table = """CREATE TABLE sensors ( - id SERIAL PRIMARY KEY, - type VARCHAR(50), - location VARCHAR(50) - ); - """ - ``` - -1. Open a cursor, execute the query you created in the previous step, and - commit the query to make the changes persistent. Afterward, close the cursor - to clean up: - - ```python - cursor = conn.cursor() - # see definition in Step 1 - cursor.execute(query_create_sensors_table) - conn.commit() - cursor.close() - ``` - - - -## Create a hypertable - -When you have created the relational table, you can create a hypertable. 
-Creating tables and indexes, altering tables, inserting data, selecting data, -and most other tasks are executed on the hypertable. - - - -1. Create a string variable that contains the `CREATE TABLE` SQL statement for - your hypertable. Notice how the hypertable has the compulsory time column: - - ```python - # create sensor data hypertable - query_create_sensordata_table = """CREATE TABLE sensor_data ( - time TIMESTAMPTZ NOT NULL, - sensor_id INTEGER, - temperature DOUBLE PRECISION, - cpu DOUBLE PRECISION, - FOREIGN KEY (sensor_id) REFERENCES sensors (id) - ); - """ - ``` - -2. Formulate a `SELECT` statement that converts the `sensor_data` table to a - hypertable. You must specify the table name to convert to a hypertable, and - the name of the time column as the two arguments. For more information, see - the [`create_hypertable` docs][create-hypertable-docs]: - - ```python - query_create_sensordata_hypertable = "SELECT create_hypertable('sensor_data', by_range('time'));" - ``` - - - - The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. - - - -3. Open a cursor with the connection, execute the statements from the previous - steps, commit your changes, and close the cursor: - - ```python - cursor = conn.cursor() - cursor.execute(query_create_sensordata_table) - cursor.execute(query_create_sensordata_hypertable) - # commit changes to the database to make changes persistent - conn.commit() - cursor.close() - ``` - - - -## Insert rows of data - -You can insert data into your hypertables in several different ways. In this -section, you can use `psycopg2` with prepared statements, or you can use -`pgcopy` for a faster insert. - - - -1. This example inserts a list of tuples, or relational data, called `sensors`, - into the relational table named `sensors`. Open a cursor with a connection - to the database, use prepared statements to formulate the `INSERT` SQL - statement, and then execute that statement: - - ```python - sensors = [('a', 'floor'), ('a', 'ceiling'), ('b', 'floor'), ('b', 'ceiling')] - cursor = conn.cursor() - for sensor in sensors: - try: - cursor.execute("INSERT INTO sensors (type, location) VALUES (%s, %s);", - (sensor[0], sensor[1])) - except (Exception, psycopg2.Error) as error: - print(error.pgerror) - conn.commit() - ``` - -1. [](#)(optional)Alternatively, you can pass variables to the `cursor.execute` - function and separate the formulation of the SQL statement, `SQL`, from the - data being passed with it into the prepared statement, `data`: - - ```python - SQL = "INSERT INTO sensors (type, location) VALUES (%s, %s);" - sensors = [('a', 'floor'), ('a', 'ceiling'), ('b', 'floor'), ('b', 'ceiling')] - cursor = conn.cursor() - for sensor in sensors: - try: - data = (sensor[0], sensor[1]) - cursor.execute(SQL, data) - except (Exception, psycopg2.Error) as error: - print(error.pgerror) - conn.commit() - ``` - - - -If you choose to use `pgcopy` instead, install the `pgcopy` package -[using pip][pgcopy-install], and then add this line to your list of -`import` statements: - -```python -from pgcopy import CopyManager -``` - - - -1. Generate some random sensor data using the `generate_series` function - provided by {PG}. This example inserts a total of 480 rows of data (4 - readings, every 5 minutes, for 24 hours). 
In your application, this would be - the query that saves your time-series data into the hypertable: - - ```python - # for sensors with ids 1-4 - for id in range(1, 4, 1): - data = (id,) - # create random data - simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - %s as sensor_id, - random()*100 AS temperature, - random() AS cpu; - """ - cursor.execute(simulate_query, data) - values = cursor.fetchall() - ``` - -1. Define the column names of the table you want to insert data into. This - example uses the `sensor_data` hypertable created earlier. This hypertable - consists of columns named `time`, `sensor_id`, `temperature` and `cpu`. The - column names are defined in a list of strings called `cols`: - - ```python - cols = ['time', 'sensor_id', 'temperature', 'cpu'] - ``` - -1. Create an instance of the `pgcopy` CopyManager, `mgr`, and pass the - connection variable, hypertable name, and list of column names. Then use the - `copy` function of the CopyManager to insert the data into the database - quickly using `pgcopy`. - - ```python - mgr = CopyManager(conn, 'sensor_data', cols) - mgr.copy(values) - ``` - -1. Commit to persist changes: - - ```python - conn.commit() - ``` - -1. [](#)The full sample code to insert data into {TIMESCALE_DB} using - `pgcopy`, using the example of sensor data from four sensors: - - ```python - # insert using pgcopy - def fast_insert(conn): - cursor = conn.cursor() - - # for sensors with ids 1-4 - for id in range(1, 4, 1): - data = (id,) - # create random data - simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - %s as sensor_id, - random()*100 AS temperature, - random() AS cpu; - """ - cursor.execute(simulate_query, data) - values = cursor.fetchall() - - # column names of the table you're inserting into - cols = ['time', 'sensor_id', 'temperature', 'cpu'] - - # create copy manager with the target table and insert - mgr = CopyManager(conn, 'sensor_data', cols) - mgr.copy(values) - - # commit after all sensor data is inserted - # could also commit after each sensor insert is done - conn.commit() - ``` - -1. [](#)(optional)You can also check if the insertion worked: - - ```python - cursor.execute("SELECT * FROM sensor_data LIMIT 5;") - print(cursor.fetchall()) - ``` - - - -## Execute a query - -This section covers how to execute queries against your database. - -The first procedure shows a simple `SELECT *` query. For more complex queries, -you can use prepared statements to ensure queries are executed safely against -the database. - -For more information about properly using placeholders in `psycopg2`, see the -[basic module usage document][psycopg2-docs-basics]. -For more information about how to execute more complex queries in `psycopg2`, -see the [psycopg2 documentation][psycopg2-docs-basics]. - -### Execute a query - - - -1. Define the SQL query you'd like to run on the database. This example is a - simple `SELECT` statement querying each row from the previously created - `sensor_data` table. - - ```python - query = "SELECT * FROM sensor_data;" - ``` - -1. Open a cursor from the existing database connection, `conn`, and then execute - the query you defined: - - ```python - cursor = conn.cursor() - query = "SELECT * FROM sensor_data;" - cursor.execute(query) - ``` - -1. To access all resulting rows returned by your query, use one of `pyscopg2`'s - [results retrieval methods][results-retrieval-methods], - such as `fetchall()` or `fetchmany()`. 
This example prints the results of - the query, row by row. Note that the result of `fetchall()` is a list of - tuples, so you can handle them accordingly: - - ```python - cursor = conn.cursor() - query = "SELECT * FROM sensor_data;" - cursor.execute(query) - for row in cursor.fetchall(): - print(row) - cursor.close() - ``` - -1. [](#)(optional)If you want a list of dictionaries instead, you can define the - cursor using [`DictCursor`][dictcursor-docs]: - - ```python - cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) - ``` - - Using this cursor, `cursor.fetchall()` returns a list of dictionary-like objects. - - - -For more complex queries, you can use prepared statements to ensure queries are -executed safely against the database. - -### Execute queries using prepared statements - - - -1. Write the query using prepared statements: - - ```python - # query with placeholders - cursor = conn.cursor() - query = """ - SELECT time_bucket('5 minutes', time) AS five_min, avg(cpu) - FROM sensor_data - JOIN sensors ON sensors.id = sensor_data.sensor_id - WHERE sensors.location = %s AND sensors.type = %s - GROUP BY five_min - ORDER BY five_min DESC; - """ - location = "floor" - sensor_type = "a" - data = (location, sensor_type) - cursor.execute(query, data) - results = cursor.fetchall() - ``` - - - -[install]: /getting-started/latest/ -[create-hypertable-docs]: /api/hypertable/create_hypertable -[dictcursor-docs]: https://www.psycopg.org/docs/extras.html#dictionary-like-cursor -[pg-libpq-string]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING -[pgcopy-install]: https://pypi.org/project/pgcopy/ -[psycopg2-connect]: https://www.psycopg.org/docs/module.html?highlight=connect#psycopg2.connect -[psycopg2-cursor]: https://www.psycopg.org/docs/connection.html?highlight=cursor#connection.cursor -[psycopg2-docs-basics]: https://www.psycopg.org/docs/usage.html -[psycopg2-docs]: https://pypi.org/project/psycopg2/ -[results-retrieval-methods]:https://www.psycopg.org/docs/cursor.html -[virtual-env]: https://docs.python.org/3/library/venv.html -[connect]: #connect-to-timescaledb -[create-table]: #create-a-relational-table -[create-a-hypertable]: #create-a-hypertable -[insert]: #insert-rows-of-data -[query]: #execute-a-query diff --git a/snippets/coding/_start-coding-ruby.mdx b/snippets/coding/_start-coding-ruby.mdx deleted file mode 100644 index d40f1e7..0000000 --- a/snippets/coding/_start-coding-ruby.mdx +++ /dev/null @@ -1,426 +0,0 @@ -import { CLOUD_LONG, COMPANY, PG, SELF_LONG_CAP, TIMESCALE_DB } from '/snippets/vars.mdx'; -import IntegrationPrereqs from "/snippets/prerequisites/_integration-prereqs.mdx"; - -## Prerequisites - - - -* Install [Rails][rails-guide]. - -## Connect a Rails app to your service - -Every {SERVICE_LONG} is a 100% {PG} database hosted in {CLOUD_LONG} with -{COMPANY} extensions such as {TIMESCALE_DB}. You connect to your {SERVICE_LONG} -from a standard Rails app configured for {PG}. - - - -1. **Create a new Rails app configured for {PG}** - - Rails creates and bundles your app, then installs the standard {PG} Gems. - - ```bash - rails new my_app -d=postgresql - cd my_app - ``` - -1. **Install the {TIMESCALE_DB} gem** - - 1. Open `Gemfile`, add the following line, then save your changes: - - ```ruby - gem 'timescaledb' - ``` - - 1. In Terminal, run the following command: - - ```bash - bundle install - ``` - -1. **Connect your app to your {SERVICE_LONG}** - - 1. 
In `/config/database.yml` update the configuration to read securely connect to your {SERVICE_LONG} - by adding `url: <%= ENV['DATABASE_URL'] %>` to the default configuration: - - ```yaml - default: &default - adapter: postgresql - encoding: unicode - pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> - url: <%= ENV['DATABASE_URL'] %> - ``` - - 1. Set the environment variable for `DATABASE_URL` to the value of `Service URL` from - your [connection details][connection-info] - ```bash - export DATABASE_URL="value of Service URL" - ``` - - 1. Create the database: - - **{CLOUD_LONG}**: nothing to do. The database is part of your {SERVICE_LONG}. - - **{SELF_LONG_CAP}**, create the database for the project: - - ```bash - rails db:create - ``` - - 1. Run migrations: - - ```bash - rails db:migrate - ``` - - 1. Verify the connection from your app to your {SERVICE_LONG}: - - ```bash - echo "\dx" | rails dbconsole - ``` - - The result shows the list of extensions in your {SERVICE_LONG} - - | Name | Version | Schema | Description | - | -- | -- | -- | -- | - | pg_buffercache | 1.5 | public | examine the shared buffer cache| - | pg_stat_statements | 1.11 | public | track planning and execution statistics of all SQL statements executed| - | plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language| - | postgres_fdw | 1.1 | public | foreign-data wrapper for remote {PG} servers| - | timescaledb | 2.18.1 | public | Enables scalable inserts and complex queries for time-series data (Community Edition)| - | timescaledb_toolkit | 1.19.0 | public | Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities| - - - -## Optimize time-series data in hypertables - -Hypertables are {PG} tables designed to simplify and accelerate data analysis. Anything -you can do with regular {PG} tables, you can do with hypertables - but much faster and more conveniently. - -In this section, you use the helpers in the {TIMESCALE_DB} gem to create and manage a [hypertable][about-hypertables]. - - - -1. **Generate a migration to create the page loads table** - - ```bash - rails generate migration create_page_loads - ``` - - This creates the `/db/migrate/_create_page_loads.rb` migration file. - -1. **Add hypertable options** - - Replace the contents of `/db/migrate/_create_page_loads.rb` - with the following: - - ```ruby - class CreatePageLoads < ActiveRecord::Migration[8.0] - def change - hypertable_options = { - time_column: 'created_at', - chunk_time_interval: '1 day', - compress_segmentby: 'path', - compress_orderby: 'created_at', - compress_after: '7 days', - drop_after: '30 days' - } - - create_table :page_loads, id: false, primary_key: [:created_at, :user_agent, :path], hypertable: hypertable_options do |t| - t.timestamptz :created_at, null: false - t.string :user_agent - t.string :path - t.float :performance - end - end - end - ``` - - The `id` column is not included in the table. This is because {TIMESCALE_DB} requires that any `UNIQUE` or `PRIMARY KEY` - indexes on the table include all partitioning columns. In this case, this is the time column. A new - Rails model includes a `PRIMARY KEY` index for id by default: either remove the column or make sure that the index - includes time as part of a "composite key." - - For more information, check the Roby docs around [composite primary keys][rails-compostite-primary-keys]. - -1. 
**Create a `PageLoad` model** - - Create a new file called `/app/models/page_load.rb` and add the following code: - - ```ruby - class PageLoad < ApplicationRecord - extend Timescaledb::ActsAsHypertable - include Timescaledb::ContinuousAggregatesHelper - - acts_as_hypertable time_column: "created_at", - segment_by: "path", - value_column: "performance" - - # Basic scopes for filtering by browser - scope :chrome_users, -> { where("user_agent LIKE ?", "%Chrome%") } - scope :firefox_users, -> { where("user_agent LIKE ?", "%Firefox%") } - scope :safari_users, -> { where("user_agent LIKE ?", "%Safari%") } - - # Performance analysis scopes - scope :performance_stats, -> { - select("stats_agg(#{value_column}) as stats_agg") - } - - scope :slow_requests, -> { where("performance > ?", 1.0) } - scope :fast_requests, -> { where("performance < ?", 0.1) } - - # Set up continuous aggregates for different timeframes - continuous_aggregates scopes: [:performance_stats], - timeframes: [:minute, :hour, :day], - refresh_policy: { - minute: { - start_offset: '3 minute', - end_offset: '1 minute', - schedule_interval: '1 minute' - }, - hour: { - start_offset: '3 hours', - end_offset: '1 hour', - schedule_interval: '1 minute' - }, - day: { - start_offset: '3 day', - end_offset: '1 day', - schedule_interval: '1 minute' - } - } - end - ``` - -1. **Run the migration** - - ```bash - rails db:migrate - ``` - - - -## Insert data your service - -The {TIMESCALE_DB} gem provides efficient ways to insert data into hypertables. This section -shows you how to ingest test data into your hypertable. - - - -1. **Create a controller to handle page loads** - - Create a new file called `/app/controllers/application_controller.rb` and add the following code: - - ```ruby - class ApplicationController < ActionController::Base - around_action :track_page_load - - private - - def track_page_load - start_time = Time.current - yield - end_time = Time.current - - PageLoad.create( - path: request.path, - user_agent: request.user_agent, - performance: (end_time - start_time) - ) - end - end - ``` - -1. **Generate some test data** - - Use `bin/console` to join a Rails console session and run the following code - to define some random page load access data: - - ```ruby - def generate_sample_page_loads(total: 1000) - time = 1.month.ago - paths = %w[/ /about /contact /products /blog] - browsers = [ - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15" - ] - - total.times.map do - time = time + rand(60).seconds - { - path: paths.sample, - user_agent: browsers.sample, - performance: rand(0.1..2.0), - created_at: time, - updated_at: time - } - end - end - ``` - -1. **Insert the generated data into your {SERVICE_LONG}** - - ```bash - # Insert the data in batches - PageLoad.insert_all(generate_sample_page_loads, returning: false) - ``` - -1. **Validate the test data in your {SERVICE_LONG}** - - ```bash - PageLoad.count - PageLoad.first - ``` - - - -## Reference - -This section lists the most common tasks you might perform with the {TIMESCALE_DB} gem. - -### Query scopes - -The {TIMESCALE_DB} gem provides several convenient scopes for querying your time-series data. 
- - -- Built-in time-based scopes: - - ```ruby - PageLoad.last_hour.count - PageLoad.today.count - PageLoad.this_week.count - PageLoad.this_month.count - ``` - -- Browser-specific scopes: - - ```ruby - # Count requests by browser - PageLoad.chrome_users.last_hour.count - PageLoad.firefox_users.last_hour.count - PageLoad.safari_users.last_hour.count - - # Performance analysis - PageLoad.slow_requests.last_hour.count - PageLoad.fast_requests.last_hour.count - ``` - -- Query continuous aggregates: - - This query fetches the average and standard deviation from the performance stats for the `/products` path over the last day. - - ```ruby - # Access aggregated performance stats through generated classes - PageLoad::PerformanceStatsPerMinute.last_hour - PageLoad::PerformanceStatsPerHour.last_day - PageLoad::PerformanceStatsPerDay.last_month - - # Get statistics for a specific path - stats = PageLoad::PerformanceStatsPerHour.last_day.where(path: '/products').select("average(stats_agg) as average, stddev(stats_agg) as stddev").first - puts "Average: #{stats.average}" - puts "Standard Deviation: #{stats.stddev}" - ``` - -### TimescaleDB features - -The {TIMESCALE_DB} gem provides utility methods to access hypertable and chunk information. Every model that uses -the `acts_as_hypertable` method has access to these methods. - - -#### Access hypertable and chunk information - -- View chunk or hypertable information: - - ```ruby - PageLoad.chunks.count - PageLoad.hypertable.detailed_size - ``` - -- Compress/Decompress chunks: - - ```ruby - PageLoad.chunks.uncompressed.first.compress! # Compress the first uncompressed chunk - PageLoad.chunks.compressed.first.decompress! # Decompress the oldest chunk - PageLoad.hypertable.compression_stats # View compression stats - - ``` - -#### Access hypertable stats - -You collect hypertable stats using methods that provide insights into your hypertable's structure, size, and compression -status: - -- Get basic hypertable information: - - ```ruby - hypertable = PageLoad.hypertable - hypertable.hypertable_name # The name of your hypertable - hypertable.schema_name # The schema where the hypertable is located - ``` - -- Get detailed size information: - - ```ruby - hypertable.detailed_size # Get detailed size information for the hypertable - hypertable.compression_stats # Get compression statistics - hypertable.chunks_detailed_size # Get chunk information - hypertable.approximate_row_count # Get approximate row count - hypertable.dimensions.map(&:column_name) # Get dimension information - hypertable.continuous_aggregates.map(&:view_name) # Get continuous aggregate view names - ``` - -#### Continuous aggregates - -The `continuous_aggregates` method generates a class for each continuous aggregate. - -- Get all the continuous aggregate classes: - - ```ruby - PageLoad.descendants # Get all continuous aggregate classes - ``` - -- Manually refresh a continuous aggregate: - - ```ruby - PageLoad.refresh_aggregates - ``` - -- Create or drop a continuous aggregate: - - Create or drop all the continuous aggregates in the proper order to build them hierarchically. See more about how it - works in this [blog post][ruby-blog-post]. - - ```ruby - PageLoad.create_continuous_aggregates - PageLoad.drop_continuous_aggregates - ``` - - - - -## Next steps - -Now that you have integrated the ruby gem into your app: - -* Learn more about the [{TIMESCALE_DB} gem][timescaledb-ruby-gem]. -* Check out the [official docs][timescaledb-ruby-docs]. 
-* Follow the [LTTB][LTTB], [Open AI long-term storage][open-ai-tutorial], and [candlesticks][candlesticks] tutorials. - -[connect]: #connect-to-timescaledb -[create-a-hypertable]: #create-a-hypertable -[insert]: #insert-data -[query]: #execute-queries -[create-aggregates]: #execute-queries -[add-policies]: #manage-chunks-and-compression -[manage-chunks]: #manage-chunks-and-compression -[install]: /getting-started/latest/ -[psql-install]: /integrations/psql/ -[rails-guide]: https://guides.rubyonrails.org/install_ruby_on_rails.html#installing-rails -[connection-info]: /integrations/find-connection-details/ -[about-hypertables]: /use-timescale/hypertables/ -[rails-compostite-primary-keys]: https://guides.rubyonrails.org/active_record_composite_primary_keys.html -[ruby-blog-post]: https://www.timescale.com/blog/building-a-better-ruby-orm-for-time-series-and-analytics -[timescaledb-ruby-gem]: https://github.com/timescale/timescaledb-ruby -[timescaledb-ruby-docs]: https://timescale.github.io/timescaledb-ruby/ -[LTTB]: https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/ -[open-ai-tutorial]: https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/ -[candlesticks]: https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/ diff --git a/snippets/integrations/code/_start-coding-golang.mdx b/snippets/integrations/code/_start-coding-golang.mdx index a3e0c99..fd07018 100644 --- a/snippets/integrations/code/_start-coding-golang.mdx +++ b/snippets/integrations/code/_start-coding-golang.mdx @@ -14,34 +14,24 @@ In this section, you create a connection to {CLOUD_LONG} using the PGX driver. PGX is a toolkit designed to help Go developers work directly with {PG}. You can use it to help your Go application interact directly with TimescaleDB. -1. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for PGX. - - You'll need: - - * password - * username - * host URL - * port number - * database name - -2. Compose your connection string variable as a - [libpq connection string][libpq-connstring], using this format: +1. [Locate your {TIMESCALE_DB} credentials][connection-info] and compose a [libpq connection string][libpq-connstring], + for PGX in the following format: ```go connStr := "postgres://username:password@host:port/dbname" ``` - If you're using a hosted version of TimescaleDB, or if you need an SSL + If you're using a hosted version of {TIMESCALE_DB}, or if you need an SSL connection, use this format instead: ```go connStr := "postgres://username:password@host:port/dbname?sslmode=require" ``` -3. You can check that you're connected to your database with this - hello world program: +1. Check that you're connected to your database with this hello world program: + + ```go package main @@ -85,50 +75,49 @@ You can use it to help your Go application interact directly with TimescaleDB. ```go os.Getenv("DATABASE_CONNECTION_STRING") ``` + + -Alternatively, you can connect to {TIMESCALE_DB} using a connection pool. -Connection pooling is useful to conserve computing resources, and can also -result in faster database queries: + To create a connection pool that can be used for concurrent connections to + your database, use the `pgxpool.New()` function instead of + `pgx.Connect()`. Also note that this script imports + `github.com/jackc/pgx/v5/pgxpool`, instead of `pgx/v5` which was used to + create a single connection: -1. 
To create a connection pool that can be used for concurrent connections to - your database, use the `pgxpool.New()` function instead of - `pgx.Connect()`. Also note that this script imports - `github.com/jackc/pgx/v5/pgxpool`, instead of `pgx/v5` which was used to - create a single connection: - - ```go - package main + ```go + package main - import ( - "context" - "fmt" - "os" + import ( + "context" + "fmt" + "os" - "github.com/jackc/pgx/v5/pgxpool" - ) + "github.com/jackc/pgx/v5/pgxpool" + ) - func main() { + func main() { - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() - //run a simple query to check our connection - var greeting string - err = dbpool.QueryRow(ctx, "select 'Hello, Tiger Data (but concurrently)'").Scan(&greeting) - if err != nil { - fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) - os.Exit(1) + //run a simple query to check our connection + var greeting string + err = dbpool.QueryRow(ctx, "select 'Hello, Tiger Data (but concurrently)'").Scan(&greeting) + if err != nil { + fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err) + os.Exit(1) + } + fmt.Println(greeting) } - fmt.Println(greeting) - } - ``` - + ``` + + ## Create a relational table In this section, you create a table called `sensors` which holds the ID, type, @@ -145,7 +134,7 @@ percentage of the sensors. queryCreateTable := `CREATE TABLE sensors (id SERIAL PRIMARY KEY, type VARCHAR(50), location VARCHAR(50));` ``` -2. Execute the `CREATE TABLE` statement with the `Exec()` function on the +1. Execute the `CREATE TABLE` statement with the `Exec()` function on the `dbpool` object, using the arguments of the current context and the statement string you created: @@ -204,7 +193,7 @@ and most other tasks are executed on the hypertable. ` ``` -2. Formulate the `SELECT` statement to convert the table into a hypertable. You +1. Formulate the `SELECT` statement to convert the table into a hypertable. You must specify the table name to convert to a hypertable, and its time column name as the second argument. For more information, see the [`create_hypertable` docs](/api/hypertable/create_hypertable): @@ -217,7 +206,7 @@ and most other tasks are executed on the hypertable. The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. -3. Execute the `CREATE TABLE` statement and `SELECT` statement which converts +1. Execute the `CREATE TABLE` statement and `SELECT` statement which converts the table into a hypertable. You can do this by calling the `Exec()` function on the `dbpool` object, using the arguments of the current context, and the `queryCreateTable` and `queryCreateHypertable` statement strings: @@ -274,8 +263,7 @@ ways. Each of these example inserts the data from the two arrays, `sensorTypes` `sensorLocations`, into the relational table named `sensors`. The first example inserts a single row of data at a time. The second example -inserts multiple rows of data. The third example uses batch inserts to speed up -the process. +inserts multiple rows of data. 1. 
Open a connection pool to the database, then use the prepared statements to formulate an `INSERT` SQL statement, and execute it: @@ -327,8 +315,8 @@ the process. } ``` -Instead of inserting a single row of data at a time, you can use this procedure -to insert multiple rows of data, instead: + Instead of inserting a single row of data at a time, you can use this procedure + to insert multiple rows of data, instead: 1. This example uses {PG} to generate some sample time-series to insert into the `sensor_data` hypertable. Define the SQL statement to generate the @@ -413,7 +401,7 @@ to insert multiple rows of data, instead: } ``` -2. Formulate an SQL insert statement for the `sensor_data` hypertable: +1. Formulate an SQL insert statement for the `sensor_data` hypertable: ```go //SQL query to generate sample data @@ -422,7 +410,7 @@ to insert multiple rows of data, instead: ` ``` -3. Execute the SQL statement for each sample in the results slice: +1. Execute the SQL statement for each sample in the results slice: ```go //Insert contents of results slice into TimescaleDB @@ -439,7 +427,7 @@ to insert multiple rows of data, instead: fmt.Println("Successfully inserted samples into sensor_data hypertable") ``` -4. This example `main.go` generates sample data and inserts it into +1. This example `main.go` generates sample data and inserts it into the `sensor_data` hypertable: ```go @@ -541,6 +529,8 @@ to insert multiple rows of data, instead: } ``` +### Batch insert data + Inserting multiple rows of data using this method executes as many `insert` statements as there are samples to be inserted. This can make ingestion of data slow. To speed up ingestion, you can batch insert data instead. @@ -548,129 +538,128 @@ slow. To speed up ingestion, you can batch insert data instead. Here's a sample pattern for how to do so, using the sample data you generated in the previous procedure. It uses the pgx `Batch` object: -1. 
This example batch inserts data into the database: - ```go - package main +```go +package main - import ( - "context" - "fmt" - "os" - "time" +import ( + "context" + "fmt" + "os" + "time" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - ) + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) - func main() { - /********************************************/ - /* Connect using Connection Pool */ - /********************************************/ - ctx := context.Background() - connStr := "yourConnectionStringHere" - dbpool, err := pgxpool.New(ctx, connStr) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) - os.Exit(1) - } - defer dbpool.Close() +func main() { + /********************************************/ + /* Connect using Connection Pool */ + /********************************************/ + ctx := context.Background() + connStr := "yourConnectionStringHere" + dbpool, err := pgxpool.New(ctx, connStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err) + os.Exit(1) + } + defer dbpool.Close() - // Generate data to insert + // Generate data to insert - //SQL query to generate sample data - queryDataGeneration := ` - SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, - floor(random() * (3) + 1)::int as sensor_id, - random()*100 AS temperature, - random() AS cpu - ` + //SQL query to generate sample data + queryDataGeneration := ` + SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, + floor(random() * (3) + 1)::int as sensor_id, + random()*100 AS temperature, + random() AS cpu + ` - //Execute query to generate samples for sensor_data hypertable - rows, err := dbpool.Query(ctx, queryDataGeneration) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) - os.Exit(1) - } - defer rows.Close() + //Execute query to generate samples for sensor_data hypertable + rows, err := dbpool.Query(ctx, queryDataGeneration) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to generate sensor data: %v\n", err) + os.Exit(1) + } + defer rows.Close() - fmt.Println("Successfully generated sensor data") + fmt.Println("Successfully generated sensor data") - //Store data generated in slice results - type result struct { - Time time.Time - SensorId int - Temperature float64 - CPU float64 - } - var results []result - for rows.Next() { - var r result - err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) - os.Exit(1) - } - results = append(results, r) - } - // Any errors encountered by rows.Next or rows.Scan are returned here - if rows.Err() != nil { - fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + //Store data generated in slice results + type result struct { + Time time.Time + SensorId int + Temperature float64 + CPU float64 + } + var results []result + for rows.Next() { + var r result + err = rows.Scan(&r.Time, &r.SensorId, &r.Temperature, &r.CPU) + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to scan %v\n", err) os.Exit(1) } + results = append(results, r) + } + // Any errors encountered by rows.Next or rows.Scan are returned here + if rows.Err() != nil { + fmt.Fprintf(os.Stderr, "rows Error: %v\n", rows.Err()) + os.Exit(1) + } - // Check contents of results slice - /*fmt.Println("Contents of RESULTS slice") - for i := range results { - var r result - r = results[i] - fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f 
|\n", &r.Time, r.SensorId, r.Temperature, r.CPU) - }*/ + // Check contents of results slice + /*fmt.Println("Contents of RESULTS slice") + for i := range results { + var r result + r = results[i] + fmt.Printf("Time: %s | ID: %d | Temperature: %f | CPU: %f |\n", &r.Time, r.SensorId, r.Temperature, r.CPU) + }*/ - //Insert contents of results slice into TimescaleDB - //SQL query to generate sample data - queryInsertTimeseriesData := ` - INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); - ` + //Insert contents of results slice into TimescaleDB + //SQL query to generate sample data + queryInsertTimeseriesData := ` + INSERT INTO sensor_data (time, sensor_id, temperature, cpu) VALUES ($1, $2, $3, $4); + ` - /********************************************/ - /* Batch Insert into TimescaleDB */ - /********************************************/ - //create batch - batch := &pgx.Batch{} - //load insert statements into batch queue - for i := range results { - var r result - r = results[i] - batch.Queue(queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) - } - batch.Queue("select count(*) from sensor_data") + /********************************************/ + /* Batch Insert into TimescaleDB */ + /********************************************/ + //create batch + batch := &pgx.Batch{} + //load insert statements into batch queue + for i := range results { + var r result + r = results[i] + batch.Queue(queryInsertTimeseriesData, r.Time, r.SensorId, r.Temperature, r.CPU) + } + batch.Queue("select count(*) from sensor_data") - //send batch to connection pool - br := dbpool.SendBatch(ctx, batch) - //execute statements in batch queue - _, err = br.Exec() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to execute statement in batch queue %v\n", err) - os.Exit(1) - } - fmt.Println("Successfully batch inserted data") + //send batch to connection pool + br := dbpool.SendBatch(ctx, batch) + //execute statements in batch queue + _, err = br.Exec() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to execute statement in batch queue %v\n", err) + os.Exit(1) + } + fmt.Println("Successfully batch inserted data") - //Compare length of results slice to size of table - fmt.Printf("size of results: %d\n", len(results)) - //check size of table for number of rows inserted - // result of last SELECT statement - var rowsInserted int - err = br.QueryRow().Scan(&rowsInserted) - fmt.Printf("size of table: %d\n", rowsInserted) + //Compare length of results slice to size of table + fmt.Printf("size of results: %d\n", len(results)) + //check size of table for number of rows inserted + // result of last SELECT statement + var rowsInserted int + err = br.QueryRow().Scan(&rowsInserted) + fmt.Printf("size of table: %d\n", rowsInserted) - err = br.Close() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to closer batch %v\n", err) - os.Exit(1) - } + err = br.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to closer batch %v\n", err) + os.Exit(1) } - ``` +} +``` ## Execute a query @@ -694,7 +683,7 @@ This section covers how to execute queries against your database. ` ``` -2. Use the `.Query()` function to execute the query string. Make sure you +1. Use the `.Query()` function to execute the query string. Make sure you specify the relevant placeholders: ```go @@ -709,7 +698,7 @@ This section covers how to execute queries against your database. fmt.Println("Successfully executed query") ``` -3. Access the rows returned by `.Query()`. Create a struct with fields +1. 
Access the rows returned by `.Query()`. Create a struct with fields representing the columns that you expect to be returned, then use the `rows.Next()` function to iterate through the rows returned and fill `results` with the array of structs. This uses the `rows.Scan()` function, @@ -750,7 +739,7 @@ This section covers how to execute queries against your database. // use results here… ``` -4. This example program runs a query, and accesses the results of +1. This example program runs a query, and accesses the results of that query: ```go @@ -843,4 +832,5 @@ your Go application, be sure to check out these advanced {TIMESCALE_DB} tutorial [pgx-driver]: https://github.com/jackc/pgx [libpq-connstring]: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING [pgx-docs]: https://pkg.go.dev/github.com/jackc/pgx -[timescaledb-parallel-copy]: https://github.com/timescale/timescaledb-parallel-copy \ No newline at end of file +[timescaledb-parallel-copy]: https://github.com/timescale/timescaledb-parallel-copy +[connection-info]: /integrations/find-connection-details/ \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-java.mdx b/snippets/integrations/code/_start-coding-java.mdx index fcb232a..4df36ea 100644 --- a/snippets/integrations/code/_start-coding-java.mdx +++ b/snippets/integrations/code/_start-coding-java.mdx @@ -30,23 +30,22 @@ or `maven`. } ``` -2. From the command line in the current directory, run the application: +1. From the command line in the current directory, run the application: ```bash java Main.java ``` - If the command is successful, `Hello, World!` line output is printed - to your console. + `Hello, World!` is printed to your console. -3. Import the PostgreSQL JDBC driver. If you are using a dependency manager, +1. Import the PostgreSQL JDBC driver. If you are using a dependency manager, include the [PostgreSQL JDBC Driver][jdbc-maven] as a dependency. -4. Download the [JAR artifact of the JDBC Driver][jdbc-download] and +1. Download the [JAR artifact of the JDBC Driver][jdbc-download] and save it with the `Main.java` file. -5. Import the `JDBC Driver` into the Java application and display a list of +1. Import the `JDBC Driver` into the Java application and display a list of available drivers for the check: ```java @@ -62,28 +61,17 @@ or `maven`. } ``` -6. Run all the examples: +1. Run all the examples: ```bash java -cp *.jar Main.java ``` - If the command is successful, a string similar to - `org.postgresql.Driver@7f77e91b` is printed to your console. This means that you - are ready to connect to {TIMESCALE_DB} from Java. + A string similar to `org.postgresql.Driver@7f77e91b` is printed to the terminal. This means that you are ready to + connect to {TIMESCALE_DB} from Java. -7. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for JDBC. - - You'll need: - - * password - * username - * host URL - * port - * database name - -8. Compose your connection string variable, using this format: +1. [Locate your {TIMESCALE_DB} credentials][connection-info] and compose a connection string for JDBC in the + following format: ```java var connUrl = "jdbc:postgresql://:/?user=&password="; @@ -113,14 +101,13 @@ or `maven`. } ``` -9. Run the code: +1. Run the code: ```bash java -cp *.jar Main.java ``` - If the command is successful, a string similar to - `{ApplicationName=PostgreSQL JDBC Driver}` is printed to your console. 
+ A string similar to `{ApplicationName=PostgreSQL JDBC Driver}` is printed to your console. ## Create a relational table @@ -142,7 +129,7 @@ percentage of the sensors. ); ``` -2. Create a statement, execute the query you created in the previous step, and +1. Create a statement, execute the query you created in the previous step, and check that the table was created successfully: ```java @@ -197,7 +184,7 @@ and most other tasks are executed on the hypertable. ); ``` -2. Create a statement, execute the query you created in the previous step: +1. Create a statement, execute the query you created in the previous step: ```sql SELECT create_hypertable('sensor_data', by_range('time')); @@ -207,7 +194,7 @@ and most other tasks are executed on the hypertable. The `by_range` and `by_hash` dimension builder is an addition to {TIMESCALE_DB} 2.13. -3. Execute the two statements you created, and commit your changes to the +1. Execute the two statements you created, and commit your changes to the database: ```java @@ -282,6 +269,8 @@ section, you can insert single rows, or insert by batches of rows. } ``` +## Batch insert data + If you want to insert a batch of rows by using a batching mechanism. In this example, you generate some sample time-series data to insert into the `sensor_data` hypertable: @@ -313,8 +302,6 @@ example, you generate some sample time-series data to insert into the This section covers how to execute queries against your database. -## Execute queries on TimescaleDB - 1. Define the SQL query you'd like to run on the database. This example combines time-series and relational data. It returns the average values for every 15 minute interval for sensors with specific type and location. @@ -328,7 +315,7 @@ This section covers how to execute queries against your database. ORDER BY bucket DESC; ``` -2. Execute the query with the prepared statement and read out the result set for +1. Execute the query with the prepared statement and read out the result set for all `a`-type sensors located on the `floor`: ```java @@ -351,7 +338,7 @@ This section covers how to execute queries against your database. } ``` - If the command is successful, you'll see output like this: + You see output like this: ```bash 2021-05-12 23:30:00.0: 0,508649 @@ -583,4 +570,5 @@ public class Main { [jdbc-driver]: https://jdbc.postgresql.org [jdbc-maven]: https://mvnrepository.com/artifact/org.postgresql/postgresql [jdbc-download]: https://jdbc.postgresql.org/download/ -[jdbc-docs]: https://jdbc.postgresql.org/documentation/datasource/ \ No newline at end of file +[jdbc-docs]: https://jdbc.postgresql.org/documentation/datasource/ +[connection-info]: /integrations/find-connection-details/ \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-node.mdx b/snippets/integrations/code/_start-coding-node.mdx index ce551cc..c796942 100644 --- a/snippets/integrations/code/_start-coding-node.mdx +++ b/snippets/integrations/code/_start-coding-node.mdx @@ -37,13 +37,13 @@ ORM (object relational mapper) called [Sequelize][sequelize]. } ``` -2. Install Express.js: +1. Install Express.js: ```bash npm install express ``` -3. Create a simple web page to check the connection. Create a new file called +1. Create a simple web page to check the connection. Create a new file called `index.js`, with this content: ```javascript @@ -56,39 +56,29 @@ ORM (object relational mapper) called [Sequelize][sequelize]. app.listen(port, () => console.log(`Example app listening at http://localhost:${port}`)) ``` -4. 
Test your connection by starting the application: +1. Test your connection by starting the application: ```bash node index.js ``` - In your web browser, navigate to `http://localhost:3000`. If the connection - is successful, it shows "Hello World!" + In your web browser, navigate to `http://localhost:3000`. If the connection + is successful, it shows `"Hello World!"`. -5. Add Sequelize to your project: +1. Add Sequelize to your project: ```bash npm install sequelize sequelize-cli pg pg-hstore ``` -6. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for Sequelize. - - You'll need: - - * password - * username - * host URL - * port - * database name - -7. Compose your connection string variable, using this format: +1. [Locate your {TIMESCALE_DB} credentials][connection-info] and compose a connection string for Sequelize + in the following format: ```javascript 'postgres://:@:/' ``` -8. Open the `index.js` file you created. Require Sequelize in the application, +1. Open the `index.js` file you created. Require Sequelize in the application, and declare the connection string: ```javascript @@ -109,7 +99,7 @@ ORM (object relational mapper) called [Sequelize][sequelize]. Make sure you add the SSL settings in the `dialectOptions` sections. You can't connect to {TIMESCALE_DB} using SSL without them. -9. You can test the connection by adding these lines to `index.js` after the +1. You can test the connection by adding these lines to `index.js` after the `app.get` statement: ```javascript @@ -154,7 +144,7 @@ In this section, you create a relational table called `page_loads`. New migration was created at . ``` -2. Edit the migration file so that it sets up a migration key: +1. Edit the migration file so that it sets up a migration key: ```javascript 'use strict'; @@ -177,7 +167,7 @@ In this section, you create a relational table called `page_loads`. }; ``` -3. Migrate the change and make sure that it is reflected in the database: +1. Migrate the change and make sure that it is reflected in the database: ```bash npx sequelize db:migrate @@ -194,7 +184,7 @@ In this section, you create a relational table called `page_loads`. == 20200528195725-create-page-loads: migrated (0.443s) ``` -4. Create the `PageLoads` model in your code. In the `index.js` file, above the +1. Create the `PageLoads` model in your code. In the `index.js` file, above the `app.use` statement, add these lines: ```javascript @@ -204,7 +194,7 @@ In this section, you create a relational table called `page_loads`. }, { timestamps: false }); ``` -5. Instantiate a `PageLoads` object and save it to the database. +1. Instantiate a `PageLoads` object and save it to the database. ## Create a hypertable @@ -228,7 +218,7 @@ and most other tasks are executed on the hypertable. New migration was created at /20200601202912-add_hypertable.js . ``` -2. In the `migrations` folder, there is now a new file. Open the +1. In the `migrations` folder, there is now a new file. Open the file, and add this content: ```javascript @@ -248,7 +238,7 @@ and most other tasks are executed on the hypertable. The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. -3. At the command prompt, run the migration command: +1. At the command prompt, run the migration command: ```bash npx sequelize db:migrate @@ -300,34 +290,35 @@ This section covers how to execute queries against your database. In this example, every time the page is reloaded, all information currently in the table is displayed. -1. 
Modify the `/` route in the `index.js` file to call the Sequelize `findAll` - function and retrieve all data from the `page_loads` table using the - `PageLoads` model: +Modify the `/` route in the `index.js` file to call the Sequelize `findAll` +function and retrieve all data from the `page_loads` table using the +`PageLoads` model: - ```javascript - app.get('/', async (req, res) => { - // get the user agent and current time - const userAgent = req.get('user-agent'); - const time = new Date().getTime(); +```javascript +app.get('/', async (req, res) => { + // get the user agent and current time + const userAgent = req.get('user-agent'); + const time = new Date().getTime(); - try { - // insert the record - await PageLoads.create({ - userAgent, time - }); + try { + // insert the record + await PageLoads.create({ + userAgent, time + }); - // now display everything in the table - const messages = await PageLoads.findAll(); - res.send(messages); - } catch (e) { - console.log('Error inserting data', e) - } - }) - ``` + // now display everything in the table + const messages = await PageLoads.findAll(); + res.send(messages); + } catch (e) { + console.log('Error inserting data', e) + } +}) +``` Now, when you reload the page, you should see all of the rows currently in the `page_loads` table. [install-nodejs]: https://nodejs.org [install-npm]: https://docs.npmjs.com/getting-started -[sequelize]: https://sequelize.org \ No newline at end of file +[sequelize]: https://sequelize.org +[connection-info]: /integrations/find-connection-details/ \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-python.mdx b/snippets/integrations/code/_start-coding-python.mdx index 99b85dc..66fa909 100644 --- a/snippets/integrations/code/_start-coding-python.mdx +++ b/snippets/integrations/code/_start-coding-python.mdx @@ -23,19 +23,8 @@ prevents common attacks such as SQL injection. import psycopg2 ``` -2. Locate your {TIMESCALE_DB} credentials and use them to compose a connection - string for `psycopg2`. - - You'll need: - - * password - * username - * host URL - * port - * database name - -3. Compose your connection string variable as a - [libpq connection string][libpq-connstring], using this format: +1. [Locate your {TIMESCALE_DB} credentials][connection-info] and compose a connection string as a + [libpq connection string][libpq-connstring] in the following format: ```python CONNECTION = "postgres://username:password@host:port/dbname" @@ -60,7 +49,7 @@ prevents common attacks such as SQL injection. details like your password, hostname, and port number. -4. Use the `psycopg2` [connect function][psycopg2-connect] to create a new +1. Use the `psycopg2` [connect function][psycopg2-connect] to create a new database session and create a new [cursor object][psycopg2-cursor] to interact with the database. @@ -107,7 +96,7 @@ percentage of the sensors. """ ``` -2. Open a cursor, execute the query you created in the previous step, and +1. Open a cursor, execute the query you created in the previous step, and commit the query to make the changes persistent. Afterward, close the cursor to clean up: @@ -140,7 +129,7 @@ and most other tasks are executed on the hypertable. """ ``` -2. Formulate a `SELECT` statement that converts the `sensor_data` table to a +1. Formulate a `SELECT` statement that converts the `sensor_data` table to a hypertable. You must specify the table name to convert to a hypertable, and the name of the time column as the two arguments. 
For more information, see the [`create_hypertable` docs](/api/hypertable/create_hypertable): @@ -153,7 +142,7 @@ and most other tasks are executed on the hypertable. The `by_range` dimension builder is an addition to {TIMESCALE_DB} 2.13. -3. Open a cursor with the connection, execute the statements from the previous +1. Open a cursor with the connection, execute the statements from the previous steps, commit your changes, and close the cursor: ```python @@ -188,7 +177,7 @@ section, you can use `psycopg2` with prepared statements, or you can use conn.commit() ``` -2. (Optional) Alternatively, you can pass variables to the `cursor.execute` +1. (Optional) Alternatively, you can pass variables to the `cursor.execute` function and separate the formulation of the SQL statement, `SQL`, from the data being passed with it into the prepared statement, `data`: @@ -205,13 +194,13 @@ section, you can use `psycopg2` with prepared statements, or you can use conn.commit() ``` -If you choose to use `pgcopy` instead, install the `pgcopy` package -[using pip][pgcopy-pypi], and then add this line to your list of -`import` statements: + If you choose to use `pgcopy` instead, install the `pgcopy` package + [using pip][pgcopy-pypi], and then add this line to your list of + `import` statements: -```python -from pgcopy import CopyManager -``` + ```python + from pgcopy import CopyManager + ``` 1. Generate some random sensor data using the `generate_series` function provided by {PG}. This example inserts a total of 480 rows of data (4 @@ -232,7 +221,7 @@ from pgcopy import CopyManager values = cursor.fetchall() ``` -2. Define the column names of the table you want to insert data into. This +1. Define the column names of the table you want to insert data into. This example uses the `sensor_data` hypertable created earlier. This hypertable consists of columns named `time`, `sensor_id`, `temperature` and `cpu`. The column names are defined in a list of strings called `cols`: @@ -241,7 +230,7 @@ from pgcopy import CopyManager cols = ['time', 'sensor_id', 'temperature', 'cpu'] ``` -3. Create an instance of the `pgcopy` CopyManager, `mgr`, and pass the +1. Create an instance of the `pgcopy` CopyManager, `mgr`, and pass the connection variable, hypertable name, and list of column names. Then use the `copy` function of the CopyManager to insert the data into the database quickly using `pgcopy`. @@ -251,13 +240,13 @@ from pgcopy import CopyManager mgr.copy(values) ``` -4. Commit to persist changes: +1. Commit to persist changes: ```python conn.commit() ``` -5. (Optional) The full sample code to insert data into {TIMESCALE_DB} using +1. (Optional) The full sample code to insert data into {TIMESCALE_DB} using `pgcopy`, using the example of sensor data from four sensors: ```python @@ -289,7 +278,7 @@ from pgcopy import CopyManager conn.commit() ``` -6. (Optional) You can also check if the insertion worked: +1. (Optional) You can also check if the insertion worked: ```python cursor.execute("SELECT * FROM sensor_data LIMIT 5;") @@ -319,7 +308,7 @@ see the [psycopg2 documentation][psycopg2-usage]. query = "SELECT * FROM sensor_data;" ``` -2. Open a cursor from the existing database connection, `conn`, and then execute +1. Open a cursor from the existing database connection, `conn`, and then execute the query you defined: ```python @@ -328,7 +317,7 @@ see the [psycopg2 documentation][psycopg2-usage]. cursor.execute(query) ``` -3. To access all resulting rows returned by your query, use one of `pyscopg2`'s +1. 
To access all resulting rows returned by your query, use one of `pyscopg2`'s [results retrieval methods][psycopg2-results], such as `fetchall()` or `fetchmany()`. This example prints the results of the query, row by row. Note that the result of `fetchall()` is a list of @@ -343,7 +332,7 @@ see the [psycopg2 documentation][psycopg2-usage]. cursor.close() ``` -4. (Optional) If you want a list of dictionaries instead, you can define the +1. (Optional) If you want a list of dictionaries instead, you can define the cursor using [`DictCursor`][psycopg2-dictcursor]: ```python @@ -357,7 +346,7 @@ executed safely against the database. ### Execute queries using prepared statements -1. Write the query using prepared statements: +Write the query using prepared statements: ```python # query with placeholders @@ -385,4 +374,5 @@ executed safely against the database. [pgcopy-pypi]: https://pypi.org/project/pgcopy/ [psycopg2-usage]: https://www.psycopg.org/docs/usage.html [psycopg2-results]: https://www.psycopg.org/docs/cursor.html -[psycopg2-dictcursor]: https://www.psycopg.org/docs/extras.html#dictionary-like-cursor \ No newline at end of file +[psycopg2-dictcursor]: https://www.psycopg.org/docs/extras.html#dictionary-like-cursor +[connection-info]: /integrations/find-connection-details/ \ No newline at end of file diff --git a/snippets/integrations/code/_start-coding-ruby.mdx b/snippets/integrations/code/_start-coding-ruby.mdx index fdd0a57..d5afabb 100644 --- a/snippets/integrations/code/_start-coding-ruby.mdx +++ b/snippets/integrations/code/_start-coding-ruby.mdx @@ -22,7 +22,7 @@ from a standard Rails app configured for {PG}. cd my_app ``` -2. **Install the {TIMESCALE_DB} gem** +1. **Install the {TIMESCALE_DB} gem** 1. Open `Gemfile`, add the following line, then save your changes: @@ -30,13 +30,13 @@ from a standard Rails app configured for {PG}. gem 'timescaledb' ``` - 2. In Terminal, run the following command: + 1. In Terminal, run the following command: ```bash bundle install ``` -3. **Connect your app to your {SERVICE_LONG}** +1. **Connect your app to your {SERVICE_LONG}** 1. In `/config/database.yml` update the configuration to read securely connect to your {SERVICE_LONG} by adding `url: <%= ENV['DATABASE_URL'] %>` to the default configuration: @@ -83,8 +83,8 @@ from a standard Rails app configured for {PG}. | pg_stat_statements | 1.11 | public | track planning and execution statistics of all SQL statements executed| | plpgsql | 1.0 | pg_catalog | PL/pgSQL procedural language| | postgres_fdw | 1.1 | public | foreign-data wrapper for remote {PG} servers| - | timescaledb | 2.18.1 | public | Enables scalable inserts and complex queries for time-series data (Community Edition)| - | timescaledb_toolkit | 1.19.0 | public | Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities| + | timescaledb | 2.23.0 | public | Enables scalable inserts and complex queries for time-series data (Community Edition)| + | timescaledb_toolkit | 1.22.0 | public | Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities| ## Optimize time-series data in hypertables @@ -98,10 +98,9 @@ In this section, you use the helpers in the {TIMESCALE_DB} gem to create and man ```bash rails generate migration create_page_loads ``` + This creates the `/db/migrate/_create_page_loads.rb` migration file. - This creates the `/db/migrate/_create_page_loads.rb` migration file. - -2. **Add hypertable options** +1. 
**Add hypertable options** Replace the contents of `/db/migrate/_create_page_loads.rb` with the following: @@ -133,9 +132,9 @@ In this section, you use the helpers in the {TIMESCALE_DB} gem to create and man Rails model includes a `PRIMARY KEY` index for id by default: either remove the column or make sure that the index includes time as part of a "composite key." - For more information, check the Roby docs around [composite primary keys][rails-composite-keys]. + For more information, check the Ruby docs around [composite primary keys][rails-composite-keys]. -3. **Create a `PageLoad` model** +1. **Create a `PageLoad` model** Create a new file called `/app/models/page_load.rb` and add the following code: @@ -184,7 +183,7 @@ In this section, you use the helpers in the {TIMESCALE_DB} gem to create and man end ``` -4. **Run the migration** +1. **Run the migration** ```bash rails db:migrate @@ -219,7 +218,7 @@ shows you how to ingest test data into your hypertable. end ``` -2. **Generate some test data** +1. **Generate some test data** Use `bin/console` to join a Rails console session and run the following code to define some random page load access data: @@ -247,14 +246,14 @@ shows you how to ingest test data into your hypertable. end ``` -3. **Insert the generated data into your {SERVICE_LONG}** +1. **Insert the generated data into your {SERVICE_LONG}** ```bash # Insert the data in batches PageLoad.insert_all(generate_sample_page_loads, returning: false) ``` -4. **Validate the test data in your {SERVICE_LONG}** +1. **Validate the test data in your {SERVICE_LONG}** ```bash PageLoad.count @@ -393,8 +392,12 @@ Now that you have integrated the ruby gem into your app: * Check out the [official docs][timescaledb-ruby-docs]. * Follow the [LTTB][lttb-tutorial], [Open AI long-term storage][openai-tutorial], and [candlesticks][candlestick-tutorial] tutorials. +[install-rails]: https://guides.rubyonrails.org/install_ruby_on_rails.html#installing-rails +[rails-composite-keys]: https://guides.rubyonrails.org/active_record_composite_primary_keys.html +[ruby-blog-post]: https://www.timescale.com/blog/building-a-better-ruby-orm-for-time-series-and-analytics [timescaledb-ruby-gem]: https://github.com/timescale/timescaledb-ruby [timescaledb-ruby-docs]: https://timescale.github.io/timescaledb-ruby/ [lttb-tutorial]: https://timescale.github.io/timescaledb-ruby/toolkit_lttb_tutorial/ [openai-tutorial]: https://timescale.github.io/timescaledb-ruby/chat_gpt_tutorial/ -[candlestick-tutorial]: https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/ \ No newline at end of file +[candlestick-tutorial]: https://timescale.github.io/timescaledb-ruby/toolkit_candlestick/ +[connection-info]: /integrations/find-connection-details/ \ No newline at end of file From 061ee069d37c5c77f8f0d511c6f2bc43f3facf8e Mon Sep 17 00:00:00 2001 From: billy-the-fish Date: Mon, 17 Nov 2025 13:09:39 +0100 Subject: [PATCH 12/13] chore: Up to Decodable. 
--- integrations/integrate/amazon-sagemaker.mdx | 7 +- integrations/integrate/apache-kafka.mdx | 2 +- integrations/integrate/decodable.mdx | 4 +- integrations/integrate/kubernetes.mdx | 7 +- integrations/integrate/terraform.mdx | 6 +- .../_integration-apache-kafka-install.mdx | 4 +- .../_kubernetes-install-self-hosted.mdx | 36 ++-- .../code/_start-coding-python.mdx | 6 +- .../_kubernetes-install-self-hosted.mdx | 169 ------------------ 9 files changed, 31 insertions(+), 210 deletions(-) delete mode 100644 snippets/procedures/_kubernetes-install-self-hosted.mdx diff --git a/integrations/integrate/amazon-sagemaker.mdx b/integrations/integrate/amazon-sagemaker.mdx index 58b9c1d..b2cdc19 100644 --- a/integrations/integrate/amazon-sagemaker.mdx +++ b/integrations/integrate/amazon-sagemaker.mdx @@ -11,11 +11,8 @@ import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT, HYPERTABLE_CAP } from '/snippets/vars.mdx'; -[Amazon SageMaker AI][amazon-sagemaker] is a fully managed machine learning (ML) service. With SageMaker AI, data -scientists and developers can quickly and confidently build, train, and deploy ML models into a production-ready -hosted environment. - -This page shows you how to integrate Amazon Sagemaker with a {SERVICE_LONG}. +Integrate [Amazon SageMaker AI][amazon-sagemaker] with a {SERVICE_LONG}, and quickly and confidently build, train, +and deploy ML models into a production-ready hosted environment. ## Prerequisites diff --git a/integrations/integrate/apache-kafka.mdx b/integrations/integrate/apache-kafka.mdx index 0059cda..221786e 100644 --- a/integrations/integrate/apache-kafka.mdx +++ b/integrations/integrate/apache-kafka.mdx @@ -23,7 +23,7 @@ This guide explains how to set up Kafka and Kafka Connect to stream data from a -- Install Java8 or higher][java-installers] to run Apache Kafka +- Install [Java8 or higher][java-installers] to run Apache Kafka ## Install and configure Apache Kafka diff --git a/integrations/integrate/decodable.mdx b/integrations/integrate/decodable.mdx index 50de905..602d291 100644 --- a/integrations/integrate/decodable.mdx +++ b/integrations/integrate/decodable.mdx @@ -9,11 +9,9 @@ keywords: [Decodable, data pipelines, real-time processing, stream processing, E import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import { CLOUD_LONG, CONSOLE, PG } from '/snippets/vars.mdx'; -[Decodable][decodable] is a real-time data platform that allows you to build, run, and manage data pipelines effortlessly. - ![Decodable workflow](https://assets.timescale.com/docs/images/integrations-decodable-configuration.png) -This page explains how to integrate Decodable with your {SERVICE_LONG} to enable efficient real-time streaming and analytics. +This page explains how to integrate [Decodable][decodable] with your {SERVICE_LONG} to enable efficient real-time streaming and analytics. 
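On the {SERVICE_LONG} side, the destination of such a pipeline is ordinary {PG} storage: a table, typically a hypertable when the stream carries time-series events. The following SQL is a minimal, illustrative sketch of a destination prepared ahead of time; the `sensor_data` table and its columns are examples only and are not part of the Decodable configuration itself:

```sql
-- Illustrative destination table for streamed events
CREATE TABLE sensor_data (
  time        TIMESTAMPTZ      NOT NULL,
  sensor_id   INTEGER          NOT NULL,
  temperature DOUBLE PRECISION,
  cpu         DOUBLE PRECISION
);

-- Partition the table on the time column so ingested events land in time-based chunks
SELECT create_hypertable('sensor_data', by_range('time'));
```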
## Prerequisites diff --git a/integrations/integrate/kubernetes.mdx b/integrations/integrate/kubernetes.mdx index 44f97a0..a2ccc9c 100644 --- a/integrations/integrate/kubernetes.mdx +++ b/integrations/integrate/kubernetes.mdx @@ -7,8 +7,8 @@ keywords: [Kubernetes, K8s, container orchestration, deployment, scaling, cloud import KubernetesPrereqs from '/snippets/prerequisites/_kubernetes-prereqs.mdx'; -import KubernetesInstallSelf from '/snippets/procedures/_kubernetes-install-self-hosted.mdx'; -import { CLOUD_LONG, SERVICE_LONG } from '/snippets/vars.mdx'; +import KubernetesInstallSelf from '/snippets/integrations/_kubernetes-install-self-hosted.mdx'; +import { CLOUD_LONG, SERVICE_LONG, SELF_LONG_CAP } from '/snippets/vars.mdx'; [Kubernetes][kubernetes] is an open-source container orchestration system that automates the deployment, scaling, and management of containerized applications. You can connect Kubernetes to {CLOUD_LONG}, and deploy {TIMESCALE_DB} within your Kubernetes clusters. @@ -125,7 +125,8 @@ To connect your Kubernetes cluster to your {SERVICE_LONG}: - + + diff --git a/integrations/integrate/terraform.mdx b/integrations/integrate/terraform.mdx index 8d1a750..0ed5dc2 100644 --- a/integrations/integrate/terraform.mdx +++ b/integrations/integrate/terraform.mdx @@ -7,7 +7,7 @@ keywords: [Terraform, infrastructure as code, IaC, provisioning, automation, dep import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; -import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG } from '/snippets/vars.mdx'; +import { CLOUD_LONG, COMPANY, CONSOLE, VPC, PG, SELF_LONG_CAP } from '/snippets/vars.mdx'; [Terraform][terraform] is an infrastructure-as-code tool that enables you to safely and predictably provision and manage infrastructure. @@ -35,7 +35,7 @@ You use the [{COMPANY} Terraform provider][terraform-provider] to manage {SERVIC 2. Click `Create credentials`, then save `Public key` and `Secret key`. -2. **Configure {COMPANY} Terraform provider** +2. **Configure the {COMPANY} Terraform provider** 1. Create a `main.tf` configuration file with at least the following content. Change `x.y.z` to the [latest version][terraform-provider] of the provider. @@ -106,7 +106,7 @@ You can now manage your resources with Terraform. See more about [available reso - + You use the [`cyrilgdn/postgresql`][pg-provider] {PG} provider to connect to your {SELF_LONG} instance. diff --git a/snippets/integrations/_integration-apache-kafka-install.mdx b/snippets/integrations/_integration-apache-kafka-install.mdx index 7085834..b6fac52 100644 --- a/snippets/integrations/_integration-apache-kafka-install.mdx +++ b/snippets/integrations/_integration-apache-kafka-install.mdx @@ -55,6 +55,4 @@ import { SERVICE_LONG } from '/snippets/vars.mdx'; ```bash Tiger Cloud How Cool - ``` - - + ``` diff --git a/snippets/integrations/_kubernetes-install-self-hosted.mdx b/snippets/integrations/_kubernetes-install-self-hosted.mdx index 7572a3c..90ed6f1 100644 --- a/snippets/integrations/_kubernetes-install-self-hosted.mdx +++ b/snippets/integrations/_kubernetes-install-self-hosted.mdx @@ -4,8 +4,6 @@ Running {TIMESCALE_DB} on Kubernetes is similar to running {PG}. This procedure To connect your Kubernetes cluster to {SELF_LONG} running in the cluster: - - 1. **Create a default namespace for {COMPANY} components** 1. Create the {COMPANY} namespace: @@ -89,22 +87,22 @@ To connect your Kubernetes cluster to {SELF_LONG} running in the cluster: 1. 
**Allow applications to connect by exposing {TIMESCALE_DB} within Kubernetes** - ```yaml - kubectl apply -f - < - [kubernetes-namespace]: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ [timescale-docker-image]: https://hub.docker.com/r/timescale/timescaledb diff --git a/snippets/integrations/code/_start-coding-python.mdx b/snippets/integrations/code/_start-coding-python.mdx index 66fa909..0ad1e7d 100644 --- a/snippets/integrations/code/_start-coding-python.mdx +++ b/snippets/integrations/code/_start-coding-python.mdx @@ -17,7 +17,7 @@ library. This library is one of the most popular {PG} libraries for Python. It allows you to execute raw SQL queries efficiently and safely, and prevents common attacks such as SQL injection. -1. Import the psycogpg2 library: +1. Import the psycopg2 library: ```python import psycopg2 @@ -209,7 +209,7 @@ section, you can use `psycopg2` with prepared statements, or you can use ```python # for sensors with ids 1-4 - for id in range(1, 4, 1): + for id in range(1, 5, 1): data = (id,) # create random data simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, @@ -255,7 +255,7 @@ section, you can use `psycopg2` with prepared statements, or you can use cursor = conn.cursor() # for sensors with ids 1-4 - for id in range(1, 4, 1): + for id in range(1, 5, 1): data = (id,) # create random data simulate_query = """SELECT generate_series(now() - interval '24 hour', now(), interval '5 minute') AS time, diff --git a/snippets/procedures/_kubernetes-install-self-hosted.mdx b/snippets/procedures/_kubernetes-install-self-hosted.mdx deleted file mode 100644 index ea42000..0000000 --- a/snippets/procedures/_kubernetes-install-self-hosted.mdx +++ /dev/null @@ -1,169 +0,0 @@ -import { TIMESCALE_DB, PG, SELF_LONG, COMPANY } from '/snippets/vars.mdx'; - -Running {TIMESCALE_DB} on Kubernetes is similar to running {PG}. This procedure outlines the steps for a non-distributed system. - -To connect your Kubernetes cluster to {SELF_LONG} running in the cluster: - -1. **Create a default namespace for {COMPANY} components** - - 1. Create the {COMPANY} namespace: - - ```shell - kubectl create namespace timescale - ``` - - 2. Set this namespace as the default for your session: - - ```shell - kubectl config set-context --current --namespace=timescale - ``` - - For more information, see [Kubernetes Namespaces][kubernetes-namespace]. - -2. **Set up a persistent volume claim (PVC) storage** - - To manually set up a persistent volume and claim for self-hosted Kubernetes, run the following command: - - ```yaml - kubectl apply -f - < Date: Mon, 17 Nov 2025 14:37:24 +0100 Subject: [PATCH 13/13] chore: Last ones. 
--- integrations/integrate/aws.mdx | 4 +--- integrations/integrate/azure-data-studio.mdx | 6 ++---- integrations/integrate/cloudwatch.mdx | 6 ++---- .../integrate/corporate-data-center.mdx | 8 ++++---- integrations/integrate/datadog.mdx | 3 --- integrations/integrate/dbeaver.mdx | 2 +- integrations/integrate/fivetran.mdx | 5 +---- integrations/integrate/google-cloud.mdx | 11 +++++------ integrations/integrate/grafana.mdx | 4 +--- integrations/integrate/microsoft-azure.mdx | 8 ++++---- integrations/integrate/pgadmin.mdx | 4 ++-- integrations/integrate/psql.mdx | 4 +--- integrations/integrate/qstudio.mdx | 2 +- integrations/integrate/supabase.mdx | 18 ++---------------- integrations/integrate/telegraf.mdx | 4 ++-- .../integrations/_prometheus-integrate.mdx | 11 +++++++---- snippets/integrations/_transit-gateway.mdx | 2 +- 17 files changed, 37 insertions(+), 65 deletions(-) diff --git a/integrations/integrate/aws.mdx b/integrations/integrate/aws.mdx index 47ac684..70fa5cf 100644 --- a/integrations/integrate/aws.mdx +++ b/integrations/integrate/aws.mdx @@ -9,9 +9,7 @@ import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereq import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; -[Amazon Web Services (AWS)][aws] is a comprehensive cloud computing platform that provides on-demand infrastructure, storage, databases, AI, analytics, and security services to help businesses build, deploy, and scale applications in the cloud. - -This page explains how to integrate your AWS infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. +This page explains how to integrate your [Amazon Web Services (AWS)][aws] infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. ## Prerequisites diff --git a/integrations/integrate/azure-data-studio.mdx b/integrations/integrate/azure-data-studio.mdx index 03d8f94..d927bbd 100644 --- a/integrations/integrate/azure-data-studio.mdx +++ b/integrations/integrate/azure-data-studio.mdx @@ -1,7 +1,7 @@ --- title: Integrate Azure Data Studio with Tiger Cloud sidebarTitle: Azure Data Studio -description: Azure Data Studio is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. Integrate Azure Data Studio with Tiger Cloud +description: Azure Data Studio is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. keywords: [Azure Data Studio, SQL editor, database management, query tool, cross-platform, Microsoft, data analytics, PostgreSQL client] --- @@ -9,9 +9,7 @@ keywords: [Azure Data Studio, SQL editor, database management, query tool, cross import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import { PG, CLOUD_LONG } from '/snippets/vars.mdx'; -[Azure Data Studio][azure-data-studio] is an open-source, cross-platform hybrid data analytics tool designed to simplify the data landscape. - -This page explains how to integrate Azure Data Studio with {CLOUD_LONG}. +This page explains how to integrate [Azure Data Studio][azure-data-studio] with {CLOUD_LONG}. 
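Once connected, Azure Data Studio runs standard SQL against your {CLOUD_LONG} service. As a quick sanity check that the connection works, you can confirm the server version and list the installed extensions. The queries below are illustrative and assume nothing about your schema:

```sql
-- Confirm the server you are connected to
SELECT version();

-- List installed extensions, including timescaledb if it is enabled
SELECT extname, extversion FROM pg_extension ORDER BY extname;
```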
## Prerequisites diff --git a/integrations/integrate/cloudwatch.mdx b/integrations/integrate/cloudwatch.mdx index e155d08..f398447 100644 --- a/integrations/integrate/cloudwatch.mdx +++ b/integrations/integrate/cloudwatch.mdx @@ -10,11 +10,9 @@ import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; import CloudWatchExporter from '/snippets/integrations/_cloudwatch-data-exporter.mdx'; import ManageDataExporter from '/snippets/integrations/_manage-a-data-exporter.mdx'; -[Amazon CloudWatch][cloudwatch] is a monitoring and observability service designed to help collect, analyze, and act on data from applications, infrastructure, and services running in AWS and on-premises environments. +You can export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to [Amazon CloudWatch][cloudwatch]. The available metrics include CPU usage, RAM usage, and storage. This integration is available for [Scale and Enterprise][pricing-plan-features] pricing tiers. -You can export telemetry data from your {SERVICE_LONG}s with the time-series and analytics capability enabled to CloudWatch. The available metrics include CPU usage, RAM usage, and storage. This integration is available for [Scale and Enterprise][pricing-plan-features] pricing tiers. - -This pages explains how to export telemetry data from your {SERVICE_LONG} into CloudWatch by creating a {CLOUD_LONG} data exporter, then attaching it to the {SERVICE_SHORT}. +This page explains how to export telemetry data from your {SERVICE_LONG} into CloudWatch by creating a {CLOUD_LONG} data exporter, then attaching it to the {SERVICE_SHORT}. ## Prerequisites diff --git a/integrations/integrate/corporate-data-center.mdx b/integrations/integrate/corporate-data-center.mdx index ad46b4a..5f79428 100644 --- a/integrations/integrate/corporate-data-center.mdx +++ b/integrations/integrate/corporate-data-center.mdx @@ -17,16 +17,16 @@ This page explains how to integrate your corporate on-premise infrastructure wit - Set up [AWS Transit Gateway][gtw-setup]. +- Establish connectivity between your on-premise infrastructure and AWS. + + See the [Centralize network connectivity using AWS Transit Gateway][aws-onprem]. + ## Connect your on-premise infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between your on-premise infrastructure and AWS. See the [Centralize network connectivity using AWS Transit Gateway][aws-onprem]. - You have successfully integrated your corporate data center with {CLOUD_LONG}. diff --git a/integrations/integrate/datadog.mdx b/integrations/integrate/datadog.mdx index 1fc449d..6f56b8b 100644 --- a/integrations/integrate/datadog.mdx +++ b/integrations/integrate/datadog.mdx @@ -12,9 +12,6 @@ import ManageDataExporter from '/snippets/integrations/_manage-a-data-exporter.m import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; import { PG, SELF_LONG, SCALE, ENTERPRISE, PRICING_PLAN } from '/snippets/vars.mdx'; -[Datadog][datadog] is a cloud-based monitoring and analytics platform that provides comprehensive visibility into -applications, infrastructure, and systems through real-time monitoring, logging, and analytics. 
- This page explains how to: - [Monitor {SERVICE_LONG} metrics with Datadog][datadog-monitor-cloud] diff --git a/integrations/integrate/dbeaver.mdx b/integrations/integrate/dbeaver.mdx index 08f063c..4b3c529 100644 --- a/integrations/integrate/dbeaver.mdx +++ b/integrations/integrate/dbeaver.mdx @@ -9,7 +9,7 @@ keywords: [DBeaver, database tool, SQL editor, database administration, cross-pl import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import { CLOUD_LONG, SERVICE_SHORT, TIMESCALE_DB } from '/snippets/vars.mdx'; -[DBeaver][dbeaver] is a free cross-platform database tool for developers, database administrators, analysts, and everyone working with data. DBeaver provides an SQL editor, administration features, data and schema migration, and the ability to monitor database connection sessions. +[DBeaver][dbeaver] provides an SQL editor, administration features, data and schema migration, and the ability to monitor database connection sessions. This page explains how to integrate DBeaver with your {SERVICE_LONG}. diff --git a/integrations/integrate/fivetran.mdx b/integrations/integrate/fivetran.mdx index f29b778..e1e79b2 100644 --- a/integrations/integrate/fivetran.mdx +++ b/integrations/integrate/fivetran.mdx @@ -9,12 +9,9 @@ keywords: [Fivetran, ETL, ELT, data pipeline, data integration, data synchroniza import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import { CLOUD_LONG, CONSOLE, PG, SERVICE_SHORT } from '/snippets/vars.mdx'; -[Fivetran][fivetran] is a fully managed data pipeline platform that simplifies ETL (Extract, Transform, Load) processes -by automatically syncing data from multiple sources to your data warehouse. - ![Fivetran data in a service](https://assets.timescale.com/docs/images/integrations-fivetran-sync-data.png) -This page shows you how to inject data from data sources managed by Fivetran into a {SERVICE_LONG}. +This page shows you how to inject data from data sources managed by [Fivetran][fivetran] into a {SERVICE_LONG}. ## Prerequisites diff --git a/integrations/integrate/google-cloud.mdx b/integrations/integrate/google-cloud.mdx index 44b9426..fe51597 100644 --- a/integrations/integrate/google-cloud.mdx +++ b/integrations/integrate/google-cloud.mdx @@ -9,9 +9,8 @@ import IntegrationPrereqsCloud from '/snippets/prerequisites/_integration-prereq import TransitGateway from '/snippets/integrations/_transit-gateway.mdx'; import NotSupportedAzure from '/snippets/changes/_not-supported-for-azure.mdx'; -[Google Cloud][google-cloud] is a suite of cloud computing services, offering scalable infrastructure, AI, analytics, databases, security, and developer tools to help businesses build, deploy, and manage applications. -This page explains how to integrate your Google Cloud infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. +This page explains how to integrate your [Google Cloud][google-cloud] infrastructure with {CLOUD_LONG} using [AWS Transit Gateway][aws-transit-gateway]. ## Prerequisites @@ -19,16 +18,16 @@ This page explains how to integrate your Google Cloud infrastructure with {CLOUD - Set up [AWS Transit Gateway][gtw-setup]. +- Establish connectivity between Google Cloud and AWS. + + See [Connect HA VPN to AWS peer gateways][gcp-aws]. + ## Connect your Google Cloud infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between Google Cloud and AWS. 
See [Connect HA VPN to AWS peer gateways][gcp-aws]. - You have successfully integrated your Google Cloud infrastructure with {CLOUD_LONG}. diff --git a/integrations/integrate/grafana.mdx b/integrations/integrate/grafana.mdx index 7acae2f..39578b4 100644 --- a/integrations/integrate/grafana.mdx +++ b/integrations/integrate/grafana.mdx @@ -1,15 +1,13 @@ --- title: Integrate Grafana with Tiger Cloud sidebarTitle: Grafana -description: Grafana enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. Integrate Grafana with Tiger +description: Grafana enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. Integrate Grafana with Tiger Cloud keywords: [Grafana, visualization, dashboards, metrics, monitoring, time-series, charts, alerting, data exploration, BI] --- import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import GrafanaConnect from '/snippets/integrations/_grafana-connect.mdx'; -[Grafana][grafana-docs] enables you to query, visualize, alert on, and explore your metrics, logs, and traces wherever they're stored. - This page shows you how to integrate Grafana with a {SERVICE_LONG}, create a dashboard and panel, then visualize geospatial data. ## Prerequisites diff --git a/integrations/integrate/microsoft-azure.mdx b/integrations/integrate/microsoft-azure.mdx index 5e4bed5..22d4a60 100644 --- a/integrations/integrate/microsoft-azure.mdx +++ b/integrations/integrate/microsoft-azure.mdx @@ -19,16 +19,16 @@ This page explains how to integrate your Microsoft Azure infrastructure with {CL - Set up [AWS Transit Gateway][gtw-setup]. +- Establish connectivity between Azure and AWS. + + See the [AWS architectural documentation][azure-aws] for details. + ## Connect your Microsoft Azure infrastructure to your Tiger Cloud services To connect to {CLOUD_LONG}: -1. **Connect your infrastructure to AWS Transit Gateway** - - Establish connectivity between Azure and AWS. See the [AWS architectural documentation][azure-aws] for details. - You have successfully integrated your Microsoft Azure infrastructure with {CLOUD_LONG}. diff --git a/integrations/integrate/pgadmin.mdx b/integrations/integrate/pgadmin.mdx index 30c1571..51b7229 100644 --- a/integrations/integrate/pgadmin.mdx +++ b/integrations/integrate/pgadmin.mdx @@ -9,8 +9,8 @@ keywords: [pgAdmin, PostgreSQL, database administration, open-source, query tool import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx'; import { PG, CLOUD_LONG, SERVICE_SHORT } from '/snippets/vars.mdx'; -[pgAdmin][pgadmin] is a feature-rich open-source administration and development platform for {PG}. It is available for Chrome, Firefox, Edge, and -Safari browsers, or can be installed on Microsoft Windows, Apple macOS, or various Linux flavors. +[pgAdmin][pgadmin] is available for Chrome, Firefox, Edge, and Safari browsers, or can be installed on Microsoft + Windows, Apple macOS, or various Linux flavors. 
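After you connect pgAdmin to your {SERVICE_SHORT}, you work with it as with any other {PG} database. For example, assuming the TimescaleDB extension is enabled on the database you connect to, a minimal query you might run from the Query Tool to see which hypertables exist is:

```sql
-- List the hypertables in the connected database
SELECT * FROM timescaledb_information.hypertables;
```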
 ![{CLOUD_LONG} pgadmin](https://assets.timescale.com/docs/images/timescale-cloud-pgadmin.png)
diff --git a/integrations/integrate/psql.mdx b/integrations/integrate/psql.mdx
index 695d16e..8810285 100644
--- a/integrations/integrate/psql.mdx
+++ b/integrations/integrate/psql.mdx
@@ -9,9 +9,7 @@ keywords: [psql, command line, terminal, PostgreSQL client, CLI, interactive she
 
 import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx';
 import { PG, SERVICE_SHORT, COMPANY } from '/snippets/vars.mdx';
 
-[`psql`][psql-docs] is a terminal-based frontend to {PG} that enables you to type in queries interactively, issue them to Postgres, and see the query results.
-
-This page shows you how to use the `psql` command line tool to interact with your {SERVICE_LONG}.
+This page shows you how to use the [`psql`][psql-docs] command line tool to interact with your {SERVICE_LONG}.
 
 ## Prerequisites
diff --git a/integrations/integrate/qstudio.mdx b/integrations/integrate/qstudio.mdx
index 999f0db..42d63b7 100644
--- a/integrations/integrate/qstudio.mdx
+++ b/integrations/integrate/qstudio.mdx
@@ -9,7 +9,7 @@ keywords: [qStudio, SQL editor, query tool, syntax highlighting, code completion
 
 import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx';
 import { CLOUD_LONG } from '/snippets/vars.mdx';
 
-[qStudio][qstudio] is a modern free SQL editor that provides syntax highlighting, code-completion, excel export, charting, and much more. You can use it to run queries, browse tables, and create charts for your {SERVICE_LONG}.
+You can use [qStudio][qstudio] to run queries, browse tables, and create charts for your {SERVICE_LONG}.
 
 This page explains how to integrate qStudio with {CLOUD_LONG}.
diff --git a/integrations/integrate/supabase.mdx b/integrations/integrate/supabase.mdx
index b6d26b2..a4658c8 100644
--- a/integrations/integrate/supabase.mdx
+++ b/integrations/integrate/supabase.mdx
@@ -10,9 +10,8 @@ import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx
 import CreateHypertablePolicyNote from '/snippets/manage-data/_create-hypertable-columnstore-policy-note.mdx';
 import { PG, CLOUD_LONG, HYPERTABLE_CAP, HYPERCORE_CAP, HYPERCORE, CAGG_CAP } from '/snippets/vars.mdx';
 
-[Supabase][supabase] is an open source Firebase alternative. This page shows how to run real-time analytical queries
-against a {SERVICE_LONG} through Supabase using a foreign data wrapper (fdw) to bring aggregated data from your
-{SERVICE_LONG}.
+This page shows how to run real-time analytical queries against a {SERVICE_LONG} through [Supabase][supabase] using a
+foreign data wrapper (fdw) to bring aggregated data from your {SERVICE_LONG}.
 
 ## Prerequisites
 
@@ -43,19 +42,6 @@ To set up a {SERVICE_LONG} optimized for analytics to receive data from Supabase
 
    ```
 
-1. **Optimize cooling data for analytics**
-
-   {HYPERCORE_CAP} is the hybrid row-columnar storage engine in {TIMESCALE_DB}, designed specifically for real-time analytics
-   and powered by time-series data. The advantage of {HYPERCORE} is its ability to seamlessly switch between row-oriented
-   and column-oriented storage. This flexibility enables {TIMESCALE_DB} to deliver the best of both worlds, solving the
-   key challenges in real-time analytics.
-
-   ```sql
-   ALTER TABLE signs SET (
-     timescaledb.enable_columnstore = true,
-     timescaledb.segmentby = 'name');
-   ```
-
 1. **Create optimized analytical queries**
 
    {CAGG_CAP} are designed to make queries on very large datasets run
diff --git a/integrations/integrate/telegraf.mdx b/integrations/integrate/telegraf.mdx
index aea7dbe..60b5be9 100644
--- a/integrations/integrate/telegraf.mdx
+++ b/integrations/integrate/telegraf.mdx
@@ -6,7 +6,7 @@ keywords: [Telegraf, data ingestion, metrics collection, InfluxData, plugins, Io
 ---
 
-import ImportPrerequisites from '/snippets/prerequisites/_migrate-import-prerequisites.mdx';
+import IntegrationPrereqs from '/snippets/prerequisites/_integration-prereqs.mdx';
 import SetupConnectionString from '/snippets/procedures/_migrate-import-setup-connection-strings.mdx';
 import { PG, HYPERTABLE, TIMESCALE_DB } from '/snippets/vars.mdx';
 
@@ -23,7 +23,7 @@ To view metrics gathered by Telegraf and stored in a [{HYPERTABLE}][about-hypert
 
 ## Prerequisites
 
-<ImportPrerequisites />
+<IntegrationPrereqs />
 
 - [Install Telegraf][install-telegraf]
 
diff --git a/snippets/integrations/_prometheus-integrate.mdx b/snippets/integrations/_prometheus-integrate.mdx
index 208d127..bdcb6f9 100644
--- a/snippets/integrations/_prometheus-integrate.mdx
+++ b/snippets/integrations/_prometheus-integrate.mdx
@@ -1,8 +1,6 @@
 import { SERVICE_SHORT, CLOUD_LONG, CONSOLE, SELF_LONG, SERVICE_LONG, SCALE, ENTERPRISE, PRICING_PLAN, SELF_LONG_CAP } from "/snippets/vars.mdx";
 
-[Prometheus][prometheus] is an open-source monitoring system with a dimensional data model, flexible query language, and a modern alerting approach.
-
-This page shows you how to export your {SERVICE_SHORT} telemetry to Prometheus:
+This page shows you how to export your {SERVICE_SHORT} telemetry to [Prometheus][prometheus]:
 
 - For {CLOUD_LONG}, using a dedicated Prometheus exporter in {CONSOLE}.
 - For {SELF_LONG}, using [Postgres Exporter][postgresql-exporter].
@@ -28,7 +26,12 @@ To export your data, do the following:
 
-To export metrics from a {SERVICE_LONG}, you create a dedicated Prometheus exporter in {CONSOLE}, attach it to your {SERVICE_SHORT}, then configure Prometheus to scrape metrics using the exposed URL. The Prometheus exporter exposes the metrics related to the {SERVICE_LONG} like CPU, memory, and storage. To scrape other metrics, use Postgres Exporter as described for {SELF_LONG}. The Prometheus exporter is available for [{SCALE} and {ENTERPRISE}][pricing-plan-features] {PRICING_PLAN}s.
+To export metrics from a {SERVICE_LONG}, you create a dedicated Prometheus exporter in {CONSOLE}, attach it to your
+{SERVICE_SHORT}, then configure Prometheus to scrape metrics using the exposed URL. The Prometheus exporter exposes
+the metrics related to the {SERVICE_LONG} like CPU, memory, and storage. To scrape other metrics, use Postgres Exporter
+as described for {SELF_LONG}.
+
+The Prometheus exporter is available for [{SCALE} and {ENTERPRISE}][pricing-plan-features] {PRICING_PLAN}s.
 
 1. **Create a Prometheus exporter**
 
diff --git a/snippets/integrations/_transit-gateway.mdx b/snippets/integrations/_transit-gateway.mdx
index 1118ba2..c462757 100644
--- a/snippets/integrations/_transit-gateway.mdx
+++ b/snippets/integrations/_transit-gateway.mdx
@@ -17,7 +17,7 @@ import { VPC, CONSOLE, CLOUD_LONG, SERVICE_SHORT, SERVICE_LONG, PRICING_PLAN } f
 1. In the `VPC Peering` column, click `Add`.
 1. Provide your AWS account ID, Transit Gateway ID, CIDR ranges, and AWS region. {CLOUD_LONG} creates a new isolated connection for every unique Transit Gateway ID.
-   ![Add peering](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-tiger-console.png)
+   ![Add peering](https://assets.timescale.com/docs/images/tiger-cloud-console/add-peering-tiger-console.png)
 1. Click `Add connection`.