From 0a822b4086ea39f687046c8aebb00656286b15ad Mon Sep 17 00:00:00 2001 From: Anna Geller Date: Fri, 22 Nov 2024 15:59:31 +0100 Subject: [PATCH] fix: remove confusing old examples --- examples/airbyte/airbyteCloud.yml | 8 - examples/airbyte/airbyteCloudDbtCloud.yml | 31 --- examples/airbyte/airbyteDbtCore.yml | 55 ------ examples/airbyte/airbyteSync.yml | 10 - examples/airbyte/airbyteSyncParallel.yml | 28 --- .../airbyte/airbyteSyncParallelWithDbt.yml | 70 ------- examples/airbyte/destinations.tf | 12 -- examples/airbyte/main.tf | 113 ----------- examples/airbyte/outputs.tf | 3 - examples/airbyte/sources.tf | 27 --- examples/airbyte/variables.tf | 17 -- examples/all-in-one/dataWarehouse.yml | 187 ------------------ examples/all-in-one/dataWarehouseRefresh.yml | 187 ------------------ examples/all-in-one/dwhRefresh.yml | 172 ---------------- examples/all-in-one/metadata.yml | 41 ---- examples/aws-cli/lisECSclusters.yml | 15 -- examples/aws_dynamodb/addItemToDynamoDB.yml | 35 ---- examples/aws_dynamodb/dynamodb.tf | 119 ----------- examples/aws_dynamodb/ingest.yml | 21 -- examples/aws_dynamodb/scanDynamoDBTable.yml | 16 -- examples/aws_dynamodb/variables.tf | 11 -- examples/aws_rds_postgres_dbt/kestra.tf | 57 ------ examples/aws_rds_postgres_dbt/main.tf | 89 --------- examples/aws_rds_postgres_dbt/outputs.tf | 17 -- examples/aws_rds_postgres_dbt/variables.tf | 19 -- examples/aws_s3/README.md | 6 - examples/aws_s3/downloadFromS3.yml | 15 -- examples/aws_s3/eventDrivenDuckDB.yml | 62 ------ examples/aws_s3/eventDrivenDuckDBParallel.yml | 45 ----- examples/aws_s3/img.png | Bin 54664 -> 0 bytes examples/aws_s3/main.tf | 21 -- examples/aws_s3/s3.tf | 134 ------------- examples/aws_s3/s3CreateBucket.yml | 16 -- examples/aws_s3/s3MapOverObjects.yml | 33 ---- examples/aws_s3/s3Trigger.yml | 31 --- examples/aws_s3/s3TriggerCommands.yml | 41 ---- examples/aws_s3/s3TriggerScript.yml | 46 ----- examples/aws_s3/s3parallelUploads.yml | 72 ------- examples/aws_s3/uploadFileToS3.yml | 24 --- .../aws_s3/x_ways_to_upload_html_file.yml | 72 ------- examples/aws_sns/smsEverySunday.yml | 25 --- examples/aws_sns/sns.tf | 67 ------- examples/aws_sqs/publish.yml | 20 -- examples/aws_sqs/reactToSqsTrigger.yml | 23 --- examples/aws_sqs/sqs.tf | 113 ----------- examples/azure/absTrigger.yml | 24 --- examples/azure/azureBlobToBigQuery.yml | 43 ---- examples/azure/azureCLI.yml | 12 -- examples/bigquery/gsheetsToBQ.yml | 24 --- examples/bigquery/loadCsvFromInput.yml | 22 --- examples/bigquery/loadFromGCSNorthwind.yml | 63 ------ examples/bigquery/queryExportToCsv.yml | 22 --- examples/bigquery/wikipedia.yml | 14 -- examples/clickhouse/queryClickHouse.yml | 44 ----- examples/defaults/taskDefaults.yml | 0 examples/deploy/flow1.yml | 6 - examples/deploy/flow2.yml | 6 - examples/deploy/flow3.yml | 6 - examples/deploy/marketing__flow1.yml | 6 - examples/deploy/marketing__flow2.yml | 6 - examples/deploy/marketing__flow3.yml | 6 - examples/dltHub/dltHub.yml | 33 ---- examples/duckdb/csvDuckDBSlack.yml | 30 --- examples/duckdb/duckDBqueryS3.yml | 17 -- examples/duckdb/duckDbAnalyzeCsv.yml | 25 --- examples/duckdb/duckDbQueryToCsv.yml | 26 --- examples/duckdb/salesReport.yml | 33 ---- examples/fivetran/fivetranDbtCloud.yml | 30 --- examples/fivetran/fivetranSync.yml | 8 - examples/fivetran/fivetranSyncParallel.yml | 31 --- .../fivetran/fivetranSyncParallelDbtCloud.yml | 38 ---- .../fivetran/fivetranSyncParallelDbtCore.yml | 73 ------- examples/flows/analyzeSales.yml | 26 --- examples/flows/core/null.yml | 22 --- 
examples/flows/core/parallelPython.yml | 69 ------- examples/flows/core/parallelSequences.yml | 26 --- examples/flows/core/retries.yml | 19 -- examples/flows/core/switch.yml | 24 --- examples/flows/core/terraform_ci_cd.tf | 20 -- examples/flows/ee_executions.yml | 85 -------- examples/flows/ee_stream_auditlogs_to_bq.yml | 83 -------- examples/flows/ee_stream_executions_to_bq.yml | 85 -------- examples/flows/ee_stream_flows_to_bq.yml | 60 ------ .../flows/failure_handling/failureDemo.yml | 11 -- .../failureDemoAllowFailure.yml | 28 --- examples/flows/finance/billing.yml | 18 -- .../getting_started/helloParametrized.yml | 15 -- .../helloParametrizedScheduled.yml | 24 --- .../helloParametrizedSchedulesMultiple.yml | 37 ---- examples/flows/getting_started/helloWorld.yml | 8 - .../getting_started/helloWorldWithLabels.yml | 10 - examples/flows/git/gitPython.yml | 22 --- .../flows/git/gitPythonPublicRepository.yml | 19 -- examples/flows/git/gitPythonWithSecret.yml | 20 -- examples/flows/marketing/attribution.yml | 8 - examples/flows/metrics/bashMetrics.yml | 23 --- examples/flows/metrics/pythonMetrics.yml | 44 ----- .../flows/notifications/slack/runtimeSLA.yml | 21 -- .../notifications/slack/slackFailureAlert.yml | 25 --- .../notifications/slack/slackSendMessage.yml | 11 -- .../flows/outputs/outputFromPythonScript.yml | 25 --- .../flows/outputs/passDataBetweenTasks.yml | 32 --- .../flows/outputs/passOutputsReturnLog.yml | 19 -- examples/flows/product/personalizations.yml | 18 -- .../00_old_blueprint_core_python_outputs.yml | 36 ---- examples/flows/python/analyzeSales.yml | 25 --- examples/flows/python/csvKit.yml | 65 ------ examples/flows/python/dockerGcp.yml | 44 ----- examples/flows/python/log_levels.yml | 62 ------ examples/flows/python/pythonAnalyzeCSV.yml | 16 -- examples/flows/python/pythonAnalyzeOrders.yml | 50 ----- .../flows/python/pythonCsvEachParallel.yml | 26 --- .../pythonCsvEachParallelScriptPlugin.yml | 20 -- .../python/pythonDependenciesVenvPerTask.yml | 23 --- .../pythonExtractUnzipProcessPandasCSV.yml | 30 --- examples/flows/python/pythonPandasCSV.yml | 33 ---- examples/flows/python/pythonPartitions.yml | 34 ---- .../flows/python/pythonScriptContainer.yml | 22 --- examples/flows/python/pythonScriptVenv.yml | 37 ---- examples/flows/staging/bingAds.yml | 8 - examples/flows/staging/csv.yml | 8 - examples/flows/staging/emarsys.yml | 8 - examples/flows/staging/facebook.yml | 8 - examples/flows/staging/googleAds.yml | 8 - examples/flows/staging/googleAnalytics.yml | 8 - .../flows/staging/googleSearchConsole.yml | 8 - examples/flows/staging/googleSheets.yml | 8 - examples/flows/staging/greenhouse.yml | 8 - examples/flows/staging/hubspot.yml | 8 - examples/flows/staging/mailchimp.yml | 8 - examples/flows/staging/microsoftDynamics.yml | 8 - examples/flows/staging/mongoDB.yml | 8 - examples/flows/staging/mysql.yml | 8 - examples/flows/staging/postgres.yml | 8 - examples/flows/staging/posthog.yml | 8 - examples/flows/staging/salesforce.yml | 8 - examples/flows/staging/shopify.yml | 8 - examples/flows/staging/sqlServer.yml | 8 - examples/flows/staging/stripe.yml | 8 - examples/mongo_db/filterMongoDB.yml | 17 -- examples/mongo_db/loadPokemon.yml | 23 --- examples/mongo_db/write.yml | 14 -- examples/mysql/extractLoadMySQL.yml | 44 ----- examples/postgres/apiJsonToPostgres.yml | 76 ------- .../postgres/apiJsonToPostgresPythonOnly.yml | 39 ---- .../apiJsonToPostgresPythonScript.yml | 53 ----- examples/postgres/batchLoad.yml | 22 --- examples/postgres/copyIn.yml | 45 ----- 
examples/postgres/eachSequentialPostgres.yml | 19 -- examples/postgres/extractLoadPostgres.yml | 40 ---- examples/postgres/postgresQuery.yml | 11 -- examples/postgres/postgresTrigger.yml | 21 -- examples/redis/cannotExpandTheList.yml | 32 --- examples/redis/jqTest.yml | 24 --- examples/redis/setGet.yml | 43 ---- examples/redis/setParallel.yml | 26 --- examples/redis/trigger.yml | 14 -- examples/rust/Cargo.toml | 9 - examples/rust/Dockerfile | 7 - examples/rust/README.md | 4 - examples/rust/src/main.rs | 57 ------ examples/scripts/api_users_to_json.py | 9 - examples/scripts/api_users_to_postgres.py | 21 -- examples/scripts/clean_messy_dataset.py | 12 -- examples/scripts/create_messy_dataset.py | 24 --- examples/scripts/etl_script.py | 61 ------ examples/scripts/generate_orders.py | 35 ---- examples/scripts/get_users.py | 9 - examples/scripts/gpu.py | 20 -- examples/scripts/hn_search.py | 26 --- examples/scripts/modal_getting_started.py | 20 -- examples/scripts/save_users_pg.py | 19 -- examples/scripts/vm_info.py | 16 -- examples/singer/gh_to_bq.yml | 45 ----- examples/singer/githubToBigquery_working.yml | 39 ---- examples/singer/postgres.yml | 34 ---- examples/singer/postgresToBQ_working.yml | 35 ---- examples/snowflake/bestQueryFlow.yml | 67 ------- examples/snowflake/fake_employees.py | 33 ---- examples/snowflake/final.yml | 64 ------ examples/snowflake/parallelLoad.yml | 74 ------- examples/snowflake/query.yml | 39 ---- examples/snowflake/queryTrigger.yml | 41 ---- examples/snowflake/stage.yml | 21 -- examples/spark/airbyteSpark.yml | 22 --- examples/triggers/backfill.yml | 35 ---- examples/triggers/variables_in_triggers.yml | 22 --- examples/triggers/webhookTrigger.yml | 15 -- examples/trino/README.md | 10 - examples/trino/query.yml | 32 --- examples/trino/simpleQuery.yml | 20 -- 191 files changed, 6074 deletions(-) delete mode 100644 examples/airbyte/airbyteCloud.yml delete mode 100644 examples/airbyte/airbyteCloudDbtCloud.yml delete mode 100644 examples/airbyte/airbyteDbtCore.yml delete mode 100644 examples/airbyte/airbyteSync.yml delete mode 100644 examples/airbyte/airbyteSyncParallel.yml delete mode 100644 examples/airbyte/airbyteSyncParallelWithDbt.yml delete mode 100644 examples/airbyte/destinations.tf delete mode 100644 examples/airbyte/main.tf delete mode 100644 examples/airbyte/outputs.tf delete mode 100644 examples/airbyte/sources.tf delete mode 100644 examples/airbyte/variables.tf delete mode 100644 examples/all-in-one/dataWarehouse.yml delete mode 100644 examples/all-in-one/dataWarehouseRefresh.yml delete mode 100644 examples/all-in-one/dwhRefresh.yml delete mode 100644 examples/all-in-one/metadata.yml delete mode 100644 examples/aws-cli/lisECSclusters.yml delete mode 100644 examples/aws_dynamodb/addItemToDynamoDB.yml delete mode 100644 examples/aws_dynamodb/dynamodb.tf delete mode 100644 examples/aws_dynamodb/ingest.yml delete mode 100644 examples/aws_dynamodb/scanDynamoDBTable.yml delete mode 100644 examples/aws_dynamodb/variables.tf delete mode 100644 examples/aws_rds_postgres_dbt/kestra.tf delete mode 100644 examples/aws_rds_postgres_dbt/main.tf delete mode 100644 examples/aws_rds_postgres_dbt/outputs.tf delete mode 100644 examples/aws_rds_postgres_dbt/variables.tf delete mode 100644 examples/aws_s3/README.md delete mode 100644 examples/aws_s3/downloadFromS3.yml delete mode 100644 examples/aws_s3/eventDrivenDuckDB.yml delete mode 100644 examples/aws_s3/eventDrivenDuckDBParallel.yml delete mode 100644 examples/aws_s3/img.png delete mode 100644 examples/aws_s3/main.tf 
delete mode 100644 examples/aws_s3/s3.tf delete mode 100644 examples/aws_s3/s3CreateBucket.yml delete mode 100644 examples/aws_s3/s3MapOverObjects.yml delete mode 100644 examples/aws_s3/s3Trigger.yml delete mode 100644 examples/aws_s3/s3TriggerCommands.yml delete mode 100644 examples/aws_s3/s3TriggerScript.yml delete mode 100644 examples/aws_s3/s3parallelUploads.yml delete mode 100644 examples/aws_s3/uploadFileToS3.yml delete mode 100644 examples/aws_s3/x_ways_to_upload_html_file.yml delete mode 100644 examples/aws_sns/smsEverySunday.yml delete mode 100644 examples/aws_sns/sns.tf delete mode 100644 examples/aws_sqs/publish.yml delete mode 100644 examples/aws_sqs/reactToSqsTrigger.yml delete mode 100644 examples/aws_sqs/sqs.tf delete mode 100644 examples/azure/absTrigger.yml delete mode 100644 examples/azure/azureBlobToBigQuery.yml delete mode 100644 examples/azure/azureCLI.yml delete mode 100644 examples/bigquery/gsheetsToBQ.yml delete mode 100644 examples/bigquery/loadCsvFromInput.yml delete mode 100644 examples/bigquery/loadFromGCSNorthwind.yml delete mode 100644 examples/bigquery/queryExportToCsv.yml delete mode 100644 examples/bigquery/wikipedia.yml delete mode 100644 examples/clickhouse/queryClickHouse.yml delete mode 100644 examples/defaults/taskDefaults.yml delete mode 100644 examples/deploy/flow1.yml delete mode 100644 examples/deploy/flow2.yml delete mode 100644 examples/deploy/flow3.yml delete mode 100644 examples/deploy/marketing__flow1.yml delete mode 100644 examples/deploy/marketing__flow2.yml delete mode 100644 examples/deploy/marketing__flow3.yml delete mode 100644 examples/dltHub/dltHub.yml delete mode 100644 examples/duckdb/csvDuckDBSlack.yml delete mode 100644 examples/duckdb/duckDBqueryS3.yml delete mode 100644 examples/duckdb/duckDbAnalyzeCsv.yml delete mode 100644 examples/duckdb/duckDbQueryToCsv.yml delete mode 100644 examples/duckdb/salesReport.yml delete mode 100644 examples/fivetran/fivetranDbtCloud.yml delete mode 100644 examples/fivetran/fivetranSync.yml delete mode 100644 examples/fivetran/fivetranSyncParallel.yml delete mode 100644 examples/fivetran/fivetranSyncParallelDbtCloud.yml delete mode 100644 examples/fivetran/fivetranSyncParallelDbtCore.yml delete mode 100644 examples/flows/analyzeSales.yml delete mode 100644 examples/flows/core/null.yml delete mode 100644 examples/flows/core/parallelPython.yml delete mode 100644 examples/flows/core/parallelSequences.yml delete mode 100644 examples/flows/core/retries.yml delete mode 100644 examples/flows/core/switch.yml delete mode 100644 examples/flows/core/terraform_ci_cd.tf delete mode 100644 examples/flows/ee_executions.yml delete mode 100644 examples/flows/ee_stream_auditlogs_to_bq.yml delete mode 100644 examples/flows/ee_stream_executions_to_bq.yml delete mode 100644 examples/flows/ee_stream_flows_to_bq.yml delete mode 100644 examples/flows/failure_handling/failureDemo.yml delete mode 100644 examples/flows/failure_handling/failureDemoAllowFailure.yml delete mode 100644 examples/flows/finance/billing.yml delete mode 100644 examples/flows/getting_started/helloParametrized.yml delete mode 100644 examples/flows/getting_started/helloParametrizedScheduled.yml delete mode 100644 examples/flows/getting_started/helloParametrizedSchedulesMultiple.yml delete mode 100644 examples/flows/getting_started/helloWorld.yml delete mode 100644 examples/flows/getting_started/helloWorldWithLabels.yml delete mode 100644 examples/flows/git/gitPython.yml delete mode 100644 examples/flows/git/gitPythonPublicRepository.yml delete mode 
100644 examples/flows/git/gitPythonWithSecret.yml delete mode 100644 examples/flows/marketing/attribution.yml delete mode 100644 examples/flows/metrics/bashMetrics.yml delete mode 100644 examples/flows/metrics/pythonMetrics.yml delete mode 100644 examples/flows/notifications/slack/runtimeSLA.yml delete mode 100644 examples/flows/notifications/slack/slackFailureAlert.yml delete mode 100644 examples/flows/notifications/slack/slackSendMessage.yml delete mode 100644 examples/flows/outputs/outputFromPythonScript.yml delete mode 100644 examples/flows/outputs/passDataBetweenTasks.yml delete mode 100644 examples/flows/outputs/passOutputsReturnLog.yml delete mode 100644 examples/flows/product/personalizations.yml delete mode 100644 examples/flows/python/00_old_blueprint_core_python_outputs.yml delete mode 100644 examples/flows/python/analyzeSales.yml delete mode 100644 examples/flows/python/csvKit.yml delete mode 100644 examples/flows/python/dockerGcp.yml delete mode 100644 examples/flows/python/log_levels.yml delete mode 100644 examples/flows/python/pythonAnalyzeCSV.yml delete mode 100644 examples/flows/python/pythonAnalyzeOrders.yml delete mode 100644 examples/flows/python/pythonCsvEachParallel.yml delete mode 100644 examples/flows/python/pythonCsvEachParallelScriptPlugin.yml delete mode 100644 examples/flows/python/pythonDependenciesVenvPerTask.yml delete mode 100644 examples/flows/python/pythonExtractUnzipProcessPandasCSV.yml delete mode 100644 examples/flows/python/pythonPandasCSV.yml delete mode 100644 examples/flows/python/pythonPartitions.yml delete mode 100644 examples/flows/python/pythonScriptContainer.yml delete mode 100644 examples/flows/python/pythonScriptVenv.yml delete mode 100644 examples/flows/staging/bingAds.yml delete mode 100644 examples/flows/staging/csv.yml delete mode 100644 examples/flows/staging/emarsys.yml delete mode 100644 examples/flows/staging/facebook.yml delete mode 100644 examples/flows/staging/googleAds.yml delete mode 100644 examples/flows/staging/googleAnalytics.yml delete mode 100644 examples/flows/staging/googleSearchConsole.yml delete mode 100644 examples/flows/staging/googleSheets.yml delete mode 100644 examples/flows/staging/greenhouse.yml delete mode 100644 examples/flows/staging/hubspot.yml delete mode 100644 examples/flows/staging/mailchimp.yml delete mode 100644 examples/flows/staging/microsoftDynamics.yml delete mode 100644 examples/flows/staging/mongoDB.yml delete mode 100644 examples/flows/staging/mysql.yml delete mode 100644 examples/flows/staging/postgres.yml delete mode 100644 examples/flows/staging/posthog.yml delete mode 100644 examples/flows/staging/salesforce.yml delete mode 100644 examples/flows/staging/shopify.yml delete mode 100644 examples/flows/staging/sqlServer.yml delete mode 100644 examples/flows/staging/stripe.yml delete mode 100644 examples/mongo_db/filterMongoDB.yml delete mode 100644 examples/mongo_db/loadPokemon.yml delete mode 100644 examples/mongo_db/write.yml delete mode 100644 examples/mysql/extractLoadMySQL.yml delete mode 100644 examples/postgres/apiJsonToPostgres.yml delete mode 100644 examples/postgres/apiJsonToPostgresPythonOnly.yml delete mode 100644 examples/postgres/apiJsonToPostgresPythonScript.yml delete mode 100644 examples/postgres/batchLoad.yml delete mode 100644 examples/postgres/copyIn.yml delete mode 100644 examples/postgres/eachSequentialPostgres.yml delete mode 100644 examples/postgres/extractLoadPostgres.yml delete mode 100644 examples/postgres/postgresQuery.yml delete mode 100644 
examples/postgres/postgresTrigger.yml delete mode 100644 examples/redis/cannotExpandTheList.yml delete mode 100644 examples/redis/jqTest.yml delete mode 100644 examples/redis/setGet.yml delete mode 100644 examples/redis/setParallel.yml delete mode 100644 examples/redis/trigger.yml delete mode 100644 examples/rust/Cargo.toml delete mode 100644 examples/rust/Dockerfile delete mode 100644 examples/rust/README.md delete mode 100644 examples/rust/src/main.rs delete mode 100644 examples/scripts/api_users_to_json.py delete mode 100644 examples/scripts/api_users_to_postgres.py delete mode 100644 examples/scripts/clean_messy_dataset.py delete mode 100644 examples/scripts/create_messy_dataset.py delete mode 100644 examples/scripts/etl_script.py delete mode 100644 examples/scripts/generate_orders.py delete mode 100644 examples/scripts/get_users.py delete mode 100644 examples/scripts/gpu.py delete mode 100644 examples/scripts/hn_search.py delete mode 100644 examples/scripts/modal_getting_started.py delete mode 100644 examples/scripts/save_users_pg.py delete mode 100644 examples/scripts/vm_info.py delete mode 100644 examples/singer/gh_to_bq.yml delete mode 100644 examples/singer/githubToBigquery_working.yml delete mode 100644 examples/singer/postgres.yml delete mode 100644 examples/singer/postgresToBQ_working.yml delete mode 100644 examples/snowflake/bestQueryFlow.yml delete mode 100644 examples/snowflake/fake_employees.py delete mode 100644 examples/snowflake/final.yml delete mode 100644 examples/snowflake/parallelLoad.yml delete mode 100644 examples/snowflake/query.yml delete mode 100644 examples/snowflake/queryTrigger.yml delete mode 100644 examples/snowflake/stage.yml delete mode 100644 examples/spark/airbyteSpark.yml delete mode 100644 examples/triggers/backfill.yml delete mode 100644 examples/triggers/variables_in_triggers.yml delete mode 100644 examples/triggers/webhookTrigger.yml delete mode 100644 examples/trino/README.md delete mode 100644 examples/trino/query.yml delete mode 100644 examples/trino/simpleQuery.yml diff --git a/examples/airbyte/airbyteCloud.yml b/examples/airbyte/airbyteCloud.yml deleted file mode 100644 index 5e47167..0000000 --- a/examples/airbyte/airbyteCloud.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: airbyte_cloud -namespace: company.team - -tasks: - - id: sample_data_to_s3 - type: io.kestra.plugin.airbyte.cloud.jobs.Sync - connectionId: ac127cf2-9ae3-4f9b-9dd0-e3a0944d1447 - token: "{{ secret('AIRBYTE_CLOUD_API_TOKEN') }}" diff --git a/examples/airbyte/airbyteCloudDbtCloud.yml b/examples/airbyte/airbyteCloudDbtCloud.yml deleted file mode 100644 index 6f05ed4..0000000 --- a/examples/airbyte/airbyteCloudDbtCloud.yml +++ /dev/null @@ -1,31 +0,0 @@ -id: airbyte_cloud_dbt_cloud -namespace: company.team - -tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.airbyte.cloud.jobs.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ab - - - id: google_analytics - type: io.kestra.plugin.airbyte.cloud.jobs.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd - - - id: facebook_ads - type: io.kestra.plugin.airbyte.cloud.jobs.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ef - - - id: dbt_cloud_job - type: io.kestra.plugin.dbt.cloud.TriggerRun - jobId: "396284" - accountId: "{{ secret('DBT_CLOUD_ACCOUNT_ID') }}" - token: "{{ secret('DBT_CLOUD_API_TOKEN') }}" - wait: true - -pluginDefaults: - - type: io.kestra.plugin.airbyte.cloud.jobs.Sync - values: - url: http://host.docker.internal:8000/ - token: "{{ 
secret('AIRBYTE_CLOUD_API_TOKEN') }}" diff --git a/examples/airbyte/airbyteDbtCore.yml b/examples/airbyte/airbyteDbtCore.yml deleted file mode 100644 index 13ac503..0000000 --- a/examples/airbyte/airbyteDbtCore.yml +++ /dev/null @@ -1,55 +0,0 @@ -id: airbyte_dbt_core -namespace: company.team - -tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: psyduck - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: 4de8ab1e-50ef-4df0-aa01-7f21491081f1 - - - id: sample_data - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: 71291950-ccc1-4875-91b7-e801376c549e - - - id: charizard - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: 9bb96539-73e7-4b9a-9937-6ce861b49cb9 - - - id: pikachu - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: 39c38950-b0b9-4fce-a303-06ced3dbfa75 - - - id: dbt - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - url: https://github.com/jwills/jaffle_shop_duckdb - branch: duckdb - - - id: dbt_build - type: io.kestra.plugin.dbt.cli.Build - debug: true - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: ghcr.io/kestra-io/dbt-duckdb:latest - dbtPath: /usr/local/bin/dbt - inputFiles: - .profile/profiles.yml: | - jaffle_shop: - outputs: - dev: - type: duckdb - path: ':memory:' - extensions: - - parquet - target: dev - -pluginDefaults: - - type: io.kestra.plugin.airbyte.connections.Sync - values: - url: http://host.docker.internal:8000/ - username: "{{ secret('AIRBYTE_USERNAME') }}" - password: "{{ secret('AIRBYTE_PASSWORD') }}" diff --git a/examples/airbyte/airbyteSync.yml b/examples/airbyte/airbyteSync.yml deleted file mode 100644 index f1251fe..0000000 --- a/examples/airbyte/airbyteSync.yml +++ /dev/null @@ -1,10 +0,0 @@ -id: airbyte_sync -namespace: company.team - -tasks: - - id: data_ingestion_sync - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ab - url: http://host.docker.internal:8000/ - username: "{{ secret('AIRBYTE_USERNAME') }}" - password: "{{ secret('AIRBYTE_PASSWORD') }}" \ No newline at end of file diff --git a/examples/airbyte/airbyteSyncParallel.yml b/examples/airbyte/airbyteSyncParallel.yml deleted file mode 100644 index 17415a1..0000000 --- a/examples/airbyte/airbyteSyncParallel.yml +++ /dev/null @@ -1,28 +0,0 @@ -id: airbyte_sync_parallel -namespace: company.team -description: | - This flow will sync data from multiple sources in parallel using Airbyte. - The Airbyte API credentials, referenced in the `pluginDefaults`, must be provided in the environment variables. 
- -tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ab - - - id: google_analytics - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd - - - id: facebook_ads - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ef - -pluginDefaults: - - type: io.kestra.plugin.airbyte.connections.Sync - values: - url: http://host.docker.internal:8000/ - username: "{{ secret('AIRBYTE_USERNAME') }}" - password: "{{ secret('AIRBYTE_PASSWORD') }}" diff --git a/examples/airbyte/airbyteSyncParallelWithDbt.yml b/examples/airbyte/airbyteSyncParallelWithDbt.yml deleted file mode 100644 index 466cfc7..0000000 --- a/examples/airbyte/airbyteSyncParallelWithDbt.yml +++ /dev/null @@ -1,70 +0,0 @@ -id: airbyte_sync_parallel_with_dbt -namespace: company.team -description: | - This flow will sync data from multiple sources in parallel using Airbyte. - The Airbyte API credentials, referenced in the `pluginDefaults`, must be provided in the environment variables. - -tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ab - - - id: google_analytics - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12cd - - - id: facebook_ads - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ef - - - id: dbt - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - url: https://github.com/dbt-labs/jaffle_shop - branch: main - - - id: dbt_setup - type: io.kestra.plugin.dbt.cli.Setup - profiles: - jaffle_shop: - outputs: - dev: - type: bigquery - dataset: dwh - fixed_retries: 1 - keyfile: sa.json - location: EU - method: service-account - priority: interactive - project: geller - threads: 8 - timeout_seconds: 300 - target: dev - requirements: - - dbt-bigquery - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.10-slim - inputFiles: - sa.json: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" - - - id: dbt_build - type: io.kestra.plugin.dbt.cli.Build - debug: false - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.10-slim - inputFiles: - sa.json: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" - -pluginsDefaults: - - type: io.kestra.plugin.airbyte.connections.Sync - values: - url: http://host.docker.internal:8000/ - username: "{{ secret('AIRBYTE_USERNAME') }}" - password: "{{ secret('AIRBYTE_PASSWORD') }}" diff --git a/examples/airbyte/destinations.tf b/examples/airbyte/destinations.tf deleted file mode 100644 index 946b357..0000000 --- a/examples/airbyte/destinations.tf +++ /dev/null @@ -1,12 +0,0 @@ -resource "airbyte_destination_dev_null" "null_destination" { - configuration = { - destination_type = "dev-null" - test_destination = { - destination_dev_null_test_destination_silent = { - test_destination_type = "SILENT" - } - } - } - name = "Null Destination" - workspace_id = var.airbyte_workspace_id -} diff --git a/examples/airbyte/main.tf b/examples/airbyte/main.tf deleted file mode 100644 index 5f82f27..0000000 --- a/examples/airbyte/main.tf +++ /dev/null @@ -1,113 +0,0 @@ -terraform { - required_providers { - airbyte 
= { - source = "airbytehq/airbyte" - version = "0.1.1" - } - kestra = { - source = "kestra-io/kestra" - version = "~> 0.7.0" - } - } -} - -provider "airbyte" { - bearer_auth = var.airbyte_api_key -} - -provider "kestra" { - url = var.kestra_url -} - - -resource "airbyte_connection" "pokeapi_devnull" { - name = "PokeAPI → Null Destination" - source_id = airbyte_source_pokeapi.pokeapi.source_id - destination_id = airbyte_destination_dev_null.null_destination.destination_id -} - -resource "airbyte_connection" "sample_devnull" { - name = "Sample Data → Null Destination" - source_id = airbyte_source_faker.sample.source_id - destination_id = airbyte_destination_dev_null.null_destination.destination_id -} - -resource "airbyte_connection" "dockerhub_devnull" { - name = "DockerHub → Null Destination" - source_id = airbyte_source_dockerhub.dockerhub.source_id - destination_id = airbyte_destination_dev_null.null_destination.destination_id -} - - -resource "kestra_flow" "airbyte" { - keep_original_source = true - flow_id = "airbyte" - namespace = var.namespace - content = < {{outputDir}}/output.json diff --git a/examples/aws_dynamodb/addItemToDynamoDB.yml b/examples/aws_dynamodb/addItemToDynamoDB.yml deleted file mode 100644 index 7d8bc19..0000000 --- a/examples/aws_dynamodb/addItemToDynamoDB.yml +++ /dev/null @@ -1,35 +0,0 @@ -id: add_item_to_dynamodb -namespace: company.team -description: | - This flow adds an item to a DynamoDB table. - The `item` property can be either a map or a JSON string. - The `tableName` property must point to an already existing DynamoDB table. - The `region` property must be a valid AWS region. - It's recommended to set the `accessKeyId` and `secretKeyId` properties as environment variables. - -tasks: - - id: first_item_as_map - type: io.kestra.plugin.aws.dynamodb.PutItem - tableName: demo - region: "{{ secret('AWS_DEFAULT_REGION') }}" - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_KEY_ID') }}" - item: - id: 1 - flow: "{{ flow.id }}" - task: "{{ task.id }}" - executionId: "{{ execution.id }}" - - - id: second_item_as_json - type: io.kestra.plugin.aws.dynamodb.PutItem - tableName: demo - region: "{{ secret('AWS_DEFAULT_REGION') }}" - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" - item: | - { - "id": 2, - "flow": "{{ flow.id }}", - "task": "{{ task.id }}", - "executionId": "{{ execution.id }}" - } diff --git a/examples/aws_dynamodb/dynamodb.tf b/examples/aws_dynamodb/dynamodb.tf deleted file mode 100644 index 01f880d..0000000 --- a/examples/aws_dynamodb/dynamodb.tf +++ /dev/null @@ -1,119 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - kestra = { - source = "kestra-io/kestra" - version = "~> 0.7.0" - } - } -} - -provider "kestra" { - url = "http://localhost:8080" -} - -provider "aws" { - region = var.region - profile = "default" -} - -resource "aws_iam_policy" "dynamodb" { - name = "dynamodb" - description = "Policy to sync data to BigQuery from Fivetran" - policy = jsonencode({ - Version = "2012-10-17" - Statement = [ - { - Action = [ - "dynamodb:DescribeStream", - "dynamodb:DescribeTable", - "dynamodb:GetRecords", - "dynamodb:GetShardIterator", - "dynamodb:ListTables", - "dynamodb:Scan" - ] - Effect = "Allow" - Resource = "*" - }, - ] - }) -} - -module "dynamodb_table" { - source = "terraform-aws-modules/dynamodb-table/aws" - - name = "demo" - hash_key = "id" - - attributes = [ - { - name = "id" - type = "S" # N 
for number, S for string - } - ] -} - - -resource "kestra_flow" "addItemToDynamoDB" { - keep_original_source = true - flow_id = "addItemToDynamoDB" - namespace = var.namespace - content = < - Anomalous rows are attached in a CSV file.

- Best regards,
- Data Team
-
-triggers:
-  - id: poll_for_new_s3_files
-    type: io.kestra.plugin.aws.s3.Trigger
-    bucket: "{{ vars.bucket }}"
-    prefix: "{{ vars.source_prefix }}"
-    maxKeys: 1 # 1 file = 1 execution
-    interval: PT1S # every second
-    filter: FILES
-    action: MOVE
-    moveTo:
-      key: "{{ vars.destination_prefix }}/{{ vars.source_prefix }}"
-    region: "{{ secret('AWS_DEFAULT_REGION') }}"
-    accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
-    secretKeyId: "{{ secret('AWS_SECRET_KEY_ID') }}"
diff --git a/examples/aws_s3/eventDrivenDuckDBParallel.yml b/examples/aws_s3/eventDrivenDuckDBParallel.yml
deleted file mode 100644
index 7e57d2e..0000000
--- a/examples/aws_s3/eventDrivenDuckDBParallel.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-id: s3_duckdb
-namespace: company.team
-
-variables:
-  bucket: kestraio
-  prefix: monthly_orders
-  moved_prefix: stage_orders
-
-tasks:
-  - id: each
-    type: io.kestra.plugin.core.flow.ForEach
-    values: "{{ trigger.objects | jq('.[].key') }}"
-    tasks:
-      - id: query
-        type: io.kestra.plugin.jdbc.duckdb.Query
-        sql: |
-          SELECT *
-          FROM read_csv_auto('s3://{{vars.bucket}}/{{vars.moved_prefix}}/{{taskrun.value}}')
-          WHERE price * quantity != total;
-        fetchType: FETCH
-        url: "jdbc:duckdb:md:my_db?motherduck_token={{ secret('MOTHERDUCK_TOKEN') }}"
-
-  - id: if_anomalies_detected
-    type: io.kestra.plugin.core.flow.If
-    condition: "{{ outputs.query['{{ taskrun.value }}'].size }}"
-    then:
-      - id: slack_anomaly_alert
-        type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook
-        url: "{{ secret('SLACK_WEBHOOK') }}"
-        payload: |
-          {"channel":"#reporting","text":"Anomaly detected: `{{ outputs.query['{{ taskrun.value }}'].rows }}`"}
-
-triggers:
-  - id: wait_for_s3_object
-    type: io.kestra.plugin.aws.s3.Trigger
-    bucket: "{{ vars.bucket }}"
-    prefix: "{{ vars.prefix }}"
-    interval: PT1S
-    filter: FILES
-    action: MOVE
-    moveTo:
-      key: "{{ vars.moved_prefix }}/{{ vars.prefix }}"
-    region: "{{ secret('AWS_DEFAULT_REGION') }}"
-    accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}"
-    secretKeyId: "{{ secret('AWS_SECRET_KEY_ID') }}"
diff --git a/examples/aws_s3/img.png b/examples/aws_s3/img.png
deleted file mode 100644
index dfb2c21cc53a9cfcfbe12e618df677286aab1923..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 54664
[... 54,664 bytes of base85-encoded binary data for the deleted examples/aws_s3/img.png omitted ...]
zh6T?HDtpqm)~`0rwL<2Z3DTUqYm$LYG{VO%m(m+{G|sU&EPCYeJ#8nLJ)zGq*XlVW z^Ht3a#Rt+(MfSdhT8;{&4u( zYI=1+In8s<2$9YLnxJZu2_#;6WYIlsNl6oSG;XR>xb-ERk=uley?=RrGYiiaN6*a85n@)M~P_H(lLn zT?3O;eMu?P;$ZQ57T?WDXCB8F==DX7muKEJ%5u$8vAh+SY)%K8$R>#n=jin3vr8SB zJG;TYVqyxf2y}9MmIObt2YzrMwbkDN?rOJn9(J%eWYrdA5T;QgJ@~Zw3=FQihg7e^ zIwNHg4%<7)n>fapj@nNMoqCl&%z7V!6{PZU!NFo)Op%yasXi!OL5-#>Iq+ZOt3Io= zIdrZlm=~+HF>bG2fr(yGSE&eWUKD6t42s0SJ8w;=`FmAqZ+lphlH{Pu(7K^&`|tH(v&~idTJavv zIdH2gacsi$N$r^#4`LU-0{KE%sO=v!&zK|m!-8#t0_>BOYI2a6`Vi1fujG= zHOI6BA;kID#X4fBD|F|tvUK~zzp4~HkL0_5yB zV5BL&Y_k2O^5%}RqfmIaf9Ujz6+HTO06A*$XxSOTi~6_R74c=9hw(wlcxbr4r|2_p z8(nZE>gU}B&an-gJ?)0kHE$?`El{2sIt>BN4FsJ9cm$3iO} zuYl|-VT@eJ>JW4wy~5u__`w_PcIgKgn*}iJ0xre@4?Ejj|5o=;=(=8Ny#1q-b!gi~ z`=GJ?r-_E{^0XPt*3_Tx3#z5aKwisN9xmHsmUTjyN^ilq4^vRc5`U~?bMnOPo63ST z#nuPwH?x(4s>Jz(QO7R%r{Eu>Pl8$%^3MGD@-nucQqC8}D!JF};kD`V&P*9EZz5HU z5y(@#8RL=N<~DEy$|qa}+jIWr0|90E!LVqztWG7kt?AZ#_i5@S{q`PtNiO@V8xLxm zj7RHN#k(>uCi!ulM6Z}R4m?Z~qi!86nRS*&%lc*GwZrT8#`7zyPo}BIDUwQpk&{6u zX&IK$)LfFL49!AV(pu7A1A|^|ShC$1N5iFjJC76rCQI{(#-B_OnXr5ad8ONmJ9Fw{ zyT_4|{4L^zu>r|9XDV!|TVrseCG7GD0=mP2%6#0Vftic*gIx#A>%$l?rh09Z)N#0y z1H}!4e(lg{iRE#9Js+aW!rQd7j@Oi`&Bk^9icC?T_i7N4ZFj#h=KeI5jH!i9ig85z zf{B@MDGl|b6Kv=$XpeVg*mR{JEBuR-=#ubp&q8t1W_hcHdyHFtsh{*5NF@Hx(A)n0 zQN2m`>m>x);6ZTpUu;dRlitBX<-F3(nTlpaa4}41=+c+QeO#r=@sbZ8SqDZv>vf1b z%abgXC&|&PxmH?HzgpDP5Gq??c+{n?LgC9=fr>TLbA5O5iYvcBtCmgmUatbv^FPjw z5{151Fy?k`70W{)|(W3Ted<~5Fk%dW4BsSVU#Z;6kY zY){m&92?_gj4UJy?U=?r`umG}CqiepILBS}lgaj*5l7Sp?hTzwEesK?$j5fu_^;1c zC!JqWF+9H!ay7lmOpmktSD>6vT+u`DWZm0S;w_7uF7b|18uYjrSN6!_m1sRHY57UE zZ$59Z&$KP>ql;1h@<>;I*8Lm8#+#>LHV@xrh46|YyoH{hinj~tnOE^xz3(#HT}Uy_ z%}zN1u>&B3#kRE#Mqm0Gr%?LnsO9*OLP}$tyb$N0?g_}aqH%T=(qr>ZniySaDQtAL z?^6^6V!{KiF=`AsJE!5|nV~eDcRx%6Y$IPrnp|z1-G@`~?}TULl@XcuOXJHbEJ1e? 
zI;&Ba(cPwq%{%GEoQF?92wumY9ukD=$!{B)QD6mssm=-CRC{F>3ZaZQ<`ac!lkPMdeDMd|~r@}hWX^SU*c^emF zptrJ@Q{^n!%G1GCo(>9Vz+%E?dAVHg0A^AC&yx+b0bSqu1!|Nku3FsaCBx?F6}P&}tR8C$HeOX= zX{$1Yp&0S%I!)v}T-qm8N(J%iyq^D- zY+v#2YUd((#g@Q-cRSlcd9w0v2KG%-9y8Y}J-n{ytwcQGl1r*qcRZbpS~Ul^pckcv z$tBpST>rRy6e^LjGnA_j!!$n3ka103tdryZJ>}tDiX+!SS6Z9vRLTIqNy+D~U|e$g z-AUEg21PC(K&6KL1aCGOWP+iDRY`potd`|@Ki6wZ=%qHZUHxhc;~7rI>1OUtj4phk zb6so7sg_TR_S&mR<^rZaF`?D08)lx+Cl5#+c(9na3?Ifgy-VQl;cXy`|F=Bi=7M6s zX+q2OhMBAwT&%VqL{WRm*ShiU`B6sCbiXm42+OKin_Z$6N?C|&e8SRDZ&ND4RTDP6 z+L`g^Nbg9(Whn|}=xd5#8GeRd+*#Xg*#N%Qc@5WoysB1xHv9*sSH8w2=ai{YpY1mlKzSuSaZ((E zxW)%vBeI~4E03CDJe<9Bec9BT@jWzjzTk|{X&^s?TH#CKvP z!@0uJY?Jf+m5&&_5kYzXTMTY(f?>dSjqlRhIQ_1Y^2#bz*SNCn(}QJ33=QQHG?&pj z&Ig5yneQ?aB^NlT6%f3d)wEeX(=j*>^7jsee?Y#J$+lDHI zVNF}>g=??2y;N0AZQ0A^I{G(K+0>6vvFuTSyQGh(Te;0wr<&0?VuZ; z6w%8P>1{p^3Rzc7Ep;tKu6r*v{ z3F~kVQ6JF!GxFP4mgd12xU6d;p-Z=PsXv@>$X&BQb&tl5<^*Ud$&S?|k0V;0WN{aQ#gU*$ikTS6dUm)I}iF!?CKCPC5+?+gItEaK(?02}7 zP#;xHRUM2s4EjpgV#czOkSD)EDsw%3xy6rtPh`TM9aQTtXRPgFJD*mQ_!Xk|$ZETt zDI5)rz7-uQSC2kv_NwJ5*d$-?NNlG;&S55fBWVflxV~sVxN9HJ(i1HG# z%+;?jztWx8iyo3PPU(++D)?FX+Qr>NM4?=$u1+~nojXeEbQ_ zeinT$v+0;2`(gqw<&Y4hNqcK!6z&3?{GB0~H|d0WDnwtHJ~?og7(m| zC|JWWW9nVA5OLAEDPeY^$(gM=3+&loY-au z5<;C|Ui^@TXCD{yK@W|p&j1s4hg~e?y+T7y=h>^4HcFI!o#C?URKG(%AB}0Nv6Vbg zaAZ@$oWN`Tm5Tyvr|+v3*d_!Z|K7W;a*AY^3cbw<#|4Zhj?c4EHF6ph*?UOmdwF6G^L0c5KqNBb;Z(_piCBJ+1 zjG5ki+j6lFHXBlas%uR*(b@RZo*9K9{$6I0eFKyPrTF2Nu)&q9TEaX#{?j(x@aU&v zydCRWAg!tGs^0|NUn6+F$Y%`hT(~x|VLsYGe`?PzQ)HcPp?4;@VjolimaYTi-$v^J zO2&9UZim{W1vJ*R{F?U;tiTs!$w(!yS9@BXk}ge@jUV?Xr|UhZ=)GKk?-6qk6p zwQhg2K1^dOO&JGH&^{*mQ?UKqYdVRCbUrq`4sp({{Y{&>ZsWpaV1+QiZjmUcv}NC7 zv^zT;iXTDrZ~1I0BmfAMeI7oiKD2DrwP#ItW!f!mM*CNXI8 zq`X=If@-2zm+emqKsSIZZT>*B@UZo~$h>+Odx@0P79Mj*esx0ig~BSPZeO~Z9rJRP zc)qzfnE+|kR$n@7?=FfOli0kl%c9*hpjmzJ(~}68+@Fed9>nEv``~u`w&}zm`i16f z2M9|AN4Yb7WmSiR~gtD>scT|(x&Qti4DYW zn`^fN1d{Qkw*@5A9uabZyPaWgch12)Iz32rKWLv^wJFh+p$z*76DvB!@aZ;LZM_TJblc*Z$qe0XYV(C-^~qu`dCg!6R4s zjIv(~7@b=hc-3}x?95S46_?JZ_I6pGLe~Z;ZupQEVVpvqID@hDK#89ql_u#g8B5A2 z*-HzmgX^J}y27H@_^E?y0veSqPucrFhn0H3Jy-x-SUKmY7bkFa(D5vnTziQ>{GjQy zk_ll8{hBVdd%!Ap^Iny1sCVfYvd7%d%Tf<5d%ueHkFC?e1v~Hd63U-Tl}Bzmf0No3 zjMCd|aCj!-V6jjulPMXRBJSe@h~Hm}A4^RU3wCGB4z3AOEo}qvs+$o%6*s8LtzvqP zId#J<9-6gdfn(T=AC{oXTV_8t(PbK;soI;%nh(h7!e14^U@p#M;{_+8gOVyp%@^e- zOMwJlxO5AIrhyvvqZ;!9v=e^Cwp%zE8+W9<>r_j?!?4D-;kxN0=AL@Q9<#`iq6U7u zfU$=C=;yEY@-BQgcypx5wjj?M0BmOtBKs$5spi!Q-^>~>C2sTa))rD4d~VkF1gP)* zA3_dF*H8P{)Jf-UN6NqZ&3Ayw)q${5;-Ae2p3bs*CA#AZ2_D_M1vEi+?tjez4q$H1 zf_gfiM#yoyH8>oo|4zjLr~UB#0pn@e0AQljhm5aAd~nLO|1Jus{DjL@ONbK<>T7(} zLX*qbkmJVbtpW@}Y~hYF-qvfX+3?$=4vSpYx3Dcjg)Y@8g;q-z1y-A}Ip>xv0s10t zL1=Ap>ec?$I6L4Low)m3nDPt6a(nMuU$&od5Ox@O?Ds>zC+=mjhY<-Q&z$-{hT#50 z8xGZZRbDn-RUj_q1SwukL@5F@Y8%=3tj9-h<1c&n*H{_35e(l->u3iY%NrU3eh6?! zz-V{{erS8#=^j8wD(gbd#v=59ljjlvW+L?!w9wSo0!AHxQG}^s8D~7;8A@xn06~G1 z5Ary`woodzYhdQ9od>p!V&9*k&x}6XtJ_H89@5 z{l#f6rM)`gPfRAMzvW3L0k_B@nNUX71cJJa8G!h2T$pRbHe?~`Qosb{*uik5@4OG(wHFmRS8ng$K_YzH!=mp5E>P-mti6?&LDclT#>Rj*Zc6#omCEK%p^R=A4o6OC z0dSbU`O1xpe9-x0d6!amY<^99y%6e?RNN8gkbM8GNd>jSriI{>Vh#2lftx+Kg>DtE zlv`F95L_R|t3u3K{Ra-&jyG%4{7>5Y&1<8{X)cYkr3Bmus>ziJBvpuVe>3L_6~eeV zFANF{{qRXpHc0GWdtcdR)?#kq2v158t#vi=K1TQC)eKcNTmL?DO0v)dWNl*ekE;31 z3I7V;tPejjBFJQW^b1}yIYA=u)2rq8+}MJP2{&?E8G}vHZUv>}2(Yy0_2N$L80x+s z!)9x`lF}(pV=Awxh0fovfiG>K0}??gI?9ZDSgP9P=Qsdfylxu)#Jb1Yy)MYV*TsM7pHO;?*WN3K10Q7TP z&;VGSp}K8vZ3D8O-F*q>fDYxEJo53j&;Jv9b$9}m(9@i!>iKkYrnjlE#d$8b=+Lk? 
zEu2!_ggvSfbTtP=M!qNtF@LO4G&@)N8wkrh3XKp$1lw7?no3)y*gDq?^~z5ijYXN{ z5`&CA=Je~k^pdW|Q=CRsultcSRE1uDyQ_E!3MHH*)@0-^N~MQ~bqo+vP&P{ubVX+qOvD zM3Gc{Q_*TLFyMAv?w;cjc1fPE$Zdo6F6x=Rqt=>-=D#5#uKz6JVsOi)_XlE3P1YuU zVFSo^CxLD-wUQB_5SyXyzp}gbdks$PXiE(i#t$V2^^4s>Ev?MDRx;L_ABya=#%G zeX^eCiASJ=gk;{rqX5^+xuL20-SBXH7+cer{G!&o8=JM{?;QEkQ%#GgS{YaSky3A0 zOu@GlF0*;RPbHKgO;*0(r51HKj=PrJ2m9)IHd@s&#(c=->?*mhF?OH@DXV+$rfTT1hW&*$q1UrU{Rr7?ZQG4O zJ5;rzzm>Yu$*(Pq-L=RcE+900zLEO>UVJQ2$$JDIZi+>gLMtpW=fA-rQt6F%5!RdQ zZ4g79H9wcPMo7CzjG;TT>nws*I_m!=eM~BE2HpHjCwE9^rKyHBt}759yjbtT8m)FC z=Y2z>U<#xDj-#JOX}Cw0?Bfbr0Y)CW-!j+kkcu^lMs8l4Fpuvi9#yhrao`5*d-O1@ z3V@=YAq1`tP?q9LE@lycQk_nods!#e_!YG3D@#^u*76rwaafl|>H}ku?qZ`iKMH;L`6BW&e+|Pn8f0i$AV>RFK5);j_LoJ1O7WDZ6|S5o~b5 z57TTfs;>L#|LuV4|HZg2@F(9l#N5^wX)W&plV)6 z&&G7ib)ILiaRi>72tHTpdzC}j2#gO7{}k=L$6>}MVatKhTZSA@?2Xh|%Mx<+5} zlMogXy7`H5Yahs;L&}k4F1X4lO@Zj7ZDv%4il6!XFvoWKHV2v1OF-K36t7R;1v;J2 zt+H55gkxKbjx}xY;63=yJBpe^@MW`tQ2K+WK{EGtqBqAM^?y)gYr}pT#H|LN3#ZLh zx7ljyaBOE#>-ol{xEJp>Yrm#g%88gthYZE@a%?ArKOV2K+B?W4XGShI?Y@BEDPEy^ zm5WLI_o-VqZw5>%*6%N#T`?K=;VI@mG-jr$`d@#4H>ig)%L(15R!Ws~DjH|;wPdcp zZ~OwtnJH4qCXB{j=Z~lyzpiYgwGDy|}E3;$Xu;*gD933F&dh4cGNcT#u z`q6*TLLv3Oq8!`netN;;dE+r))_S?+5A>9vLZ7V$b8P33|Eo467UI!3*Dp~Nw7@{t zM-@>kn8q6OHHNrWUV&Lz(nWvc3Cspc?l zd1!LP0=21Ux7`*vXa6(&VAV{?sJX=y>hGuwR?EIOnTaN&Gb<|Mz}&#W=oxvpR#xJ+ zHZNGAZB_bgh4}{$bL*CVeT)+nO949285vv5=_bB)Iiv3VzoY6L!XVBO)~XydDNCtQ zV?S1HUbpL?o0J}0<^FrS(#LY)MQo#Hxz1AWj{I?O^*7R7Tu~u@CBFsjFhvOBH zB=Ec3rYb?m@z7iK8apDJEUSFz_Xyp^7!MNVy~4!T*%&P)=qSzyZJ}Ly^D|vHIc_zB kPC3HFK{x*k{L5t5VdhW0NDVgN0DmqUnCMqrxb^4%20L}hFaQ7m diff --git a/examples/aws_s3/main.tf b/examples/aws_s3/main.tf deleted file mode 100644 index 7a94da3..0000000 --- a/examples/aws_s3/main.tf +++ /dev/null @@ -1,21 +0,0 @@ -resource "aws_s3_bucket" "s3" { - bucket = "declarative-orchestration" -} - - -resource "kestra_flow" "uploadCsv" { - keep_original_source = true - flow_id = "upload_csv" - namespace = var.namespace - content = < - - - - - Kestra flow page - - -
-

Hello world

-
- - - commands: - - aws s3 cp {{ workingDir }}/index.html s3://kestraio/index3.html --content-type text/html - outputFiles: - - "*.html" - - - id: upload_index2 - type: io.kestra.plugin.aws.s3.Upload - bucket: kestraio - from: "{{ outputs.cli.uris['index.html'] }}" - key: index2.html - metadata: - Content-Type: text/html - region: "{{ secret('AWS_DEFAULT_REGION') }}" - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" - - - id: upload_index - type: io.kestra.plugin.aws.s3.Upload - bucket: kestraio - from: "{{ outputs.cli.uris['index.html'] }}" - key: index.html - metadata: - Content-Type: text/html - region: "{{ secret('AWS_DEFAULT_REGION') }}" - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" - - - id: upload_to_s3 - type: io.kestra.plugin.scripts.python.Script - disabled: true - env: - AWS_ACCESS_KEY_ID: "{{ secret('AWS_ACCESS_KEY_ID') }}" - AWS_SECRET_ACCESS_KEY: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" - AWS_DEFAULT_REGION: "{{ secret('AWS_DEFAULT_REGION') }}" - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: ghcr.io/kestra-io/aws:latest - script: | - import boto3 - file = "index.html" - with open(file, "r") as file: - html_content = file.read() - - s3 = boto3.resource("s3") - s3.Bucket("kestraio").put_object(Key="index2.html", ContentType="text/html", Body=html_content) \ No newline at end of file diff --git a/examples/aws_sns/smsEverySunday.yml b/examples/aws_sns/smsEverySunday.yml deleted file mode 100644 index 097fb11..0000000 --- a/examples/aws_sns/smsEverySunday.yml +++ /dev/null @@ -1,25 +0,0 @@ -id: sendSMS -namespace: blueprint -description: | - This flow sends an SMS message to a phone number using AWS SNS. - The phone number must be registered in AWS SNS. - The AWS credentials must be provided in the environment variables. - The AWS SNS topic ARN must be provided in the inputs. - The SMS text must be provided in the inputs. -inputs: - - name: smsText - type: STRING - defaults: "Hello from Kestra and AWS SNS!" 
- - name: topicArn - type: STRING - defaults: arn:aws:sns:eu-central-1:338306982838:kestra -tasks: - - id: sendSMS - type: io.kestra.plugin.aws.sns.Publish - region: "{{ secret('AWS_DEFAULT_REGION') }}" - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" - topicArn: "{{inputs.topicArn}}" - from: - data: | - {{inputs.smsText}} diff --git a/examples/aws_sns/sns.tf b/examples/aws_sns/sns.tf deleted file mode 100644 index e87704a..0000000 --- a/examples/aws_sns/sns.tf +++ /dev/null @@ -1,67 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 4.0" - } - - kestra = { - source = "kestra-io/kestra" # namespace of Kestra provider - version = "~> 0.7.0" # don't worry about 0.7.0 being displayed here - the provider works across the latest version as well - } - - } -} - -variable "region" { - default = "eu-central-1" -} - -variable "namespace" { - default = "prod" -} - -variable "phone_number" { - type = string -} - -provider "aws" { - region = var.region - profile = "default" -} - -provider "kestra" { - url = "http://localhost:8080" -} - -resource "aws_sns_topic" "topic" { - name = "kestra" - tags = { - project = "kestra" - } -} - -resource "aws_sns_topic_subscription" "sms" { - endpoint = var.phone_number - protocol = "sms" - topic_arn = aws_sns_topic.topic.arn -} - -resource "kestra_flow" "snsSendSMS" { - keep_original_source = true - flow_id = "snsSendSMS" - namespace = var.namespace - content = < 10 - ORDER BY avg_salary DESC; - store: true - - - id: exportResultsToCsv - type: io.kestra.plugin.serdes.csv.CsvWriter - from: "{{ outputs.average_salary_by_position.uri }}" \ No newline at end of file diff --git a/examples/duckdb/duckDbQueryToCsv.yml b/examples/duckdb/duckDbQueryToCsv.yml deleted file mode 100644 index 6d9168f..0000000 --- a/examples/duckdb/duckDbQueryToCsv.yml +++ /dev/null @@ -1,26 +0,0 @@ -id: duckDbQueryToCsv -namespace: prod -description: Analyse salary data - -tasks: - - id: download_csv - type: io.kestra.plugin.fs.http.Download - description: Data Job salaries from 2020 to 2023 (source ai-jobs.net) - uri: https://gist.githubusercontent.com/Ben8t/f182c57f4f71f350a54c65501d30687e/raw/940654a8ef6010560a44ad4ff1d7b24c708ebad4/salary-data.csv - - - id: average_salary_by_position - type: io.kestra.plugin.jdbc.duckdb.Query - inputFiles: - data.csv: "{{ outputs.download_csv.uri }}" - sql: | - SELECT - job_title, - ROUND(AVG(salary),2) AS avg_salary - FROM read_csv_auto('{{workingDir}}/data.csv', header=True) - GROUP BY job_title - HAVING COUNT(job_title) > 10 - ORDER BY avg_salary DESC; - fetch: true - - id: export_result - type: io.kestra.plugin.serdes.csv.CsvWriter - from: "{{ outputs.average_salary_by_position.uri }}" diff --git a/examples/duckdb/salesReport.yml b/examples/duckdb/salesReport.yml deleted file mode 100644 index 61e2468..0000000 --- a/examples/duckdb/salesReport.yml +++ /dev/null @@ -1,33 +0,0 @@ -id: salesReport -namespace: prod -tasks: - - id: downloadCsv - type: io.kestra.plugin.fs.http.Download - uri: https://gist.githubusercontent.com/anna-geller/15f19626d975877b40c3653b6745dcd6/raw/849e8f69a251ece8bfb32dbd6097e69af6fa7f7f/orders.csv - - - id: analyzeSales - type: io.kestra.plugin.jdbc.duckdb.Query - inputFiles: - data.csv: "{{outputs.downloadCsv.uri}}" - sql: | - SELECT sum(total) as total, avg(quantity) as avg_quantity - FROM read_csv_auto('{{workingDir}}/data.csv', header=True); - fetch: true - - - id: slack - type: 
io.kestra.plugin.notifications.slack.SlackIncomingWebhook - url: "{{envs.slack_webhook_reporting}}" - payload: | - {"channel": "#reporting", - "text": "Current sales numbers: total sales is `${{outputs.analyzeSales.rows[0].total}}` and average sales quantity is `{{outputs.analyzeSales.rows[0].avg_quantity}}`"} - -triggers: - - id: runAfterDWHRefresh - type: io.kestra.core.models.triggers.types.Flow - conditions: - - type: io.kestra.core.models.conditions.types.ExecutionStatusCondition - in: - - SUCCESS - - type: io.kestra.core.models.conditions.types.ExecutionFlowCondition - namespace: prod - flowId: dataWarehouseRefresh \ No newline at end of file diff --git a/examples/fivetran/fivetranDbtCloud.yml b/examples/fivetran/fivetranDbtCloud.yml deleted file mode 100644 index e1eb41b..0000000 --- a/examples/fivetran/fivetranDbtCloud.yml +++ /dev/null @@ -1,30 +0,0 @@ -id: fivetranDbtCloud -namespace: blueprints -description: | - -tasks: - - id: fivetran-syncs - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: "enterYourFivetranConnectorId" - - id: google-analytics - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: "enterYourFivetranConnectorId" - - id: facebook - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: "enterYourFivetranConnectorId" - - - id: dbt-cloud-job - type: io.kestra.plugin.dbt.cloud.TriggerRun - jobId: "396284" - accountId: "{{secret('DBT_CLOUD_ACCOUNT_ID')}}" - token: "{{envs.dbt_cloud_api_token}}" - wait: true - -pluginDefaults: - - type: io.kestra.plugin.fivetran.connectors.Sync - values: - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/fivetran/fivetranSync.yml b/examples/fivetran/fivetranSync.yml deleted file mode 100644 index b585ca3..0000000 --- a/examples/fivetran/fivetranSync.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: fivetranSync -namespace: prod -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: vesicle_movement - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/fivetran/fivetranSyncParallel.yml b/examples/fivetran/fivetranSyncParallel.yml deleted file mode 100644 index c12f9a6..0000000 --- a/examples/fivetran/fivetranSyncParallel.yml +++ /dev/null @@ -1,31 +0,0 @@ -id: fivetran_sync_parallel -namespace: company.team - -tasks: - - id: marketing_extract_load - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: vesicle_movement - - - id: stripe - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: cell_delivery - - - id: google_analytics - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: equivocal_sandy - - - id: facebook_ads - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: molecule_transport - -pluginDefaults: - - type: io.kestra.plugin.fivetran.connectors.Sync - values: - apiKey: "{{ secret('FIVETRAN_API_KEY') }}" - apiSecret: "{{ secret('FIVETRAN_API_SECRET') }}" diff --git a/examples/fivetran/fivetranSyncParallelDbtCloud.yml b/examples/fivetran/fivetranSyncParallelDbtCloud.yml deleted file mode 100644 index 0b09382..0000000 --- a/examples/fivetran/fivetranSyncParallelDbtCloud.yml +++ /dev/null @@ -1,38 +0,0 @@ -id: fivetran_sync_parallel_dbt_cloud -namespace: company.team - 
-tasks: - - id: marketing_extract_load - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: vesicle_movement - - - id: stripe - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: cell_delivery - - - id: google-analytics - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: equivocal_sandy - - - id: facebook-ads - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: molecule_transport - - - id: dbt_cloud_job - type: io.kestra.plugin.dbt.cloud.TriggerRun - accountId: "{{ secret('DBT_CLOUD_ACCOUNT_ID') }}" - token: "{{ secret('DBT_CLOUD_API_TOKEN') }}" - jobId: "396284" - wait: true - -pluginDefaults: - - type: io.kestra.plugin.fivetran.connectors.Sync - values: - apiKey: "{{ secret('FIVETRAN_API_KEY') }}" - apiSecret: "{{ secret('FIVETRAN_API_SECRET') }}" diff --git a/examples/fivetran/fivetranSyncParallelDbtCore.yml b/examples/fivetran/fivetranSyncParallelDbtCore.yml deleted file mode 100644 index b56a103..0000000 --- a/examples/fivetran/fivetranSyncParallelDbtCore.yml +++ /dev/null @@ -1,73 +0,0 @@ -id: fivetran_sync_parallel_dbt_core -namespace: company.team -description: | - This flow runs Fivetran syncs in parallel and then runs dbt core's CLI commands. - The Fivetran API credentials, referenced in the `pluginDefaults`, must be provided in the environment variables. - -tasks: - - id: data_ingestion - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: salesforce - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: vehicle_movement - - - id: stripe - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: cell_delivery - - - id: google_analytics - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: equivocal_sandy - - - id: facebook_ads - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: molecule_transport - - - id: dbt_core - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - url: https://github.com/dbt-labs/jaffle_shop - branch: main - - - id: dbt_setup - type: io.kestra.plugin.dbt.cli.Setup - profiles: - jaffle_shop: - outputs: - dev: - type: bigquery - dataset: dwh - fixed_retries: 1 - keyfile: sa.json - location: EU - method: service-account - priority: interactive - project: geller - threads: 8 - timeout_seconds: 300 - target: dev - requirements: - - dbt-bigquery - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.10-slim - inputFiles: - sa.json: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" - - - id: dbt_build - type: io.kestra.plugin.dbt.cli.Build - debug: false - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.10-slim - inputFiles: - sa.json: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" - -pluginDefaults: - - type: io.kestra.plugin.fivetran.connectors.Sync - values: - apiKey: "{{ secret('FIVETRAN_API_KEY') }}" - apiSecret: "{{ secret('FIVETRAN_API_SECRET') }}" diff --git a/examples/flows/analyzeSales.yml b/examples/flows/analyzeSales.yml deleted file mode 100644 index 07559f6..0000000 --- a/examples/flows/analyzeSales.yml +++ /dev/null @@ -1,26 +0,0 @@ -id: analyzeSales -namespace: prod -tasks: - - id: downloadCSV - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/datasets/main/csv/orders.csv - - - id: analyzeSales - type: 
io.kestra.core.tasks.scripts.Python - runner: DOCKER - dockerOptions: - image: ghcr.io/kestra-io/pydata:latest - inputFiles: - data.csv: "{{outputs.downloadCSV.uri}}" - main.py: | - import pandas as pd - from kestra import Kestra - - df = pd.read_csv("data.csv") - sales = df.total.sum() - med = df.quantity.median() - - Kestra.outputs({"total_sales": sales, "median_quantity": med}) - - top_sellers = df.sort_values(by="total", ascending=False).head(3) - print(f"Top 3 orders: {top_sellers}") diff --git a/examples/flows/core/null.yml b/examples/flows/core/null.yml deleted file mode 100644 index f1bede1..0000000 --- a/examples/flows/core/null.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: null_input -namespace: blueprint -description: | - This flow takes an optional input parameter. It then executes subsequent tasks based on whether the input was provided or not. - -inputs: - - name: parameter - type: STRING - required: false - -tasks: - - id: if - type: io.kestra.core.tasks.flows.If - condition: "{{inputs.customInput ?? false }}" - then: - - id: if-not-null - type: io.kestra.plugin.core.log.Log - message: Received input {{inputs.parameter}} - else: - - id: if-null - type: io.kestra.plugin.core.log.Log - message: No input provided \ No newline at end of file diff --git a/examples/flows/core/parallelPython.yml b/examples/flows/core/parallelPython.yml deleted file mode 100644 index 3a98c70..0000000 --- a/examples/flows/core/parallelPython.yml +++ /dev/null @@ -1,69 +0,0 @@ -id: parallel_python -namespace: company.team - -tasks: - - id: working_dir - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: py - type: io.kestra.plugin.scripts.python.Script - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: ghcr.io/kestra-io/pydata:latest - script: | - import pandas as pd - import numpy as np - from faker import Faker - import random - import os - - # Init faker - fake = Faker() - - # Define product-price mapping - product_price = {i: random.randint(10, 500) for i in range(1, 21)} - - # Define function to create monthly order data - def create_monthly_data(month, year=2023): - num_orders = random.randint(100, 1000) # Randomize number of orders - - # Generate random data - order_ids = range(1, num_orders + 1) - customer_names = [fake.name() for _ in range(num_orders)] - customer_emails = [fake.email() for _ in range(num_orders)] - product_ids = np.random.choice(list(product_price.keys()), size=num_orders) - prices = [product_price[pid] for pid in product_ids] - quantities = np.random.randint(1, 10, size=num_orders) - totals = np.multiply(prices, quantities) - - # Create dataframe - df = pd.DataFrame({ - 'order_id': order_ids, - 'customer_name': customer_names, - 'customer_email': customer_emails, - 'product_id': product_ids, - 'price': prices, - 'quantity': quantities, - 'total': totals - }) - - # Convert month to string with zero padding - month_str = str(month).zfill(2) - - # Save to csv - df.to_csv(f'orders_{year}_{month_str}.csv', index=False) - - # Create monthly data for each month in 2023 - for month in range(1, 13): - create_monthly_data(month) - outputFiles: - - "*.csv" - - - id: each - type: io.kestra.plugin.core.flow.ForEach - concurrencyLimit: 0 - values: "{{ outputs.py.uris | jq('.[]') }}" - tasks: - - id: path - type: io.kestra.core.tasks.debugs.Return - format: "{{ taskrun.value }}" diff --git a/examples/flows/core/parallelSequences.yml b/examples/flows/core/parallelSequences.yml deleted file mode 100644 index 48b2a11..0000000 --- 
a/examples/flows/core/parallelSequences.yml +++ /dev/null @@ -1,26 +0,0 @@ -id: parallelSequences -namespace: prod.demo -description: Run two sequences in parallel -tasks: - - id: parallel - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: sequence1 - type: io.kestra.core.tasks.flows.Sequential - tasks: - - id: task1 - type: io.kestra.core.tasks.debugs.Return - format: "{{ task.id }}" - - id: task2 - type: io.kestra.core.tasks.debugs.Return - format: "{{ task.id }}" - - - id: sequence2 - type: io.kestra.core.tasks.flows.Sequential - tasks: - - id: task3 - type: io.kestra.core.tasks.debugs.Return - format: "{{ task.id }}" - - id: task4 - type: io.kestra.core.tasks.debugs.Return - format: "{{ task.id }}" \ No newline at end of file diff --git a/examples/flows/core/retries.yml b/examples/flows/core/retries.yml deleted file mode 100644 index 65101f9..0000000 --- a/examples/flows/core/retries.yml +++ /dev/null @@ -1,19 +0,0 @@ -id: retries -namespace: blueprint - -tasks: - - id: failed - type: io.kestra.core.tasks.scripts.Bash - commands: - - 'if [ "{{taskrun.attemptsCount}}" -eq 4 ]; then exit 0; else exit 1; fi' - retry: - type: constant - interval: PT0.25S - maxAttempt: 5 - maxDuration: PT1M - warningOnRetry: true - -errors: - - id: never-happen - type: io.kestra.core.tasks.debugs.Echo - format: Never happened {{task.id}} \ No newline at end of file diff --git a/examples/flows/core/switch.yml b/examples/flows/core/switch.yml deleted file mode 100644 index 66edc64..0000000 --- a/examples/flows/core/switch.yml +++ /dev/null @@ -1,24 +0,0 @@ -id: switch -namespace: prod.staging -description: | - **Switch tasks depending on a specific value** -inputs: - - name: string - type: STRING -tasks: - - id: switch - type: io.kestra.core.tasks.flows.Switch - value: "{{ inputs.string }}" - cases: - A: - - id: a - type: io.kestra.core.tasks.debugs.Return - format: "The input is {{ inputs.string }}" - B: - - id: b - type: io.kestra.core.tasks.debugs.Return - format: "The input is {{ inputs.string }}" - defaults: - - id: default - type: io.kestra.core.tasks.debugs.Return - format: "This is the default case" diff --git a/examples/flows/core/terraform_ci_cd.tf b/examples/flows/core/terraform_ci_cd.tf deleted file mode 100644 index 31c102a..0000000 --- a/examples/flows/core/terraform_ci_cd.tf +++ /dev/null @@ -1,20 +0,0 @@ -terraform { - required_providers { - kestra = { - source = "kestra-io/kestra" - version = "~> 0.7.0" - } - } -} - -provider "kestra" { - url = "http://localhost:8080" -} - -resource "kestra_flow" "flows" { - keep_original_source = true - for_each = fileset(path.module, "flows/*.yml") - flow_id = yamldecode(templatefile(each.value, {}))["id"] - namespace = yamldecode(templatefile(each.value, {}))["namespace"] - content = templatefile(each.value, {}) -} \ No newline at end of file diff --git a/examples/flows/ee_executions.yml b/examples/flows/ee_executions.yml deleted file mode 100644 index 0b1bd03..0000000 --- a/examples/flows/ee_executions.yml +++ /dev/null @@ -1,85 +0,0 @@ -id: executions-to-bigquery -namespace: prod - -tasks: - - id: consume - type: io.kestra.plugin.kafka.Consume - properties: - auto.offset.reset: earliest - bootstrap.servers: prd-kafka.database.svc.cluster.local:9092 - topic: kestra_execution - valueDeserializer: JSON - maxRecords: 100 - - - id: transform - type: io.kestra.plugin.scripts.nashorn.FileTransform - from: "{{ outputs.consume.uri }}" - script: | - var jacksonMapper = Java.type('io.kestra.core.serializers.JacksonMapper'); - delete row['headers']; - - 
var value = row['value'] - - row['id'] = value['id'] - row['originalId'] = value['originalId'] - row['deleted'] = value['deleted'] - row['namespace'] = value['namespace'] - row['flowId'] = value['flowId'] - row['flowRevision'] = value['flowRevision'] - row['taskRunList'] = value['taskRunList'] - row['variables'] = value['variables'] - row['state'] = value['state']['current'] - row['state_history'] = value['state']['histories'] - row['duration'] = value['state']['duration'] - row['startDate'] = value['state']['startDate'] - row['endDate'] = value['state']['endDate'] - row['trigger_id'] = value['trigger']['id'] - row['trigger_type'] = value['trigger']['type'] - row['trigger_variables'] = value['trigger']['variables'] - row['value'] = jacksonMapper.ofJson().writeValueAsString(value) - - - id: avroWriter - type: io.kestra.plugin.serdes.avro.AvroWriter - from: "{{ outputs.transform.uri }}" - description: convert the file from Kestra internal storage to avro. - schema: | - { - "type": "record", - "name": "Root", - "fields": - [ - { "name": "id", "type": ["null", "string"] }, - { "name": "originalId", "type": ["null", "string"] }, - { "name": "deleted", "type": ["null", "string"] }, - { "name": "namespace", "type": ["null", "string"] }, - { "name": "flowId", "type": ["null", "string"] }, - { "name": "flowRevision", "type": ["null", "string"] }, - { "name": "taskRunList", "type": ["null", "string"] }, - { "name": "variables", "type": ["null", "string"] }, - { "name": "state", "type": ["null", "string"] }, - { "name": "state_history", "type": ["null", "string"] }, - { "name": "duration", "type": ["null", "double"] }, - { "name": "startDate", "type": ["null", "string"] }, - { "name": "endDate", "type": ["null", "string"] }, - { "name": "trigger_id", "type": ["null", "string"] }, - { "name": "trigger_type", "type": ["null", "string"] }, - { "name": "trigger_variables", "type": ["null", "string"] }, - { "name": "value", "type": ["null", "string"] } - ] - } - - - id: load - type: io.kestra.plugin.gcp.bigquery.Load - avroOptions: - useAvroLogicalTypes: true - destinationTable: geller.dwh.executions - format: AVRO - from: "{{outputs.avroWriter.uri }}" - writeDisposition: WRITE_TRUNCATE - serviceAccount: "{{ secret('GCP_CREDS') }}" - projectId: geller - -triggers: - - id: schedule - type: io.kestra.plugin.core.trigger.Schedule - cron: "0 11 * * *" \ No newline at end of file diff --git a/examples/flows/ee_stream_auditlogs_to_bq.yml b/examples/flows/ee_stream_auditlogs_to_bq.yml deleted file mode 100644 index 11d5ce9..0000000 --- a/examples/flows/ee_stream_auditlogs_to_bq.yml +++ /dev/null @@ -1,83 +0,0 @@ -id: auditlogs_to_bigquery -namespace: prod - -title: Stream kestra audit logs to BigQuery -description: | - This flow shows how to stream Kestra audit logs to BigQuery. The audit logs are stored in the `kestra_auditlogs` Kafka topic. The flow consumes the audit logs from the Kafka topic, transforms the data, and loads it into BigQuery. - - The flow is triggered every day at 10 AM UTC. You can customize the trigger by changing the cron expression, timezone and more. For more information about cron expressions, visit the [following documentation](https://kestra.io/docs/developer-guide/triggers/schedule). 
- -tasks: - - id: consume - type: io.kestra.plugin.kafka.Consume - properties: - auto.offset.reset: earliest - bootstrap.servers: prd-kafka.database.svc.cluster.local:9092 - topic: kestra_auditlogs - valueDeserializer: JSON - maxRecords: 100 - - - id: transform - type: io.kestra.plugin.scripts.nashorn.FileTransform - from: "{{ outputs.consume.uri }}" - script: | - var jacksonMapper = Java.type('io.kestra.core.serializers.JacksonMapper'); - delete row['headers']; - - var value = row['value'] - - row['id'] = value['id'] - row['type'] = value['type'] - row['detail'] = value['detail'] - row['date'] = value['date'] - row['deleted'] = value['deleted'] - row['value'] = jacksonMapper.ofJson().writeValueAsString(value) - row['detail_type'] = value['detail']['type'] - row['detail_cls'] = value['detail']['cls'] - row['detail_permission'] = value['detail']['permission'] - row['detail_id'] = value['detail']['id'] - row['detail_namespace'] = value['detail']['namespace'] - row['detail_flowId'] = value['detail']['flowId'] - row['detail_executionId'] = value['detail']['executionId'] - - - id: avroWriter - type: io.kestra.plugin.serdes.avro.AvroWriter - from: "{{ outputs.transform.uri }}" - description: convert the file from Kestra internal storage to avro. - schema: | - { - "type": "record", - "name": "Root", - "fields": - [ - { "name": "id", "type": ["null", "string"] }, - { "name": "type", "type": ["null", "string"] }, - { "name": "detail", "type": ["null", "string"] }, - { "name": "date", "type": ["null", "string"] }, - { "name": "deleted", "type": ["null", "string"] }, - { "name": "value", "type": ["null", "string"] }, - { "name": "detail_type", "type": ["null", "string"] }, - { "name": "detail_cls", "type": ["null", "string"] }, - { "name": "detail_permission", "type": ["null", "string"] }, - { "name": "detail_id", "type": ["null", "string"] }, - { "name": "detail_namespace", "type": ["null", "string"] }, - { "name": "detail_flowId", "type": ["null", "string"] }, - { "name": "detail_executionId", "type": ["null", "string"] } - ] - } - - - id: load - type: io.kestra.plugin.gcp.bigquery.Load - avroOptions: - useAvroLogicalTypes: true - destinationTable: geller.dwh.autditlogs - format: AVRO - from: "{{outputs.avroWriter.uri }}" - writeDisposition: WRITE_TRUNCATE - serviceAccount: "{{ secret('GCP_CREDS') }}" - projectId: geller - -triggers: - - id: schedule - type: io.kestra.plugin.core.trigger.Schedule - cron: "0 10 * * *" diff --git a/examples/flows/ee_stream_executions_to_bq.yml b/examples/flows/ee_stream_executions_to_bq.yml deleted file mode 100644 index 0b1bd03..0000000 --- a/examples/flows/ee_stream_executions_to_bq.yml +++ /dev/null @@ -1,85 +0,0 @@ -id: executions-to-bigquery -namespace: prod - -tasks: - - id: consume - type: io.kestra.plugin.kafka.Consume - properties: - auto.offset.reset: earliest - bootstrap.servers: prd-kafka.database.svc.cluster.local:9092 - topic: kestra_execution - valueDeserializer: JSON - maxRecords: 100 - - - id: transform - type: io.kestra.plugin.scripts.nashorn.FileTransform - from: "{{ outputs.consume.uri }}" - script: | - var jacksonMapper = Java.type('io.kestra.core.serializers.JacksonMapper'); - delete row['headers']; - - var value = row['value'] - - row['id'] = value['id'] - row['originalId'] = value['originalId'] - row['deleted'] = value['deleted'] - row['namespace'] = value['namespace'] - row['flowId'] = value['flowId'] - row['flowRevision'] = value['flowRevision'] - row['taskRunList'] = value['taskRunList'] - row['variables'] = value['variables'] - 
row['state'] = value['state']['current'] - row['state_history'] = value['state']['histories'] - row['duration'] = value['state']['duration'] - row['startDate'] = value['state']['startDate'] - row['endDate'] = value['state']['endDate'] - row['trigger_id'] = value['trigger']['id'] - row['trigger_type'] = value['trigger']['type'] - row['trigger_variables'] = value['trigger']['variables'] - row['value'] = jacksonMapper.ofJson().writeValueAsString(value) - - - id: avroWriter - type: io.kestra.plugin.serdes.avro.AvroWriter - from: "{{ outputs.transform.uri }}" - description: convert the file from Kestra internal storage to avro. - schema: | - { - "type": "record", - "name": "Root", - "fields": - [ - { "name": "id", "type": ["null", "string"] }, - { "name": "originalId", "type": ["null", "string"] }, - { "name": "deleted", "type": ["null", "string"] }, - { "name": "namespace", "type": ["null", "string"] }, - { "name": "flowId", "type": ["null", "string"] }, - { "name": "flowRevision", "type": ["null", "string"] }, - { "name": "taskRunList", "type": ["null", "string"] }, - { "name": "variables", "type": ["null", "string"] }, - { "name": "state", "type": ["null", "string"] }, - { "name": "state_history", "type": ["null", "string"] }, - { "name": "duration", "type": ["null", "double"] }, - { "name": "startDate", "type": ["null", "string"] }, - { "name": "endDate", "type": ["null", "string"] }, - { "name": "trigger_id", "type": ["null", "string"] }, - { "name": "trigger_type", "type": ["null", "string"] }, - { "name": "trigger_variables", "type": ["null", "string"] }, - { "name": "value", "type": ["null", "string"] } - ] - } - - - id: load - type: io.kestra.plugin.gcp.bigquery.Load - avroOptions: - useAvroLogicalTypes: true - destinationTable: geller.dwh.executions - format: AVRO - from: "{{outputs.avroWriter.uri }}" - writeDisposition: WRITE_TRUNCATE - serviceAccount: "{{ secret('GCP_CREDS') }}" - projectId: geller - -triggers: - - id: schedule - type: io.kestra.plugin.core.trigger.Schedule - cron: "0 11 * * *" \ No newline at end of file diff --git a/examples/flows/ee_stream_flows_to_bq.yml b/examples/flows/ee_stream_flows_to_bq.yml deleted file mode 100644 index 1176127..0000000 --- a/examples/flows/ee_stream_flows_to_bq.yml +++ /dev/null @@ -1,60 +0,0 @@ -id: flows-to-bigquery -namespace: prod - -tasks: - - id: consume - type: io.kestra.plugin.kafka.Consume - properties: - auto.offset.reset: earliest - bootstrap.servers: prd-kafka.database.svc.cluster.local:9092 - topic: kestra_flow - valueDeserializer: JSON - - - id: transform - type: io.kestra.plugin.scripts.nashorn.FileTransform - from: "{{ outputs.consume.uri }}" - script: | - var jacksonMapper = Java.type('io.kestra.core.serializers.JacksonMapper'); - delete row['headers']; - - var value = row['value'] - - row['id'] = value['id'] - row['namespace'] = value['namespace'] - row['revision'] = value['revision'] - row['deleted'] = value['deleted'] - row['value'] = jacksonMapper.ofJson().writeValueAsString(value) - - - id: avroWriter - type: io.kestra.plugin.serdes.avro.AvroWriter - from: "{{ outputs.transform.uri }}" - description: convert the file from Kestra internal storage to avro. 
- schema: | - { - "type": "record", - "name": "Root", - "fields": - [ - { "name": "id", "type": ["null", "string"] }, - { "name": "namespace", "type": ["null", "string"] }, - { "name": "revision", "type": ["null", "string"] }, - { "name": "deleted", "type": ["null", "string"] }, - { "name": "value", "type": ["null", "string"] } - ] - } - - - id: load - type: io.kestra.plugin.gcp.bigquery.Load - avroOptions: - useAvroLogicalTypes: true - destinationTable: geller.dwh.flows - format: AVRO - from: "{{outputs.avroWriter.uri }}" - writeDisposition: WRITE_TRUNCATE - serviceAccount: "{{ secret('GCP_CREDS') }}" - projectId: geller - -triggers: - - id: schedule - type: io.kestra.plugin.core.trigger.Schedule - cron: "0 10 * * *" diff --git a/examples/flows/failure_handling/failureDemo.yml b/examples/flows/failure_handling/failureDemo.yml deleted file mode 100644 index 63e34f6..0000000 --- a/examples/flows/failure_handling/failureDemo.yml +++ /dev/null @@ -1,11 +0,0 @@ -id: failureDemo -namespace: prod -tasks: - - id: fail - type: io.kestra.core.tasks.scripts.Bash - commands: - - exit 1 - - id: never - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo "this will never run" \ No newline at end of file diff --git a/examples/flows/failure_handling/failureDemoAllowFailure.yml b/examples/flows/failure_handling/failureDemoAllowFailure.yml deleted file mode 100644 index 98d16f6..0000000 --- a/examples/flows/failure_handling/failureDemoAllowFailure.yml +++ /dev/null @@ -1,28 +0,0 @@ -id: failureDemoAllowFailure -namespace: blueprint -description: | - - -tasks: - - id: allowFailure - type: io.kestra.core.tasks.flows.AllowFailure - tasks: - - id: failSilently - type: io.kestra.core.tasks.scripts.Bash - commands: - - exit 1 - - - id: printToConsole - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo "this will run since previous failure was allowed ✅" - - - id: fail - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo "failing and blocking downstream tasks" && exit 1 - - - id: willNeverRun - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo "this will never run ❌" \ No newline at end of file diff --git a/examples/flows/finance/billing.yml b/examples/flows/finance/billing.yml deleted file mode 100644 index 6b81ff0..0000000 --- a/examples/flows/finance/billing.yml +++ /dev/null @@ -1,18 +0,0 @@ -id: billing -namespace: prod.finance -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: billing - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" -triggers: - - id: runAfterDwhRefresh - type: io.kestra.core.models.triggers.types.Flow - conditions: - - type: io.kestra.core.models.conditions.types.ExecutionStatusCondition - in: - - SUCCESS - - type: io.kestra.core.models.conditions.types.ExecutionFlowCondition - namespace: prod - flowId: dataWarehouseRefresh \ No newline at end of file diff --git a/examples/flows/getting_started/helloParametrized.yml b/examples/flows/getting_started/helloParametrized.yml deleted file mode 100644 index e48136a..0000000 --- a/examples/flows/getting_started/helloParametrized.yml +++ /dev/null @@ -1,15 +0,0 @@ -id: helloParametrized -namespace: prod -description: | - This flow takes a runtime-specific input and uses it to log a message to the console. - -inputs: - - name: user - type: STRING - defaults: Data Engineer - required: false - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello {{inputs.user}} from Kestra! 
\ No newline at end of file diff --git a/examples/flows/getting_started/helloParametrizedScheduled.yml b/examples/flows/getting_started/helloParametrizedScheduled.yml deleted file mode 100644 index fdf47ce..0000000 --- a/examples/flows/getting_started/helloParametrizedScheduled.yml +++ /dev/null @@ -1,24 +0,0 @@ -id: helloParametrizedScheduled -namespace: prod -description: | - This flow takes a runtime-specific input and uses it to log a message to the console. - The flow includes a schedule that runs every minute with a custom input parameter value. - -inputs: - - name: user - type: STRING - defaults: Data Engineer - required: false - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello {{ inputs.user }} from Kestra! - -triggers: - - id: everyMinute - type: io.kestra.plugin.core.trigger.Schedule - cron: "*/1 * * * *" - inputs: - name: user - value: custom value \ No newline at end of file diff --git a/examples/flows/getting_started/helloParametrizedSchedulesMultiple.yml b/examples/flows/getting_started/helloParametrizedSchedulesMultiple.yml deleted file mode 100644 index fc355e0..0000000 --- a/examples/flows/getting_started/helloParametrizedSchedulesMultiple.yml +++ /dev/null @@ -1,37 +0,0 @@ -id: helloParametrizedSchedulesMultiple -namespace: blueprint -description: | - This flow takes a runtime-specific input and uses it to log a message to the console. - The flow has two scheduled attached to it: - - it runs every 15 minutes with the default input parameter value - - it runs every 1 minute with a custom input parameter value - - Note that both schedules are currently disabled. - To start scheduling the flow, set the `disabled` property to `false` or delete that property. - -inputs: - - name: user - type: STRING - defaults: Data Engineer - required: false - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello {{ inputs.user }} from Kestra! - -triggers: - - id: quarterHourly - type: io.kestra.plugin.core.trigger.Schedule - disabled: true - cron: "*/15 * * * *" - inputs: - name: user - - - id: everyMinute - type: io.kestra.plugin.core.trigger.Schedule - disabled: true - cron: "*/1 * * * *" - inputs: - name: user - value: custom value \ No newline at end of file diff --git a/examples/flows/getting_started/helloWorld.yml b/examples/flows/getting_started/helloWorld.yml deleted file mode 100644 index a81712a..0000000 --- a/examples/flows/getting_started/helloWorld.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: helloWorld -namespace: prod -description: This flow logs a message to the console - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello world! \ No newline at end of file diff --git a/examples/flows/getting_started/helloWorldWithLabels.yml b/examples/flows/getting_started/helloWorldWithLabels.yml deleted file mode 100644 index 808bcef..0000000 --- a/examples/flows/getting_started/helloWorldWithLabels.yml +++ /dev/null @@ -1,10 +0,0 @@ -id: helloWorldWithLabels -namespace: prod -description: This flow logs a message to the console -labels: - env: prod - owner: anna-geller -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello world! \ No newline at end of file diff --git a/examples/flows/git/gitPython.yml b/examples/flows/git/gitPython.yml deleted file mode 100644 index 4e2faf5..0000000 --- a/examples/flows/git/gitPython.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: git_python -namespace: company.team -description: | - This flow clones a git repository and runs a python ETL script. 
- The python tasks will install the required packages before running the script. - -tasks: - - id: python_scripts - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - description: password is only required for private repositories - url: https://github.com/kestra-io/examples - branch: main - - - id: python_etl - type: io.kestra.plugin.scripts.python.Commands - beforeCommands: - - pip install requests pandas - commands: - - ./bin/python examples/scripts/etl_script.py diff --git a/examples/flows/git/gitPythonPublicRepository.yml b/examples/flows/git/gitPythonPublicRepository.yml deleted file mode 100644 index 26afebb..0000000 --- a/examples/flows/git/gitPythonPublicRepository.yml +++ /dev/null @@ -1,19 +0,0 @@ -id: git_python_public_repository -namespace: company.team - -tasks: - - id: python_scripts - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - url: https://github.com/anna-geller/kestra-flows - branch: main - username: anna-geller - - - id: python_etl - type: io.kestra.plugin.scripts.python.Commands - beforeCommands: - - pip install requests pandas - commands: - - ./bin/python examples/scripts/etl_script.py diff --git a/examples/flows/git/gitPythonWithSecret.yml b/examples/flows/git/gitPythonWithSecret.yml deleted file mode 100644 index 21906f1..0000000 --- a/examples/flows/git/gitPythonWithSecret.yml +++ /dev/null @@ -1,20 +0,0 @@ -id: git_python_with_secret -namespace: company.team - -tasks: - - id: py - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: clone_repository - type: io.kestra.plugin.git.Clone - url: https://github.com/anna-geller/kestra-flows - branch: main - username: anna-geller - password: "{{ secret('GITHUB_ACCESS_TOKEN') }}" - - - id: py_bash - type: io.kestra.core.tasks.scripts.Python - beforeCommands: - - pip install requests pandas - commands: - - ./bin/python examples/scripts/etl_script.py diff --git a/examples/flows/marketing/attribution.yml b/examples/flows/marketing/attribution.yml deleted file mode 100644 index 33563f5..0000000 --- a/examples/flows/marketing/attribution.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: attribution -namespace: prod.marketing -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: attribution - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/metrics/bashMetrics.yml b/examples/flows/metrics/bashMetrics.yml deleted file mode 100644 index 5de8e20..0000000 --- a/examples/flows/metrics/bashMetrics.yml +++ /dev/null @@ -1,23 +0,0 @@ -id: bashMetrics -namespace: prod -tasks: - - id: nrRowsMetric - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo '::{"metrics":[{"name":"nr_rows","type":"counter","value":800,"tags":{"schema":"analytics"}}]}::' - - id: nrRowsOutput - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo '::{"outputs":{"nr_rows":800}}::' - - id: metrics - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo '::{"metrics":[{"name":"datasets_count","type":"counter","value":3,"tags":{"schema":"analytics"}}]}::' - - id: randomMetrics - type: io.kestra.core.tasks.scripts.Bash - commands: - - RANDOM=$$$(date +%s) - - random_number=$(( RANDOM % 4 )) - - echo "Sleep for $random_number" - - sleep "$(( RANDOM % 4 ))" - - echo "::{\"metrics\":[{\"name\":\"time\",\"type\":\"timer\",\"value\":$random_number}]}::" \ No newline at end of file diff --git 
a/examples/flows/metrics/pythonMetrics.yml b/examples/flows/metrics/pythonMetrics.yml deleted file mode 100644 index bc6c106..0000000 --- a/examples/flows/metrics/pythonMetrics.yml +++ /dev/null @@ -1,44 +0,0 @@ -id: pythonMetrics -namespace: prod -description: - This flow will fetch an API and will use Kestra to report metrics & outputs. -inputs: - - type: DATETIME - name: date - description: The exchange rate date you want to be downloaded - required: false -tasks: - - id: download - type: io.kestra.core.tasks.scripts.Python - description: Download the schedule date data from api, if the execution is manual (not schedule), use the current day - dockerOptions: - image: python:3.11-slim - inputFiles: - main.py: | - import json - import requests - from kestra import Kestra - import sys - - date = "{{ inputs.date ?? schedule.date ?? execution.startDate | dateAdd(-1, 'DAYS', format='yyyy-MM-dd') }}" - print(f"Starting request for {date}") - - response = requests.get(f"https://api.exchangerate.host/{date}") - data = response.json() - - print(f"Found {len(data['rates'])} rates") - - with open ("{{ outputFiles.exported }}", 'w') as f: - for currency, exchange in data['rates'].items(): - f.write(json.dumps({'date': date, 'currency': currency, 'exchange': exchange}) + chr(10)) - - # add kestra metrics & outputs - Kestra.counter('rates.count', len(data['rates'])) - Kestra.outputs({'date': date}) - - print("Done !") - outputFiles: - - exported - requirements: - - requests - runner: DOCKER \ No newline at end of file diff --git a/examples/flows/notifications/slack/runtimeSLA.yml b/examples/flows/notifications/slack/runtimeSLA.yml deleted file mode 100644 index 4b10818..0000000 --- a/examples/flows/notifications/slack/runtimeSLA.yml +++ /dev/null @@ -1,21 +0,0 @@ -id: runtimeSLA -namespace: prod.monitoring -description: Zombie executions that get stuck -tasks: - - id: sendAlertOnZombieTask - type: io.kestra.plugin.notifications.slack.SlackExecution - url: "{{envs.slack_webhook}}" - channel: "#general" - executionId: "{{trigger.executionId}}" - - id: failExecution - type: io.kestra.core.tasks.executions.Fail - executionId: "{{trigger.executionId}}" -triggers: - - id: listen - type: io.kestra.core.models.triggers.types.Flow - conditions: - - type: io.kestra.core.models.conditions.types.SLA - duration: PT24H - - type: io.kestra.core.models.conditions.types.ExecutionNamespaceCondition - namespace: prod - prefix: true diff --git a/examples/flows/notifications/slack/slackFailureAlert.yml b/examples/flows/notifications/slack/slackFailureAlert.yml deleted file mode 100644 index c1a569d..0000000 --- a/examples/flows/notifications/slack/slackFailureAlert.yml +++ /dev/null @@ -1,25 +0,0 @@ -id: slackFailureAlert -namespace: prod.monitoring -description: | - This flow sends a Slack alert any time a flow from the `prod` namespace finishes with errors or warnings. Thanks to the `executionId` variable, the alert includes a link to the failed flow's execution page. - - Given that this flow runs on a Flow trigger, there is no need for boilerplate code to define alert logic in each flow separately. Instead, the Flow trigger allows you to define that logic only once. The trigger will listen to the execution state of any flow in the `prod` namespace, including all child namespaces, and will automatically send Slack messages on failure. 
- -tasks: - - id: send - type: io.kestra.plugin.notifications.slack.SlackExecution - url: "{{envs.slack_webhook}}" - channel: "#general" - executionId: "{{trigger.executionId}}" - -triggers: - - id: listen - type: io.kestra.core.models.triggers.types.Flow - conditions: - - type: io.kestra.core.models.conditions.types.ExecutionStatusCondition - in: - - FAILED - - WARNING - - type: io.kestra.core.models.conditions.types.ExecutionNamespaceCondition - namespace: prod - prefix: true diff --git a/examples/flows/notifications/slack/slackSendMessage.yml b/examples/flows/notifications/slack/slackSendMessage.yml deleted file mode 100644 index e064192..0000000 --- a/examples/flows/notifications/slack/slackSendMessage.yml +++ /dev/null @@ -1,11 +0,0 @@ -id: sendSlackMessage -namespace: prod -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello world - - id: slack - type: io.kestra.plugin.notifications.slack.SlackIncomingWebhook - url: "{{envs.slack_webhook}}" - payload: | - {"channel": "#general", "text": "Flow `{{ flow.namespace }}.{{ flow.id }}` started with execution ID `{{ execution.id }}`"} \ No newline at end of file diff --git a/examples/flows/outputs/outputFromPythonScript.yml b/examples/flows/outputs/outputFromPythonScript.yml deleted file mode 100644 index d22f730..0000000 --- a/examples/flows/outputs/outputFromPythonScript.yml +++ /dev/null @@ -1,25 +0,0 @@ -id: output_from_python_script -namespace: company.team - -tasks: - - id: wdir - type: io.kestra.plugin.core.flow.WorkingDirectory - disabled: true - tasks: - - id: my_python - type: io.kestra.plugin.scripts.python.Script - taskRunner: - type: io.kestra.plugin.core.runner.Process - script: | - f = open("myfile.txt", "a") - f.write("Hi, this is output from a script 👋") - f.close() - outputFiles: - - myfile.txt - - - id: my_shell - type: io.kestra.plugin.scripts.shell.Commands - taskRunner: - type: io.kestra.plugin.core.runner.Process - commands: - - cat {{ outputs.my_python.outputFiles['myfile.txt']}} diff --git a/examples/flows/outputs/passDataBetweenTasks.yml b/examples/flows/outputs/passDataBetweenTasks.yml deleted file mode 100644 index 1bc59eb..0000000 --- a/examples/flows/outputs/passDataBetweenTasks.yml +++ /dev/null @@ -1,32 +0,0 @@ -id: passDataBetweenTasks -namespace: blueprint - -tasks: - - id: passOutput - type: io.kestra.core.tasks.debugs.Return - format: "hello" - - - id: pyOutputs - type: io.kestra.plugin.scripts.python.Script - docker: - image: ghcr.io/kestra-io/pydata:latest - script: | - import json - from kestra import Kestra - - my_kv_pair = {'mykey': 'from Kestra'} - Kestra.outputs(my_kv_pair) - - with open('{{outputDir}}/myoutput.json', 'w') as f: - json.dump(my_kv_pair, f) - - - id: takeInputs - type: io.kestra.plugin.core.log.Log - message: | - data from previous tasks: {{outputs.passOutput.value}} and {{outputs.pyOutputs.vars.mykey}} - - - id: checkOutputFile - type: io.kestra.plugin.scripts.shell.Commands - runner: PROCESS - commands: - - cat {{outputs.pyOutputs.outputFiles['myoutput.json']}} \ No newline at end of file diff --git a/examples/flows/outputs/passOutputsReturnLog.yml b/examples/flows/outputs/passOutputsReturnLog.yml deleted file mode 100644 index 9cc241e..0000000 --- a/examples/flows/outputs/passOutputsReturnLog.yml +++ /dev/null @@ -1,19 +0,0 @@ -id: passDataBetweenTasks -namespace: blueprint -description: | - -tasks: - - id: passOutput - type: io.kestra.core.tasks.debugs.Return - format: "hello world!" 
- - - id: pythonVars - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - from kestra import Kestra - Kestra.outputs({'somekey': 'some value'}) - - - id: takeInput - type: io.kestra.plugin.core.log.Log - message: "data from previous tasks - {{outputs.passOutput.value}} and {{outputs.pythonVars.vars.somekey}}" \ No newline at end of file diff --git a/examples/flows/product/personalizations.yml b/examples/flows/product/personalizations.yml deleted file mode 100644 index 97947c4..0000000 --- a/examples/flows/product/personalizations.yml +++ /dev/null @@ -1,18 +0,0 @@ -id: personalizations -namespace: prod.product -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: personalizations - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" -triggers: - - id: runAfterDwhRefresh - type: io.kestra.core.models.triggers.types.Flow - conditions: - - type: io.kestra.core.models.conditions.types.ExecutionStatusCondition - in: - - SUCCESS - - type: io.kestra.core.models.conditions.types.ExecutionFlowCondition - namespace: prod - flowId: dataWarehouseRefresh \ No newline at end of file diff --git a/examples/flows/python/00_old_blueprint_core_python_outputs.yml b/examples/flows/python/00_old_blueprint_core_python_outputs.yml deleted file mode 100644 index ea68c25..0000000 --- a/examples/flows/python/00_old_blueprint_core_python_outputs.yml +++ /dev/null @@ -1,36 +0,0 @@ -id: pandasETL -namespace: blueprint - -tasks: - - id: extractCsv - type: io.kestra.core.tasks.scripts.Python - outputFiles: - - csvFile - inputFiles: - main.py: | - import pandas as pd - data = { - 'Column1': ['A', 'B', 'C', 'D'], - 'Column2': [1, 2, 3, 4], - 'Column3': [5, 6, 7, 8] - } - df = pd.DataFrame(data) - df.to_csv("{{ outputFiles.csvFile }}", index=False) - runner: DOCKER - dockerOptions: - image: ghcr.io/kestra-io/pydata:latest - - - id: transformAndLoadCsv - type: io.kestra.core.tasks.scripts.Python - outputFiles: - - finalResult - inputFiles: - data.csv: "{{ outputs.extractCsv.outputFiles.csvFile }}" - main.py: | - import pandas as pd - df = pd.read_csv("data.csv") - df['Column4'] = df['Column2'] + df['Column3'] - df.to_csv("{{ outputFiles.finalResult }}", index=False) - runner: DOCKER - dockerOptions: - image: ghcr.io/kestra-io/pydata:latest \ No newline at end of file diff --git a/examples/flows/python/analyzeSales.yml b/examples/flows/python/analyzeSales.yml deleted file mode 100644 index 9ae15e5..0000000 --- a/examples/flows/python/analyzeSales.yml +++ /dev/null @@ -1,25 +0,0 @@ -id: analyzeSales -namespace: prod -tasks: - - id: downloadCSV - type: io.kestra.plugin.fs.http.Download - uri: https://gist.githubusercontent.com/anna-geller/15f19626d975877b40c3653b6745dcd6/raw/849e8f69a251ece8bfb32dbd6097e69af6fa7f7f/orders.csv - - - id: analyzeSales - type: io.kestra.core.tasks.scripts.Python - inputFiles: - data.csv: "{{outputs.downloadCSV.uri}}" - main.py: | - import pandas as pd - from kestra import Kestra - - df = pd.read_csv("data.csv") - sales = df.total.sum() - med = df.quantity.median() - - Kestra.outputs({"total_sales": sales, "median_quantity": med}) - - top_sellers = df.sort_values(by="total", ascending=False).head(3) - print(f"Top 3 orders: {top_sellers}") - requirements: - - pandas diff --git a/examples/flows/python/csvKit.yml b/examples/flows/python/csvKit.yml deleted file mode 100644 index b9fc510..0000000 --- a/examples/flows/python/csvKit.yml +++ /dev/null @@ -1,65 +0,0 @@ -id: csvKit -namespace: prod -labels: 
- env: PRD -description: | - # Kestra Tutorial - As you notice, we can use markdown here. -tasks: - - id: download - type: io.kestra.plugin.fs.http.Download - uri: "https://gist.githubusercontent.com/tchiotludo/2b7f28f4f507074e60150aedb028e074/raw/6b6348c4f912e79e3ffccaf944fd019bf51cba30/conso-elec-gaz-annuelle-par-naf-agregee-region.csv" - retry: - type: constant - maxDuration: PT1H - interval: PT10M - - id: parallel - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: analyze-data-sum - type: io.kestra.core.tasks.scripts.Python - runner: DOCKER - dockerOptions: - image: python - inputFiles: - data.csv: "{{outputs.download.uri}}" - main.py: | - import pandas as pd - from kestra import Kestra - data = pd.read_csv("data.csv", sep=";") - sumOfConsumption = data['conso'].sum() - Kestra.outputs({'sumOfConsumption': int(sumOfConsumption)}) - requirements: - - pandas - - id: analyze-data-mean - type: io.kestra.core.tasks.scripts.Python - runner: DOCKER - dockerOptions: - image: python - inputFiles: - data.csv: "{{outputs.download.uri}}" - main.py: | - import pandas as pd - from kestra import Kestra - data = pd.read_csv("data.csv", sep=";") - meanOfConsumption = data['conso'].mean() - Kestra.outputs({'meanOfConsumption': int(meanOfConsumption)}) - requirements: - - pandas - - id: bash - type: io.kestra.core.tasks.scripts.Bash - runner: DOCKER - dockerOptions: - image: jdkelley/csvkit:latest - inputFiles: - data.csv: "{{ outputs.download.uri }}" - outputFiles: - - data_update - commands: - - "csvcut -d ';' -c annee,conso data.csv > new.csv" - - "csvstat new.csv" - - "cat new.csv > {{ outputFiles.data_update }}" -errors: - - id: error-handling - type: io.kestra.plugin.core.log.Log - message: "An error occurred." \ No newline at end of file diff --git a/examples/flows/python/dockerGcp.yml b/examples/flows/python/dockerGcp.yml deleted file mode 100644 index 41ba847..0000000 --- a/examples/flows/python/dockerGcp.yml +++ /dev/null @@ -1,44 +0,0 @@ -id: python_docker_gcp -namespace: blueprint - -tasks: - - id: wdir - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: download_csv - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/datasets/main/csv/orders.csv - - - id: fetch_auth_token - type: io.kestra.plugin.gcp.auth.OauthAccessToken - projectId: YOUR_GCP_PROJECT_NAME - serviceAccount: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" - - - id: analyze_sales - type: io.kestra.plugin.scripts.python.Script - inputFiles: - data.csv: "{{ outputs.download_csv.uri }}" - script: | - import pandas as pd - from kestra import Kestra - - df = pd.read_csv("data.csv") - sales = df.total.sum() - med = df.quantity.median() - - Kestra.outputs({"total_sales": sales, "median_quantity": med}) - - top_sellers = df.sort_values(by="total", ascending=False).head(3) - print(f"Top 3 orders: {top_sellers}") - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: yourGcpRegion-docker.pkg.dev/YOUR_GCP_PROJECT_NAME/flows/python:latest - config: | - { - "auths": { - "europe-west3-docker.pkg.dev": { - "username": "oauth2accesstoken", - "password": "{{outputs.fetchAuthToken.accessToken.tokenValue}}" - } - } - } diff --git a/examples/flows/python/log_levels.yml b/examples/flows/python/log_levels.yml deleted file mode 100644 index 7971a3a..0000000 --- a/examples/flows/python/log_levels.yml +++ /dev/null @@ -1,62 +0,0 @@ -id: dlt -namespace: dev - -tasks: - - id: warningState - type: io.kestra.plugin.scripts.python.Script - script: | - import 
logging - import sys - - # Configure the logging module - logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - handlers=[ - logging.StreamHandler() - ] - ) - - logging.info('This is an info message.') - logging.warning('This is a warning message.') - sys.exit(0) - - - id: successState - type: io.kestra.plugin.scripts.python.Script - warningOnStdErr: false - script: | - import logging - import sys - - # Configure the logging module - logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - handlers=[ - logging.StreamHandler() - ] - ) - - logging.info('This is an info message.') - logging.warning('This is a warning message.') - sys.exit(0) - - - id: failedStateDespiteIgnoringWarnings - type: io.kestra.plugin.scripts.python.Script - warningOnStdErr: false - script: | - import logging - import sys - - # Configure the logging module - logging.basicConfig( - level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - handlers=[ - logging.StreamHandler() - ] - ) - - logging.info('This is an info message.') - logging.warning('The flow is about to fail...') - sys.exit(1) \ No newline at end of file diff --git a/examples/flows/python/pythonAnalyzeCSV.yml b/examples/flows/python/pythonAnalyzeCSV.yml deleted file mode 100644 index bcedeb0..0000000 --- a/examples/flows/python/pythonAnalyzeCSV.yml +++ /dev/null @@ -1,16 +0,0 @@ -id: pythonAnalyzeCSV -namespace: prod -inputs: - - name: url - type: STRING - defaults: https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_customers.csv -tasks: - - id: pandas-task - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - import pandas as pd - df = pd.read_csv("{{inputs.url}}") - df.info() - requirements: - - pandas diff --git a/examples/flows/python/pythonAnalyzeOrders.yml b/examples/flows/python/pythonAnalyzeOrders.yml deleted file mode 100644 index 5c36b26..0000000 --- a/examples/flows/python/pythonAnalyzeOrders.yml +++ /dev/null @@ -1,50 +0,0 @@ -id: pythonAnalyzeOrders -namespace: prod -description: | - This flow generates a CSV file with 100 random orders and then calculates the sum and average of the "total" column. It then reports the results as outputs. - The CSV file generated by a Python task is set as `outputFiles`, allowing you to download the file from the UI's Execution page. 
- -tasks: - - id: outputCsvAndMetrics - type: io.kestra.core.tasks.scripts.Python - requirements: - - faker - outputFiles: - - orders.csv - inputFiles: - main.py: | - import csv - import random - from faker import Faker - from kestra import Kestra - - fake = Faker() - - # Define the list of columns for the CSV file - columns = ['order_id', 'customer_name', 'customer_email', 'product_id', 'price', 'quantity', 'total'] - - # Generate 100 random orders - orders = [] - for i in range(100): - order_id = i + 1 - customer_name = fake.name() - customer_email = fake.email() - product_id = random.randint(1, 20) - price = round(random.uniform(10.0, 200.0), 2) - quantity = random.randint(1, 10) - total = round(price * quantity, 2) - orders.append([order_id, customer_name, customer_email, product_id, price, quantity, total]) - - # Write the orders to a CSV file - with open('orders.csv', 'w', newline='') as file: - writer = csv.writer(file) - writer.writerow(columns) - writer.writerows(orders) - - # Calculate and print the sum and average of the "total" column - total_sum = sum(order[6] for order in orders) - average_order = round(total_sum / len(orders), 2) - print(f'Total sum: {total_sum}') - print(f'Average Order value: {average_order}') - - Kestra.outputs({'total_sum': total_sum, 'average_order': average_order}) diff --git a/examples/flows/python/pythonCsvEachParallel.yml b/examples/flows/python/pythonCsvEachParallel.yml deleted file mode 100644 index 21dfe79..0000000 --- a/examples/flows/python/pythonCsvEachParallel.yml +++ /dev/null @@ -1,26 +0,0 @@ -id: pythonCsvEachParallel -namespace: prod -description: Welcome to Kestra! 👋 - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hey there, {{ inputs.user }}! - - id: csv - type: io.kestra.core.tasks.flows.EachParallel - value: ["https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_customers.csv", "https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_orders.csv", "https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_payments.csv"] - tasks: - - id: pandasTransform - type: io.kestra.core.tasks.scripts.Python - inputFiles: - data.csv: "{{taskrun.value}}" - main.py: | - import pandas as pd - df = pd.read_csv("data.csv") - df.info() - requirements: - - pandas - - id: goodbye - type: io.kestra.core.tasks.scripts.Bash - commands: - - echo See you soon, {{ inputs.user }}! 
diff --git a/examples/flows/python/pythonCsvEachParallelScriptPlugin.yml b/examples/flows/python/pythonCsvEachParallelScriptPlugin.yml deleted file mode 100644 index e9073c9..0000000 --- a/examples/flows/python/pythonCsvEachParallelScriptPlugin.yml +++ /dev/null @@ -1,20 +0,0 @@ -id: pythonCsvEachParallelScriptPlugin -namespace: blueprint - -tasks: - - id: csv - type: io.kestra.core.tasks.flows.EachParallel - value: - - https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_customers.csv - - https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_orders.csv - - https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/raw_payments.csv - tasks: - - id: pandas - type: io.kestra.plugin.scripts.python.Script - warningOnStdErr: false - docker: - image: ghcr.io/kestra-io/pydata:latest - script: | - import pandas as pd - df = pd.read_csv("{{taskrun.value}}") - df.info() diff --git a/examples/flows/python/pythonDependenciesVenvPerTask.yml b/examples/flows/python/pythonDependenciesVenvPerTask.yml deleted file mode 100644 index c40952b..0000000 --- a/examples/flows/python/pythonDependenciesVenvPerTask.yml +++ /dev/null @@ -1,23 +0,0 @@ -id: pythonDependenciesVenvPerTask -namespace: prod -tasks: - - id: compareInstallTimesBetweenVersions - type: io.kestra.plugin.core.flow.Parallel - concurrent: 2 - tasks: - - id: latestRequests - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - import requests - print(requests.__version__) - requirements: - - requests - - id: olderRequests - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - import requests - print(requests.__version__) - requirements: - - requests==2.20 diff --git a/examples/flows/python/pythonExtractUnzipProcessPandasCSV.yml b/examples/flows/python/pythonExtractUnzipProcessPandasCSV.yml deleted file mode 100644 index 2a1df0b..0000000 --- a/examples/flows/python/pythonExtractUnzipProcessPandasCSV.yml +++ /dev/null @@ -1,30 +0,0 @@ -id: pythonExtractUnzipProcessPandasCSV -namespace: prod -description: | - This flow downloads a zip file, unzips it, and processes the CSV file inside it using Python. - The downloaded and unzipped file is available as `outputs.task_id.files['filename']` - and can be used as input for other tasks. - -tasks: - - id: getZipFile - type: io.kestra.plugin.fs.http.Download - uri: "https://wri-dataportal-prod.s3.amazonaws.com/manual/global_power_plant_database_v_1_3.zip" - - - id: unzip - type: io.kestra.plugin.compress.ArchiveDecompress - description: | - if you check the plugin documentation, you will see that the output is a map of unzipped `files` and their - corresponding location within Kestra's internal storage accessible as `outputs.task_id.files['filename']` - from: "{{outputs.getZipFile.uri}}" - algorithm: ZIP - - - id: processCSV - type: io.kestra.core.tasks.scripts.Python - inputFiles: - data.csv: "{{outputs.unzip.files['global_power_plant_database.csv']}}" - main.py: | - import pandas as pd - df = pd.read_csv("data.csv", low_memory=False) - print(df.info()) - requirements: - - pandas \ No newline at end of file diff --git a/examples/flows/python/pythonPandasCSV.yml b/examples/flows/python/pythonPandasCSV.yml deleted file mode 100644 index 9ec4236..0000000 --- a/examples/flows/python/pythonPandasCSV.yml +++ /dev/null @@ -1,33 +0,0 @@ -id: pythonPandasCSV -namespace: prod -description: | - ## Simple Python script - First task downloads CSV to Kestra's internal storage and will make it available via task outputs. - Next task will analyze it. 
-inputs: - - name: csvUrl - type: STRING - defaults: https://www.data.gouv.fr/fr/datasets/r/d33eabc9-e2fd-4787-83e5-a5fcfb5af66d -tasks: - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: "{{inputs.csvUrl}}" - - id: analyzeData - type: io.kestra.core.tasks.scripts.Python - description: | - We define an input file named 'data.csv' that will be available in the Python task's working directory. - This file is fetched from the internal storage by using the 'uri' output of the 'extract' task. - Then, the file 'main.py' is the Python script that will be executed. - It uses Pandas to read the CSV file, computes the sum and sets is as task output. - Because the script uses `pandas`, we must list it in the requirements. - inputFiles: - data.csv: "{{outputs.extract.uri}}" - main.py: | - import pandas as pd - from kestra import Kestra - data = pd.read_csv("data.csv", sep=";") - data.info() - sumOfConsumption = data['conso'].sum() - Kestra.outputs({'sumOfConsumption': int(sumOfConsumption)}) - requirements: - - pandas diff --git a/examples/flows/python/pythonPartitions.yml b/examples/flows/python/pythonPartitions.yml deleted file mode 100644 index d9adb62..0000000 --- a/examples/flows/python/pythonPartitions.yml +++ /dev/null @@ -1,34 +0,0 @@ -id: pythonPartitions -namespace: prod -description: | - This flow extracts a list of partitions and then processes each partition in parallel in isolated Python scripts. The flow will then track the - number of rows and the processing time for each partition, which you can inspect in the Metrics tab. - -tasks: - - id: getPartitions - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - from kestra import Kestra - partitions = [f"file_{nr}.parquet" for nr in range(1, 10)] - Kestra.outputs({'partitions': partitions}) - - - id: processPartitions - type: io.kestra.core.tasks.flows.EachParallel - value: '{{outputs.getPartitions.vars.partitions}}' - tasks: - - id: partition - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - import random - import time - from kestra import Kestra - - filename = '{{ taskrun.value }}' - print(f"Reading and processing partition {filename}") - nr_rows = random.randint(1, 1000) - processing_time = random.randint(1, 20) - time.sleep(processing_time) - Kestra.counter('nr_rows', nr_rows, {'partition': filename}) - Kestra.timer('processing_time', processing_time, {'partition': filename}) diff --git a/examples/flows/python/pythonScriptContainer.yml b/examples/flows/python/pythonScriptContainer.yml deleted file mode 100644 index 4ebb91e..0000000 --- a/examples/flows/python/pythonScriptContainer.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: pythonScriptContainer -namespace: prod -description: | - This flow starts a Docker container, installs custom packages - and runs a Python task script inside it. 
- -tasks: - - id: pythonScriptContainer - type: io.kestra.core.tasks.scripts.Python - inputFiles: - main.py: | - import pandas as pd - import requests - - print(f"pandas version: {pd.__version__}") - print(f"requests version: {requests.__version__}") - requirements: - - requests - - pandas - runner: DOCKER - dockerOptions: - image: python:3.11-slim \ No newline at end of file diff --git a/examples/flows/python/pythonScriptVenv.yml b/examples/flows/python/pythonScriptVenv.yml deleted file mode 100644 index 1213005..0000000 --- a/examples/flows/python/pythonScriptVenv.yml +++ /dev/null @@ -1,37 +0,0 @@ -id: python_script_venv -namespace: company.team -description: | - This flow creates a CSV file from a CLI using a Bash task. - It then processes it in Python with `pandas` in a virtual environment. - Finally, it collects metrics (orders, sales) from the data processing task. - -tasks: - - id: csv - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: extract_csv - type: io.kestra.plugin.scripts.shell.Commands - description: Create a CSV file - commands: - - echo "order_id,total_amount" > output.csv - - echo "1,100" >> output.csv - - echo "2,200" >> output.csv - - echo "3,300" >> output.csv - - echo Generated the CSV file ✅ - - - id: process_csv - type: io.kestra.plugin.scripts.python.Script - beforeCommands: - - pip install pandas==1.5.3 - script: | - from kestra import Kestra - import pandas as pd - - df = pd.read_csv("output.csv") - orders = df["order_id"].count() - sales = df["total_amount"].sum() - print(f"there are {orders} orders with total sales of {sales}") - - tags = dict(dashboard="sales") - Kestra.counter("orders", int(orders), tags) - Kestra.counter("sales", int(sales), tags) diff --git a/examples/flows/staging/bingAds.yml b/examples/flows/staging/bingAds.yml deleted file mode 100644 index 178a1ac..0000000 --- a/examples/flows/staging/bingAds.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: bingAds -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: bingAds - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/csv.yml b/examples/flows/staging/csv.yml deleted file mode 100644 index 3a2f043..0000000 --- a/examples/flows/staging/csv.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: csv -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: csv - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/emarsys.yml b/examples/flows/staging/emarsys.yml deleted file mode 100644 index bfc5690..0000000 --- a/examples/flows/staging/emarsys.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: emarsys -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: emarsys - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/facebook.yml b/examples/flows/staging/facebook.yml deleted file mode 100644 index 47658b5..0000000 --- a/examples/flows/staging/facebook.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: facebook -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: facebook - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/googleAds.yml 
b/examples/flows/staging/googleAds.yml deleted file mode 100644 index d359d12..0000000 --- a/examples/flows/staging/googleAds.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: googleAds -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: googleAds - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/googleAnalytics.yml b/examples/flows/staging/googleAnalytics.yml deleted file mode 100644 index 2269aa1..0000000 --- a/examples/flows/staging/googleAnalytics.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: googleAnalytics -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: googleAnalytics - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/googleSearchConsole.yml b/examples/flows/staging/googleSearchConsole.yml deleted file mode 100644 index 171eacb..0000000 --- a/examples/flows/staging/googleSearchConsole.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: googleSearchConsole -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: googleSearchConsole - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/googleSheets.yml b/examples/flows/staging/googleSheets.yml deleted file mode 100644 index d36604f..0000000 --- a/examples/flows/staging/googleSheets.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: googleSheets -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: googleSheets - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/greenhouse.yml b/examples/flows/staging/greenhouse.yml deleted file mode 100644 index d4ae409..0000000 --- a/examples/flows/staging/greenhouse.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: greenhouse -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: greenhouse - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/hubspot.yml b/examples/flows/staging/hubspot.yml deleted file mode 100644 index 82441c6..0000000 --- a/examples/flows/staging/hubspot.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: hubspot -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: hubspot - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/mailchimp.yml b/examples/flows/staging/mailchimp.yml deleted file mode 100644 index acadfa3..0000000 --- a/examples/flows/staging/mailchimp.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: mailchimp -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: mailchimp - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/microsoftDynamics.yml b/examples/flows/staging/microsoftDynamics.yml deleted file mode 100644 index a39581d..0000000 --- a/examples/flows/staging/microsoftDynamics.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: microsoftDynamics -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: 
io.kestra.plugin.fivetran.connectors.Sync - connectorId: microsoftDynamics - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/mongoDB.yml b/examples/flows/staging/mongoDB.yml deleted file mode 100644 index 3f1f3b9..0000000 --- a/examples/flows/staging/mongoDB.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: mongoDB -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: mongoDB - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/mysql.yml b/examples/flows/staging/mysql.yml deleted file mode 100644 index 9b472d7..0000000 --- a/examples/flows/staging/mysql.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: mysql -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: mysql - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/postgres.yml b/examples/flows/staging/postgres.yml deleted file mode 100644 index 2d1000a..0000000 --- a/examples/flows/staging/postgres.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: postgres -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: postgres - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/posthog.yml b/examples/flows/staging/posthog.yml deleted file mode 100644 index 7208b86..0000000 --- a/examples/flows/staging/posthog.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: posthog -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: posthog - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/salesforce.yml b/examples/flows/staging/salesforce.yml deleted file mode 100644 index 8a3c718..0000000 --- a/examples/flows/staging/salesforce.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: salesforce -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: salesforce - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/shopify.yml b/examples/flows/staging/shopify.yml deleted file mode 100644 index c0c599e..0000000 --- a/examples/flows/staging/shopify.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: shopify -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: shopify - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/sqlServer.yml b/examples/flows/staging/sqlServer.yml deleted file mode 100644 index a8f0347..0000000 --- a/examples/flows/staging/sqlServer.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: sqlServer -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: io.kestra.plugin.fivetran.connectors.Sync - connectorId: sqlServer - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/flows/staging/stripe.yml b/examples/flows/staging/stripe.yml deleted file mode 100644 index 9d9950f..0000000 --- a/examples/flows/staging/stripe.yml +++ /dev/null @@ -1,8 +0,0 @@ -id: stripe -namespace: prod.staging -tasks: - - id: dataIngestionSync - type: 
io.kestra.plugin.fivetran.connectors.Sync - connectorId: stripe - apiKey: "{{secret('FIVETRAN_API_KEY')}}" - apiSecret: "{{secret('FIVETRAN_API_SECRET')}}" diff --git a/examples/mongo_db/filterMongoDB.yml b/examples/mongo_db/filterMongoDB.yml deleted file mode 100644 index 9449116..0000000 --- a/examples/mongo_db/filterMongoDB.yml +++ /dev/null @@ -1,17 +0,0 @@ -id: filterMongoDB -namespace: dev -description: | - This flow will filter a MongoDB collection to find Pokemon with a base experience greater than 100. - -tasks: - - id: filter - type: io.kestra.plugin.mongodb.Find - connection: - uri: "mongodb://host.docker.internal:27017/" - database: "local" - collection: "pokemon" - store: true - filter: - base_experience: - $gt: 100 - # name: psyduck diff --git a/examples/mongo_db/loadPokemon.yml b/examples/mongo_db/loadPokemon.yml deleted file mode 100644 index 4d85b08..0000000 --- a/examples/mongo_db/loadPokemon.yml +++ /dev/null @@ -1,23 +0,0 @@ -id: loadPokemon -namespace: dev -inputs: - - name: pokemon - type: STRING - defaults: psyduck -description: | - This flow will load a pokemon from the PokeAPI into a MongoDB database. - The pokemon name can be provided as an input at runtime. - -tasks: - - id: pokemon - type: io.kestra.plugin.fs.http.Request - uri: https://pokeapi.co/api/v2/pokemon/{{inputs.pokemon}} - method: GET - - - id: load - type: io.kestra.plugin.mongodb.InsertOne - connection: - uri: "mongodb://host.docker.internal:27017/" - database: "local" - collection: "pokemon" - document: "{{outputs.pokemon.body}}" diff --git a/examples/mongo_db/write.yml b/examples/mongo_db/write.yml deleted file mode 100644 index 96a7baf..0000000 --- a/examples/mongo_db/write.yml +++ /dev/null @@ -1,14 +0,0 @@ -id: write -namespace: prod.staging -tasks: - - id: write - type: io.kestra.plugin.mongodb.InsertOne - connection: - uri: "mongodb://root:example@mongo:27017/" - database: "my_database" - collection: "my_collection" - document: - _id: - $oid: 60930c39a982931c20ef6cd6 - name: "John Doe" - city: "Berlin" \ No newline at end of file diff --git a/examples/mysql/extractLoadMySQL.yml b/examples/mysql/extractLoadMySQL.yml deleted file mode 100644 index abcef7f..0000000 --- a/examples/mysql/extractLoadMySQL.yml +++ /dev/null @@ -1,44 +0,0 @@ -id: extractLoadMySQL -namespace: blueprint -description: | - This flow will extract data from a remote CSV file and load it into a MySQL database. - The database credentials can be provided using environment variables. 
- - docker run -d -p 3306:3306 --name mymysql -v mymysqldb:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=qwerasdfyxcv1234 -e MYSQL_DATABASE=stage -e MYSQL_USER=dataengineer -e MYSQL_PASSWORD=qwerasdfyxcv1234 mysql:latest - -variables: - db: jdbc:mysql://host.docker.internal:3306/stage - table: orders - user: dataengineer - -tasks: - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/datasets/main/csv/orders.csv - - - id: query - type: io.kestra.plugin.jdbc.postgresql.Query - url: "{{vars.db}}" - username: "{{vars.user}}" - password: "{{envs.db_password}}" - sql: | - create table if not exists {{vars.table}} - ( - order_id integer, - customer_name varchar(50), - customer_email varchar(50), - product_id integer, - price real, - quantity integer, - total real - ); - - - id: loadToPostgres - type: io.kestra.plugin.jdbc.postgresql.CopyIn - url: "{{vars.db}}" - username: "{{vars.user}}" - password: "{{envs.db_password}}" - from: "{{ outputs.extract.uri }}" - format: CSV - header: true - table: "{{vars.table}}" diff --git a/examples/postgres/apiJsonToPostgres.yml b/examples/postgres/apiJsonToPostgres.yml deleted file mode 100644 index df72a34..0000000 --- a/examples/postgres/apiJsonToPostgres.yml +++ /dev/null @@ -1,76 +0,0 @@ -id: apiJsonToPostgres -namespace: dev -tasks: - - id: download - type: io.kestra.plugin.fs.http.Download - uri: https://gorest.co.in/public/v2/users - - - id: ionToJSON - type: "io.kestra.plugin.serdes.json.JsonReader" - from: "{{outputs.download.uri}}" - newLine: false - - - id: json - type: io.kestra.plugin.serdes.json.JsonWriter - from: "{{outputs.ionToJSON.uri}}" - - - id: addColumn - type: io.kestra.plugin.scripts.jython.FileTransform - from: "{{outputs.json.uri}}" - script: | - logger.info('row: {}', row) - row['inserted_from'] = 'kestra' - - - id: parallel - type: io.kestra.plugin.core.flow.Parallel - tasks: - - id: postgres - type: io.kestra.core.tasks.flows.Sequential - tasks: - - id: finalCSV - type: io.kestra.plugin.serdes.csv.CsvWriter - from: "{{outputs.addColumn.uri}}" - header: true - - - id: createTable - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - sql: | - CREATE TABLE IF NOT EXISTS public.newusers - ( - id int, - name VARCHAR, - email VARCHAR, - gender VARCHAR, - status VARCHAR, - inserted_from VARCHAR - ); - - - id: loadData - type: io.kestra.plugin.jdbc.postgresql.CopyIn - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - format: CSV - from: "{{outputs.finalCSV.uri}}" - table: public.newusers - header: true - - - id: s3 - type: io.kestra.core.tasks.flows.Sequential - tasks: - - id: finalJSON - type: io.kestra.plugin.serdes.json.JsonWriter - from: "{{outputs.addColumn.uri}}" - - - id: jsonToS3 - type: io.kestra.plugin.aws.s3.Upload - disabled: true - from: "{{outputs.finalJSON.uri}}" - key: users.json - bucket: kestraio - region: eu-central-1 - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID') }}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY') }}" diff --git a/examples/postgres/apiJsonToPostgresPythonOnly.yml b/examples/postgres/apiJsonToPostgresPythonOnly.yml deleted file mode 100644 index 2fe2ccb..0000000 --- a/examples/postgres/apiJsonToPostgresPythonOnly.yml +++ /dev/null @@ -1,39 +0,0 @@ -id: apiJsonToPostgresPythonOnly -namespace: dev - -tasks: - - id: apiToPostgres - type: io.kestra.plugin.scripts.python.Script - 
beforeCommands: - - pip install requests pandas psycopg2 sqlalchemy > /dev/null - warningOnStdErr: false - script: | - import pandas as pd - import requests - from sqlalchemy import create_engine - - URL = "https://gorest.co.in/public/v2/users" - req = requests.get(url=URL) - res = req.json() - - df_users = pd.DataFrame(res) - df_users['inserted_from'] = 'kestra' - df_users.head() - password = "{{secret('DB_PASSWORD')}}" - host = "host.docker.internal" - - engine = create_engine( - f"postgresql://postgres:{password}@{host}:5432" - ) - - df_users.to_sql("my_users", engine, if_exists="append", index=False) - df_users.to_csv("{{outputDir}}/users.csv", index=False) - - - id: s3upload - type: io.kestra.plugin.aws.s3.Upload - from: "{{outputs.apiToPostgres.outputFiles['users.csv']}}" - key: users.csv - bucket: kestraio - region: eu-central-1 - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID')}}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY')}}" diff --git a/examples/postgres/apiJsonToPostgresPythonScript.yml b/examples/postgres/apiJsonToPostgresPythonScript.yml deleted file mode 100644 index c9abe7f..0000000 --- a/examples/postgres/apiJsonToPostgresPythonScript.yml +++ /dev/null @@ -1,53 +0,0 @@ -id: api_json_to_postgres_python_script -namespace: company.team - -tasks: - - id: download - type: io.kestra.plugin.fs.http.Download - uri: https://gorest.co.in/public/v2/users - - - id: add_column - type: io.kestra.plugin.scripts.jython.FileTransform - from: "{{ outputs.download.uri }}" - script: | - logger.info('row: {}', row) - for dict_obj in row: - dict_obj['inserted_from'] = 'kestra' - - - id: to_json - type: io.kestra.plugin.serdes.json.JsonWriter - from: "{{ outputs.add_column.uri }}" - - - id: save_users_s3 - type: io.kestra.plugin.aws.s3.Upload - from: "{{ outputs.to_json.uri }}" - key: users.json - bucket: kestraio - region: eu-central-1 - accessKeyId: "{{ secret('AWS_ACCESS_KEY_ID')}}" - secretKeyId: "{{ secret('AWS_SECRET_ACCESS_KEY')}}" - - - id: wdir - type: io.kestra.plugin.core.flow.WorkingDirectory - tasks: - - id: save-users-postgres - type: io.kestra.plugin.scripts.python.Script - beforeCommands: - - pip install pandas psycopg2 sqlalchemy > /dev/null - warningOnStdErr: false - inputFiles: - data.jsonl: "{{ outputs.to_json.uri }}" - script: | - import json - import pandas as pd - from sqlalchemy import create_engine - - with open("data.jsonl", "r") as f: - users = json.load(f) - - users = pd.DataFrame(users) - users.head() - - engine = create_engine(f"postgresql://postgres:"{{ secret('DB_PASSWORD') }}"@host.docker.internal:5432") - - users.to_sql("users", engine, if_exists="append", index=False) diff --git a/examples/postgres/batchLoad.yml b/examples/postgres/batchLoad.yml deleted file mode 100644 index 68fdf5b..0000000 --- a/examples/postgres/batchLoad.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: batchLoad -namespace: prod.staging -# http://localhost:8080/ui/plugins/io.kestra.plugin.jdbc.postgresql.Batch -tasks: - - id: query - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://dev:56982/ - username: postgres - password: pg_passwd - sql: | - SELECT * - FROM xref - LIMIT 1500; - store: true - - id: update - type: io.kestra.plugin.jdbc.postgresql.Batch - from: "{{ outputs.query.uri }}" - url: jdbc:postgresql://prod:56982/ - username: postgres - password: pg_passwd - sql: | - insert into xref values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) \ No newline at end of file diff --git a/examples/postgres/copyIn.yml b/examples/postgres/copyIn.yml deleted file mode 100644 index 5d3cfa2..0000000 --- a/examples/postgres/copyIn.yml +++ /dev/null @@ -1,45 +0,0 @@ -id: copyIn -namespace: blueprint - -tasks: - - id: download - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv - - - id: create_table - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - sql: | - CREATE TABLE IF NOT EXISTS country_referential( - name VARCHAR, - "alpha-2" VARCHAR, - "alpha-3" VARCHAR, - "country-code" VARCHAR, - "iso_3166-2" VARCHAR, - region VARCHAR, - "sub-region" VARCHAR, - "intermediate-region" VARCHAR, - "region-code" VARCHAR, - "sub-region-code" VARCHAR, - "intermediate-region-code" VARCHAR - ); - - - id: copyin - type: io.kestra.plugin.jdbc.postgresql.CopyIn - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - format: CSV - from: '{{ outputs.download.uri }}' - table: country_referential - header: true - - - id: read - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - sql: SELECT * FROM country_referential LIMIT 10 - fetch: true \ No newline at end of file diff --git a/examples/postgres/eachSequentialPostgres.yml b/examples/postgres/eachSequentialPostgres.yml deleted file mode 100644 index 7e57ee3..0000000 --- a/examples/postgres/eachSequentialPostgres.yml +++ /dev/null @@ -1,19 +0,0 @@ -id: eachSequentialPostgres -namespace: prod.staging -tasks: - - id: zombies - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://kestra-prod-postgresql:5432/kestra - username: ** - password: ** - sql: "SELECT id, state_current, state_duration, start_date, end_date, to_json(value) FROM public.executions WHERE state_current = 'RUNNING' AND state_duration > 900 AND deleted = true ORDER BY key ASC LIMIT 100" - fetch: true - #store: true - - id: each - type: io.kestra.core.tasks.flows.EachSequential - tasks: - - id: return - type: io.kestra.core.tasks.debugs.Return - #format: "{{json taskrun.value['to_json']}}" - format: "{{ json(taskrun.value).id }}" - value: "{{outputs.zombies.rows}}" diff --git a/examples/postgres/extractLoadPostgres.yml b/examples/postgres/extractLoadPostgres.yml deleted file mode 100644 index 88f6a9a..0000000 --- a/examples/postgres/extractLoadPostgres.yml +++ /dev/null @@ -1,40 +0,0 @@ -id: extractLoadPostgres -namespace: blueprint -description: | - This flow will extract data from a remote CSV file and load it into a Postgres database. The database credentials can be provided using environment variables. 
- -variables: - db: jdbc:postgresql://host.docker.internal:5432/postgres - table: public.orders - -tasks: - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/datasets/main/csv/orders.csv - - - id: query - type: io.kestra.plugin.jdbc.postgresql.Query - url: "{{vars.db}}" - username: postgres - password: "{{envs.db_password}}" - sql: | - create table if not exists {{vars.table}} - ( - order_id integer, - customer_name varchar(50), - customer_email varchar(50), - product_id integer, - price real, - quantity integer, - total real - ); - - - id: loadToPostgres - type: io.kestra.plugin.jdbc.postgresql.CopyIn - url: "{{vars.db}}" - username: postgres - password: "{{envs.db_password}}" - from: "{{ outputs.extract.uri }}" - format: CSV - header: true - table: "{{vars.table}}" diff --git a/examples/postgres/postgresQuery.yml b/examples/postgres/postgresQuery.yml deleted file mode 100644 index 43f0bf5..0000000 --- a/examples/postgres/postgresQuery.yml +++ /dev/null @@ -1,11 +0,0 @@ -id: postgresQuery -namespace: dev -tasks: -- id: query - type: io.kestra.plugin.jdbc.postgresql.Query - url: jdbc:postgresql://host.docker.internal:5432/ - username: postgres - password: "{{envs.db_password}}" - sql: select * from public.orders limit 10; - fetch: true - # store: true diff --git a/examples/postgres/postgresTrigger.yml b/examples/postgres/postgresTrigger.yml deleted file mode 100644 index fc81cb2..0000000 --- a/examples/postgres/postgresTrigger.yml +++ /dev/null @@ -1,21 +0,0 @@ -id: postgresTrigger -namespace: dev - -tasks: - - id: each - type: io.kestra.core.tasks.flows.EachParallel - value: "{{ trigger.rows }}" - tasks: - - id: return - type: io.kestra.core.tasks.debugs.Return - format: "{{json(taskrun.value)}}" - - id: query - type: io.kestra.plugin.jdbc.postgresql.Query -# sql: "{% for row in outputs.update.rows %} INSERT INTO pl_store_distribute (year_month,store_code, update_date) values ({{row.play_time}}, {{row.concert_id}}, TO_TIMESTAMP('{{row.timestamp_type}}', 'YYYY-MM-DDTHH:MI:SS.US') ); {% endfor %}" -# sql: "UPDATE orders SET status = 'PROCESSED' WHERE id = {{taskrun.value.id}}" -# http://localhost:8080/ui/plugins/io.kestra.plugin.jdbc.postgresql.Query -triggers: - - id: watch - type: io.kestra.plugin.jdbc.postgresql.Trigger - interval: "PT1M" - sql: SELECT * FROM orders WHERE status = 'NEW' diff --git a/examples/redis/cannotExpandTheList.yml b/examples/redis/cannotExpandTheList.yml deleted file mode 100644 index 35d8e95..0000000 --- a/examples/redis/cannotExpandTheList.yml +++ /dev/null @@ -1,32 +0,0 @@ -id: cannotExpandTheList -namespace: dev - -inputs: - - name: values - type: JSON - defaults: | - [ - {"dbt": ["build", "test", "snapshot"]}, - {"aws": ["s3", "sqs", "sns", "athena"]}, - {"gcp": ["big-query", "gcs", "cloudrun"]} - ] - -tasks: - - id: foreach - type: io.kestra.core.tasks.flows.EachParallel - value: "{{inputs.values}}" - tasks: - - id: set - type: "io.kestra.plugin.redis.Set" - url: redis://host.docker.internal:6379/0 - serdeType: STRING - key: "{{json(taskrun.value) | keys | first}}" - value: | - {{ taskrun.value | jq('.[]') | first }} - - - id: pushListPlugins - type: io.kestra.plugin.redis.ListPush - url: redis://host.docker.internal:6379/0 - serdeType: JSON - key: "{{json(taskrun.value) | keys | first}}" - from: "{{ taskrun.value | jq('.[]') | first }}" \ No newline at end of file diff --git a/examples/redis/jqTest.yml b/examples/redis/jqTest.yml deleted file mode 100644 index d6740ab..0000000 --- 
a/examples/redis/jqTest.yml +++ /dev/null @@ -1,24 +0,0 @@ -id: redisSet -namespace: dev - -inputs: - - name: values - type: JSON - defaults: | - [ - {"dbt": ["build", "test", "snapshot"]}, - {"aws": ["s3", "sqs", "sns", "athena"]}, - {"gcp": ["big-query", "gcs", "cloudrun"]} - ] - -tasks: - - id: foreach - type: io.kestra.core.tasks.flows.EachParallel - value: "{{inputs.values}}" - tasks: - - id: key - type: io.kestra.core.tasks.debugs.Return - format: "{{json(taskrun.value) | keys | first}}" - - id: value - type: io.kestra.core.tasks.debugs.Return - format: "{{ taskrun.value | jq('.[]') | first }}" \ No newline at end of file diff --git a/examples/redis/setGet.yml b/examples/redis/setGet.yml deleted file mode 100644 index 312cff6..0000000 --- a/examples/redis/setGet.yml +++ /dev/null @@ -1,43 +0,0 @@ -id: redisKeyValueStore -namespace: blueprint -description: | - This flow will set a key-value pair in Redis and then retrieve it. - The key-value pair will be set using inputs which can be provided at runtime. - -inputs: - - name: key - type: STRING - defaults: johndoe - - - name: value - type: JSON - defaults: | - { - "id": 123456, - "name": "John Doe", - "email": "johndoe@example.com", - "age": 30, - "address": { - "street": "123 Main Street", - "city": "Anytown", - "state": "California", - "country": "United States" - }, - "phone": "+1 555-123-4567", - "isPremium": true, - "interests": ["programming", "reading", "traveling"] - } - -tasks: - - id: set - type: "io.kestra.plugin.redis.Set" - url: redis://host.docker.internal:6379/0 - serdeType: JSON - key: "{{inputs.key}}" - value: "{{inputs.value}}" - - - id: get - type: io.kestra.plugin.redis.Get - url: redis://host.docker.internal:6379/0 - key: "{{inputs.key}}" - serdeType: JSON diff --git a/examples/redis/setParallel.yml b/examples/redis/setParallel.yml deleted file mode 100644 index da673b8..0000000 --- a/examples/redis/setParallel.yml +++ /dev/null @@ -1,26 +0,0 @@ -id: setParallel -namespace: blueprint - -inputs: - - name: values - type: JSON - description: Enter your favorite plugins and tasks - defaults: | - [ - {"dbt": ["build", "test", "snapshot"]}, - {"aws": ["s3", "sqs", "sns", "athena"]}, - {"gcp": ["big-query", "gcs", "cloudrun"]} - ] - -tasks: - - id: parallel - type: io.kestra.core.tasks.flows.EachParallel - value: "{{inputs.values}}" - tasks: - - id: set - type: "io.kestra.plugin.redis.Set" - url: redis://host.docker.internal:6379/0 - serdeType: STRING - key: "{{json(taskrun.value) | keys | first}}" - value: | - {{ taskrun.value | jq('.[]') | first }} \ No newline at end of file diff --git a/examples/redis/trigger.yml b/examples/redis/trigger.yml deleted file mode 100644 index 4d14ee9..0000000 --- a/examples/redis/trigger.yml +++ /dev/null @@ -1,14 +0,0 @@ -id: list-listen -namespace: dev - -tasks: - - id: echo trigger file - type: io.kestra.core.tasks.debugs.Echo - format: "{{ trigger.uri') }} containing {{ trigger.count }} lines" - -triggers: - - id: watch - type: io.kestra.plugin.redis.TriggerList - url: redis://host.docker.internal:6379/0 - key: mylist - maxRecords: 2 \ No newline at end of file diff --git a/examples/rust/Cargo.toml b/examples/rust/Cargo.toml deleted file mode 100644 index 8eefd3d..0000000 --- a/examples/rust/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -name = "etl" -version = "0.1.0" -edition = "2018" - -[dependencies] -csv = "1.1" -rand = "0.8" -serde = { version = "1.0", features = ["derive"] } diff --git a/examples/rust/Dockerfile b/examples/rust/Dockerfile deleted file mode 100644 index 
07383cb..0000000 --- a/examples/rust/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM rust:1.67 as builder -LABEL org.opencontainers.image.source=https://github.com/kestra-io/examples -LABEL org.opencontainers.image.description="Image with a toy example running ETL process in Rust" -WORKDIR /usr/src/etl -COPY . . -RUN cargo install --path . -CMD ["etl"] \ No newline at end of file diff --git a/examples/rust/README.md b/examples/rust/README.md deleted file mode 100644 index 9675046..0000000 --- a/examples/rust/README.md +++ /dev/null @@ -1,4 +0,0 @@ -docker build -t rustetl . -docker run -it --rm --name my-rust-etl rustetl -docker tag rustetl:latest ghcr.io/kestra-io/rust:latest -docker push ghcr.io/kestra-io/rust:latest \ No newline at end of file diff --git a/examples/rust/src/main.rs b/examples/rust/src/main.rs deleted file mode 100644 index 5b1b13b..0000000 --- a/examples/rust/src/main.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::error::Error; -use std::fs::File; -use std::process; - -use csv; -use rand::Rng; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Deserialize, Serialize)] -struct Record { - id: u64, - value: u64, -} - -fn main() -> Result<(), Box> { - let n = 10; - let mut rng = rand::thread_rng(); - - // Generate random data - let mut records = Vec::new(); - for _ in 0..n { - records.push(Record { - id: rng.gen_range(1..n+1), - value: rng.gen_range(1..100), - }); - } - - // Write to a CSV file - let file = File::create("input.csv")?; - let mut wtr = csv::Writer::from_writer(file); - for record in &records { - wtr.serialize(record)?; - } - wtr.flush()?; - - // Read the created CSV file - let file = File::open("input.csv")?; - let mut rdr = csv::Reader::from_reader(file); - - // Transform the data (here we simply add 1 to each value) - let mut transformed_records = Vec::new(); - for result in rdr.deserialize() { - let mut record: Record = result?; - record.value += 1; - transformed_records.push(record); - } - - // Write transformed data to another CSV file - let file = File::create("output.csv")?; - let mut wtr = csv::Writer::from_writer(file); - for record in &transformed_records { - wtr.serialize(record)?; - } - wtr.flush()?; - - Ok(()) -} diff --git a/examples/scripts/api_users_to_json.py b/examples/scripts/api_users_to_json.py deleted file mode 100644 index 1bc1de3..0000000 --- a/examples/scripts/api_users_to_json.py +++ /dev/null @@ -1,9 +0,0 @@ -import json -import requests - -URL = "https://gorest.co.in/public/v2/users" -req = requests.get(url=URL) -res = req.json() - -with open("users.json", "w") as f: - json.dump(res, f) diff --git a/examples/scripts/api_users_to_postgres.py b/examples/scripts/api_users_to_postgres.py deleted file mode 100644 index 660fb52..0000000 --- a/examples/scripts/api_users_to_postgres.py +++ /dev/null @@ -1,21 +0,0 @@ -import pandas as pd -import requests -from sqlalchemy import create_engine - -URL = "https://gorest.co.in/public/v2/users" -req = requests.get(url=URL) -res = req.json() - -df_users = pd.DataFrame(res) -df_users['inserted_from'] = 'kestra' -df_users.head() - -password = "{{secret('DB_PASSWORD')}}" -host = "host.docker.internal" - -engine = create_engine( - f"postgresql://postgres:{password}@{host}:5432" -) - -df_users.to_sql("my_users", engine, if_exists="append", index=False) -df_users.to_csv("{{outputDir}}/users.csv", index=False) diff --git a/examples/scripts/clean_messy_dataset.py b/examples/scripts/clean_messy_dataset.py deleted file mode 100644 index 531ff0e..0000000 --- a/examples/scripts/clean_messy_dataset.py +++ 
/dev/null @@ -1,12 +0,0 @@ -import pandas as pd - -df = pd.read_csv("data.csv") - -# Replace non-numeric age values with NaN -df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - -# mean imputation: fill NaN values with the mean age -mean_age = int(df["Age"].mean()) -print(f"Filling NULL values with mean: {mean_age}") -df["Age"] = df["Age"].fillna(mean_age) -df.to_csv("clean_dataset.csv", index=False) diff --git a/examples/scripts/create_messy_dataset.py b/examples/scripts/create_messy_dataset.py deleted file mode 100644 index ec8ed4e..0000000 --- a/examples/scripts/create_messy_dataset.py +++ /dev/null @@ -1,24 +0,0 @@ -from faker import Faker -import pandas as pd -import random - -fake = Faker() - - -def get_age(): - # add some non-numeric "messy" data - if random.randint(0, 4) == 0: - return random.choice(["None", "N/A", "???"]) - return random.randint(18, 65) - - -data = { - "First Name": [fake.first_name() for _ in range(1000)], - "Last Name": [fake.last_name() for _ in range(1000)], - "Age": [get_age() for _ in range(1000)], - "Email": [fake.email() for _ in range(1000)], -} - -df = pd.DataFrame(data) - -df.to_csv("data.csv", index=False) diff --git a/examples/scripts/etl_script.py b/examples/scripts/etl_script.py deleted file mode 100644 index 5ab81ba..0000000 --- a/examples/scripts/etl_script.py +++ /dev/null @@ -1,61 +0,0 @@ -import io -import os -import tempfile -import zipfile -import pandas as pd -import requests - -# import awswrangler as wr - - -def extract() -> pd.DataFrame: - url = "https://wri-dataportal-prod.s3.amazonaws.com/manual/global_power_plant_database_v_1_3.zip" - cols = ["country", "primary_fuel", "capacity_mw"] - response = requests.get(url) - if response.status_code == 200: - with zipfile.ZipFile(io.BytesIO(response.content)) as zfile: - with tempfile.TemporaryDirectory() as tempdir: - zfile.extractall(tempdir) - path = os.path.join(tempdir, "global_power_plant_database.csv") - df = pd.read_csv(path, usecols=cols) - return df.drop_duplicates() - - -def transform_mean_capacity_per_country(df: pd.DataFrame) -> pd.DataFrame: - return df.groupby(["country", "primary_fuel"]).mean() - - -def transform_total_capacity_worldwide(df: pd.DataFrame) -> pd.DataFrame: - return df[["primary_fuel", "capacity_mw"]].groupby(["primary_fuel"]).sum() - - -def load_to_data_lake(df: pd.DataFrame, dataset_name: str) -> None: - """ - For reproducibility on your end, we'll just print the command to load the data to the data lake. 
- """ - command = f""" - wr.s3.to_parquet( - df, - index=True, - dataset=True, - mode="overwrite", - database="default", - table=f"{dataset_name}", - path=f"s3://data-lake-silver/{dataset_name}/", - ) - """ - print(command) - - -def run_etl(): - raw_data = extract() - mean_capacity_per_country = transform_mean_capacity_per_country(raw_data) - total_capacity_worldwide = transform_total_capacity_worldwide(raw_data) - load_to_data_lake(mean_capacity_per_country, "mean_capacity_per_country") - load_to_data_lake(total_capacity_worldwide, "total_capacity_worldwide") - print(mean_capacity_per_country) - print(total_capacity_worldwide) - - -if __name__ == "__main__": - run_etl() diff --git a/examples/scripts/generate_orders.py b/examples/scripts/generate_orders.py deleted file mode 100644 index 1a50241..0000000 --- a/examples/scripts/generate_orders.py +++ /dev/null @@ -1,35 +0,0 @@ -import csv -import random -from faker import Faker -from kestra import Kestra - -fake = Faker() - -# Define the list of columns for the CSV file -columns = ['order_id', 'customer_name', 'customer_email', 'product_id', 'price', 'quantity', 'total'] - -# Generate 100 random orders -orders = [] -for i in range(100): - order_id = i + 1 - customer_name = fake.name() - customer_email = fake.email() - product_id = random.randint(1, 20) - price = round(random.uniform(10.0, 200.0), 2) - quantity = random.randint(1, 10) - total = round(price * quantity, 2) - orders.append([order_id, customer_name, customer_email, product_id, price, quantity, total]) - -# Write the orders to a CSV file -with open('orders.csv', 'w', newline='') as file: - writer = csv.writer(file) - writer.writerow(columns) - writer.writerows(orders) - -# Calculate and print the sum and average of the "total" column -total_sum = sum(order[6] for order in orders) -average_order = round(total_sum / len(orders), 2) -print(f'Total sum: {total_sum}') -print(f'Average Order value: {average_order}') - -Kestra.outputs({'total_sum': total_sum, 'average_order': average_order}) diff --git a/examples/scripts/get_users.py b/examples/scripts/get_users.py deleted file mode 100644 index 1bc1de3..0000000 --- a/examples/scripts/get_users.py +++ /dev/null @@ -1,9 +0,0 @@ -import json -import requests - -URL = "https://gorest.co.in/public/v2/users" -req = requests.get(url=URL) -res = req.json() - -with open("users.json", "w") as f: - json.dump(res, f) diff --git a/examples/scripts/gpu.py b/examples/scripts/gpu.py deleted file mode 100644 index f2b8ff2..0000000 --- a/examples/scripts/gpu.py +++ /dev/null @@ -1,20 +0,0 @@ -from modal import Stub, Image - -stub = Stub("gpu-demo") - - -@stub.function( - gpu="any", - image=( - Image.debian_slim().run_commands( - "pip install torch --extra-index-url https://download.pytorch.org/whl/cu117" - ) - ), -) -def print_gpu_info(): - import torch - - device_nr = torch.cuda.current_device() - gpu_count = torch.cuda.device_count() - device_name = torch.cuda.get_device_name(0) - print(f"Device: {device_nr}, GPU count: {gpu_count}, Device name: {device_name}") diff --git a/examples/scripts/hn_search.py b/examples/scripts/hn_search.py deleted file mode 100644 index 47017df..0000000 --- a/examples/scripts/hn_search.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -modal run hackernews_alerts.py::stub.search_hackernews -""" -from datetime import datetime, timedelta -import requests - - -def search_hackernews(query: str = "kestra", window_size_days: int = 7): - url = "http://hn.algolia.com/api/v1/search" - - threshold = datetime.utcnow() - 
timedelta(days=window_size_days) - - params = { - "query": query, - "numericFilters": f"created_at_i>{threshold.timestamp()}", - } - - response = requests.get(url, params).json() - urls = [item["url"] for item in response["hits"] if item["url"]] - - print(f"Query returned {len(urls)} items.") - print(urls) - - -if __name__ == "__main__": - search_hackernews() diff --git a/examples/scripts/modal_getting_started.py b/examples/scripts/modal_getting_started.py deleted file mode 100644 index 39ce1de..0000000 --- a/examples/scripts/modal_getting_started.py +++ /dev/null @@ -1,20 +0,0 @@ -""" -modal token set --token-id {{secret('MODAL_TOKEN_ID')}} --token-secret {{secret('MODAL_TOKEN_SECRET')}} -modal token set --token-id {{envs.modal_token_id}} --token-secret {{envs.modal_token_secret}} -""" -import modal -from platform import node, platform - -stub = modal.Stub("example") - - -@stub.function() -def square(x): - print("This code is running on a remote worker!") - print(f"Network: {node()}. Instance: {platform()}.") - return x**2 - - -@stub.local_entrypoint() -def main(): - print("the square is", square.call(42)) diff --git a/examples/scripts/save_users_pg.py b/examples/scripts/save_users_pg.py deleted file mode 100644 index 394febe..0000000 --- a/examples/scripts/save_users_pg.py +++ /dev/null @@ -1,19 +0,0 @@ -import json -import os -import pandas as pd -from sqlalchemy import create_engine - -username = os.environ["DB_USERNAME"] -password = os.environ["DB_PASSWORD"] -host = os.environ["DB_HOST"] -port = os.environ["DB_PORT"] - -with open("users.json", "r") as f: - users = json.load(f) - -df_users = pd.DataFrame(users) -df_users["inserted_from"] = "kestra" - -engine = create_engine(f"postgresql://{username}:{password}@{host}:{port}") - -df_users.to_sql("users", engine, if_exists="append", index=False) diff --git a/examples/scripts/vm_info.py b/examples/scripts/vm_info.py deleted file mode 100644 index b42b906..0000000 --- a/examples/scripts/vm_info.py +++ /dev/null @@ -1,16 +0,0 @@ -import modal -from platform import node, platform - -stub = modal.Stub("hello") - - -@stub.function() -def square(x): - print("Hello from Modal!") - print(f"Network: {node()}. 
Instance: {platform()}.") - return x**2 - - -@stub.local_entrypoint() -def main(): - print("the square is", square.call(7)) \ No newline at end of file diff --git a/examples/singer/gh_to_bq.yml b/examples/singer/gh_to_bq.yml deleted file mode 100644 index 8d72bd1..0000000 --- a/examples/singer/gh_to_bq.yml +++ /dev/null @@ -1,45 +0,0 @@ -id: singer -namespace: company.team - -tasks: - - id: list - type: io.kestra.plugin.fs.http.Request - headers: - Accept: application/vnd.github+json - Authorization: Bearer {{ namespace.github.token }} - User-Agent: Kestra - uri: https://api.github.com/orgs/kestra-io/repos?per_page=100 - - - id: github - type: io.kestra.plugin.singer.taps.GitHub - accessToken: "{{ namespace.github.token }}" - repositories: "{{ outputs.list.body | jq('.[].full_name') }}" - startDate: "2019-07-01" - streamsConfigurations: - - replicationMethod: INCREMENTAL - selected: true - - selected: false - stream: projects - - selected: false - stream: project_cards - - selected: false - stream: project_columns - - selected: false - stream: team_memberships - - selected: false - pipPackages: - - tap-github==1.10.4 - - - id: bigquery_target - type: io.kestra.plugin.singer.targets.AdswerveBigQuery - addMetadataColumns: true - datasetId: github - from: "{{ outputs.github.raw }}" - location: EU - projectId: "{{namespace.gcp.projectId}}" - serviceAccount: "{{namespace.gcp.serviceAccount}}" - -triggers: - - id: schedule - type: io.kestra.plugin.core.trigger.Schedule - cron: 0 6 * * * diff --git a/examples/singer/githubToBigquery_working.yml b/examples/singer/githubToBigquery_working.yml deleted file mode 100644 index 5d42002..0000000 --- a/examples/singer/githubToBigquery_working.yml +++ /dev/null @@ -1,39 +0,0 @@ -id: github -namespace: company.team - -tasks: - - id: github - type: io.kestra.plugin.singer.taps.GitHub - accessToken: "{{ secret('GITHUB_ACCESS_TOKEN') }}" - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - pipPackages: - - git+https://github.com/tchiotludo/tap-github.git@master - repositories: ["anna-geller/anna-geller"] - startDate: "2023-05-01" - streamsConfigurations: - - replicationMethod: INCREMENTAL - selected: true - - selected: false - stream: projects - - selected: false - stream: project_cards - - selected: false - stream: project_columns - - selected: false - stream: team_memberships - - - id: bigquery_target - type: io.kestra.plugin.singer.targets.AdswerveBigQuery - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - pipPackages: - - git+https://github.com/adswerve/target-bigquery.git@feature/get-dataset-without-create - from: "{{ outputs.github.raw }}" - addMetadataColumns: true - location: EU - projectId: geller - datasetId: github - serviceAccount: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" diff --git a/examples/singer/postgres.yml b/examples/singer/postgres.yml deleted file mode 100644 index e7cf88f..0000000 --- a/examples/singer/postgres.yml +++ /dev/null @@ -1,34 +0,0 @@ -id: postgres_singer -namespace: company.team - -tasks: - - id: extract_tap - type: io.kestra.plugin.singer.taps.PipelinewisePostgres - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - host: host.docker.internal - port: 5432 - dbName: postgres - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - streamsConfigurations: - - replicationMethod: FULL_TABLE - selected: true - - - id: load_target - type: 
io.kestra.plugin.singer.targets.PipelinewisePostgres - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - host: host.docker.internal - port: 5432 - dbName: demo - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - from: "{{ outputs.extract_tap.raw }}" - addMetadataColumns: true - # primaryKeyRequired: false - defaultTargetSchema: raw - defaultTargetSchemaSelectPermission: stage # might be a list grp_stats - primaryKeyRequired: false diff --git a/examples/singer/postgresToBQ_working.yml b/examples/singer/postgresToBQ_working.yml deleted file mode 100644 index f4659d7..0000000 --- a/examples/singer/postgresToBQ_working.yml +++ /dev/null @@ -1,35 +0,0 @@ -id: postgres_to_bigquery_singer -namespace: company.team -description: | - This flow will extract raw data from a Postgres database and load it into BigQuery using the Singer protocol. - The credentials to both Postgres and BigQuery can be provided as environment variables. - -tasks: - - id: extract - type: io.kestra.plugin.singer.taps.PipelinewisePostgres - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - host: host.docker.internal - port: 5432 - dbName: postgres - username: postgres - password: "{{ secret('DB_PASSWORD') }}" - streamsConfigurations: - - replicationMethod: FULL_TABLE # FULL_TABLE, INCREMENTAL, LOG_BASED -# replicationKeys: last_updated # Incremental load always needs replication key -# selected: true -# stream: string -# propertiesPattern: list of strings - - - id: bigquery_target - type: io.kestra.plugin.singer.targets.AdswerveBigQuery - addMetadataColumns: true - datasetId: singer - taskRunner: - type: io.kestra.plugin.scripts.runner.docker.Docker - containerImage: python:3.8 - from: "{{ outputs.extract.raw }}" - location: EU - projectId: geller - serviceAccount: "{{ secret('GCP_SERVICE_ACCOUNT_JSON') }}" diff --git a/examples/snowflake/bestQueryFlow.yml b/examples/snowflake/bestQueryFlow.yml deleted file mode 100644 index 708ab81..0000000 --- a/examples/snowflake/bestQueryFlow.yml +++ /dev/null @@ -1,67 +0,0 @@ -id: snowflake_query -namespace: company.team - -tasks: - - id: create_database - type: io.kestra.plugin.jdbc.snowflake.Query - sql: CREATE OR REPLACE DATABASE kestra; - - - id: create_table - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - CREATE OR REPLACE TABLE KESTRA.PUBLIC.EMPLOYEES ( - first_name STRING , - last_name STRING , - email STRING , - streetaddress STRING , - city STRING , - start_date DATE - ); - - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/examples/main/datasets/employees00.csv - - - id: loadToInternalStage - type: io.kestra.plugin.jdbc.snowflake.Upload - from: "{{ outputs.extract.uri }}" - fileName: employees00.csv - prefix: raw - stageName: "@kestra.public.%employees" - compress: true - - - id: load_from_stage_to_table - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - COPY INTO KESTRA.PUBLIC.EMPLOYEES - FROM @kestra.public.%employees - FILE_FORMAT = (type = csv field_optionally_enclosed_by='"' skip_header = 1) - PATTERN = '.*employees0[0-9].csv.gz' - ON_ERROR = 'skip_file'; - - - id: analyze - type: io.kestra.plugin.jdbc.snowflake.Query - description: Growth of new hires per month - sql: | - SELECT year(START_DATE) as year, monthname(START_DATE) as month, count(*) as nr_employees - FROM kestra.public.EMPLOYEES - GROUP BY year(START_DATE), monthname(START_DATE) - ORDER BY nr_employees desc; - fetchType: STORE - - - 
id: csv_report - type: io.kestra.plugin.serdes.csv.CsvWriter - from: "{{ outputs.analyze.uri }}" - -pluginDefaults: - - type: io.kestra.plugin.jdbc.snowflake.Query - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - - - type: io.kestra.plugin.jdbc.snowflake.Upload - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" \ No newline at end of file diff --git a/examples/snowflake/fake_employees.py b/examples/snowflake/fake_employees.py deleted file mode 100644 index c9923de..0000000 --- a/examples/snowflake/fake_employees.py +++ /dev/null @@ -1,33 +0,0 @@ -from faker import Faker -import datetime -import csv - -fake = Faker() - -employees = [] - -for _ in range(500): - first_name = fake.first_name() - last_name = fake.last_name() - email = f'{first_name.lower()}.{last_name.lower()}@kestra.io' - street_address = fake.street_address() - city = fake.city() - start_date = fake.date_between(datetime.date(2022, 1, 30), datetime.date(2023, 8, 1)) - - employee = { - "FIRST_NAME": first_name, - "LAST_NAME": last_name, - "EMAIL": email, - "STREETADDRESS": street_address, - "CITY": city, - "START_DATE": start_date - } - - employees.append(employee) - -keys = employees[0].keys() - -with open('employees00.csv', 'w', newline='') as output_file: - dict_writer = csv.DictWriter(output_file, keys) - dict_writer.writeheader() - dict_writer.writerows(employees) diff --git a/examples/snowflake/final.yml b/examples/snowflake/final.yml deleted file mode 100644 index f739e30..0000000 --- a/examples/snowflake/final.yml +++ /dev/null @@ -1,64 +0,0 @@ -id: snowflake_tutorial -namespace: company.team -description: | - This flow is an end-to-end tutorial for Snowflake. - It creates a database and a table. - It extracts data from an external source, and loads that data as a CSV file into Snowflake's internal stage. - The CSV file uploaded to stage is then loaded into the table. - Finally, a Query task validates that everything works as expected by running a query on the table and fetches the results to Kestra's internal storage. 
- -tasks: - - id: create_database - type: io.kestra.plugin.jdbc.snowflake.Query - sql: CREATE OR REPLACE DATABASE kestra; - - - id: create_table - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - CREATE OR REPLACE TABLE KESTRA.PUBLIC.EMPLOYEES ( - first_name STRING , - last_name STRING , - email STRING , - streetaddress STRING , - city STRING , - start_date DATE - ); - - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: https://huggingface.co/datasets/kestra/datasets/raw/main/csv/orders.csv - - - id: load_to_internal_stage - type: io.kestra.plugin.jdbc.snowflake.Upload - from: "{{ outputs.extract.uri }}" - fileName: employees00.csv - prefix: raw - stageName: "@kestra.public.%employees" - compress: true - - - id: load_from_stage_to_table - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - COPY INTO KESTRA.PUBLIC.EMPLOYEES - FROM @kestra.public.%employees - FILE_FORMAT = (type = csv field_optionally_enclosed_by='"' skip_header = 1) - PATTERN = '.*employees0[0-9].csv.gz' - ON_ERROR = 'skip_file'; - - - id: download_result - type: io.kestra.plugin.jdbc.snowflake.Query - sql: SELECT * FROM KESTRA.PUBLIC.EMPLOYEES; - fetchType: STORE - -pluginDefaults: - - type: io.kestra.plugin.jdbc.snowflake.Query - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - - - type: io.kestra.plugin.jdbc.snowflake.Upload - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" diff --git a/examples/snowflake/parallelLoad.yml b/examples/snowflake/parallelLoad.yml deleted file mode 100644 index 0f0267e..0000000 --- a/examples/snowflake/parallelLoad.yml +++ /dev/null @@ -1,74 +0,0 @@ -id: parallel_load -namespace: company.team - -tasks: - - id: create_database - type: io.kestra.plugin.jdbc.snowflake.Query - sql: CREATE OR REPLACE DATABASE kestra; - - - id: create_table - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - CREATE OR REPLACE TABLE KESTRA.PUBLIC.EMPLOYEES ( - first_name STRING , - last_name STRING , - email STRING , - streetaddress STRING , - city STRING , - start_date DATE - ); - - - id: extract_parallel - type: io.kestra.plugin.core.flow.ForEach - concurrencyLimit: 0 - values: - - employees01.csv - - employees02.csv - - employees03.csv - - employees04.csv - - employees05.csv - tasks: - - id: extract - type: io.kestra.plugin.fs.http.Download - uri: https://raw.githubusercontent.com/kestra-io/examples/main/datasets/{{ taskrun.value }} - - - id: load_parallel - type: io.kestra.plugin.core.flow.ForEach - concurrencyLimit: 0 - values: - - employees01.csv - - employees02.csv - - employees03.csv - - employees04.csv - - employees05.csv - tasks: - - id: load - type: io.kestra.plugin.jdbc.snowflake.Upload - from: "{{ outputs.extract.uri }}" - fileName: "{{ outputs.extract.uri }}" - prefix: raw - stageName: "@kestra.public.%employees" - compress: true - disabled: false - - - id: load_from_stage - type: io.kestra.plugin.jdbc.snowflake.Query - sql: | - COPY INTO KESTRA.PUBLIC.EMPLOYEES - FROM @kestra.public.%employees - FILE_FORMAT = (type = csv field_optionally_enclosed_by='"') - PATTERN = 'raw/employees0[1-5].csv.gz' - ON_ERROR = 'skip_file'; - disabled: false - -pluginDefaults: - - type: io.kestra.plugin.jdbc.snowflake.Query - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: 
"{{ secret('SNOWFLAKE_PASSWORD') }}" - - type: io.kestra.plugin.jdbc.snowflake.Upload - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" \ No newline at end of file diff --git a/examples/snowflake/query.yml b/examples/snowflake/query.yml deleted file mode 100644 index 8c303ec..0000000 --- a/examples/snowflake/query.yml +++ /dev/null @@ -1,39 +0,0 @@ -id: snowflake_query -namespace: company.team - -inputs: - - name: myfile - type: FILE - -tasks: - - id: query - type: io.kestra.plugin.jdbc.snowflake.Query - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - # fetchType: FETCH_ONE - autoCommit: false - sql: | - SELECT * FROM SNOWFLAKE_SAMPLE_DATA.TPCH_SF1.CUSTOMER LIMIT 5 - - - id: upload - type: io.kestra.plugin.jdbc.snowflake.Upload - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - database: DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - from: "{{inputs.myfile}}" - compress: true - fileName: orders.csv - prefix: raw - stageName: raw - - - id: snow - type: io.kestra.plugin.jdbc.snowflake.Download - compress: true - fileName: report - stageName: mytable - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - database: DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" diff --git a/examples/snowflake/queryTrigger.yml b/examples/snowflake/queryTrigger.yml deleted file mode 100644 index b67c95c..0000000 --- a/examples/snowflake/queryTrigger.yml +++ /dev/null @@ -1,41 +0,0 @@ -id: snowflake_query_trigger -namespace: company.team - -tasks: - - id: each - type: io.kestra.plugin.core.flow.ForEach - values: "{{ trigger.rows }}" - tasks: - - id: welcome_message - type: io.kestra.plugin.scripts.shell.Commands - runner: PROCESS - commands: - - echo "{{ json(taskrun.value) }}" - - echo "Welcome to Kestra {{ json(taskrun.value).FIRST_NAME }} {{ json(taskrun.value).LAST_NAME }}" - - - id: delete - type: io.kestra.plugin.jdbc.snowflake.Query - description: Delete rows to avoid double trigger - sql: DELETE FROM KESTRA.PUBLIC.EMPLOYEES WHERE START_DATE >= '2023-08-01' - # TRUNCATE TABLE KESTRA.PUBLIC.EMPLOYEES; - -pluginDefaults: - - type: io.kestra.plugin.jdbc.snowflake.Trigger - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - - - type: io.kestra.plugin.jdbc.snowflake.Query - values: - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - -triggers: - - id: wait - type: io.kestra.plugin.jdbc.snowflake.Trigger - sql: "SELECT * FROM KESTRA.PUBLIC.EMPLOYEES WHERE START_DATE >= '2023-08-01';" - interval: "PT5S" - fetchType: FETCH - disabled: false diff --git a/examples/snowflake/stage.yml b/examples/snowflake/stage.yml deleted file mode 100644 index 66d91fc..0000000 --- a/examples/snowflake/stage.yml +++ /dev/null @@ -1,21 +0,0 @@ -id: stage -namespace: dev - -tasks: - - id: query - type: io.kestra.plugin.jdbc.snowflake.Query - url: jdbc:snowflake://.snowflakecomputing.com?warehouse=DEMO - username: "{{ secret('SNOWFLAKE_USERNAME') }}" - password: "{{ secret('SNOWFLAKE_PASSWORD') }}" - # fetchOne: true - # autoCommit: false - 
database: demo - schema: public - sql: | - CREATE OR REPLACE STAGE SALES_STAGE; - - sql: | - "{% for row in outputs.update.rows %} INSERT INTO employees - (first_name, last_name, email, streetaddress, city, start_date) - values ({{ row.first_name }}, {{ row.last_name }}, {{ row.email }}, {{ row.streetaddress }}, - {{ row.city }}, {{ row.start_date }}); {% endfor %}" diff --git a/examples/spark/airbyteSpark.yml b/examples/spark/airbyteSpark.yml deleted file mode 100644 index 53c77e6..0000000 --- a/examples/spark/airbyteSpark.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: airbyte_spark -namespace: company.team - -tasks: - - id: data_ingestion - type: io.kestra.plugin.airbyte.connections.Sync - connectionId: e3b1ce92-547c-436f-b1e8-23b6936c12ab - url: http://host.docker.internal:8000/ - username: "{{ secret('AIRBYTE_USERNAME') }}" - password: "{{ secret('AIRBYTE_PASSWORD') }}" - - - id: spark - type: io.kestra.plugin.spark.PythonSubmit - master: spark://localhost:7077 - mainScript: | - paste your Spark script here or point to file from Git - -triggers: - - id: daily - type: io.kestra.plugin.core.trigger.Schedule - cron: "0 9 * * *" - disabled: true \ No newline at end of file diff --git a/examples/triggers/backfill.yml b/examples/triggers/backfill.yml deleted file mode 100644 index a65d61e..0000000 --- a/examples/triggers/backfill.yml +++ /dev/null @@ -1,35 +0,0 @@ -id: yellow_taxi_data_backfill -namespace: company.team - -inputs: - - name: month_year - type: STRING - defaults: 2023-01 - -tasks: - - id: download_from_inputs - type: io.kestra.plugin.core.http.Download - uri: "https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_{{ inputs.month_year }}.parquet" - method: GET - disabled: true - - - id: download_based_on_date - type: io.kestra.plugin.core.http.Download - uri: "https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_{{ schedule.date ?? execution.startDate | date('yyyy-MM') }}.parquet" - - - id: shove_it_to_s3 - type: io.kestra.plugin.aws.s3.Upload - from: "{{ outputs.download_based_on_date.uri }}" - key: "yellow_taxi/raw/{{ schedule.date ?? execution.startDate | date('yyyy-MM') }}/yellow_taxi_{{ schedule.date ?? 
execution.startDate | date('yyyy-MM') }}.parquet" - bucket: annageller - region: eu-central-1 - accessKeyId: "{{secret('AWS_ACCESS_KEY_ID')}}" - secretKeyId: "{{secret('AWS_SECRET_ACCESS_KEY')}}" - -triggers: - - id: schedule - type: io.kestra.core.trigger.Schedule - cron: "0 8 1 * *" - disabled: true - backfill: - start: 2023-01-01T00:00:00Z \ No newline at end of file diff --git a/examples/triggers/variables_in_triggers.yml b/examples/triggers/variables_in_triggers.yml deleted file mode 100644 index 04a6786..0000000 --- a/examples/triggers/variables_in_triggers.yml +++ /dev/null @@ -1,22 +0,0 @@ -id: variables_in_trigger -namespace: company.team - -inputs: - - name: myinput - type: STRING - defaults: hello - -variables: - myvar: myvalue - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: hey "{{ inputs.myinput }}" - -triggers: - - id: minutely - type: io.kestra.plugin.core.trigger.Schedule - cron: "*/1 * * * *" - inputs: - myinput: "{{ vars.myvar }}" diff --git a/examples/triggers/webhookTrigger.yml b/examples/triggers/webhookTrigger.yml deleted file mode 100644 index 25b4ba4..0000000 --- a/examples/triggers/webhookTrigger.yml +++ /dev/null @@ -1,15 +0,0 @@ -id: webhook_trigger -namespace: company.team -description: | - To trigger that flow using `curl`: - curl -X GET "http://localhost:8080/api/v1/executions/webhookTrigger/prod/webhook/ThisIsaSuperSecretKey42" -H "accept: text/json" - -tasks: - - id: hello - type: io.kestra.plugin.core.log.Log - message: Hello from a flow triggered by a webhook - -triggers: - - id: webhook - type: io.kestra.core.models.triggers.types.Webhook - key: "{{ secret('WEBHOOK_KEY') }}" diff --git a/examples/trino/README.md b/examples/trino/README.md deleted file mode 100644 index 14c9495..0000000 --- a/examples/trino/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Don't use semicolon at the end, otherwise query will fail. 
- - -Query failed (#20230628_105423_00044_t492g): This connector does not support inserts - -Query failed (#20230628_105317_00039_t492g): This connector does not support modifying table rows - -Password is trino - -Must use port 8090, otherwise it will reuse Kestra port \ No newline at end of file diff --git a/examples/trino/query.yml b/examples/trino/query.yml deleted file mode 100644 index f53b633..0000000 --- a/examples/trino/query.yml +++ /dev/null @@ -1,32 +0,0 @@ -id: trino_query -namespace: company.team - -tasks: - - id: count_initial - type: io.kestra.plugin.jdbc.trino.Query - sql: select count(*) as nr_rows from tpch.tiny.customer - fetchType: FETCH - - - id: extract - type: io.kestra.plugin.jdbc.trino.Query - sql: select * from tpch.sf1.customer LIMIT 5 - fetchType: FETCH - - - id: load - type: io.kestra.plugin.jdbc.trino.Query - sql: | - "{% for row in outputs.extract.rows %} insert into tpch.tiny.customer - (custkey, name, address, nationkey, phone, acctbal, mktsegment, comment) values - ({{ row.custkey }}, '{{ row.name }}', '{{ row.address }}', {{ row.nationkey }}, - '{{ row.phone }}', {{ row.acctbal }}, '{{ row.mktsegment }}', '{{ row.comment }}'); {% endfor %}" - - - id: count_after_load - type: io.kestra.plugin.jdbc.trino.Query - sql: select count(*) as nr_rows from tpch.tiny.customer - fetchType: FETCH - -pluginDefaults: - - type: io.kestra.plugin.jdbc.trino.Query - values: - url: jdbc:trino://host.docker.internal:8090/tpch - username: trino diff --git a/examples/trino/simpleQuery.yml b/examples/trino/simpleQuery.yml deleted file mode 100644 index 256fbe9..0000000 --- a/examples/trino/simpleQuery.yml +++ /dev/null @@ -1,20 +0,0 @@ -id: simpleQuery -namespace: dev - -tasks: - - id: analyzeOrders - type: io.kestra.plugin.jdbc.trino.Query - url: jdbc:trino://host.docker.internal:8090/tpch - username: trino - sql: | - select orderpriority, sum(totalprice) as total - from tpch.tiny.orders - group by orderpriority - order by orderpriority - store: true - - - id: csvReport - type: io.kestra.plugin.serdes.csv.CsvWriter - from: "{{outputs.analyzeOrders.uri}}" - -