diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000..f3283b0 --- /dev/null +++ b/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar diff --git a/CHANGELOG.md b/CHANGELOG.md index 6edb00a..b5c29ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,60 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [4.11.0] - 2023-12-03 +### Added +- Add support for connections with multiple contact points using different ports (see feature request + [#41](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/41)). +- Handle additional types and conversions in the methods `CassandraPreparedStatement.setObject()`: + - JDBC types `BLOB`, `CLOB`, `NCLOB` and Java types `java.sql.Blob`, `java.sql.Clob`, and `java.sql.NClob` handled as + arrays of bytes (CQL type `blob`) + - JDBC types `LONGVARCHAR`, `NCHAR`, `NVARCHAR`, `LONGNVARCHAR` and `DATALINK` and Java type `java.net.URL` handled + as string (CQL types `text`, `varchar` and `ascii`) + - JDBC type `TIME_WITH_TIMEZONE` and Java types `java.time.OffsetTime` and `java.time.LocalTime` handled as + `LocalTime` (CQL type `time`) + - JDBC type `TIMESTAMP_WITH_TIMEZONE` and Java types `java.util.OffsetDateTime`, `java.time.LocalDateTime`, + `java.util.Date` and `java.util.Calendar` handled as `Instant` (CQL type `timestamp`) + - Java type `java.time.LocalDate` (CQL type `date`) + - JDBC type `BIT` handled as boolean (CQL type `boolean`) + - JDBC type `NUMERIC` handled as `BigDecimal` (CQL type `decimal`) + - JDBC type `REAL` handled as float number (CQL type `float`) +- Handle `java.util.Calendar` in the methods `CassandraResultSet.getObject(int | String, Class)`. +- Implement the following methods in `CassandraResultSet`: `getAsciiStream(int | String)`, + `getCharacterStream(int | String)`, `getClob(int | String)`, `getNClob(int | String)`. +### Changed +- Deprecate the parameter `version` (CQL version) in JDBC URL because this one is purely informational and has no + effect. This will be removed in the next release. +- The index type returned by `CassandraDatabaseMetaData.getIndexInfo(String, String, String, boolean, boolean)` is + now always `tableIndexOther`. +- Improve the accuracy of the JDBC metadata of the collection types (`list`, `map`, `set` and `vector`). 
+- Update the following methods of `CassandraDatabaseMetaData`: `getNumericFunctions()`, `getSQLKeywords()`, + `getSystemFunctions()`, `getTimeDateFunctions()` and `getTypeInfo()` to add the new math, date/time and + [data masking](https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-20%3A+Dynamic+Data+Masking) + functions introduced in Cassandra 5.0 and take into account the version of the database the driver is connected to. +- Update Apache Commons IO to version 2.15.0. +- Update Apache Commons Lang to version 3.14.0. +- Update Jackson dependencies to version 2.16.0. +- Use Apache Cassandra® 5.0 image to run tests. +- Replace references to "DataStax Java driver" with "Java Driver for Apache Cassandra®" following the transfer of the + codebase to the Apache Software Foundation (see: + [IP clearance status](https://incubator.apache.org/ip-clearance/cassandra-java-driver.html) and + [CEP-8](https://cwiki.apache.org/confluence/x/5Y1rDQ)) +### Fixed +- Fix `NullPointerException` issue [#38](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/38) when a null + type name pattern is specified in a call to `CassandraDatabaseMetaData.getUDTs(String, String, String, int[])`. +- Fix issue [#39](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/39): return `false` when the method + `isSearchable(int)` is called on the metadata of a result set without table or schema name (typically on + `CassandraMetadataResultSet`s). +- Fix incorrect consistency level used to execute simple prepared statements. +- Fix issue preventing the retrieval of the metadata of an empty `CassandraMetadataResultSet`. +- Add null safety on some methods of `CassandraResultSet` and `CassandraMetadataResultSet`. + ## [4.10.2] - 2023-11-01 ### Fixed - Fix issue [#33](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/33) to handle `VARBINARY` and `LONGVARBINARY` types with either `ByteArrayInputStream` or `byte[]` in the methods `CassandraPreparedStatement.setObject()`. -- Fix issue [#35](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/35) to fix configuration of the local +- Fix issue [#35](https://github.com/ing-bank/cassandra-jdbc-wrapper/issues/35): configuration of the local datacenter using the one from the configuration file when such a file is used. ## [4.10.1] - 2023-10-07 @@ -33,7 +81,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Implement the following methods of `CassandraDatabaseMetaData`: `getBestRowIdentifier(String, String, String, int, boolean)` and `getAttributes(String, String, String, String)`. ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.17.0. +- Update DataStax Java Driver for Apache Cassandra® to version 4.17.0. - Update Apache Commons IO to version 2.13.0. - Update Apache Commons Lang to version 3.13.0. - Update Jackson dependencies to version 2.15.2. @@ -71,7 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) `getUDTs(String, String, String, int[])`. ### Changed - Harmonize the implementations of `Wrapper` interface. -- Rewrite the tests using Testcontainers with Apache Cassandra(R) 4.1.0 image. +- Rewrite the tests using Testcontainers with Apache Cassandra® 4.1.0 image. - Modify the implementation of `setQueryTimeout(int)` and `getQueryTimeout()` in `CassandraStatement` to update the request timeout on a specific statement.
### Removed @@ -90,7 +138,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) `CassandraPreparedStatement` of `PreparedStatement` interface. It fixes the issue [#19](https://github.com/adejanovski/cassandra-jdbc-wrapper/issues/19) of the [original project]. ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.15.0. +- Update DataStax Java Driver for Apache Cassandra® to version 4.15.0. - Fully implement methods from `Wrapper` interface for Cassandra connections, results sets and statements (see pull request [#14](https://github.com/ing-bank/cassandra-jdbc-wrapper/pull/14)). ### Fixed @@ -108,7 +156,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add an additional `CassandraConnection` constructor using a pre-existing session (see pull request [#8](https://github.com/ing-bank/cassandra-jdbc-wrapper/pull/8)). ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.14.1. +- Update DataStax Java Driver for Apache Cassandra® to version 4.14.1. ## [4.6.0] - 2022-03-20 ### Added @@ -118,7 +166,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add query parameter `requesttimeout` to specify a non-default timeout for queries. See the feature request [#5](https://github.com/ing-bank/cassandra-jdbc-wrapper/discussions/5). ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.14.0. +- Update DataStax Java Driver for Apache Cassandra® to version 4.14.0. - Update Apache Commons Lang to version 3.12.0. ### Removed - Remove `cassandra-all` and `libthrift` dependencies to limit exposure to vulnerable libraries (see pull request @@ -138,7 +186,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add codecs for conversions between `Integer` and CQL types `varint`, `smallint` and `tinyint`. It also fixes the issue [#33](https://github.com/adejanovski/cassandra-jdbc-wrapper/issues/33) of the [original project]. ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.10.0. +- Update DataStax Java Driver for Apache Cassandra® to version 4.10.0. - Update `cassandra-all` to version 3.11.9. - Improve documentation and code quality (refactoring, removing dead code, adding tests, ...). - Improve the implementation of the metadata precision/size for the columns. @@ -159,7 +207,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## 4.4.0 - 2020-12-23 For this version, the changelog lists the main changes comparatively to the latest version of the [original project]. ### Changed -- Update DataStax Java Driver for Apache Cassandra(R) to version 4.9.0. +- Update DataStax Java Driver for Apache Cassandra® to version 4.9.0. - Update `cassandra-all` to version 3.11.8. - Force using `libthrift` 0.13.0 instead of the vulnerable version included into `cassandra-all`. - Manage separately the type `LocalDate` in `CassandraResultSet`. @@ -171,6 +219,7 @@ For this version, the changelog lists the main changes comparatively to the late - Fix logs in `CassandraConnection` constructor. 
[original project]: https://github.com/adejanovski/cassandra-jdbc-wrapper/ +[4.11.0]: https://github.com/ing-bank/cassandra-jdbc-wrapper/compare/v4.10.2...v4.11.0 [4.10.2]: https://github.com/ing-bank/cassandra-jdbc-wrapper/compare/v4.10.1...v4.10.2 [4.10.1]: https://github.com/ing-bank/cassandra-jdbc-wrapper/compare/v4.10.0...v4.10.1 [4.10.0]: https://github.com/ing-bank/cassandra-jdbc-wrapper/compare/v4.9.1...v4.10.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e099b4..32ed4b7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -52,6 +52,12 @@ To run the tests, execute the following command: ``` mvn test ``` +Note that running the tests requires **Docker** to be installed on your machine. +Most of the tests in this project are based on +[Testcontainers for Cassandra](https://java.testcontainers.org/modules/databases/cassandra/), because testing a JDBC API +implementation requires ensuring that the driver is able to connect to a database and execute queries correctly. +For example, widely used JDBC drivers like those for [PostgreSQL](https://github.com/pgjdbc/pgjdbc) or +[MS SQL Server](https://github.com/Microsoft/mssql-jdbc/) are also tested against a real database. ### Submit a pull request @@ -63,12 +69,20 @@ Once your changes and tests are ready for review, submit them: to verify it or simply run `mvn clean install` and check the logs). 3. Rebase your changes: update your local repository with the most recent code from the original repository, and rebase -your branch on top of the latest `release/next` branch. It is better that your initial changes are squashed into a -single commit. If more changes are required to validate the pull request, we invite you to add them as separate commits. + your branch on top of the latest `release/next` branch. It is better that your initial changes are squashed into a + single commit. If more changes are required to validate the pull request, we invite you to add them as separate + commits. 4. Finally, push your local changes to your forked repository and submit a pull request into the branch `release/next` -with a title which sums up the changes that you have made (try to not exceed 50 characters), and provide more details in -the body. If necessary, also mention the number of the issue solved by your changes, e.g. "Closes #123". + with a title which sums up the changes that you have made (try to not exceed 50 characters), and provide more details + in the body. If necessary, also mention the number of the issue solved by your changes, e.g. "Closes #123". + +### About dependencies + +If your changes require adding a new dependency or updating an existing one, first check the following points: +* the dependency is the latest stable version of the library compatible with JDK 8 +* the dependency does not introduce vulnerabilities +* the version of the dependency is specified in a property `.version` in `pom.xml`. ### License headers diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 0000000..c310184 --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,40 @@ +JDBC wrapper of the Java Driver for Apache Cassandra® +Copyright 2020- ING Bank + +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). + +Apache Cassandra Java Driver +Copyright 2012- The Apache Software Foundation +This product includes software developed as part of the Apache Cassandra Java Driver project +( https://github.com/apache/cassandra-java-driver ).
+ +Apache Commons Collections +Copyright 2001-2023 The Apache Software Foundation +This product includes software developed as part of the Apache Commons Collections project +( https://github.com/apache/commons-collections ). + +Apache Commons IO +Copyright 2002-2023 The Apache Software Foundation +This product includes software developed as part of the Apache Commons IO project +( https://github.com/apache/commons-io ). + +Apache Commons Lang +Copyright 2001-2023 The Apache Software Foundation +This product includes software developed as part of the Apache Commons Lang project +( https://github.com/apache/commons-lang ). + +FasterXML Jackson +Copyright 2012 FasterXML.com +This product includes software developed as part of the Jackson project +( https://github.com/FasterXML/jackson ). + +Caffeine +Copyright 2018 Ben Manes +This product includes software developed as part of the Caffeine project +( https://github.com/ben-manes/caffeine ). + +Semver4j +Copyright 2022-present Semver4j contributors +This product includes software developed as part of the Semver4j project +( https://github.com/semver4j/semver4j ), which is available under a MIT license. +For details, see https://github.com/semver4j/semver4j/blob/main/LICENSE diff --git a/README.md b/README.md index 5128dd4..bfdd68b 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ -# Cassandra JDBC wrapper for the Datastax Java Driver +# JDBC wrapper of the Java Driver for Apache Cassandra® [![Apache 2.0 License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.txt) ![Build Status](https://img.shields.io/github/actions/workflow/status/ing-bank/cassandra-jdbc-wrapper/ci-workflow.yml) [![Maven Central](https://img.shields.io/maven-central/v/com.ing.data/cassandra-jdbc-wrapper)](https://search.maven.org/search?q=g:com.ing.data%20AND%20cassandra-jdbc-wrapper) [![Javadoc](https://javadoc.io/badge2/com.ing.data/cassandra-jdbc-wrapper/javadoc.svg)](https://javadoc.io/doc/com.ing.data/cassandra-jdbc-wrapper) -This is a JDBC wrapper of the DataStax Java Driver for Apache Cassandra (C*), which offers a simple JDBC compliant +This is a JDBC wrapper of the Java Driver for Apache Cassandra®, which offers a simple JDBC compliant API to work with CQL3. This JDBC wrapper is based on a fork of the project @@ -27,14 +27,14 @@ The JDBC wrapper offers access to most of the core module features: ### Prerequisites -The wrapper uses DataStax Java driver for Apache Cassandra(R) 4.4.0 or greater. This driver is designed for Apache -Cassandra(R) 2.1+ and DataStax Enterprise (5.0+). So, it will throw "unsupported feature" exceptions if used against an +The wrapper uses Java Driver for Apache Cassandra® 4.4.0 or greater. This driver is designed for Apache +Cassandra® 2.1+ and DataStax Enterprise (5.0+). So, it will throw "unsupported feature" exceptions if used against an older version of Cassandra cluster. For more information, please check the [compatibility matrix](https://docs.datastax.com/en/driver-matrix/doc/driver_matrix/javaDrivers.html) and read the [driver documentation](https://docs.datastax.com/en/developer/java-driver/latest/). -If you are having issues connecting to the cluster (seeing `NoHostAvailableConnection` exceptions) please check the -[connection requirements](https://github.com/datastax/java-driver/wiki/Connection-requirements). 
+If you encounter issues connecting to the cluster (seeing `NoHostAvailableConnection` exceptions), please check that your +configuration is correct and that you specified a valid local datacenter if you use the default load-balancing policy. This project requires Java 8 JDK (minimum). @@ -47,7 +47,12 @@ git clone https://github.com/ing-bank/cassandra-jdbc-wrapper.git To compile and run tests, execute the following Maven command: ```bash -mvn clean package +./mvnw clean package +``` + +To build a bundled version of the JDBC wrapper, run the following command: +```bash +./mvnw clean package -Pbundle ``` #### Some considerations about running tests @@ -55,7 +60,7 @@ If for some reason the tests using DataStax Enterprise server (`*DseContainerTest`) fail in your local environment, you might disable them using the Maven profile `disableDseTests`: ```bash -mvn clean package -PdisableDseTests +./mvnw clean package -PdisableDseTests ``` The test suite also includes integration tests with AstraDB (`DbaasAstraIntegrationTest`). These tests require an @@ -81,6 +86,30 @@ You can install it in your application using the following Gradle dependency: implementation 'com.ing.data:cassandra-jdbc-wrapper:${cassandra-jdbc-wrapper.version}' ``` +### Other integrations + +To use this JDBC wrapper for Apache Cassandra® in database administration tools such as DBeaver Community Edition or +JetBrains DataGrip, you can have a look at the following links: +* [connecting DBeaver to Cassandra cluster](https://stackoverflow.com/a/77100652/13292108) +* [connecting DataGrip to Cassandra cluster](https://awesome-astra.github.io/docs/pages/data/explore/datagrip/); note + this example uses the Astra JDBC driver (based on this project), so refer to the "Usage" section below to adapt driver + class and JDBC URL values. + +This JDBC wrapper for Apache Cassandra® is also used to run +[Liquibase for Cassandra databases](https://github.com/liquibase/liquibase-cassandra) (from Liquibase 4.25.0). To execute Liquibase scripts on +your Cassandra database, specify the following properties in your Liquibase properties file: +``` +driver: com.ing.data.cassandra.jdbc.CassandraDriver +url: jdbc:cassandra://<host>:<port>/<keyspace>?compliancemode=Liquibase +``` +See the "Usage" section below for further details about the allowed parameters in the JDBC URL. +For further details about Liquibase usage, please check the +[official documentation](https://contribute.liquibase.com/extensions-integrations/directory/database-tutorials/cassandra/). + +> _Note:_ Version 4.25.0 of the Liquibase extension for Cassandra is currently affected by an issue preventing it +> from working correctly. See [this issue](https://github.com/liquibase/liquibase-cassandra/issues/242) for further
> information. + ## Usage Connect to a Cassandra cluster using the following arguments: @@ -89,15 +118,27 @@ Connect to a Cassandra cluster using the following arguments: please read the section "[Connecting to DBaaS](#connecting-to-dbaas)"; to use a configuration file, please read the section "[Using a configuration file](#using-a-configuration-file)") -You can give the driver any number of hosts you want separated by "--". +You can give the driver any number of hosts you want separated by "--". You can optionally specify a port for each host. +If only one port is specified after all the listed hosts, it applies to all hosts. If no port is specified at all, the +default Cassandra port (9042) is used. They will be used as contact points for the driver to discover the entire cluster.
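For illustration, opening a connection with several contact points through the standard `DriverManager` API could look like the following minimal sketch (the host names, the keyspace and the `localdatacenter` parameter name and value are placeholders/assumptions, not values taken from this repository):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class MultipleContactPointsExample {
    public Connection connect() throws SQLException {
        // Three contact points: the single port given after the last host (9043) applies to all listed hosts.
        // A local datacenter is passed because the default load-balancing policy requires one.
        return DriverManager.getConnection(
            "jdbc:cassandra://host1--host2--host3:9043/my_keyspace?localdatacenter=DC1");
    }
}
```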
Give enough hosts taking into account that some nodes may be unavailable upon establishing the JDBC connection. +Here are some examples of connection strings with single or multiple contact points: + +| Valid JDBC URL | Contact points used for connection | +|--------------------------------------------------------------|------------------------------------| +| jdbc:cassandra://localhost/keyspace | localhost:9042 | +| jdbc:cassandra://localhost:9043/keyspace | localhost:9043 | +| jdbc:cassandra://host1--host2/keyspace | host1:9042, host2:9042 | +| jdbc:cassandra://host1--host2:9043/keyspace | host1:9043, host2:9043 | +| jdbc:cassandra://host1:9042--host2--host3:9043/keyspace | host1:9042, host2:9043, host3:9043 | + You also have to specify the name of the local data center to use when the default load balancing policy is defined (see paragraph below about load balancing policies) and no configuration file is specified. Statements and prepared statements can be executed as with any JDBC driver, but note that queries must be expressed in -CQL3. +CQL3 (Cassandra Query Language). Java example: ```java @@ -154,7 +195,7 @@ jdbc:cassandra://host1--host2--host3:9042/keyspace?requesttimeout=5000 ### Specifying load balancing policies -In versions 4+ of DataStax Java driver for Apache Cassandra(R), the load balancing is defined with +In versions 4+ of Java Driver for Apache Cassandra®, the load balancing is defined with `DefaultLoadBalancingPolicy` by default (see [Load balancing](https://docs.datastax.com/en/developer/java-driver/latest/manual/core/load_balancing/) documentation). @@ -174,7 +215,7 @@ The custom policy must implement `LoadBalancingPolicy` interface. ### Specifying retry policies -In versions 4+ of DataStax Java driver for Apache Cassandra(R), the retry policy is defined with `DefaultRetryPolicy` by +In versions 4+ of Java Driver for Apache Cassandra®, the retry policy is defined with `DefaultRetryPolicy` by default (see [Retries](https://docs.datastax.com/en/developer/java-driver/latest/manual/core/retries/) documentation). However, if you want to use a custom policy, add a `retry` argument to the JDBC URL and give the full package of the @@ -187,7 +228,7 @@ The custom policy must implement `RetryPolicy` interface. ### Specifying reconnection policies -In versions 4+ of DataStax Java driver for Apache Cassandra(R), the reconnection policy is defined with +In versions 4+ of Java driver for Apache Cassandra®, the reconnection policy is defined with `ExponentialReconnectionPolicy` by default (see [Reconnection](https://docs.datastax.com/en/developer/java-driver/latest/manual/core/reconnection/) documentation). @@ -278,7 +319,8 @@ For further information about custom implementations of `SslEngineFactory`, see An alternative JDBC driver based on this one exists to ease the connection to the cloud [Cassandra-based DBaaS AstraDB](https://www.datastax.com/astra) cluster: -[Astra JDBC driver](https://github.com/DataStax-Examples/astra-jdbc-connector/tree/main). Do not hesitate to use it if you are in this specific situation. +[Astra JDBC driver](https://github.com/DataStax-Examples/astra-jdbc-connector/tree/main). +Do not hesitate to use it if you are in this specific situation. 
It's still possible to connect to AstraDB using this JDBC wrapper, so one would need to specify: * `secureconnectbundle`: the fully qualified path of the cloud secure connect bundle file @@ -329,11 +371,11 @@ public class HelloCassandra { public void selectValuesFromCassandra(final Connection connection) { final Statement statement = connection.createStatement(); final ResultSet result = statement.executeQuery( - "SELECT bValue, iValue FROM test_table WHERE keyname = 'key0';" + "SELECT b_value, i_value FROM test_table WHERE keyname = 'key0';" ); while (result.next()) { - System.out.println("bValue = " + result.getBoolean("bValue")); - System.out.println("iValue = " + result.getInt("iValue")); + System.out.println("b_value = " + result.getBoolean("b_value")); + System.out.println("i_value = " + result.getInt("i_value")); } } } @@ -393,6 +435,8 @@ public class HelloCassandra { preparedStatement.setObject(2, "test"); // ascii final ByteArrayInputStream baInputStream = new ByteArrayInputStream("test".getBytes(StandardCharsets.UTF_8)); preparedStatement.setObject(3, baInputStream); // blob + // Alternatively, you can also use byte arrays for blobs: + preparedStatement.setObject(3, "test".getBytes(StandardCharsets.UTF_8)); preparedStatement.setObject(4, true); // boolean preparedStatement.setObject(5, new BigDecimal(5.1)); // decimal preparedStatement.setObject(6, (double) 5.1); // double @@ -437,8 +481,8 @@ public class HelloCassandra { #### Insert/update -There are two ways to insert/update data using asynchronous queries. The first is to use JDBC batches (we're not talking -about Cassandra atomic batches here). +There are two ways to insert/update data using asynchronous queries. The first one is to use JDBC batches (we're not +talking about Cassandra atomic batches here). With simple statements: ```java @@ -446,8 +490,8 @@ public class HelloCassandra { public void insertUsingJdbcBatches(final Connection connection) { final Statement statement = connection.createStatement(); - for(int i = 0; i < 10; i++){ - statement.addBatch("INSERT INTO testCollection (keyValue, lValue) VALUES (" + i + ", [1, 3, 12345])"); + for (int i = 0; i < 10; i++) { + statement.addBatch("INSERT INTO test_table (key_value, l_value) VALUES (" + i + ", [1, 3, 12345])"); } final int[] counts = statement.executeBatch(); @@ -461,7 +505,7 @@ With prepared statements: public class HelloCassandra { public void insertUsingJdbcBatches(final Connection connection) { final PreparedStatement statement = connection.prepareStatement( - "INSERT INTO testCollection (keyValue, lValue) VALUES (?, ?)" + "INSERT INTO test_table (key_value, l_value) VALUES (?, ?)" ); for (int i = 0; i < 10; i++) { @@ -484,7 +528,7 @@ public class HelloCassandra { final StringBuilder queryBuilder = new StringBuilder(); for (int i = 0; i < 10; i++) { - queryBuilder.append("INSERT INTO testCollection (keyValue, lValue) VALUES(") + queryBuilder.append("INSERT INTO test_table (key_value, l_value) VALUES (") .append(i) .append(", [1, 3, 12345]);"); } @@ -504,7 +548,7 @@ public class HelloCassandra { public void multipleSelectQueries(final Connection connection) { final StringBuilder queries = new StringBuilder(); for (int i = 0; i < 10; i++) { - queries.append("SELECT * FROM testCollection where keyValue = ").append(i).append(";"); + queries.append("SELECT * FROM test_table WHERE key_value = ").append(i).append(";"); } // Send all the select queries at once. 
@@ -514,7 +558,7 @@ public class HelloCassandra { // Get all the results from all the select queries in a single result set. final ArrayList<Integer> ids = new ArrayList<>(); while (result.next()){ - ids.add(result.getInt("keyValue")); + ids.add(result.getInt("key_value")); } } } @@ -522,14 +566,14 @@ Make sure you send select queries that return the exact same columns, or you might get pretty unpredictable results. -### Working with Tuples and UDTs +### Working with Tuples and UDTs (User-Defined Types) To create a new `Tuple` object in Java (see [Tuple](https://docs.datastax.com/en/developer/java-driver/latest/manual/core/tuples/) documentation), use the `com.datastax.oss.driver.api.core.type.DataTypes.tupleOf(...).newValue()` method. Note that the UDT ([User-Defined Types](https://docs.datastax.com/en/developer/java-driver/latest/manual/core/udts/)) -fields cannot be instantiated outside the Datastax Java driver core. If you want to use prepared statements, you -must proceed as in the following example: +fields cannot be instantiated outside the Java Driver for Apache Cassandra® core. If you want to use prepared +statements, you must proceed as in the following example: ```java public class HelloCassandra { public void insertTuples(final Connection connection) { @@ -555,6 +599,7 @@ public class HelloCassandra { preparedStatement.setString(5, "midVal"); preparedStatement.setFloat(6, (float)2.0); preparedStatement.setObject(7, (Object)tuple); + // Execute the prepared statement. preparedStatement.execute(); preparedStatement.close(); @@ -586,6 +631,8 @@ public class HelloCassandra { + "{'map_key1' : {key : 'key1', value : 'value1'}," + "'map_key2' : {key : 'key2', value : 'value2'}}, " + "{'tuple1' : (1, 2), 'tuple2' : (2, 3)});"; + + // Execute the statement. insertStatement.execute(insertCql); insertStatement.close(); } @@ -629,11 +676,11 @@ public class JsonSubEntity { } ``` -The class `JsonSubEntity` above corresponds to the UDT `subType` in our Cassandra keyspace and the class `JsonEntity` +The class `JsonSubEntity` above corresponds to the UDT `subtype` in our Cassandra keyspace and the class `JsonEntity` matches the columns of the table `t_using_json`: ``` -CREATE TYPE IF NOT EXISTS subType (text_val text, bool_val boolean); -CREATE TABLE t_using_json (col_int int PRIMARY KEY, col_text text, col_udt frozen<subType>); +CREATE TYPE IF NOT EXISTS subtype (text_val text, bool_val boolean); +CREATE TABLE t_using_json (col_int int PRIMARY KEY, col_text text, col_udt frozen<subtype>); ``` #### JSON support in result sets @@ -672,15 +719,14 @@ object into JSON to pass to Cassandra as shown below: public class HelloCassandra { public void insertJson(final Connection connection) { // Using INSERT INTO ...
JSON syntax - final CassandraPreparedStatement insertStatement1 = connection.prepareStatement( - "INSERT INTO t_using_json JSON ?;"); + final CassandraPreparedStatement insertStatement1 = connection.prepareStatement("INSERT INTO t_using_json JSON ?;"); insertStatement1.setJson(1, new JsonEntity(1, "a text value", new JsonSubEntity("1.1", false))); insertStatement1.execute(); insertStatement1.close(); // Using fromJson() function - final CassandraPreparedStatement insertStatement2 = connection.prepareStatement( - "INSERT INTO t_using_json (col_int, col_text, col_udt) VALUES (?, ?, fromJson(?));"); + final CassandraPreparedStatement insertStatement2 = + connection.prepareStatement("INSERT INTO t_using_json (col_int, col_text, col_udt) VALUES (?, ?, fromJson(?));"); insertStatement2.setInt(1, 2); insertStatement2.setString(2, "another text value"); insertStatement2.setJson(3, new JsonSubEntity("2.1", true)); diff --git a/checkstyle-suppressions.xml b/checkstyle-suppressions.xml index 77a3da2..cc6caf2 100644 --- a/checkstyle-suppressions.xml +++ b/checkstyle-suppressions.xml @@ -4,10 +4,5 @@ "https://checkstyle.org/dtds/suppressions_1_2.dtd"> - - - - + diff --git a/mvnw b/mvnw new file mode 100755 index 0000000..8d937f4 --- /dev/null +++ b/mvnw @@ -0,0 +1,308 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.2.0 +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ] ; then + + if [ -f /usr/local/etc/mavenrc ] ; then + . /usr/local/etc/mavenrc + fi + +  if [ -f /etc/mavenrc ] ; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ] ; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false.
+cygwin=false; +darwin=false; +mingw=false +case "$(uname)" in + CYGWIN*) cygwin=true ;; + MINGW*) mingw=true;; + Darwin*) darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + JAVA_HOME="$(/usr/libexec/java_home)"; export JAVA_HOME + else + JAVA_HOME="/Library/Java/Home"; export JAVA_HOME + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ] ; then + if [ -r /etc/gentoo-release ] ; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin ; then + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw ; then + [ -n "$JAVA_HOME" ] && [ -d "$JAVA_HOME" ] && + JAVA_HOME="$(cd "$JAVA_HOME" || (echo "cannot cd into $JAVA_HOME."; exit 1); pwd)" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr "\"$javaExecutable\"" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! "$(expr "$readLink" : '\([^ ]*\)')" = "no" ]; then + if $darwin ; then + javaHome="$(dirname "\"$javaExecutable\"")" + javaExecutable="$(cd "\"$javaHome\"" && pwd -P)/javac" + else + javaExecutable="$(readlink -f "\"$javaExecutable\"")" + fi + javaHome="$(dirname "\"$javaExecutable\"")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ] ; then + if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(\unset -f command 2>/dev/null; \command -v java)" + fi +fi + +if [ ! -x "$JAVACMD" ] ; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ] ; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + if [ -z "$1" ] + then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ] ; do + if [ -d "$wdir"/.mvn ] ; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$(cd "$wdir/.." || exit 1; pwd) + fi + # end of workaround + done + printf '%s' "$(cd "$basedir" || exit 1; pwd)" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + # Remove \r in case we run on Windows within Git Bash + # and check out the repository with auto CRLF management + # enabled. Otherwise, we may read lines that are delimited with + # \r\n and produce $'-Xarg\r' rather than -Xarg due to word + # splitting rules. 
+ tr -s '\r\n' ' ' < "$1" + fi +} + +log() { + if [ "$MVNW_VERBOSE" = true ]; then + printf '%s\n' "$1" + fi +} + +BASE_DIR=$(find_maven_basedir "$(dirname "$0")") +if [ -z "$BASE_DIR" ]; then + exit 1; +fi + +MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"}; export MAVEN_PROJECTBASEDIR +log "$MAVEN_PROJECTBASEDIR" + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +wrapperJarPath="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" +if [ -r "$wrapperJarPath" ]; then + log "Found $wrapperJarPath" +else + log "Couldn't find $wrapperJarPath, downloading it ..." + + if [ -n "$MVNW_REPOURL" ]; then + wrapperUrl="$MVNW_REPOURL/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + else + wrapperUrl="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + fi + while IFS="=" read -r key value; do + # Remove '\r' from value to allow usage on windows as IFS does not consider '\r' as a separator ( considers space, tab, new line ('\n'), and custom '=' ) + safeValue=$(echo "$value" | tr -d '\r') + case "$key" in (wrapperUrl) wrapperUrl="$safeValue"; break ;; + esac + done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" + log "Downloading from: $wrapperUrl" + + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget > /dev/null; then + log "Found wget ... using wget" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--quiet" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget $QUIET "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + else + wget $QUIET --http-user="$MVNW_USERNAME" --http-password="$MVNW_PASSWORD" "$wrapperUrl" -O "$wrapperJarPath" || rm -f "$wrapperJarPath" + fi + elif command -v curl > /dev/null; then + log "Found curl ... using curl" + [ "$MVNW_VERBOSE" = true ] && QUIET="" || QUIET="--silent" + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl $QUIET -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + else + curl $QUIET --user "$MVNW_USERNAME:$MVNW_PASSWORD" -o "$wrapperJarPath" "$wrapperUrl" -f -L || rm -f "$wrapperJarPath" + fi + else + log "Falling back to using Java to download" + javaSource="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.java" + javaClass="$MAVEN_PROJECTBASEDIR/.mvn/wrapper/MavenWrapperDownloader.class" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaSource=$(cygpath --path --windows "$javaSource") + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaSource" ]; then + if [ ! -e "$javaClass" ]; then + log " - Compiling MavenWrapperDownloader.java ..." + ("$JAVA_HOME/bin/javac" "$javaSource") + fi + if [ -e "$javaClass" ]; then + log " - Running MavenWrapperDownloader.java ..." 
+ ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$wrapperUrl" "$wrapperJarPath") || rm -f "$wrapperJarPath" + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +# If specified, validate the SHA-256 sum of the Maven wrapper jar file +wrapperSha256Sum="" +while IFS="=" read -r key value; do + case "$key" in (wrapperSha256Sum) wrapperSha256Sum=$value; break ;; + esac +done < "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.properties" +if [ -n "$wrapperSha256Sum" ]; then + wrapperSha256Result=false + if command -v sha256sum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | sha256sum -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + elif command -v shasum > /dev/null; then + if echo "$wrapperSha256Sum $wrapperJarPath" | shasum -a 256 -c > /dev/null 2>&1; then + wrapperSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." + echo "Please install either command, or disable validation by removing 'wrapperSha256Sum' from your maven-wrapper.properties." + exit 1 + fi + if [ $wrapperSha256Result = false ]; then + echo "Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised." >&2 + echo "Investigate or delete $wrapperJarPath to attempt a clean download." >&2 + echo "If you updated your Maven version, you need to update the specified wrapperSha256Sum property." >&2 + exit 1 + fi +fi + +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $*" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +# shellcheck disable=SC2086 # safe args +exec "$JAVACMD" \ + $MAVEN_OPTS \ + $MAVEN_DEBUG_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/mvnw.cmd b/mvnw.cmd new file mode 100644 index 0000000..c4586b5 --- /dev/null +++ b/mvnw.cmd @@ -0,0 +1,205 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.2.0 +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* +if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %WRAPPER_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM If specified, validate the SHA-256 sum of the Maven wrapper jar file +SET WRAPPER_SHA_256_SUM="" +FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B +) +IF NOT %WRAPPER_SHA_256_SUM%=="" ( + powershell -Command "&{"^ + "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ + "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ + " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ + " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ + " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ + " exit 1;"^ + "}"^ + "}" + if ERRORLEVEL 1 goto error +) + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% ^ + %JVM_CONFIG_MAVEN_PROPS% ^ + %MAVEN_OPTS% ^ + %MAVEN_DEBUG_OPTS% ^ + -classpath %WRAPPER_JAR% ^ + "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ + %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" +if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%"=="on" pause + +if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% + +cmd /C exit /B %ERROR_CODE% diff --git a/pom.xml b/pom.xml index 2a46c45..d2f77cc 100644 --- a/pom.xml +++ b/pom.xml @@ -5,11 +5,11 @@ com.ing.data cassandra-jdbc-wrapper - 4.10.2 + 4.11.0 jar Cassandra JDBC Wrapper - JDBC wrapper of the DataStax Java Driver for Apache Cassandra. + JDBC wrapper of the Java Driver for Apache Cassandra®. https://github.com/ing-bank/cassandra-jdbc-wrapper 2020 @@ -108,18 +108,19 @@ 9.3 2.9.3 4.4 - 2.14.0 - 3.13.0 - 4.17.0 - 2.15.2 + 2.15.0 + 3.14.0 + 4.17.0 + 2.16.0 + 5.2.2 2.2 - 5.10.0 - 1.10.0 + 5.10.1 + 1.10.1 1.18.30 3.12.4 1.7.36 - 1.19.0 + 1.19.3 0.6.11 3.3.0 @@ -138,7 +139,7 @@ com.datastax.oss java-driver-core - ${datastax.java.driver.version} + ${java.driver.version} @@ -186,6 +187,13 @@ ${jackson.version} + + + org.semver4j + semver4j + ${semver4j.version} + + org.mockito @@ -259,7 +267,7 @@ com.datastax.oss java-driver-query-builder - ${datastax.java.driver.version} + ${java.driver.version} test diff --git a/src/main/java/com/ing/data/cassandra/jdbc/AbstractResultSet.java b/src/main/java/com/ing/data/cassandra/jdbc/AbstractResultSet.java index 13f81ab..a986c6b 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/AbstractResultSet.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/AbstractResultSet.java @@ -55,8 +55,9 @@ abstract class AbstractResultSet implements Wrapper { * @param columnIndex The column index (first column is 1). * @param type The data type to check. * @return {@code true} if the column CQL data type is the given one, {@code false} otherwise. + * @throws SQLException when the CQL type cannot be determined for the given column. */ - boolean isCqlType(final int columnIndex, @Nonnull final DataTypeEnum type) { + boolean isCqlType(final int columnIndex, @Nonnull final DataTypeEnum type) throws SQLException { final String columnType = StringUtils.substringBefore(DataTypeEnum.cqlName(getCqlDataType(columnIndex)), "<"); return type.cqlType.equalsIgnoreCase(columnType); } @@ -67,8 +68,9 @@ boolean isCqlType(final int columnIndex, @Nonnull final DataTypeEnum type) { * @param columnLabel The column name. * @param type The data type to check. * @return {@code true} if the column CQL data type is the given one, {@code false} otherwise. + * @throws SQLException when the CQL type cannot be determined for the given column. 
*/ - boolean isCqlType(final String columnLabel, @Nonnull final DataTypeEnum type) { + boolean isCqlType(final String columnLabel, @Nonnull final DataTypeEnum type) throws SQLException { final String columnType = StringUtils.substringBefore(DataTypeEnum.cqlName(getCqlDataType(columnLabel)), "<"); return type.cqlType.equalsIgnoreCase(columnType); } @@ -78,16 +80,22 @@ boolean isCqlType(final String columnLabel, @Nonnull final DataTypeEnum type) { * * @param columnIndex The column index (first column is 1). * @return The CQL data type of the column. + * @throws SQLException when the CQL type cannot be determined for the given column. */ - abstract DataType getCqlDataType(int columnIndex); + abstract DataType getCqlDataType(int columnIndex) throws SQLException; /** * Gets the CQL type of the column with the given name. * * @param columnLabel The column name. * @return The CQL data type of the column. + * @throws SQLException when the CQL type cannot be determined for the given column. */ - abstract DataType getCqlDataType(String columnLabel); + abstract DataType getCqlDataType(String columnLabel) throws SQLException; + + public boolean absolute(final int row) throws SQLException { + throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + } public void cancelRowUpdates() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); @@ -97,6 +105,10 @@ public void deleteRow() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } + public boolean first() throws SQLException { + throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + } + public Array getArray(final int columnIndex) throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } @@ -165,11 +177,11 @@ public Object getObject(final String columnLabel, final Map> ma throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } - public T getObject(final String columnLabel, final Class type) throws SQLException { + public T getObject(final int columnIndex, final Class type) throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } - public T getObject(final int columnIndex, final Class type) throws SQLException { + public T getObject(final String columnLabel, final Class type) throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } @@ -201,6 +213,10 @@ public void insertRow() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } + public boolean last() throws SQLException { + throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + } + public void moveToCurrentRow() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } @@ -209,10 +225,18 @@ public void moveToInsertRow() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } + public boolean previous() throws SQLException { + throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + } + public void refreshRow() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } + public boolean relative(final int arg0) throws SQLException { + throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + } + public boolean rowDeleted() throws SQLException { throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraConnection.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraConnection.java index 49cc2bf..71c94f4 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraConnection.java +++ 
b/src/main/java/com/ing/data/cassandra/jdbc/CassandraConnection.java @@ -88,20 +88,20 @@ */ public class CassandraConnection extends AbstractConnection implements Connection { - // Minimal Apache Cassandra version supported by the DataStax Java Driver for Apache Cassandra on top which this - // wrapper is built. + // Minimal Apache Cassandra version supported by the Java Driver for Apache Cassandra® on top which this wrapper is + // built. // If available, the effective version run by the node on which the connection is established will override these // values. /** - * Minimal Apache Cassandra major version supported by the DataStax Java Driver for Apache Cassandra. + * Minimal Apache Cassandra major version supported by the Java Driver for Apache Cassandra®. */ public static volatile int dbMajorVersion = 2; /** - * Minimal Apache Cassandra minor version supported by the DataStax Java Driver for Apache Cassandra. + * Minimal Apache Cassandra minor version supported by the Java Driver for Apache Cassandra®. */ public static volatile int dbMinorVersion = 1; /** - * Minimal Apache Cassandra patch version supported by the DataStax Java Driver for Apache Cassandra. + * Minimal Apache Cassandra patch version supported by the Java Driver for Apache Cassandra®. */ public static volatile int dbPatchVersion = 0; diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDataSource.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDataSource.java index f665cf1..9db0576 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDataSource.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDataSource.java @@ -17,7 +17,7 @@ import com.datastax.oss.driver.api.core.ConsistencyLevel; import com.datastax.oss.driver.internal.core.loadbalancing.DefaultLoadBalancingPolicy; -import com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil; +import com.ing.data.cassandra.jdbc.utils.ContactPoint; import javax.sql.ConnectionPoolDataSource; import javax.sql.DataSource; @@ -25,22 +25,20 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; -import java.sql.SQLNonTransientConnectionException; +import java.util.Collections; +import java.util.List; import java.util.Properties; import java.util.logging.Logger; -import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.HOST_REQUIRED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NOT_SUPPORTED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NO_INTERFACE; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.DEFAULT_PORT; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.PROTOCOL; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONSISTENCY_LEVEL; +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONTACT_POINTS; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CQL_VERSION; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_DATABASE_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_LOCAL_DATACENTER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PASSWORD; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PORT_NUMBER; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_SERVER_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_USER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.createSubName; @@ -63,13 +61,9 @@ public class CassandraDataSource implements ConnectionPoolDataSource, DataSource */ 
protected static final String DATA_SOURCE_DESCRIPTION = "Cassandra Data Source"; /** - * The server host name where the data source is located. + * The contact points of the data source. */ - protected String serverName; - /** - * The port number of the data source, by default {@value JdbcUrlUtil#DEFAULT_PORT}. - */ - protected int portNumber = DEFAULT_PORT; + protected List contactPoints; /** * The database name. In case of Cassandra, i.e. the keyspace used as data source. */ @@ -84,7 +78,9 @@ public class CassandraDataSource implements ConnectionPoolDataSource, DataSource protected String password; /** * The CQL version. + * @deprecated For removal. */ + @Deprecated protected String version = null; /** * The consistency level. @@ -107,34 +103,64 @@ public class CassandraDataSource implements ConnectionPoolDataSource, DataSource * @param keyspace The keyspace. * @param user The username used to connect. * @param password The password used to connect. - * @param version The CQL version. * @param consistency The consistency level. + * @deprecated For removal. Use {@link #CassandraDataSource(List, String, String, String, String)} instead. + */ + @Deprecated + public CassandraDataSource(final String host, final int port, final String keyspace, final String user, + final String password, final String consistency) { + this(Collections.singletonList(ContactPoint.of(host, port)), keyspace, user, password, null, consistency, null); + } + + /** + * Constructor. + * + * @param contactPoints The contact points. + * @param keyspace The keyspace. + * @param user The username used to connect. + * @param password The password used to connect. + * @param consistency The consistency level. + */ + public CassandraDataSource(final List contactPoints, final String keyspace, final String user, + final String password, final String consistency) { + this(contactPoints, keyspace, user, password, null, consistency, null); + } + + /** + * Constructor. + * + * @param host The host name. + * @param port The port. + * @param keyspace The keyspace. + * @param user The username used to connect. + * @param password The password used to connect. + * @param version The CQL version. Deprecated, do not use anymore. + * @param consistency The consistency level. + * @deprecated For removal. Use {@link #CassandraDataSource(List, String, String, String, String)} instead. */ + @Deprecated public CassandraDataSource(final String host, final int port, final String keyspace, final String user, final String password, final String version, final String consistency) { - this(host, port, keyspace, user, password, version, consistency, null); + this(Collections.singletonList(ContactPoint.of(host, port)), + keyspace, user, password, version, consistency, null); } /** * Constructor specifying a local datacenter (required to use {@link DefaultLoadBalancingPolicy}). * - * @param host The host name. - * @param port The port. + * @param contactPoints The contact points. * @param keyspace The keyspace. * @param user The username used to connect. * @param password The password used to connect. - * @param version The CQL version. + * @param version The CQL version. Deprecated, do not use anymore. * @param consistency The consistency level. * @param localDataCenter The local datacenter. 
*/ - public CassandraDataSource(final String host, final int port, final String keyspace, final String user, + public CassandraDataSource(final List contactPoints, final String keyspace, final String user, final String password, final String version, final String consistency, final String localDataCenter) { - if (host != null) { - setServerName(host); - } - if (port >= 0) { - setPortNumber(port); + if (contactPoints != null && !contactPoints.isEmpty()) { + setContactPoints(contactPoints); } if (version != null) { setVersion(version); @@ -160,28 +186,31 @@ public String getDescription() { } /** - * Gets the server host name where the data source is located. + * Gets the contact points of the data source. * - * @return The server host name where the data source is located. + * @return The contact points of the data source. */ - public String getServerName() { - return this.serverName; + public List getContactPoints() { + return this.contactPoints; } /** - * Sets the server host name where the data source is located. + * Sets the contact points of the data source. * - * @param serverName The host name. + * @param contactPoints The contact points of the data source. */ - public void setServerName(final String serverName) { - this.serverName = serverName; + public void setContactPoints(final List contactPoints) { + this.contactPoints = contactPoints; } /** * Gets the CQL version. * * @return The CQL version. + * @deprecated For removal. */ + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public String getVersion() { return this.version; } @@ -190,7 +219,10 @@ public String getVersion() { * Sets the CQL version. * * @param version The CQL version. + * @deprecated For removal. */ + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public void setVersion(final String version) { this.version = version; } @@ -221,24 +253,6 @@ public void setConsistency(final String consistency) { this.consistency = consistency; } - /** - * Gets the port number of the data source. - * - * @return The port number of the data source. - */ - public int getPortNumber() { - return this.portNumber; - } - - /** - * Sets the port number of the data source. - * - * @param portNumber The port number of the data source. - */ - public void setPortNumber(final int portNumber) { - this.portNumber = portNumber; - } - /** * Gets the database name. In case of Cassandra, i.e. the keyspace used as data source. 
* @@ -322,12 +336,9 @@ public CassandraConnection getConnection(final String user, final String passwor this.user = user; this.password = password; - if (this.serverName != null) { - props.setProperty(TAG_SERVER_NAME, this.serverName); - } else { - throw new SQLNonTransientConnectionException(HOST_REQUIRED); + if (this.contactPoints != null && !this.contactPoints.isEmpty()) { + props.put(TAG_CONTACT_POINTS, this.contactPoints); } - props.setProperty(TAG_PORT_NUMBER, String.valueOf(this.portNumber)); if (this.databaseName != null) { props.setProperty(TAG_DATABASE_NAME, this.databaseName); } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDatabaseMetaData.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDatabaseMetaData.java index 8c7bce8..23de642 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDatabaseMetaData.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDatabaseMetaData.java @@ -16,12 +16,15 @@ package com.ing.data.cassandra.jdbc; import com.datastax.oss.driver.api.core.data.UdtValue; +import com.ing.data.cassandra.jdbc.metadata.BasicVersionedMetadata; +import com.ing.data.cassandra.jdbc.metadata.BuiltInFunctionsMetadataBuilder; import com.ing.data.cassandra.jdbc.metadata.CatalogMetadataResultSetBuilder; import com.ing.data.cassandra.jdbc.metadata.ColumnMetadataResultSetBuilder; import com.ing.data.cassandra.jdbc.metadata.FunctionMetadataResultSetBuilder; import com.ing.data.cassandra.jdbc.metadata.SchemaMetadataResultSetBuilder; import com.ing.data.cassandra.jdbc.metadata.TableMetadataResultSetBuilder; import com.ing.data.cassandra.jdbc.metadata.TypeMetadataResultSetBuilder; +import com.ing.data.cassandra.jdbc.metadata.VersionedMetadata; import org.apache.commons.lang3.StringUtils; import java.sql.Connection; @@ -35,8 +38,10 @@ import java.util.Arrays; import java.util.List; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_5; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.buildMetadataList; import static com.ing.data.cassandra.jdbc.utils.DriverUtil.getDriverProperty; -import static com.ing.data.cassandra.jdbc.utils.DriverUtil.parseVersion; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.safeParseVersion; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NOT_SUPPORTED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NO_INTERFACE; @@ -176,7 +181,7 @@ public ResultSet getClientInfoProperties() throws SQLException { /** * Retrieves a description of the access rights for a table's columns. *
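With the `CassandraDataSource` changes above, a list of contact points replaces the single server name and port. Below is a minimal usage sketch: the host names, credentials, keyspace and consistency level are placeholders, the `ContactPoint.of(host, port)` factory and the 5-argument constructor are taken from the diff above, and a local datacenter may still need to be supplied through the longer constructor when the default load balancing policy is used.

```java
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.List;

import com.ing.data.cassandra.jdbc.CassandraDataSource;
import com.ing.data.cassandra.jdbc.utils.ContactPoint;

public class MultipleContactPointsExample {
    public static void main(final String[] args) throws SQLException {
        // Contact points can now target different ports (see feature request #41).
        final List<ContactPoint> contactPoints = Arrays.asList(
            ContactPoint.of("node1.example.com", 9042),
            ContactPoint.of("node2.example.com", 9043));

        // New constructor without the deprecated CQL version parameter.
        final CassandraDataSource dataSource = new CassandraDataSource(
            contactPoints, "my_keyspace", "my_user", "my_password", "LOCAL_QUORUM");

        // The contact points are propagated to the session through the TAG_CONTACT_POINTS
        // property instead of the removed TAG_SERVER_NAME / TAG_PORT_NUMBER pair.
        try (Connection connection = dataSource.getConnection()) {
            System.out.println("Connected, current keyspace: " + connection.getSchema());
        }
    }
}
```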

- * Datastax Java driver for Apache Cassandra(R) currently does not provide information about permissions and only + * Java Driver for Apache Cassandra® currently does not provide information about permissions and only * super users can access to such information through {@code LIST ALL PERMISSIONS ON }, so it cannot * be implemented safely for any connection to the database, that's why this method will throw a * {@link SQLFeatureNotSupportedException}. @@ -266,8 +271,8 @@ public String getDatabaseProductName() { /** * Retrieves the version number of this database product. *

- * The version number returned by this method is the minimal version of Apache Cassandra supported by this JDBC - * implementation (see Datastax Java driver version embedded into this JDBC wrapper and + * The version number returned by this method is the minimal version of Apache Cassandra® supported by this JDBC + * implementation (see Java Driver version embedded into this JDBC wrapper and * * compatibility matrix for further details. *

@@ -287,12 +292,12 @@ public int getDefaultTransactionIsolation() { @Override public int getDriverMajorVersion() { - return parseVersion(getDriverVersion(), 0); + return safeParseVersion(getDriverVersion()).getMajor(); } @Override public int getDriverMinorVersion() { - return parseVersion(getDriverVersion(), 1); + return safeParseVersion(getDriverVersion()).getMinor(); } @Override @@ -399,12 +404,12 @@ public ResultSet getIndexInfo(final String catalog, final String schema, final S @Override public int getJDBCMajorVersion() { - return parseVersion(getDriverProperty("driver.jdbcVersion"), 0); + return safeParseVersion(getDriverProperty("driver.jdbcVersion")).getMajor(); } @Override public int getJDBCMinorVersion() { - return parseVersion(getDriverProperty("driver.jdbcVersion"), 1); + return safeParseVersion(getDriverProperty("driver.jdbcVersion")).getMinor(); } @Override @@ -511,9 +516,7 @@ public int getMaxUserNameLength() { @Override public String getNumericFunctions() throws SQLException { checkStatementClosed(); - // We consider here the vectors similarity functions introduced by CEP-30 as numeric functions (see - // https://issues.apache.org/jira/browse/CASSANDRA-18640). - return "similarity_cosine,similarity_euclidean,similarity_dot_product"; + return new BuiltInFunctionsMetadataBuilder(this.getDatabaseProductVersion()).buildNumericFunctionsList(); } @Override @@ -634,16 +637,71 @@ public String getSQLKeywords() throws SQLException { // SQL:2003 standard keywords (see: https://ronsavage.github.io/SQL/sql-2003-2.bnf.html#xref-keywords). // The CQL keywords are listed here: // https://cassandra.apache.org/doc/latest/cassandra/cql/appendices.html#appendix-A - // Also add new keywords relative to vector type introduced by CEP-30: + // Also add new keywords relative to vector type introduced by CEP-30 in Cassandra 5.0: // https://cwiki.apache.org/confluence/x/OQ40Dw - final List cqlKeywords = Arrays.asList("AGGREGATE", "ALLOW", "ANN OF", "APPLY", "ASCII", "AUTHORIZE", - "BATCH", "CLUSTERING", "COLUMNFAMILY", "COMPACT", "COUNTER", "CUSTOM", "ENTRIES", "FILTERING", "FINALFUNC", - "FROZEN", "FUNCTIONS", "IF", "INDEX", "INET", "INFINITY", "INITCOND", "JSON", "KEYS", "KEYSPACE", - "KEYSPACES", "LIMIT", "LIST", "LOGIN", "MODIFY", "NAN", "NOLOGIN", "NORECURSIVE", "NOSUPERUSER", "PASSWORD", - "PERMISSION", "PERMISSIONS", "RENAME", "REPLACE", "RETURNS", "ROLES", "SFUNC", "SMALLINT", "STORAGE", - "STYPE", "SUPERUSER", "TEXT", "TIMEUUID", "TINYINT", "TOKEN", "TRUNCATE", "TTL", "TUPLE", "UNLOGGED", "USE", - "USERS", "UUID", "VARINT", "VECTOR", "WRITETIME"); - return String.join(",", cqlKeywords); + final List cqlKeywords = Arrays.asList( + new BasicVersionedMetadata("AGGREGATE"), + new BasicVersionedMetadata("ALLOW"), + new BasicVersionedMetadata("ANN OF", CASSANDRA_5), + new BasicVersionedMetadata("APPLY"), + new BasicVersionedMetadata("ASCII"), + new BasicVersionedMetadata("AUTHORIZE"), + new BasicVersionedMetadata("BATCH"), + new BasicVersionedMetadata("CLUSTERING"), + new BasicVersionedMetadata("COLUMNFAMILY"), + new BasicVersionedMetadata("COMPACT"), + new BasicVersionedMetadata("COUNTER"), + new BasicVersionedMetadata("CUSTOM"), + new BasicVersionedMetadata("ENTRIES"), + new BasicVersionedMetadata("FILTERING"), + new BasicVersionedMetadata("FINALFUNC"), + new BasicVersionedMetadata("FROZEN"), + new BasicVersionedMetadata("FUNCTIONS"), + new BasicVersionedMetadata("IF"), + new BasicVersionedMetadata("INDEX"), + new BasicVersionedMetadata("INET"), + new BasicVersionedMetadata("INFINITY"), + new 
BasicVersionedMetadata("INITCOND"), + new BasicVersionedMetadata("JSON"), + new BasicVersionedMetadata("KEYS"), + new BasicVersionedMetadata("KEYSPACE"), + new BasicVersionedMetadata("KEYSPACES"), + new BasicVersionedMetadata("LIMIT"), + new BasicVersionedMetadata("LIST"), + new BasicVersionedMetadata("LOGIN"), + new BasicVersionedMetadata("MODIFY"), + new BasicVersionedMetadata("NAN"), + new BasicVersionedMetadata("NOLOGIN"), + new BasicVersionedMetadata("NORECURSIVE"), + new BasicVersionedMetadata("NOSUPERUSER"), + new BasicVersionedMetadata("PASSWORD"), + new BasicVersionedMetadata("PERMISSION"), + new BasicVersionedMetadata("PERMISSIONS"), + new BasicVersionedMetadata("RENAME"), + new BasicVersionedMetadata("REPLACE"), + new BasicVersionedMetadata("RETURNS"), + new BasicVersionedMetadata("ROLES"), + new BasicVersionedMetadata("SFUNC"), + new BasicVersionedMetadata("SMALLINT"), + new BasicVersionedMetadata("STORAGE"), + new BasicVersionedMetadata("STYPE"), + new BasicVersionedMetadata("SUPERUSER"), + new BasicVersionedMetadata("TEXT"), + new BasicVersionedMetadata("TIMEUUID"), + new BasicVersionedMetadata("TINYINT"), + new BasicVersionedMetadata("TOKEN"), + new BasicVersionedMetadata("TRUNCATE"), + new BasicVersionedMetadata("TTL"), + new BasicVersionedMetadata("TUPLE"), + new BasicVersionedMetadata("UNLOGGED"), + new BasicVersionedMetadata("USE"), + new BasicVersionedMetadata("USERS"), + new BasicVersionedMetadata("UUID"), + new BasicVersionedMetadata("VARINT"), + new BasicVersionedMetadata("VECTOR", CASSANDRA_5), + new BasicVersionedMetadata("WRITETIME") + ); + return buildMetadataList(cqlKeywords, this.getDatabaseProductVersion()); } @Override @@ -679,8 +737,7 @@ public String getSearchStringEscape() { @Override public String getStringFunctions() throws SQLException { checkStatementClosed(); - // Cassandra does not implement natively string functions. - return StringUtils.EMPTY; + return new BuiltInFunctionsMetadataBuilder(this.getDatabaseProductVersion()).buildStringFunctionsList(); } /** @@ -725,13 +782,13 @@ public ResultSet getSuperTypes(final String catalog, final String schemaPattern, @Override public String getSystemFunctions() throws SQLException { checkStatementClosed(); - return "TOKEN,TTL,WRITETIME"; + return new BuiltInFunctionsMetadataBuilder(this.getDatabaseProductVersion()).buildSystemFunctionsList(); } /** * Retrieves a description of the access rights for each table available in a catalog (Cassandra cluster). *
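Because the keyword and function lists are now versioned, the values returned by the `CassandraDatabaseMetaData` methods above depend on the Cassandra release the driver is connected to. The sketch below shows what a client observes; the JDBC URL is a placeholder using the wrapper's usual `jdbc:cassandra://` syntax, so adjust the contact point, keyspace and `localdatacenter` parameter to your environment.

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;

public class VersionedFunctionsExample {
    public static void main(final String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:cassandra://localhost:9042/my_keyspace?localdatacenter=datacenter1")) {
            final DatabaseMetaData metaData = connection.getMetaData();

            // Against Cassandra 5.0 this includes the new math and vector similarity functions
            // (abs, exp, log, log10, round, similarity_cosine, ...); against 4.x it may be empty
            // since all of these functions were introduced in 5.0.
            System.out.println("Numeric functions: " + metaData.getNumericFunctions());

            // Keywords such as VECTOR and ANN OF are only reported from Cassandra 5.0 onwards.
            System.out.println("SQL keywords: " + metaData.getSQLKeywords());

            // Data masking functions (mask_default, mask_hash, ...) appear here for Cassandra 5.0+.
            System.out.println("System functions: " + metaData.getSystemFunctions());
        }
    }
}
```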

- * Datastax Java driver for Apache Cassandra(R) currently does not provide information about permissions and only + * Java Driver for Apache Cassandra® currently does not provide information about permissions and only * super users can access to such information through {@code LIST ALL PERMISSIONS ON }, so it cannot * be implemented safely for any connection to the database, that's why this method will throw a * {@link SQLFeatureNotSupportedException}. @@ -778,15 +835,13 @@ public ResultSet getTables(final String catalog, final String schemaPattern, fin @Override public String getTimeDateFunctions() throws SQLException { checkStatementClosed(); - // See: https://cassandra.apache.org/doc/latest/cassandra/cql/functions.html - return "dateOf,now,minTimeuuid,maxTimeuuid,unixTimestampOf,toDate,toTimestamp,toUnixTimestamp,currentTimestamp," - + "currentDate,currentTime,currentTimeUUID"; + return new BuiltInFunctionsMetadataBuilder(this.getDatabaseProductVersion()).buildTimeDateFunctionsList(); } @Override public ResultSet getTypeInfo() throws SQLException { checkStatementClosed(); - return new TypeMetadataResultSetBuilder(this.statement).buildTypes(); + return new TypeMetadataResultSetBuilder(this.statement).buildTypes(this.getDatabaseProductVersion()); } /** diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDriver.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDriver.java index 68ac9ae..68d3788 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraDriver.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraDriver.java @@ -33,9 +33,10 @@ import java.util.Properties; import static com.ing.data.cassandra.jdbc.utils.DriverUtil.getDriverProperty; -import static com.ing.data.cassandra.jdbc.utils.DriverUtil.parseVersion; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.safeParseVersion; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NOT_SUPPORTED; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.PROTOCOL; +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONTACT_POINTS; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PASSWORD; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_USER; @@ -75,7 +76,9 @@ public Connection connect(final String url, final Properties properties) throws final Enumeration keys = properties.keys(); while (keys.hasMoreElements()) { final String key = (String) keys.nextElement(); - params.put(key, properties.getProperty(key)); + if (!TAG_CONTACT_POINTS.equals(key)) { + params.put(key, properties.getProperty(key)); + } } params.put(SessionHolder.URL_KEY, url); @@ -107,12 +110,12 @@ public Connection connect(final String url, final Properties properties) throws @Override public int getMajorVersion() { - return parseVersion(getDriverProperty("driver.version"), 0); + return safeParseVersion(getDriverProperty("driver.version")).getMajor(); } @Override public int getMinorVersion() { - return parseVersion(getDriverProperty("driver.version"), 1); + return safeParseVersion(getDriverProperty("driver.version")).getMinor(); } @Override diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraMetadataResultSet.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraMetadataResultSet.java index a9211cb..1f08c46 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraMetadataResultSet.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraMetadataResultSet.java @@ -51,6 +51,7 @@ import java.sql.Statement; import java.sql.Time; import 
java.sql.Timestamp; +import java.sql.Types; import java.util.ArrayList; import java.util.Calendar; import java.util.Iterator; @@ -70,6 +71,7 @@ import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.MUST_BE_POSITIVE; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NOT_SUPPORTED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NO_INTERFACE; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNABLE_TO_RETRIEVE_METADATA; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.VALID_LABELS; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.WAS_CLOSED_RS; @@ -194,18 +196,25 @@ private void populateColumns() { } @Override - DataType getCqlDataType(final int columnIndex) { - return this.currentRow.getColumnDefinitions().getType(columnIndex - 1); - } - - @Override - DataType getCqlDataType(final String columnLabel) { - return this.currentRow.getColumnDefinitions().getType(columnLabel); + DataType getCqlDataType(final int columnIndex) throws SQLException { + if (this.currentRow != null && this.currentRow.getColumnDefinitions() != null) { + return this.currentRow.getColumnDefinitions().getType(columnIndex - 1); + } + if (this.driverResultSet != null && this.driverResultSet.getColumnDefinitions() != null) { + return this.driverResultSet.getColumnDefinitions().getType(columnIndex - 1); + } + throw new SQLException(UNABLE_TO_RETRIEVE_METADATA); } @Override - public boolean absolute(final int row) throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + DataType getCqlDataType(final String columnLabel) throws SQLException { + if (this.currentRow != null && this.currentRow.getColumnDefinitions() != null) { + return this.currentRow.getColumnDefinitions().getType(columnLabel); + } + if (this.driverResultSet != null && this.driverResultSet.getColumnDefinitions() != null) { + return this.driverResultSet.getColumnDefinitions().getType(columnLabel); + } + throw new SQLException(UNABLE_TO_RETRIEVE_METADATA); } @Override @@ -284,17 +293,12 @@ public int findColumn(final String columnLabel) throws SQLException { checkName(columnLabel); if (this.currentRow != null) { return this.currentRow.getColumnDefinitions().getIndexOf(columnLabel) + 1; - } else if (this.driverResultSet != null) { + } else if (this.driverResultSet != null && this.driverResultSet.getColumnDefinitions() != null) { return this.driverResultSet.getColumnDefinitions().getIndexOf(columnLabel) + 1; } throw new SQLSyntaxErrorException(String.format(VALID_LABELS, columnLabel)); } - @Override - public boolean first() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - @Override public BigDecimal getBigDecimal(final int columnIndex) throws SQLException { checkIndex(columnIndex); @@ -1015,11 +1019,6 @@ public boolean isLast() throws SQLException { return !this.rowsIterator.hasNext(); } - @Override - public boolean last() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - @Override public synchronized boolean next() { if (hasMoreRows()) { @@ -1034,16 +1033,6 @@ public synchronized boolean next() { return false; } - @Override - public boolean previous() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - - @Override - public boolean relative(final int arg0) throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - @Override public boolean wasNull() { return this.wasNull; @@ -1063,14 +1052,17 @@ public String 
getCatalogName(final int column) throws SQLException { } @Override - public String getColumnClassName(final int column) { + public String getColumnClassName(final int column) throws SQLException { if (currentRow != null) { return DataTypeEnum.fromCqlTypeName(getCqlDataType(column).asCql(false, false)).asJavaClass() .getCanonicalName(); } - return DataTypeEnum.fromCqlTypeName( - driverResultSet.getColumnDefinitions().asList().get(column - 1).getType().asCql(false, false)) - .asJavaClass().getCanonicalName(); + if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { + return DataTypeEnum.fromCqlTypeName( + driverResultSet.getColumnDefinitions().asList().get(column - 1).getType().asCql(false, false)) + .asJavaClass().getCanonicalName(); + } + throw new SQLException(UNABLE_TO_RETRIEVE_METADATA); } @Override @@ -1085,16 +1077,19 @@ public int getColumnCount() { } @Override - public String getColumnLabel(final int column) { + public String getColumnLabel(final int column) throws SQLException { return getColumnName(column); } @Override - public String getColumnName(final int column) { - if (currentRow != null) { + public String getColumnName(final int column) throws SQLException { + if (currentRow != null && currentRow.getColumnDefinitions() != null) { return currentRow.getColumnDefinitions().getName(column - 1); } - return driverResultSet.getColumnDefinitions().asList().get(column - 1).getName(); + if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { + return driverResultSet.getColumnDefinitions().asList().get(column - 1).getName(); + } + throw new SQLException(UNABLE_TO_RETRIEVE_METADATA); } @Override @@ -1102,10 +1097,12 @@ public int getColumnDisplaySize(final int column) { try { final AbstractJdbcType jdbcEquivalentType; final ColumnDefinitions.Definition columnDefinition; - if (currentRow != null) { + if (currentRow != null && currentRow.getColumnDefinitions() != null) { columnDefinition = currentRow.getColumnDefinitions().asList().get(column - 1); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { columnDefinition = driverResultSet.getColumnDefinitions().asList().get(column - 1); + } else { + return DEFAULT_PRECISION; } jdbcEquivalentType = TypesMap.getTypeForComparator(columnDefinition.getType().toString()); @@ -1120,24 +1117,28 @@ public int getColumnDisplaySize(final int column) { } @Override - public int getColumnType(final int column) { + public int getColumnType(final int column) throws SQLException { final DataType type; if (currentRow != null) { type = getCqlDataType(column); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { type = driverResultSet.getColumnDefinitions().asList().get(column - 1).getType(); + } else { + return Types.OTHER; } return TypesMap.getTypeForComparator(type.toString()).getJdbcType(); } @Override - public String getColumnTypeName(final int column) { + public String getColumnTypeName(final int column) throws SQLException { // Specification says "database specific type name"; for Cassandra this means the AbstractType. 
final DataType type; if (currentRow != null) { type = getCqlDataType(column); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { type = driverResultSet.getColumnDefinitions().getType(column - 1); + } else { + return StringUtils.EMPTY; } return type.toString(); } @@ -1152,10 +1153,12 @@ public int getScale(final int column) { try { final AbstractJdbcType jdbcEquivalentType; final ColumnDefinitions.Definition columnDefinition; - if (currentRow != null) { + if (currentRow != null && currentRow.getColumnDefinitions() != null) { columnDefinition = currentRow.getColumnDefinitions().asList().get(column - 1); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { columnDefinition = driverResultSet.getColumnDefinitions().asList().get(column - 1); + } else { + return DEFAULT_SCALE; } jdbcEquivalentType = TypesMap.getTypeForComparator(columnDefinition.getType().toString()); @@ -1180,10 +1183,12 @@ public String getSchemaName(final int column) throws SQLException { @Override public String getTableName(final int column) { final String tableName; - if (currentRow != null) { + if (currentRow != null && currentRow.getColumnDefinitions() != null) { tableName = currentRow.getColumnDefinitions().getTable(column - 1); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { tableName = driverResultSet.getColumnDefinitions().getTable(column - 1); + } else { + return StringUtils.EMPTY; } return tableName; } @@ -1238,9 +1243,16 @@ public boolean isSearchable(final int column) throws SQLException { return false; } final String columnName = getColumnName(column); + final String schemaName = getSchemaName(column); + final String tableName = getTableName(column); + // If the schema or table name is not defined, always returns false since we cannot determine if the column + // is searchable in this context. + if (StringUtils.isEmpty(schemaName) || StringUtils.isEmpty(tableName)) { + return false; + } final AtomicBoolean searchable = new AtomicBoolean(false); - statement.connection.getSession().getMetadata().getKeyspace(getSchemaName(column)) - .flatMap(metadata -> metadata.getTable(getTableName(column))) + statement.connection.getSession().getMetadata().getKeyspace(schemaName) + .flatMap(metadata -> metadata.getTable(tableName)) .ifPresent(tableMetadata -> { boolean result; // Check first if the column is a clustering column or in a partitioning key. 
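The null-safety additions above mean the metadata of a `CassandraMetadataResultSet` can now be read even when it contains no rows, by falling back to the driver-level column definitions or to neutral defaults. A minimal sketch under the assumption that the table name pattern matches nothing (the URL is the same placeholder as in the previous example):

```java
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;

public class EmptyMetadataResultSetExample {
    public static void main(final String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:cassandra://localhost:9042/my_keyspace?localdatacenter=datacenter1")) {
            final DatabaseMetaData dbMetaData = connection.getMetaData();
            // A table name pattern matching no table produces an empty metadata result set.
            try (ResultSet tables = dbMetaData.getTables(null, null, "does_not_exist", null)) {
                final ResultSetMetaData rsMetaData = tables.getMetaData();
                // Previously this could fail before the first call to next(); with the null-safe
                // accessors it now returns the column definitions (or sensible defaults).
                for (int i = 1; i <= rsMetaData.getColumnCount(); i++) {
                    System.out.println(rsMetaData.getColumnName(i) + ": "
                        + rsMetaData.getColumnTypeName(i));
                }
            }
        }
    }
}
```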
@@ -1257,12 +1269,14 @@ public boolean isSearchable(final int column) throws SQLException { } @Override - public boolean isSigned(final int column) { + public boolean isSigned(final int column) throws SQLException { final DataType type; if (currentRow != null) { type = getCqlDataType(column); - } else { + } else if (driverResultSet != null && driverResultSet.getColumnDefinitions() != null) { type = driverResultSet.getColumnDefinitions().asList().get(column - 1).getType(); + } else { + return false; } return TypesMap.getTypeForComparator(type.toString()).isSigned(); } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraPreparedStatement.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraPreparedStatement.java index 399cad5..5bc9fba 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraPreparedStatement.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraPreparedStatement.java @@ -46,6 +46,7 @@ import java.net.URL; import java.nio.ByteBuffer; import java.sql.Blob; +import java.sql.Clob; import java.sql.Date; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; @@ -59,6 +60,11 @@ import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; import java.util.ArrayList; import java.util.Calendar; import java.util.HashMap; @@ -68,7 +74,12 @@ import java.util.UUID; import java.util.concurrent.CompletionStage; +import static com.ing.data.cassandra.jdbc.utils.ConversionsUtil.convertToByteArray; +import static com.ing.data.cassandra.jdbc.utils.ConversionsUtil.convertToInstant; +import static com.ing.data.cassandra.jdbc.utils.ConversionsUtil.convertToLocalDate; +import static com.ing.data.cassandra.jdbc.utils.ConversionsUtil.convertToLocalTime; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NO_RESULT_SET; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNSUPPORTED_JDBC_TYPE; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.VECTOR_ELEMENTS_NOT_NUMBERS; import static com.ing.data.cassandra.jdbc.utils.JsonUtil.getObjectMapper; @@ -202,9 +213,9 @@ private void doExecute() throws SQLException { } // Force paging to avoid timeout and node harm. if (this.boundStatement.getPageSize() == 0) { - this.boundStatement.setPageSize(DEFAULT_FETCH_SIZE); + this.boundStatement = this.boundStatement.setPageSize(DEFAULT_FETCH_SIZE); } - this.boundStatement.setConsistencyLevel(this.connection.getDefaultConsistencyLevel()); + this.boundStatement = this.boundStatement.setConsistencyLevel(this.connection.getDefaultConsistencyLevel()); for (int i = 0; i < getBoundStatementVariableDefinitions().size(); i++) { // Set parameters to null if unset. if (!this.boundStatement.isSet(i)) { @@ -299,14 +310,15 @@ public ResultSet executeQuery() throws SQLException { * Language (DML) statement, such as {@code INSERT}, {@code UPDATE} or {@code DELETE}; or a CQL statement that * returns nothing, such as a DDL statement. * - * @return Always 0, for any statement. The rationale is that Datastax Java driver does not provide update count. + * @return Always 0, for any statement. The rationale is that Java Driver for Apache Cassandra® does not provide + * update count. * @throws SQLException when something went wrong during the execution of the statement. 
*/ @Override public int executeUpdate() throws SQLException { checkNotClosed(); doExecute(); - // There is no updateCount available in Datastax Java driver, so return 0. + // There is no updateCount available in Java Driver for Apache Cassandra®, so return 0. return 0; } @@ -496,7 +508,11 @@ public void setObject(final int parameterIndex, final Object x) throws SQLExcept } else if (x.getClass().equals(ByteArrayInputStream.class)) { targetType = Types.BINARY; } else if (x instanceof byte[]) { - targetType = Types.BINARY; + targetType = Types.VARBINARY; + } else if (x instanceof Blob) { + targetType = Types.BLOB; + } else if (x instanceof Clob) { + targetType = Types.CLOB; } else if (x.getClass().equals(String.class)) { targetType = Types.VARCHAR; } else if (x.getClass().equals(Boolean.class)) { @@ -513,16 +529,23 @@ public void setObject(final int parameterIndex, final Object x) throws SQLExcept targetType = Types.OTHER; } else if (x.getClass().equals(Integer.class)) { targetType = Types.INTEGER; - } else if (x.getClass().equals(java.sql.Timestamp.class)) { + } else if (x.getClass().equals(java.sql.Timestamp.class) || x instanceof Calendar + || x.getClass().equals(java.util.Date.class) || x.getClass().equals(LocalDateTime.class)) { targetType = Types.TIMESTAMP; - } else if (x.getClass().equals(java.sql.Date.class)) { + } else if (x.getClass().equals(java.sql.Date.class) || x.getClass().equals(LocalDate.class)) { targetType = Types.DATE; - } else if (x.getClass().equals(java.sql.Time.class)) { + } else if (x.getClass().equals(java.sql.Time.class) || x.getClass().equals(LocalTime.class)) { targetType = Types.TIME; + } else if (x.getClass().equals(OffsetDateTime.class)) { + targetType = Types.TIMESTAMP_WITH_TIMEZONE; + } else if (x.getClass().equals(OffsetTime.class)) { + targetType = Types.TIME_WITH_TIMEZONE; } else if (x.getClass().equals(Byte.class)) { targetType = Types.TINYINT; } else if (x.getClass().equals(Short.class)) { targetType = Types.SMALLINT; + } else if (x.getClass().equals(URL.class)) { + targetType = Types.DATALINK; } else if (x.getClass().equals(CqlDuration.class)) { targetType = Types.OTHER; } else if (x.getClass().equals(UUID.class)) { @@ -558,39 +581,38 @@ public final void setObject(final int parameterIndex, final Object x, final int case Types.BINARY: case Types.VARBINARY: case Types.LONGVARBINARY: - final byte[] array; - if (x instanceof ByteArrayInputStream) { - array = new byte[((ByteArrayInputStream) x).available()]; - try { - ((ByteArrayInputStream) x).read(array); - } catch (final IOException e) { - LOG.warn("Exception while setting object of BINARY/VARBINARY/LONGVARBINARY type.", e); - } - } else if (x instanceof byte[]) { - array = (byte[]) x; - } else { - throw new SQLException("Unsupported parameter type: " + x.getClass()); - } + case Types.BLOB: + case Types.CLOB: + case Types.NCLOB: + final byte[] array = convertToByteArray(x); this.boundStatement = this.boundStatement.setByteBuffer(parameterIndex - 1, ByteBuffer.wrap(array)); break; case Types.BOOLEAN: + case Types.BIT: this.boundStatement = this.boundStatement.setBoolean(parameterIndex - 1, (Boolean) x); break; case Types.CHAR: - case Types.CLOB: case Types.VARCHAR: + case Types.LONGVARCHAR: + case Types.NCHAR: + case Types.NVARCHAR: + case Types.LONGNVARCHAR: + case Types.DATALINK: this.boundStatement = this.boundStatement.setString(parameterIndex - 1, x.toString()); break; case Types.TIMESTAMP: - this.boundStatement = this.boundStatement.setInstant(parameterIndex - 1, ((Timestamp) x).toInstant()); + case 
Types.TIMESTAMP_WITH_TIMEZONE: + this.boundStatement = this.boundStatement.setInstant(parameterIndex - 1, convertToInstant(x)); break; case Types.DECIMAL: + case Types.NUMERIC: this.boundStatement = this.boundStatement.setBigDecimal(parameterIndex - 1, (BigDecimal) x); break; case Types.DOUBLE: this.boundStatement = this.boundStatement.setDouble(parameterIndex - 1, (Double) x); break; case Types.FLOAT: + case Types.REAL: this.boundStatement = this.boundStatement.setFloat(parameterIndex - 1, (Float) x); break; case Types.INTEGER: @@ -610,10 +632,11 @@ public final void setObject(final int parameterIndex, final Object x, final int this.boundStatement = this.boundStatement.setByte(parameterIndex - 1, (Byte) x); break; case Types.DATE: - this.boundStatement = this.boundStatement.setLocalDate(parameterIndex - 1, ((Date) x).toLocalDate()); + this.boundStatement = this.boundStatement.setLocalDate(parameterIndex - 1, convertToLocalDate(x)); break; case Types.TIME: - this.boundStatement = this.boundStatement.setLocalTime(parameterIndex - 1, ((Time) x).toLocalTime()); + case Types.TIME_WITH_TIMEZONE: + this.boundStatement = this.boundStatement.setLocalTime(parameterIndex - 1, convertToLocalTime(x)); break; case Types.ROWID: this.boundStatement.setToNull(parameterIndex - 1); @@ -666,7 +689,7 @@ public final void setObject(final int parameterIndex, final Object x, final int } break; default: - throw new SQLException("Unsupported SQL type: " + targetSqlType); + throw new SQLException(String.format(UNSUPPORTED_JDBC_TYPE, targetSqlType)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraResultSet.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraResultSet.java index c7e338f..37c78ff 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraResultSet.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraResultSet.java @@ -35,12 +35,16 @@ import com.ing.data.cassandra.jdbc.types.DataTypeEnum; import com.ing.data.cassandra.jdbc.types.TypesMap; import org.apache.commons.collections4.IteratorUtils; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.ByteArrayInputStream; +import java.io.CharArrayReader; +import java.io.IOException; import java.io.InputStream; +import java.io.Reader; import java.math.BigDecimal; import java.math.BigInteger; import java.math.RoundingMode; @@ -48,6 +52,7 @@ import java.net.MalformedURLException; import java.net.URL; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.sql.Blob; import java.sql.Clob; import java.sql.Date; @@ -71,6 +76,7 @@ import java.time.OffsetDateTime; import java.time.OffsetTime; import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Calendar; import java.util.HashMap; @@ -79,7 +85,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; @@ -94,6 +99,10 @@ import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.MUST_BE_POSITIVE; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NOT_SUPPORTED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.NO_INTERFACE; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNABLE_TO_READ_VALUE; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNABLE_TO_RETRIEVE_METADATA; +import static 
com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNSUPPORTED_JSON_TYPE_CONVERSION; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNSUPPORTED_TYPE_CONVERSION; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.VALID_LABELS; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.VECTOR_ELEMENTS_NOT_NUMBERS; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.WAS_CLOSED_RS; @@ -262,17 +271,18 @@ private void populateColumns() { @Override DataType getCqlDataType(final int columnIndex) { - return this.currentRow.getColumnDefinitions().get(columnIndex - 1).getType(); + if (this.currentRow != null) { + return this.currentRow.getColumnDefinitions().get(columnIndex - 1).getType(); + } + return this.driverResultSet.getColumnDefinitions().get(columnIndex - 1).getType(); } @Override DataType getCqlDataType(final String columnLabel) { - return this.currentRow.getColumnDefinitions().get(columnLabel).getType(); - } - - @Override - public boolean absolute(final int row) throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + if (this.currentRow != null) { + return this.currentRow.getColumnDefinitions().get(columnLabel).getType(); + } + return this.driverResultSet.getColumnDefinitions().get(columnLabel).getType(); } @Override @@ -353,8 +363,25 @@ public int findColumn(final String columnLabel) throws SQLException { } @Override - public boolean first() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); + public InputStream getAsciiStream(final int columnIndex) throws SQLException { + checkIndex(columnIndex); + final String s = this.currentRow.getString(columnIndex - 1); + if (s != null) { + return new ByteArrayInputStream(s.getBytes(StandardCharsets.US_ASCII)); + } else { + return null; + } + } + + @Override + public InputStream getAsciiStream(final String columnLabel) throws SQLException { + checkName(columnLabel); + final String s = this.currentRow.getString(columnLabel); + if (s != null) { + return new ByteArrayInputStream(s.getBytes(StandardCharsets.US_ASCII)); + } else { + return null; + } } @Override @@ -503,6 +530,70 @@ public byte[] getBytes(final String columnLabel) throws SQLException { return null; } + @Override + public Reader getCharacterStream(final int columnIndex) throws SQLException { + checkIndex(columnIndex); + final byte[] byteArray = this.getBytes(columnIndex); + if (byteArray != null) { + final InputStream inputStream = new ByteArrayInputStream(byteArray); + try { + return new CharArrayReader(IOUtils.toCharArray(inputStream, StandardCharsets.UTF_8)); + } catch (final IOException e) { + throw new SQLException(String.format(UNABLE_TO_READ_VALUE, Reader.class.getSimpleName()), e); + } + } else { + return null; + } + } + + @Override + public Reader getCharacterStream(final String columnLabel) throws SQLException { + checkName(columnLabel); + final byte[] byteArray = this.getBytes(columnLabel); + if (byteArray != null) { + final InputStream inputStream = new ByteArrayInputStream(byteArray); + try { + return new CharArrayReader(IOUtils.toCharArray(inputStream, StandardCharsets.UTF_8)); + } catch (final IOException e) { + throw new SQLException(String.format(UNABLE_TO_READ_VALUE, Reader.class.getSimpleName()), e); + } + } else { + return null; + } + } + + @Override + public Clob getClob(final int columnIndex) throws SQLException { + checkIndex(columnIndex); + final byte[] byteArray = getBytes(columnIndex); + if (byteArray != null) { + final InputStream inputStream = new 
ByteArrayInputStream(byteArray); + try { + return new javax.sql.rowset.serial.SerialClob(IOUtils.toCharArray(inputStream, StandardCharsets.UTF_8)); + } catch (final IOException e) { + throw new SQLException(String.format(UNABLE_TO_READ_VALUE, Clob.class.getSimpleName()), e); + } + } else { + return null; + } + } + + @Override + public Clob getClob(final String columnLabel) throws SQLException { + checkName(columnLabel); + final byte[] byteArray = getBytes(columnLabel); + if (byteArray != null) { + final InputStream inputStream = new ByteArrayInputStream(byteArray); + try { + return new javax.sql.rowset.serial.SerialClob(IOUtils.toCharArray(inputStream, StandardCharsets.UTF_8)); + } catch (final IOException e) { + throw new SQLException(String.format(UNABLE_TO_READ_VALUE, Clob.class.getSimpleName()), e); + } + } else { + return null; + } + } + @Override public int getConcurrency() throws SQLException { checkNotClosed(); @@ -755,6 +846,16 @@ public ResultSetMetaData getMetaData() { return this.metadata; } + @Override + public NClob getNClob(final int columnIndex) throws SQLException { + return (NClob) getClob(columnIndex); + } + + @Override + public NClob getNClob(final String columnLabel) throws SQLException { + return (NClob) getClob(columnLabel); + } + @Override public Object getObject(final int columnIndex) throws SQLException { checkIndex(columnIndex); @@ -1063,16 +1164,18 @@ public T getObject(final int columnIndex, final Class type) throws SQLExc returnValue = getTimestamp(columnIndex); } else if (type == LocalDate.class) { returnValue = getLocalDate(columnIndex); - } else if (type == LocalDateTime.class || type == LocalTime.class) { - final Timestamp timestamp = getTimestamp(columnIndex, Calendar.getInstance(TimeZone.getTimeZone("UTC"))); + } else if (type == LocalDateTime.class || type == LocalTime.class || type == Calendar.class) { + final Timestamp timestamp = getTimestamp(columnIndex, Calendar.getInstance()); if (timestamp == null) { returnValue = null; } else { final LocalDateTime ldt = LocalDateTime.ofInstant(timestamp.toInstant(), ZoneId.of("UTC")); if (type == java.time.LocalDateTime.class) { returnValue = ldt; - } else { + } else if (type == java.time.LocalTime.class) { returnValue = ldt.toLocalTime(); + } else { + returnValue = new Calendar.Builder().setInstant(ldt.toEpochSecond(ZoneOffset.UTC)).build(); } } } else if (type == java.time.OffsetDateTime.class) { @@ -1115,7 +1218,7 @@ public T getObject(final int columnIndex, final Class type) throws SQLExc } else if (type == CqlVector.class) { returnValue = getVector(columnIndex); } else { - throw new SQLException(String.format("Conversion to type %s not supported.", type.getSimpleName())); + throw new SQLException(String.format(UNSUPPORTED_TYPE_CONVERSION, type.getSimpleName())); } return type.cast(returnValue); @@ -1144,8 +1247,7 @@ public T getObjectFromJson(final int columnIndex, final Class type) throw try { return getObjectMapper().readValue(json, type); } catch (final JsonProcessingException e) { - throw new SQLException(String.format("Unable to convert the column of index %d to an instance of %s", - columnIndex, type.getName()), e); + throw new SQLException(String.format(UNSUPPORTED_JSON_TYPE_CONVERSION, columnIndex, type.getName()), e); } } return null; @@ -1462,11 +1564,6 @@ public boolean isLast() throws SQLException { return !this.rowsIterator.hasNext(); } - @Override - public boolean last() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - @Override public synchronized 
boolean next() { if (hasMoreRows()) { @@ -1481,16 +1578,6 @@ public synchronized boolean next() { return false; } - @Override - public boolean previous() throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - - @Override - public boolean relative(final int arg0) throws SQLException { - throw new SQLFeatureNotSupportedException(NOT_SUPPORTED); - } - /** * Gets whether a column was a null value. * @@ -1700,9 +1787,16 @@ public boolean isSearchable(final int column) throws SQLException { return false; } final String columnName = getColumnName(column); + final String schemaName = getSchemaName(column); + final String tableName = getTableName(column); + // If the schema or table name is not defined (this should not happen here, but better to be careful), + // always returns false since we cannot determine if the column is searchable in this context. + if (StringUtils.isEmpty(schemaName) || StringUtils.isEmpty(tableName)) { + return false; + } final AtomicBoolean searchable = new AtomicBoolean(false); - statement.connection.getSession().getMetadata().getKeyspace(getSchemaName(column)) - .flatMap(metadata -> metadata.getTable(getTableName(column))) + statement.connection.getSession().getMetadata().getKeyspace(schemaName) + .flatMap(metadata -> metadata.getTable(tableName)) .ifPresent(tableMetadata -> { boolean result; // Check first if the column is a clustering column or in a partitioning key. diff --git a/src/main/java/com/ing/data/cassandra/jdbc/CassandraStatement.java b/src/main/java/com/ing/data/cassandra/jdbc/CassandraStatement.java index 248794c..d4ea88b 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/CassandraStatement.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/CassandraStatement.java @@ -142,7 +142,7 @@ public class CassandraStatement extends AbstractStatement */ protected boolean escapeProcessing = true; /** - * The Datastax Java driver statement. + * The Java Driver for Apache Cassandra® statement. */ protected com.datastax.oss.driver.api.core.cql.Statement statement; /** diff --git a/src/main/java/com/ing/data/cassandra/jdbc/ColumnDefinitions.java b/src/main/java/com/ing/data/cassandra/jdbc/ColumnDefinitions.java index ed40119..55394e0 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/ColumnDefinitions.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/ColumnDefinitions.java @@ -16,6 +16,7 @@ package com.ing.data.cassandra.jdbc; import com.datastax.oss.driver.api.core.type.DataType; +import org.apache.commons.lang3.StringUtils; import javax.annotation.Nonnull; import java.util.Arrays; @@ -310,6 +311,17 @@ public Definition(final String keyspace, final String table, final String name, this.type = type; } + /** + * Builds a column definition in an anonymous table (useful for metadata result sets built programmatically). + * + * @param name The column name. + * @param type The column type. + * @return A new column definition instance. + */ + public static Definition buildDefinitionInAnonymousTable(final String name, final DataType type) { + return new Definition(StringUtils.EMPTY, StringUtils.EMPTY, name, type); + } + /** * Gets the name of the keyspace this column is part of. 
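The `CassandraPreparedStatement.setObject()` conversions and the new `CassandraResultSet` accessors shown earlier can be combined into a simple round trip. The table and columns below are hypothetical, and the bindings follow the conversions added in this diff (OffsetDateTime handled as `timestamp`, `java.net.URL` as text, `byte[]` as `blob`); it is only a sketch, not project test code.

```java
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.time.OffsetDateTime;
import java.util.Calendar;

public class TypeConversionsExample {
    public static void main(final String[] args) throws Exception {
        try (Connection connection = DriverManager.getConnection(
                "jdbc:cassandra://localhost:9042/my_keyspace?localdatacenter=datacenter1")) {
            // Hypothetical table:
            // CREATE TABLE docs (id int PRIMARY KEY, created timestamp, link text, payload blob);
            try (PreparedStatement insert = connection.prepareStatement(
                    "INSERT INTO docs (id, created, link, payload) VALUES (?, ?, ?, ?)")) {
                insert.setObject(1, 1);
                // OffsetDateTime is now mapped to TIMESTAMP_WITH_TIMEZONE and bound as an Instant.
                insert.setObject(2, OffsetDateTime.now());
                // java.net.URL is now handled as DATALINK and bound as a string.
                insert.setObject(3, new URL("https://cassandra.apache.org"));
                // byte[] is now mapped to VARBINARY and bound as a blob.
                insert.setObject(4, "raw content".getBytes(StandardCharsets.UTF_8));
                insert.execute();
            }

            try (PreparedStatement select = connection.prepareStatement(
                     "SELECT created, payload FROM docs WHERE id = ?")) {
                select.setObject(1, 1);
                try (ResultSet rs = select.executeQuery()) {
                    if (rs.next()) {
                        // New in this release: Calendar conversion and Clob/Reader accessors.
                        final Calendar created = rs.getObject("created", Calendar.class);
                        final Clob payload = rs.getClob("payload");
                        System.out.println(created.toInstant() + " / " + payload.length());
                    }
                }
            }
        }
    }
}
```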
* diff --git a/src/main/java/com/ing/data/cassandra/jdbc/SessionHolder.java b/src/main/java/com/ing/data/cassandra/jdbc/SessionHolder.java index 52eccdb..4ce471a 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/SessionHolder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/SessionHolder.java @@ -40,19 +40,18 @@ import com.ing.data.cassandra.jdbc.codec.TimestampToLongCodec; import com.ing.data.cassandra.jdbc.codec.TinyintToIntCodec; import com.ing.data.cassandra.jdbc.codec.VarintToIntCodec; +import com.ing.data.cassandra.jdbc.utils.ContactPoint; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.net.InetSocketAddress; import java.sql.SQLException; import java.sql.SQLNonTransientConnectionException; import java.time.Duration; import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -70,6 +69,7 @@ import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONFIG_FILE; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONNECT_TIMEOUT; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONSISTENCY_LEVEL; +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONTACT_POINTS; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_DATABASE_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_DEBUG; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_ENABLE_SSL; @@ -77,11 +77,9 @@ import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_LOAD_BALANCING_POLICY; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_LOCAL_DATACENTER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PASSWORD; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PORT_NUMBER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_RECONNECT_POLICY; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_REQUEST_TIMEOUT; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_RETRY_POLICY; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_SERVER_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_SSL_ENGINE_FACTORY; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_SSL_HOSTNAME_VERIFICATION; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_TCP_NO_DELAY; @@ -181,6 +179,7 @@ boolean acquire() { } } + @SuppressWarnings("unchecked") private Session createSession(final Properties properties) throws SQLException { File configurationFile = null; boolean configurationFileExists = false; @@ -209,8 +208,7 @@ private Session createSession(final Properties properties) throws SQLException { } } - final String hosts = properties.getProperty(TAG_SERVER_NAME); - final int port = Integer.parseInt(properties.getProperty(TAG_PORT_NUMBER)); + final List contactPoints = (List) properties.get(TAG_CONTACT_POINTS); final String cloudSecureConnectBundle = properties.getProperty(TAG_CLOUD_SECURE_CONNECT_BUNDLE); final String keyspace = properties.getProperty(TAG_DATABASE_NAME); final String username = properties.getProperty(TAG_USER, StringUtils.EMPTY); @@ -243,12 +241,14 @@ private Session createSession(final Properties properties) throws SQLException { if (StringUtils.isNotBlank(cloudSecureConnectBundle)) { driverConfigLoaderBuilder.withString(DefaultDriverOption.CLOUD_SECURE_CONNECT_BUNDLE, 
cloudSecureConnectBundle); - LOG.info("Cloud secure connect bundle used. Host(s) {} will be ignored.", hosts); + LOG.info("Cloud secure connect bundle used. Host(s) {} will be ignored.", + contactPoints.stream() + .map(ContactPoint::toString) + .collect(Collectors.joining(", "))); } else { - builder.addContactPoints(Arrays.stream(hosts.split("--")) - .map(host -> InetSocketAddress.createUnresolved(host, port)) - .collect(Collectors.toList()) - ); + builder.addContactPoints(contactPoints.stream() + .map(ContactPoint::toInetSocketAddress) + .collect(Collectors.toList())); } // Set request timeout (in milliseconds) if defined. @@ -261,7 +261,7 @@ private Session createSession(final Properties properties) throws SQLException { configureSocketOptions(driverConfigLoaderBuilder, properties); // Set credentials when applicable. - if (username.length() > 0) { + if (!username.isEmpty()) { builder.withAuthCredentials(username, password); } @@ -273,7 +273,7 @@ private Session createSession(final Properties properties) throws SQLException { if (configurationFile == null || !configurationFileExists) { builder.withLocalDatacenter(localDatacenter); } - if (loadBalancingPolicy.length() > 0) { + if (!loadBalancingPolicy.isEmpty()) { // if a custom load balancing policy has been given in the JDBC URL, parse it and add it to the cluster // builder. try { @@ -290,7 +290,7 @@ private Session createSession(final Properties properties) throws SQLException { } } - if (retryPolicy.length() > 0) { + if (!retryPolicy.isEmpty()) { // if retry policy has been given in the JDBC URL, parse it and add it to the cluster builder. try { driverConfigLoaderBuilder.withString(DefaultDriverOption.RETRY_POLICY_CLASS, retryPolicy); @@ -302,7 +302,7 @@ private Session createSession(final Properties properties) throws SQLException { } } - if (reconnectPolicy.length() > 0) { + if (!reconnectPolicy.isEmpty()) { // if reconnection policy has been given in the JDBC URL, parse it and add it to the cluster builder. try { final Map parsedPolicy = Optional.ofNullable( diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/BasicVersionedMetadata.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/BasicVersionedMetadata.java new file mode 100644 index 0000000..d863976 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/BasicVersionedMetadata.java @@ -0,0 +1,82 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.metadata; + +import org.semver4j.Semver; + +/** + * A basic implementation of a versioned database metadata. + * + * @see VersionedMetadata + * @see BuiltInFunctionsMetadataBuilder + */ +public class BasicVersionedMetadata implements VersionedMetadata { + + private final String metadataName; + private final Semver validFrom; + private final Semver invalidFrom; + + /** + * Constructs a database metadata valid in the specified range of Cassandra versions. + * + * @param metadataName The metadata name (a built-in function name for example). 
+ * @param validFrom The minimal Cassandra version from which the metadata exists. If {@code null}, we consider + * the metadata exists in any version of the Cassandra database. + * @param invalidFrom The first Cassandra version in which the metadata does not exist anymore. If {@code null}, + * we consider the metadata exists in any version of the Cassandra database greater than + * {@code validFrom}. + */ + public BasicVersionedMetadata(final String metadataName, final String validFrom, final String invalidFrom) { + this.metadataName = metadataName; + this.validFrom = Semver.coerce(validFrom); + this.invalidFrom = Semver.coerce(invalidFrom); + } + + /** + * Constructs a database metadata valid from the specified version of Cassandra. + * + * @param metadataName The metadata name (a built-in function name for example). + * @param validFrom The minimal Cassandra version from which the metadata exists. + */ + public BasicVersionedMetadata(final String metadataName, final String validFrom) { + this(metadataName, validFrom, null); + } + + /** + * Constructs a database metadata valid in any version of Cassandra. + * + * @param metadataName The metadata name (a built-in function name for example). + */ + public BasicVersionedMetadata(final String metadataName) { + this(metadataName, null); + } + + @Override + public String getName() { + return this.metadataName; + } + + @Override + public Semver isValidFrom() { + return this.validFrom; + } + + @Override + public Semver isInvalidFrom() { + return this.invalidFrom; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/BuiltInFunctionsMetadataBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/BuiltInFunctionsMetadataBuilder.java new file mode 100644 index 0000000..f87af07 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/BuiltInFunctionsMetadataBuilder.java @@ -0,0 +1,139 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.metadata; + +import com.ing.data.cassandra.jdbc.CassandraDatabaseMetaData; +import org.apache.commons.lang3.StringUtils; + +import java.sql.DatabaseMetaData; +import java.util.Arrays; +import java.util.List; + +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_5; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.buildMetadataList; + +/** + * Utility class building list of Cassandra built-in functions returned in {@link CassandraDatabaseMetaData}. + */ +public class BuiltInFunctionsMetadataBuilder { + + private final String databaseVersion; + + /** + * Constructor. + * + * @param databaseVersion The database version the driver is currently connected to. + */ + public BuiltInFunctionsMetadataBuilder(final String databaseVersion) { + this.databaseVersion = databaseVersion; + } + + /** + * Builds the comma-separated list of the math functions available in this Cassandra database. + * This method is used to implement the method {@link DatabaseMetaData#getNumericFunctions()}. 
+ * + * @return A valid result for the implementation of {@link DatabaseMetaData#getNumericFunctions()}. + */ + public String buildNumericFunctionsList() { + final List numericFunctions = Arrays.asList( + // Math functions introduced in Cassandra 5.0 (see https://issues.apache.org/jira/browse/CASSANDRA-17221) + new BasicVersionedMetadata("abs", CASSANDRA_5), + new BasicVersionedMetadata("exp", CASSANDRA_5), + new BasicVersionedMetadata("log", CASSANDRA_5), + new BasicVersionedMetadata("log10", CASSANDRA_5), + new BasicVersionedMetadata("round", CASSANDRA_5), + // We consider here the vector similarity functions introduced by CEP-30 as numeric functions (see + // https://issues.apache.org/jira/browse/CASSANDRA-18640). + new BasicVersionedMetadata("similarity_cosine", CASSANDRA_5), + new BasicVersionedMetadata("similarity_euclidean", CASSANDRA_5), + new BasicVersionedMetadata("similarity_dot_product", CASSANDRA_5) + ); + return buildMetadataList(numericFunctions, databaseVersion); + } + + /** + * Builds the comma-separated list of the time and date functions available in this Cassandra database. + * This method is used to implement the method {@link DatabaseMetaData#getTimeDateFunctions()}. + * + * @return A valid result for the implementation of {@link DatabaseMetaData#getTimeDateFunctions()}. + */ + public String buildTimeDateFunctionsList() { + // See: https://cassandra.apache.org/doc/latest/cassandra/cql/functions.html + // In Cassandra 5.0, functions named using camel case have been renamed to use snake case + // (see https://issues.apache.org/jira/browse/CASSANDRA-18037) + // and deprecated functions dateOf and unixTimestampOf have been removed (see + // https://issues.apache.org/jira/browse/CASSANDRA-18328). + final List timeDateFunctions = Arrays.asList( + new BasicVersionedMetadata("dateOf", null, CASSANDRA_5), + new BasicVersionedMetadata("now"), + new BasicVersionedMetadata("minTimeuuid"), + new BasicVersionedMetadata("min_timeuuid", CASSANDRA_5), + new BasicVersionedMetadata("maxTimeuuid"), + new BasicVersionedMetadata("max_timeuuid", CASSANDRA_5), + new BasicVersionedMetadata("unixTimestampOf", null, CASSANDRA_5), + new BasicVersionedMetadata("toDate", null, CASSANDRA_5), + new BasicVersionedMetadata("to_date", CASSANDRA_5), + new BasicVersionedMetadata("toTimestamp", null, CASSANDRA_5), + new BasicVersionedMetadata("to_timestamp", CASSANDRA_5), + new BasicVersionedMetadata("toUnixTimestamp", null, CASSANDRA_5), + new BasicVersionedMetadata("to_unix_timestamp", CASSANDRA_5), + new BasicVersionedMetadata("currentTimestamp", null, CASSANDRA_5), + new BasicVersionedMetadata("current_timestamp", CASSANDRA_5), + new BasicVersionedMetadata("currentDate", null, CASSANDRA_5), + new BasicVersionedMetadata("current_date", CASSANDRA_5), + new BasicVersionedMetadata("currentTime", null, CASSANDRA_5), + new BasicVersionedMetadata("current_time", CASSANDRA_5), + new BasicVersionedMetadata("currentTimeUUID", null, CASSANDRA_5), + new BasicVersionedMetadata("current_timeuuid", CASSANDRA_5) + ); + return buildMetadataList(timeDateFunctions, databaseVersion); + } + + /** + * Builds the comma-separated list of the system functions available in this Cassandra database. + * This method is used to implement the method {@link DatabaseMetaData#getSystemFunctions()}. + * + * @return A valid result for the implementation of {@link DatabaseMetaData#getSystemFunctions()}.
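Illustrative sketch, not part of the patch: how this builder is expected to behave, assuming that buildMetadataList(), defined in DriverUtil and not shown in this diff, keeps only the entries whose validFrom/invalidFrom range contains the connected database version.

    final BuiltInFunctionsMetadataBuilder metadataBuilder = new BuiltInFunctionsMetadataBuilder("4.1.3");
    final String timeDateFunctions = metadataBuilder.buildTimeDateFunctionsList();
    // Against a 4.1 cluster, the returned list should contain the legacy camel-case names
    // (e.g. "dateOf", "toDate") but not the snake-case variants introduced in Cassandra 5.0
    // (e.g. "to_date", "current_timestamp").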
+ */ + public String buildSystemFunctionsList() { + final List systemFunctions = Arrays.asList( + new BasicVersionedMetadata("token"), + new BasicVersionedMetadata("ttl"), + new BasicVersionedMetadata("writetime"), + // Masking functions introduced by CEP-20 (see + // https://cwiki.apache.org/confluence/display/CASSANDRA/CEP-20%3A+Dynamic+Data+Masking) + new BasicVersionedMetadata("mask_default", CASSANDRA_5), + new BasicVersionedMetadata("mask_hash", CASSANDRA_5), + new BasicVersionedMetadata("mask_inner", CASSANDRA_5), + new BasicVersionedMetadata("mask_null", CASSANDRA_5), + new BasicVersionedMetadata("mask_outer", CASSANDRA_5), + new BasicVersionedMetadata("mask_replace", CASSANDRA_5) + ); + return buildMetadataList(systemFunctions, this.databaseVersion); + } + + /** + * Builds the comma-separated list of the string functions available in this Cassandra database. + * This method is used to implement the method {@link DatabaseMetaData#getStringFunctions()}. + * + * @return A valid result for the implementation of {@link DatabaseMetaData#getStringFunctions()}. + */ + public String buildStringFunctionsList() { + // Cassandra does not implement natively string functions. + return StringUtils.EMPTY; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/CatalogMetadataResultSetBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/CatalogMetadataResultSetBuilder.java index f7f22d9..dff8b25 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/CatalogMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/CatalogMetadataResultSetBuilder.java @@ -15,6 +15,7 @@ package com.ing.data.cassandra.jdbc.metadata; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; @@ -22,6 +23,8 @@ import java.sql.SQLException; import java.util.ArrayList; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; + /** * Utility class building metadata result sets ({@link CassandraMetadataResultSet} objects) related to catalogs. 
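As a consumer-side illustration (not part of the patch), the single-row result set built just below is what a JDBC client sees through getCatalogs(); the catalog value is the cluster name resolved by the option set, and connection is assumed to be an open connection of this driver.

    try (final ResultSet rs = connection.getMetaData().getCatalogs()) {
        if (rs.next()) {
            final String clusterName = rs.getString("TABLE_CAT"); // the Cassandra cluster name
        }
    }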
*/ @@ -52,10 +55,14 @@ public CatalogMetadataResultSetBuilder(final CassandraStatement statement) throw */ public CassandraMetadataResultSet buildCatalogs() throws SQLException { final ArrayList catalogs = new ArrayList<>(); - final MetadataRow row = new MetadataRow().addEntry(TABLE_CATALOG_SHORTNAME, - this.statement.getConnection().getCatalog()); - catalogs.add(row); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(catalogs)); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_CATALOG_SHORTNAME, DataTypes.TEXT) + ); + + catalogs.add(new MetadataRow().withTemplate(rowTemplate, this.statement.getConnection().getCatalog())); + + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(catalogs)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/ColumnMetadataResultSetBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/ColumnMetadataResultSetBuilder.java index 3b69f53..aa14311 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/ColumnMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/ColumnMetadataResultSetBuilder.java @@ -15,6 +15,7 @@ package com.ing.data.cassandra.jdbc.metadata; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; import com.ing.data.cassandra.jdbc.types.AbstractJdbcType; @@ -29,6 +30,7 @@ import java.util.Comparator; import java.util.concurrent.atomic.AtomicInteger; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; import static com.ing.data.cassandra.jdbc.types.AbstractJdbcType.DEFAULT_PRECISION; import static com.ing.data.cassandra.jdbc.types.TypesMap.getTypeForComparator; @@ -138,7 +140,32 @@ public CassandraMetadataResultSet buildColumns(final String schemaPattern, final String columnNamePattern) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList columns = new ArrayList<>(); - + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_CATALOG_SHORTNAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_SIZE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(BUFFER_LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DECIMAL_DIGITS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NUM_PRECISION_RADIX, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_DEFAULT, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATETIME_SUB, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CHAR_OCTET_LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ORDINAL_POSITION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(IS_NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCOPE_CATALOG, DataTypes.TEXT), + 
buildDefinitionInAnonymousTable(SCOPE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCOPE_TABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SOURCE_DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(IS_AUTOINCREMENT, DataTypes.TEXT), + buildDefinitionInAnonymousTable(IS_GENERATED_COLUMN, DataTypes.TEXT) + ); filterBySchemaNamePattern(schemaPattern, keyspaceMetadata -> filterByTableNamePattern(tableNamePattern, keyspaceMetadata, tableMetadata -> { @@ -170,31 +197,31 @@ public CassandraMetadataResultSet buildColumns(final String schemaPattern, columnMetadata.getType(), e.getMessage()); } - final MetadataRow row = new MetadataRow() - .addEntry(TABLE_CATALOG_SHORTNAME, catalog) - .addEntry(TABLE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TABLE_NAME, tableMetadata.getName().asInternal()) - .addEntry(COLUMN_NAME, columnMetadata.getName().asInternal()) - .addEntry(DATA_TYPE, String.valueOf(jdbcType)) - .addEntry(TYPE_NAME, columnMetadata.getType().toString()) - .addEntry(COLUMN_SIZE, String.valueOf(columnSize)) - .addEntry(BUFFER_LENGTH, String.valueOf(0)) - .addEntry(DECIMAL_DIGITS, null) - .addEntry(NUM_PRECISION_RADIX, String.valueOf(radix)) - .addEntry(NULLABLE, String.valueOf(DatabaseMetaData.columnNoNulls)) - .addEntry(REMARKS, null) - .addEntry(COLUMN_DEFAULT, null) - .addEntry(SQL_DATA_TYPE, null) - .addEntry(SQL_DATETIME_SUB, null) - .addEntry(CHAR_OCTET_LENGTH, String.valueOf(Integer.MAX_VALUE)) - .addEntry(ORDINAL_POSITION, String.valueOf(colIndex.getAndIncrement())) - .addEntry(IS_NULLABLE, StringUtils.EMPTY) - .addEntry(SCOPE_CATALOG, null) - .addEntry(SCOPE_SCHEMA, null) - .addEntry(SCOPE_TABLE, null) - .addEntry(SOURCE_DATA_TYPE, null) - .addEntry(IS_AUTOINCREMENT, NO_VALUE) - .addEntry(IS_GENERATED_COLUMN, NO_VALUE); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TABLE_CAT + keyspaceMetadata.getName().asInternal(), // TABLE_SCHEM + tableMetadata.getName().asInternal(), // TABLE_NAME + columnMetadata.getName().asInternal(), // COLUMN_NAME + String.valueOf(jdbcType), // DATA_TYPE + columnMetadata.getType().toString(), // TYPE_NAME + String.valueOf(columnSize), // COLUMN_SIZE + String.valueOf(0), // BUFFER_LENGTH + null, // DECIMAL_DIGITS + String.valueOf(radix), // NUM_PREC_RADIX + String.valueOf(DatabaseMetaData.columnNoNulls), // NULLABLE + null, // REMARKS + null, // COLUMN_DEF + null, // SQL_DATA_TYPE + null, // SQL_DATETIME_SUB + String.valueOf(Integer.MAX_VALUE), // CHAR_OCTET_LENGTH + String.valueOf(colIndex.getAndIncrement()), // ORDINAL_POSITION + StringUtils.EMPTY, // IS_NULLABLE + null, // SCOPE_CATALOG + null, // SCOPE_SCHEMA + null, // SCOPE_TABLE + null, // SOURCE_DATA_TYPE + NO_VALUE, // IS_AUTOINCREMENT + NO_VALUE); // IS_GENERATED_COLUMN columns.add(row); }, columnMetadata -> colIndex.getAndIncrement()); }, null), null); @@ -204,7 +231,8 @@ public CassandraMetadataResultSet buildColumns(final String schemaPattern, columns.sort(Comparator.comparing(row -> ((MetadataRow) row).getString(TABLE_SCHEMA)) .thenComparing(row -> ((MetadataRow) row).getString(TABLE_NAME)) .thenComparing(row -> ((MetadataRow) row).getString(ORDINAL_POSITION))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(columns)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(columns)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/FunctionMetadataResultSetBuilder.java 
b/src/main/java/com/ing/data/cassandra/jdbc/metadata/FunctionMetadataResultSetBuilder.java index 93f1d75..2e22b09 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/FunctionMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/FunctionMetadataResultSetBuilder.java @@ -16,6 +16,7 @@ package com.ing.data.cassandra.jdbc.metadata; import com.datastax.oss.driver.api.core.CqlIdentifier; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; import com.ing.data.cassandra.jdbc.types.AbstractJdbcType; @@ -28,6 +29,7 @@ import java.util.Comparator; import java.util.List; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; import static com.ing.data.cassandra.jdbc.types.TypesMap.getTypeForComparator; import static java.sql.DatabaseMetaData.functionColumnIn; import static java.sql.DatabaseMetaData.functionReturn; @@ -96,17 +98,25 @@ public CassandraMetadataResultSet buildFunctions(final String schemaPattern, final String functionNamePattern) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList functionsRows = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(FUNCTION_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FUNCTION_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FUNCTION_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FUNCTION_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SPECIFIC_NAME, DataTypes.TEXT) + ); filterBySchemaNamePattern(schemaPattern, keyspaceMetadata -> filterByFunctionNamePattern(functionNamePattern, keyspaceMetadata, (functionSignature, functionMetadata) -> { - final MetadataRow row = new MetadataRow() - .addEntry(FUNCTION_CATALOG, catalog) - .addEntry(FUNCTION_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(FUNCTION_NAME, functionSignature.getName().asInternal()) - .addEntry(REMARKS, StringUtils.EMPTY) - .addEntry(FUNCTION_TYPE, String.valueOf(DatabaseMetaData.functionNoTable)) - .addEntry(SPECIFIC_NAME, functionSignature.getName().asInternal()); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // FUNCTION_CAT + keyspaceMetadata.getName().asInternal(), // FUNCTION_SCHEM + functionSignature.getName().asInternal(), // FUNCTION_NAME + StringUtils.EMPTY, // REMARKS + String.valueOf(DatabaseMetaData.functionNoTable), // FUNCTION_TYPE + functionSignature.getName().asInternal()); // SPECIFIC_NAME functionsRows.add(row); }), null); @@ -114,7 +124,8 @@ public CassandraMetadataResultSet buildFunctions(final String schemaPattern, // here SPECIFIC_NAME is equal to FUNCTION_NAME). 
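A consumer-side sketch, not part of the patch, of how these function rows surface through the standard java.sql.DatabaseMetaData API; the keyspace name is a placeholder.

    final DatabaseMetaData dbmd = connection.getMetaData();
    try (final ResultSet rs = dbmd.getFunctions(null, "some_keyspace", "%")) {
        while (rs.next()) {
            final String functionName = rs.getString("FUNCTION_NAME");
            final String functionType = rs.getString("FUNCTION_TYPE"); // always functionNoTable here
        }
    }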
functionsRows.sort(Comparator.comparing(row -> ((MetadataRow) row).getString(FUNCTION_SCHEMA)) .thenComparing(row -> ((MetadataRow) row).getString(FUNCTION_NAME))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(functionsRows)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(functionsRows)); } /** @@ -200,6 +211,25 @@ public CassandraMetadataResultSet buildFunctionColumns(final String schemaPatter final String columnNamePattern) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList functionParamsRows = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(FUNCTION_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FUNCTION_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FUNCTION_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(PRECISION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCALE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(RADIX, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CHAR_OCTET_LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ORDINAL_POSITION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(IS_NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SPECIFIC_NAME, DataTypes.TEXT) + ); filterBySchemaNamePattern(schemaPattern, keyspaceMetadata -> filterByFunctionNamePattern(functionNamePattern, keyspaceMetadata, @@ -207,24 +237,24 @@ public CassandraMetadataResultSet buildFunctionColumns(final String schemaPatter // Function return type. 
final AbstractJdbcType returnJdbcType = getTypeForComparator(functionMetadata.getReturnType().asCql(false, true)); - final MetadataRow row = new MetadataRow() - .addEntry(FUNCTION_CATALOG, catalog) - .addEntry(FUNCTION_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(FUNCTION_NAME, functionSignature.getName().asInternal()) - .addEntry(COLUMN_NAME, StringUtils.EMPTY) - .addEntry(COLUMN_TYPE, String.valueOf(functionReturn)) - .addEntry(DATA_TYPE, String.valueOf(returnJdbcType.getJdbcType())) - .addEntry(TYPE_NAME, functionMetadata.getReturnType().toString()) - .addEntry(PRECISION, String.valueOf(returnJdbcType.getPrecision(null))) - .addEntry(LENGTH, String.valueOf(Integer.MAX_VALUE)) - .addEntry(SCALE, String.valueOf(returnJdbcType.getScale(null))) - .addEntry(RADIX, String.valueOf(returnJdbcType.getPrecision(null))) - .addEntry(NULLABLE, String.valueOf(typeNullable)) - .addEntry(REMARKS, StringUtils.EMPTY) - .addEntry(CHAR_OCTET_LENGTH, null) - .addEntry(ORDINAL_POSITION, "0") - .addEntry(IS_NULLABLE, YES_VALUE) - .addEntry(SPECIFIC_NAME, functionSignature.getName().asInternal()); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // FUNCTION_CAT + keyspaceMetadata.getName().asInternal(), // FUNCTION_SCHEM + functionSignature.getName().asInternal(), // FUNCTION_NAME + StringUtils.EMPTY, // COLUMN_NAME + String.valueOf(functionReturn), // COLUMN_TYPE + String.valueOf(returnJdbcType.getJdbcType()), // DATA_TYPE + functionMetadata.getReturnType().toString(), // TYPE_NAME + String.valueOf(returnJdbcType.getPrecision(null)), // PRECISION + String.valueOf(Integer.MAX_VALUE), // LENGTH + String.valueOf(returnJdbcType.getScale(null)), // SCALE + String.valueOf(returnJdbcType.getPrecision(null)), // RADIX + String.valueOf(typeNullable), // NULLABLE + StringUtils.EMPTY, // REMARKS + null, // CHAR_OCTET_LENGTH + "0", // ORDINAL_POSITION + YES_VALUE, // IS_NULLABLE + functionSignature.getName().asInternal()); // SPECIFIC_NAME functionParamsRows.add(row); // Function input parameters. 
@@ -234,24 +264,24 @@ public CassandraMetadataResultSet buildFunctionColumns(final String schemaPatter || matchesPattern(columnNamePattern, paramNames.get(i).asInternal())) { final AbstractJdbcType paramJdbcType = getTypeForComparator( functionSignature.getParameterTypes().get(i).asCql(false, true)); - final MetadataRow paramRow = new MetadataRow() - .addEntry(FUNCTION_CATALOG, catalog) - .addEntry(FUNCTION_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(FUNCTION_NAME, functionSignature.getName().asInternal()) - .addEntry(COLUMN_NAME, paramNames.get(i).asInternal()) - .addEntry(COLUMN_TYPE, String.valueOf(functionColumnIn)) - .addEntry(DATA_TYPE, String.valueOf(paramJdbcType.getJdbcType())) - .addEntry(TYPE_NAME, functionSignature.getParameterTypes().get(i).toString()) - .addEntry(PRECISION, String.valueOf(paramJdbcType.getPrecision(null))) - .addEntry(LENGTH, String.valueOf(Integer.MAX_VALUE)) - .addEntry(SCALE, String.valueOf(paramJdbcType.getScale(null))) - .addEntry(RADIX, String.valueOf(paramJdbcType.getPrecision(null))) - .addEntry(NULLABLE, String.valueOf(typeNullable)) - .addEntry(REMARKS, StringUtils.EMPTY) - .addEntry(CHAR_OCTET_LENGTH, null) - .addEntry(ORDINAL_POSITION, String.valueOf(i + 1)) - .addEntry(IS_NULLABLE, YES_VALUE) - .addEntry(SPECIFIC_NAME, functionSignature.getName().asInternal()); + final MetadataRow paramRow = new MetadataRow().withTemplate(rowTemplate, + catalog, // FUNCTION_CAT + keyspaceMetadata.getName().asInternal(), // FUNCTION_SCHEM + functionSignature.getName().asInternal(), // FUNCTION_NAME + paramNames.get(i).asInternal(), // COLUMN_NAME + String.valueOf(functionColumnIn), // COLUMN_TYPE + String.valueOf(paramJdbcType.getJdbcType()), // DATA_TYPE + functionSignature.getParameterTypes().get(i).toString(), // TYPE_NAME + String.valueOf(paramJdbcType.getPrecision(null)), // PRECISION + String.valueOf(Integer.MAX_VALUE), // LENGTH + String.valueOf(paramJdbcType.getScale(null)), // SCALE + String.valueOf(paramJdbcType.getPrecision(null)), // RADIX + String.valueOf(typeNullable), // NULLABLE + StringUtils.EMPTY, // REMARKS + null, // CHAR_OCTET_LENGTH + String.valueOf(i + 1), // ORDINAL_POSITION + YES_VALUE, // IS_NULLABLE + functionSignature.getName().asInternal()); // SPECIFIC_NAME functionParamsRows.add(paramRow); } } @@ -264,7 +294,7 @@ public CassandraMetadataResultSet buildFunctionColumns(final String schemaPatter .thenComparing(row -> ((MetadataRow) row).getString(SPECIFIC_NAME)) .thenComparing(row -> Integer.valueOf(((MetadataRow) row).getString(ORDINAL_POSITION)))); return CassandraMetadataResultSet.buildFrom(this.statement, - new MetadataResultSet().setRows(functionParamsRows)); + new MetadataResultSet(rowTemplate).setRows(functionParamsRows)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataResultSet.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataResultSet.java index ca47dad..daab361 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataResultSet.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataResultSet.java @@ -36,6 +36,16 @@ public class MetadataResultSet { public MetadataResultSet() { } + /** + * Constructor including the columns definitions from a metadata row template. + * + * @param rowTemplate The metadata row template from which the columns definitions of the metadata result set + * are extracted. 
+ */ + public MetadataResultSet(final MetadataRow.MetadataRowTemplate rowTemplate) { + this.columnDefinitions = new ColumnDefinitions(rowTemplate.getColumnDefinitions()); + } + /** * Add rows to the metadata result set. * diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataRow.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataRow.java index 2b0992b..e1f4341 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataRow.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/MetadataRow.java @@ -35,6 +35,8 @@ import java.util.Set; import java.util.UUID; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.UNABLE_TO_POPULATE_METADATA_ROW; + /** * The content of a metadata row returned in a {@link CassandraMetadataResultSet}. */ @@ -73,6 +75,29 @@ public MetadataRow addEntry(final String key, final String value) { return this; } + /** + * Populates a metadata row defined by a row template with the specified values. + *

+ * The number of values must match the number of columns defined in the row template, otherwise a runtime + * exception will be thrown. + *

+ * + * @param template The row template. + * @param values The values used to populate the metadata row. + * @return The updated {@code MetadataRow} instance. + * @throws RuntimeException when the number of values does not match the number of columns defined in the row + * template. + */ + public MetadataRow withTemplate(final MetadataRowTemplate template, final String... values) { + if (template.getColumnDefinitions().length != values.length) { + throw new RuntimeException(UNABLE_TO_POPULATE_METADATA_ROW); + } + for (int i = 0; i < template.getColumnDefinitions().length; i++) { + this.addEntry(template.getColumnDefinitions()[i].getName(), values[i]); + } + return this; + } + /** * Gets the column definitions for the metadata row. * @@ -619,4 +644,34 @@ private Integer getIndex(final String name) { return idx; } + /** + * A template of metadata row. + *

+ * This is useful to define the columns of a row in a metadata result set and populate it. + *

+ */ + public static class MetadataRowTemplate { + + private final Definition[] columnDefinitions; + + /** + * Constructor. + * + * @param columnDefinitions The definitions of each column of the row template. + */ + public MetadataRowTemplate(final Definition... columnDefinitions) { + this.columnDefinitions = columnDefinitions; + } + + /** + * Gets the definitions of the columns in the row template. + * + * @return The array of columns definitions. + */ + public Definition[] getColumnDefinitions() { + return this.columnDefinitions; + } + + } + } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/SchemaMetadataResultSetBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/SchemaMetadataResultSetBuilder.java index 5cad631..5e16d60 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/SchemaMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/SchemaMetadataResultSetBuilder.java @@ -15,6 +15,7 @@ package com.ing.data.cassandra.jdbc.metadata; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; @@ -23,6 +24,8 @@ import java.util.ArrayList; import java.util.Comparator; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; + /** * Utility class building metadata result sets ({@link CassandraMetadataResultSet} objects) related to schemas. */ @@ -63,17 +66,22 @@ public CassandraMetadataResultSet buildSchemas(final String schemaPattern) throws SQLException { final ArrayList schemas = new ArrayList<>(); final String catalog = this.connection.getCatalog(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_CATALOG, DataTypes.TEXT) + ); filterBySchemaNamePattern(schemaPattern, keyspaceMetadata -> { - final MetadataRow row = new MetadataRow() - .addEntry(TABLE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TABLE_CATALOG, catalog); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + keyspaceMetadata.getName().asInternal(), // TABLE_SCHEM + catalog); // TABLE_CATALOG schemas.add(row); }, null); // Results should all have the same TABLE_CATALOG, so just sort them by TABLE_SCHEM. 
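A condensed sketch, not part of the patch, of the template pattern used throughout these builders, assuming the same static imports and column name constants as the surrounding classes; the literal values are placeholders.

    final MetadataRow.MetadataRowTemplate template = new MetadataRow.MetadataRowTemplate(
        buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT),
        buildDefinitionInAnonymousTable(TABLE_CATALOG, DataTypes.TEXT));
    final ArrayList<MetadataRow> rows = new ArrayList<>();
    // Values are positional: their count must match the number of column definitions,
    // otherwise withTemplate() throws a RuntimeException.
    rows.add(new MetadataRow().withTemplate(template, "some_keyspace", "some_cluster"));
    final MetadataResultSet resultSet = new MetadataResultSet(template).setRows(rows);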
schemas.sort(Comparator.comparing(row -> row.getString(TABLE_SCHEMA))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(schemas)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(schemas)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/TableMetadataResultSetBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/TableMetadataResultSetBuilder.java index b458deb..5db7c90 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/TableMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/TableMetadataResultSetBuilder.java @@ -18,8 +18,10 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.metadata.schema.ColumnMetadata; import com.datastax.oss.driver.api.core.metadata.schema.IndexMetadata; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; +import com.ing.data.cassandra.jdbc.ColumnDefinitions; import com.ing.data.cassandra.jdbc.types.AbstractJdbcType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -31,6 +33,7 @@ import java.util.Comparator; import java.util.Map; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; import static com.ing.data.cassandra.jdbc.types.AbstractJdbcType.DEFAULT_PRECISION; import static com.ing.data.cassandra.jdbc.types.TypesMap.getTypeForComparator; import static java.sql.DatabaseMetaData.bestRowNotPseudo; @@ -67,9 +70,14 @@ public TableMetadataResultSetBuilder(final CassandraStatement statement) throws */ public CassandraMetadataResultSet buildTableTypes() throws SQLException { final ArrayList tableTypes = new ArrayList<>(); - final MetadataRow row = new MetadataRow().addEntry(TABLE_TYPE, TABLE); - tableTypes.add(row); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(tableTypes)); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_TYPE, DataTypes.TEXT) + ); + + tableTypes.add(new MetadataRow().withTemplate(rowTemplate, TABLE)); + + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(tableTypes)); } /** @@ -113,28 +121,40 @@ public CassandraMetadataResultSet buildTables(final String schemaPattern, final String tableNamePattern) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList tables = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_CATALOG_SHORTNAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SELF_REFERENCING_COL_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REF_GENERATION, DataTypes.TEXT) + ); filterBySchemaNamePattern(schemaPattern, keyspaceMetadata -> filterByTableNamePattern(tableNamePattern, 
keyspaceMetadata, tableMetadata -> { - final MetadataRow row = new MetadataRow() - .addEntry(TABLE_CATALOG_SHORTNAME, catalog) - .addEntry(TABLE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TABLE_NAME, tableMetadata.getName().asInternal()) - .addEntry(TABLE_TYPE, TABLE) - .addEntry(REMARKS, tableMetadata.getOptions() - .get(CqlIdentifier.fromCql(CQL_OPTION_COMMENT)).toString()) - .addEntry(TYPE_CATALOG, null) - .addEntry(TYPE_SCHEMA, null) - .addEntry(TYPE_NAME, null) - .addEntry(SELF_REFERENCING_COL_NAME, null) - .addEntry(REF_GENERATION, null); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TABLE_CAT + keyspaceMetadata.getName().asInternal(), // TABLE_SCHEM + tableMetadata.getName().asInternal(), // TABLE_NAME + TABLE, // TABLE_TYPE + tableMetadata.getOptions().get(CqlIdentifier.fromCql(CQL_OPTION_COMMENT)).toString(), // REMARKS + null, // TYPE_CAT + null, // TYPE_SCHEM + null, // TYPE_NAME + null, // SELF_REFERENCING_COL_NAME + null); // REF_GENERATION tables.add(row); }, null), null); // Results should all have the same TABLE_CAT, so just sort them by TABLE_SCHEM then TABLE_NAME. tables.sort(Comparator.comparing(row -> ((MetadataRow) row).getString(TABLE_SCHEMA)) .thenComparing(row -> ((MetadataRow) row).getString(TABLE_NAME))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(tables)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(tables)); } /** @@ -203,32 +223,48 @@ public CassandraMetadataResultSet buildIndexes(final String schema, final boolean approximate) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList indexes = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_CATALOG_SHORTNAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NON_UNIQUE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(INDEX_QUALIFIER, DataTypes.TEXT), + buildDefinitionInAnonymousTable(INDEX_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ORDINAL_POSITION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ASC_OR_DESC, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CARDINALITY, DataTypes.TEXT), + buildDefinitionInAnonymousTable(PAGES, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FILTER_CONDITION, DataTypes.TEXT) + ); filterBySchemaNamePattern(schema, keyspaceMetadata -> filterByTableNamePattern(tableName, keyspaceMetadata, tableMetadata -> { for (final Map.Entry index : tableMetadata.getIndexes().entrySet()) { final IndexMetadata indexMetadata = index.getValue(); - final MetadataRow row = new MetadataRow() - .addEntry(TABLE_CATALOG_SHORTNAME, catalog) - .addEntry(TABLE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TABLE_NAME, tableMetadata.getName().asInternal()) - .addEntry(NON_UNIQUE, Boolean.TRUE.toString()) - .addEntry(INDEX_QUALIFIER, catalog) - .addEntry(INDEX_NAME, indexMetadata.getName().asInternal()) - .addEntry(TYPE, String.valueOf(DatabaseMetaData.tableIndexHashed)) - .addEntry(ORDINAL_POSITION, String.valueOf(1)) - .addEntry(COLUMN_NAME, indexMetadata.getTarget()) - .addEntry(ASC_OR_DESC, null) - .addEntry(CARDINALITY, 
String.valueOf(-1)) - .addEntry(PAGES, String.valueOf(-1)) - .addEntry(FILTER_CONDITION, null); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TABLE_CAT + keyspaceMetadata.getName().asInternal(), // TABLE_SCHEM + tableMetadata.getName().asInternal(), // TABLE_NAME + Boolean.TRUE.toString(), // NON_UNIQUE + catalog, // INDEX_QUALIFIER + indexMetadata.getName().asInternal(), // INDEX_NAME + String.valueOf(DatabaseMetaData.tableIndexOther), // TYPE + String.valueOf(1), // ORDINAL_POSITION + indexMetadata.getTarget(), // COLUMN_NAME + null, // ASC_OR_DESC + String.valueOf(-1), // CARDINALITY + String.valueOf(-1), // PAGES + null); // FILTER_CONDITION indexes.add(row); } }, null), null); // Results should all have the same NON_UNIQUE, TYPE and ORDINAL_POSITION, so just sort them by INDEX_NAME. indexes.sort(Comparator.comparing(row -> row.getString(INDEX_NAME))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(indexes)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(indexes)); } /** @@ -264,18 +300,26 @@ public CassandraMetadataResultSet buildPrimaryKeys(final String schema, final St throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList primaryKeys = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TABLE_CATALOG_SHORTNAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TABLE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(KEY_SEQ, DataTypes.TEXT), + buildDefinitionInAnonymousTable(PRIMARY_KEY_NAME, DataTypes.TEXT) + ); filterBySchemaNamePattern(schema, keyspaceMetadata -> filterByTableNamePattern(tableName, keyspaceMetadata, tableMetadata -> { int seq = 1; for (final ColumnMetadata col : tableMetadata.getPrimaryKey()) { - final MetadataRow row = new MetadataRow() - .addEntry(TABLE_CATALOG_SHORTNAME, catalog) - .addEntry(TABLE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TABLE_NAME, tableMetadata.getName().asInternal()) - .addEntry(COLUMN_NAME, col.getName().asInternal()) - .addEntry(KEY_SEQ, String.valueOf(seq)) - .addEntry(PRIMARY_KEY_NAME, null); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TABLE_CAT + keyspaceMetadata.getName().asInternal(), // TABLE_SCHEM + tableMetadata.getName().asInternal(), // TABLE_NAME + col.getName().asInternal(), // COLUMN_NAME + String.valueOf(seq), // KEY_SEQ + null); // PK_NAME primaryKeys.add(row); seq++; } @@ -283,7 +327,8 @@ public CassandraMetadataResultSet buildPrimaryKeys(final String schema, final St // Sort the results by COLUMN_NAME. 
primaryKeys.sort(Comparator.comparing(row -> row.getString(COLUMN_NAME))); - return CassandraMetadataResultSet.buildFrom(statement, new MetadataResultSet().setRows(primaryKeys)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(primaryKeys)); } /** @@ -342,6 +387,16 @@ public CassandraMetadataResultSet buildPrimaryKeys(final String schema, final St public CassandraMetadataResultSet buildBestRowIdentifier(final String schema, final String table, final int scope) throws SQLException { final ArrayList bestRowIdentifiers = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(SCOPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(COLUMN_SIZE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(BUFFER_LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DECIMAL_DIGITS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(PSEUDO_COLUMN, DataTypes.TEXT) + ); filterBySchemaNamePattern(schema, keyspaceMetadata -> filterByTableNamePattern(table, keyspaceMetadata, tableMetadata -> { @@ -365,20 +420,21 @@ public CassandraMetadataResultSet buildBestRowIdentifier(final String schema, fi columnMetadata.getType(), e.getMessage()); } - final MetadataRow row = new MetadataRow() - .addEntry(SCOPE, String.valueOf(scope)) - .addEntry(COLUMN_NAME, columnMetadata.getName().asInternal()) - .addEntry(DATA_TYPE, String.valueOf(jdbcType)) - .addEntry(TYPE_NAME, columnMetadata.getType().toString()) - .addEntry(COLUMN_SIZE, String.valueOf(columnSize)) - .addEntry(BUFFER_LENGTH, String.valueOf(0)) - .addEntry(DECIMAL_DIGITS, null) - .addEntry(PSEUDO_COLUMN, String.valueOf(bestRowNotPseudo)); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + String.valueOf(scope), // SCOPE + columnMetadata.getName().asInternal(), // COLUMN_NAME + String.valueOf(jdbcType), // DATA_TYPE + columnMetadata.getType().toString(), // TYPE_NAME + String.valueOf(columnSize), // COLUMN_SIZE + String.valueOf(0), // BUFFER_LENGTH + null, // DECIMAL_DIGITS + String.valueOf(bestRowNotPseudo)); // PSEUDO_COLUMN bestRowIdentifiers.add(row); } }, null), null); // All the rows of the result set have the same scope, so there is no need to perform an additional sort. 
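For illustration (not part of the patch), the primary key rows built above reach JDBC clients as follows; the keyspace and table names are placeholders.

    try (final ResultSet rs = connection.getMetaData()
            .getPrimaryKeys(null, "some_keyspace", "some_table")) {
        while (rs.next()) {
            final String columnName = rs.getString("COLUMN_NAME");
            final String keySequence = rs.getString("KEY_SEQ"); // 1-based position within the primary key
            // PK_NAME is left null by this driver.
        }
    }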
- return CassandraMetadataResultSet.buildFrom(statement, new MetadataResultSet().setRows(bestRowIdentifiers)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(bestRowIdentifiers)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/TypeMetadataResultSetBuilder.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/TypeMetadataResultSetBuilder.java index cb8cf6e..95022cb 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/metadata/TypeMetadataResultSetBuilder.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/TypeMetadataResultSetBuilder.java @@ -18,6 +18,7 @@ import com.datastax.oss.driver.api.core.CqlIdentifier; import com.datastax.oss.driver.api.core.data.UdtValue; import com.datastax.oss.driver.api.core.type.DataType; +import com.datastax.oss.driver.api.core.type.DataTypes; import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.ing.data.cassandra.jdbc.CassandraMetadataResultSet; import com.ing.data.cassandra.jdbc.CassandraStatement; @@ -35,11 +36,14 @@ import java.util.Comparator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; +import static com.ing.data.cassandra.jdbc.ColumnDefinitions.Definition.buildDefinitionInAnonymousTable; import static com.ing.data.cassandra.jdbc.types.AbstractJdbcType.DEFAULT_PRECISION; import static com.ing.data.cassandra.jdbc.types.AbstractJdbcType.DEFAULT_SCALE; import static com.ing.data.cassandra.jdbc.types.TypesMap.getTypeForComparator; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.existsInDatabaseVersion; import static java.sql.DatabaseMetaData.typeNullable; import static java.sql.DatabaseMetaData.typePredBasic; import static java.sql.Types.JAVA_OBJECT; @@ -112,11 +116,20 @@ public CassandraMetadataResultSet buildUDTs(final String schemaPattern, final St final int[] types) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList udtsRows = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TYPE_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CLASS_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(BASE_TYPE, DataTypes.TEXT) + ); // Parse the fully-qualified type name, if necessary. 
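Illustrative sketch, not part of the patch: thanks to the null handling of the type name pattern introduced just below, a null pattern now behaves like the match-all pattern; the keyspace name is a placeholder.

    try (final ResultSet rs = connection.getMetaData().getUDTs(null, "some_keyspace", null, null)) {
        while (rs.next()) {
            final String udtName = rs.getString("TYPE_NAME");
            final String className = rs.getString("CLASS_NAME"); // always the UdtValue class name here
        }
    }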
String schemaName = schemaPattern; - final AtomicReference typeName = new AtomicReference<>(typeNamePattern); - if (typeNamePattern.contains(".")) { + final AtomicReference typeName = new AtomicReference<>(Objects.toString(typeNamePattern, "%")); + if (typeName.get().contains(".")) { final String[] fullyQualifiedTypeNameParts = typeNamePattern.split("\\."); schemaName = fullyQualifiedTypeNameParts[0]; typeName.set(fullyQualifiedTypeNameParts[1]); @@ -128,14 +141,14 @@ public CassandraMetadataResultSet buildUDTs(final String schemaPattern, final St final UserDefinedType udtMetadata = udt.getValue(); if (matchesPattern(typeName.get(), udtMetadata.getName().asInternal()) && (types == null || Arrays.stream(types).anyMatch(type -> type == JAVA_OBJECT))) { - final MetadataRow row = new MetadataRow() - .addEntry(TYPE_CATALOG, catalog) - .addEntry(TYPE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TYPE_NAME, udtMetadata.getName().asInternal()) - .addEntry(CLASS_NAME, UdtValue.class.getName()) - .addEntry(DATA_TYPE, String.valueOf(JAVA_OBJECT)) - .addEntry(REMARKS, StringUtils.EMPTY) - .addEntry(BASE_TYPE, null); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TYPE_CAT + keyspaceMetadata.getName().asInternal(), // TYPE_SCHEM + udtMetadata.getName().asInternal(), // TYPE_NAME + UdtValue.class.getName(), // CLASS_NAME + String.valueOf(JAVA_OBJECT), // DATA_TYPE + StringUtils.EMPTY, // REMARKS + null); // BASE_TYPE udtsRows.add(row); } } @@ -144,7 +157,8 @@ public CassandraMetadataResultSet buildUDTs(final String schemaPattern, final St // Results should all have the same DATA_TYPE and TYPE_CAT so just sort them by TYPE_SCHEM then TYPE_NAME. udtsRows.sort(Comparator.comparing(row -> ((MetadataRow) row).getString(TYPE_SCHEMA)) .thenComparing(row -> ((MetadataRow) row).getString(TYPE_NAME))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(udtsRows)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(udtsRows)); } /** @@ -204,44 +218,70 @@ public CassandraMetadataResultSet buildUDTs(final String schemaPattern, final St * is returned for data types where the column size is not applicable. *

* + * @param databaseVersion The database version the driver is currently connected to. * @return A valid result set for implementation of {@link DatabaseMetaData#getTypeInfo()}. * @throws SQLException when something went wrong during the creation of the result set. */ - public CassandraMetadataResultSet buildTypes() throws SQLException { + public CassandraMetadataResultSet buildTypes(final String databaseVersion) throws SQLException { final ArrayList types = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(PRECISION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(LITERAL_PREFIX, DataTypes.TEXT), + buildDefinitionInAnonymousTable(LITERAL_SUFFIX, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CREATE_PARAMS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CASE_SENSITIVE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SEARCHABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(UNSIGNED_ATTRIBUTE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(FIXED_PRECISION_SCALE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(AUTO_INCREMENT, DataTypes.TEXT), + buildDefinitionInAnonymousTable(LOCALIZED_TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(MINIMUM_SCALE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(MAXIMUM_SCALE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATETIME_SUB, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NUM_PRECISION_RADIX, DataTypes.TEXT) + ); + for (final DataTypeEnum dataType : DataTypeEnum.values()) { + // Only include types existing in the current database version. + if (!existsInDatabaseVersion(databaseVersion, dataType)) { + continue; + } + final AbstractJdbcType jdbcType = getTypeForComparator(dataType.asLowercaseCql()); String literalQuotingSymbol = null; if (jdbcType.needsQuotes()) { literalQuotingSymbol = "'"; } - /* FIXME: some values should be adapted for list, set, map, vector, tuple and UDTs (JDBC type OTHER). - Special JDBC types similar to JdbcCounterColumn should be used for that. 
*/ - final MetadataRow row = new MetadataRow() - .addEntry(TYPE_NAME, dataType.cqlType) - .addEntry(DATA_TYPE, String.valueOf(jdbcType.getJdbcType())) - .addEntry(PRECISION, String.valueOf(jdbcType.getPrecision(null))) - .addEntry(LITERAL_PREFIX, literalQuotingSymbol) - .addEntry(LITERAL_SUFFIX, literalQuotingSymbol) - .addEntry(CREATE_PARAMS, null) - .addEntry(NULLABLE, String.valueOf(typeNullable)) // absence is the equivalent of null in Cassandra - .addEntry(CASE_SENSITIVE, String.valueOf(jdbcType.isCaseSensitive())) - .addEntry(SEARCHABLE, String.valueOf(typePredBasic)) - .addEntry(UNSIGNED_ATTRIBUTE, String.valueOf(!jdbcType.isSigned())) - .addEntry(FIXED_PRECISION_SCALE, String.valueOf(!jdbcType.isCurrency())) - .addEntry(AUTO_INCREMENT, String.valueOf(false)) - .addEntry(LOCALIZED_TYPE_NAME, null) - .addEntry(MINIMUM_SCALE, String.valueOf(DEFAULT_SCALE)) - .addEntry(MAXIMUM_SCALE, String.valueOf(jdbcType.getScale(null))) - .addEntry(SQL_DATA_TYPE, null) - .addEntry(SQL_DATETIME_SUB, null) - .addEntry(NUM_PRECISION_RADIX, String.valueOf(jdbcType.getPrecision(null))); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + dataType.cqlType, // TYPE_NAME + String.valueOf(jdbcType.getJdbcType()), // DATA_TYPE + String.valueOf(jdbcType.getPrecision(null)), // PRECISION + literalQuotingSymbol, // LITERAL_PREFIX + literalQuotingSymbol, // LITERAL_SUFFIX + null, // CREATE_PARAMS + String.valueOf(typeNullable), // NULLABLE, absence is equivalent of null in Cassandra + String.valueOf(jdbcType.isCaseSensitive()), // CASE_SENSITIVE + String.valueOf(typePredBasic), // SEARCHABLE + String.valueOf(!jdbcType.isSigned()), // UNSIGNED_ATTRIBUTE + String.valueOf(!jdbcType.isCurrency()), // FIXED_PREC_SCALE + String.valueOf(false), // AUTO_INCREMENT + null, // LOCAL_TYPE_NAME + String.valueOf(DEFAULT_SCALE), // MINIMUM_SCALE + String.valueOf(jdbcType.getScale(null)), // MAXIMUM_SCALE + null, // SQL_DATA_TYPE + null, // SQL_DATETIME_SUB + String.valueOf(jdbcType.getPrecision(null))); // NUM_PREC_RADIX types.add(row); } // Sort results by DATA_TYPE. 
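A sketch of the effect of this filtering, not part of the patch, assuming existsInDatabaseVersion(), defined in DriverUtil and not shown in this diff, simply checks the version range declared by the enum value.

    // The vector CQL type is declared as existing from Cassandra 5 onwards, so it is expected to be
    // excluded from getTypeInfo() results against a 4.x cluster and included against a 5.x cluster.
    final boolean visibleOnCassandra4 = existsInDatabaseVersion("4.1.3", DataTypeEnum.VECTOR); // expected: false
    final boolean visibleOnCassandra5 = existsInDatabaseVersion("5.0.0", DataTypeEnum.VECTOR); // expected: true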
types.sort(Comparator.comparing(row -> Integer.valueOf(row.getString(DATA_TYPE)))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(types)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(types)); } /** @@ -323,6 +363,29 @@ public CassandraMetadataResultSet buildAttributes(final String schemaPattern, fi final String attributesNamePattern) throws SQLException { final String catalog = this.connection.getCatalog(); final ArrayList attributesRows = new ArrayList<>(); + final MetadataRow.MetadataRowTemplate rowTemplate = new MetadataRow.MetadataRowTemplate( + buildDefinitionInAnonymousTable(TYPE_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ATTRIBUTE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ATTRIBUTE_TYPE_NAME, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ATTRIBUTE_SIZE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(DECIMAL_DIGITS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NUM_PRECISION_RADIX, DataTypes.TEXT), + buildDefinitionInAnonymousTable(NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(REMARKS, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ATTRIBUTE_DEFAULT, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATA_TYPE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SQL_DATETIME_SUB, DataTypes.TEXT), + buildDefinitionInAnonymousTable(CHAR_OCTET_LENGTH, DataTypes.TEXT), + buildDefinitionInAnonymousTable(ORDINAL_POSITION, DataTypes.TEXT), + buildDefinitionInAnonymousTable(IS_NULLABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCOPE_CATALOG, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCOPE_SCHEMA, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SCOPE_TABLE, DataTypes.TEXT), + buildDefinitionInAnonymousTable(SOURCE_DATA_TYPE, DataTypes.TEXT) + ); // Parse the fully-qualified type name, if necessary. 
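For illustration (not part of the patch), the type name pattern may also be given in the fully-qualified form keyspace.type, which is parsed below; the names used here are placeholders.

    try (final ResultSet rs = connection.getMetaData()
            .getAttributes(null, null, "some_keyspace.some_udt", "%")) {
        while (rs.next()) {
            final String attributeName = rs.getString("ATTR_NAME");
            final String attributeTypeName = rs.getString("ATTR_TYPE_NAME"); // the CQL type of the UDT field
        }
    }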
String schemaName = schemaPattern; @@ -371,28 +434,28 @@ public CassandraMetadataResultSet buildAttributes(final String schemaPattern, fi LOG.warn("Unable to get JDBC type for comparator [{}]: {}", attrType, e.getMessage()); } - final MetadataRow row = new MetadataRow() - .addEntry(TYPE_CATALOG, catalog) - .addEntry(TYPE_SCHEMA, keyspaceMetadata.getName().asInternal()) - .addEntry(TYPE_NAME, udtMetadata.getName().asInternal()) - .addEntry(ATTRIBUTE_NAME, attrName) - .addEntry(DATA_TYPE, String.valueOf(jdbcType)) - .addEntry(ATTRIBUTE_TYPE_NAME, attrType.toString()) - .addEntry(ATTRIBUTE_SIZE, String.valueOf(columnSize)) - .addEntry(DECIMAL_DIGITS, null) - .addEntry(NUM_PRECISION_RADIX, String.valueOf(radix)) - .addEntry(NULLABLE, String.valueOf(DatabaseMetaData.attributeNoNulls)) - .addEntry(REMARKS, null) - .addEntry(ATTRIBUTE_DEFAULT, null) - .addEntry(SQL_DATA_TYPE, null) - .addEntry(SQL_DATETIME_SUB, null) - .addEntry(CHAR_OCTET_LENGTH, String.valueOf(Integer.MAX_VALUE)) - .addEntry(ORDINAL_POSITION, String.valueOf(i + 1)) - .addEntry(IS_NULLABLE, StringUtils.EMPTY) - .addEntry(SCOPE_CATALOG, null) - .addEntry(SCOPE_SCHEMA, null) - .addEntry(SCOPE_TABLE, null) - .addEntry(SOURCE_DATA_TYPE, null); + final MetadataRow row = new MetadataRow().withTemplate(rowTemplate, + catalog, // TYPE_CATALOG + keyspaceMetadata.getName().asInternal(), // TYPE_SCHEMA + udtMetadata.getName().asInternal(), // TYPE_NAME + attrName, // ATTR_NAME + String.valueOf(jdbcType), // DATA_TYPE + attrType.toString(), // ATTR_TYPE_NAME + String.valueOf(columnSize), // ATTR_SIZE + null, // DECIMAL_DIGITS + String.valueOf(radix), // NUM_PREC_RADIX + String.valueOf(DatabaseMetaData.attributeNoNulls), // NULLABLE + null, // REMARKS + null, // ATTR_DEF + null, // SQL_DATA_TYPE + null, // SQL_DATETIME_SUB + String.valueOf(Integer.MAX_VALUE), // CHAR_OCTET_LENGTH + String.valueOf(i + 1), // ORDINAL_POSITION + StringUtils.EMPTY, // IS_NULLABLE + null, // SCOPE_CATALOG + null, // SCOPE_SCHEMA + null, // SCOPE_TABLE + null); // SOURCE_DATA_TYPE attributesRows.add(row); } }); @@ -403,7 +466,8 @@ public CassandraMetadataResultSet buildAttributes(final String schemaPattern, fi attributesRows.sort(Comparator.comparing(row -> ((MetadataRow) row).getString(TYPE_SCHEMA)) .thenComparing(row -> ((MetadataRow) row).getString(TYPE_NAME)) .thenComparing(row -> ((MetadataRow) row).getString(ORDINAL_POSITION))); - return CassandraMetadataResultSet.buildFrom(this.statement, new MetadataResultSet().setRows(attributesRows)); + return CassandraMetadataResultSet.buildFrom(this.statement, + new MetadataResultSet(rowTemplate).setRows(attributesRows)); } } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/metadata/VersionedMetadata.java b/src/main/java/com/ing/data/cassandra/jdbc/metadata/VersionedMetadata.java new file mode 100644 index 0000000..3801289 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/metadata/VersionedMetadata.java @@ -0,0 +1,48 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.ing.data.cassandra.jdbc.metadata; + +import org.semver4j.Semver; + +/** + * A versioned database metadata (such as a CQL type, CQL keyword or a built-in function). + */ +public interface VersionedMetadata { + + /** + * Gets the metadata name (for example a CQL keyword, or a built-in function name). + * + * @return The metadata name. + */ + String getName(); + + /** + * Gets the minimal Cassandra version from which the metadata exists. If {@code null}, we consider the metadata + * always existed. + * + * @return The minimal version of Cassandra from which the metadata exists or {@code null}. + */ + Semver isValidFrom(); + + /** + * Gets the first Cassandra version in which the metadata does not exist anymore. If {@code null}, it means the + * metadata still exists in the latest version of Cassandra. + * + * @return The first version of Cassandra in which the metadata does not exist anymore or {@code null}. + */ + Semver isInvalidFrom(); + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/optionset/Default.java b/src/main/java/com/ing/data/cassandra/jdbc/optionset/Default.java index 2a96461..7e072c9 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/optionset/Default.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/optionset/Default.java @@ -30,7 +30,7 @@ public class Default extends AbstractOptionSet { @Override public String getCatalog() { - // It requires a query to table system.local since DataStax driver 4+. + // It requires a query to table system.local since Java Driver for Apache Cassandra® 4+. // If the query fails, return null. try (final Statement stmt = getConnection().createStatement()) { final ResultSet rs = stmt.executeQuery("SELECT cluster_name FROM system.local"); diff --git a/src/main/java/com/ing/data/cassandra/jdbc/optionset/OptionSet.java b/src/main/java/com/ing/data/cassandra/jdbc/optionset/OptionSet.java index a6c5189..b20d42a 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/optionset/OptionSet.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/optionset/OptionSet.java @@ -34,7 +34,8 @@ public interface OptionSet { String getCatalog(); /** - * There is no {@code updateCount} available in Datastax Java driver, different flavour requires different response. + * There is no {@code updateCount} available in Java Driver for Apache Cassandra®, different flavour requires + * different response. * * @return A predefined update response. */ diff --git a/src/main/java/com/ing/data/cassandra/jdbc/package-info.java b/src/main/java/com/ing/data/cassandra/jdbc/package-info.java index 4e6a089..afc7861 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/package-info.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/package-info.java @@ -14,6 +14,6 @@ */ /** - * Implementation of JDBC API for Cassandra databases by wrapping the DataStax Java Driver for Apache Cassandra. + * Implementation of JDBC API for Cassandra databases by wrapping the Java Driver for Apache Cassandra®. */ package com.ing.data.cassandra.jdbc; diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/AbstractJdbcCollection.java b/src/main/java/com/ing/data/cassandra/jdbc/types/AbstractJdbcCollection.java new file mode 100644 index 0000000..fec605a --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/AbstractJdbcCollection.java @@ -0,0 +1,74 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.types; + +import java.sql.Types; + +/** + * Abstract class providing description about the JDBC equivalent of any CQL type representing a collection. + */ +public abstract class AbstractJdbcCollection extends AbstractJdbcType { + + @Override + public boolean isCaseSensitive() { + return false; + } + + @Override + public int getScale(final T obj) { + return DEFAULT_SCALE; + } + + @Override + public int getPrecision(final T obj) { + return DEFAULT_PRECISION; + } + + @Override + public boolean isCurrency() { + return false; + } + + @Override + public boolean isSigned() { + return false; + } + + @Override + public String toString(final T obj) { + return obj.toString(); + } + + @Override + public boolean needsQuotes() { + return false; + } + + @Override + public abstract Class getType(); + + @Override + public int getJdbcType() { + return Types.OTHER; + } + + @Override + public abstract T compose(Object obj); + + @Override + public abstract Object decompose(T value); + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/DataTypeEnum.java b/src/main/java/com/ing/data/cassandra/jdbc/types/DataTypeEnum.java index ff34159..d3278e5 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/types/DataTypeEnum.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/DataTypeEnum.java @@ -23,6 +23,8 @@ import com.datastax.oss.driver.api.core.type.UserDefinedType; import com.datastax.oss.driver.api.core.type.VectorType; import com.datastax.oss.protocol.internal.ProtocolConstants.DataType; +import com.ing.data.cassandra.jdbc.metadata.VersionedMetadata; +import org.semver4j.Semver; import javax.annotation.Nonnull; import java.math.BigDecimal; @@ -38,10 +40,12 @@ import java.util.Set; import java.util.UUID; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_5; + /** * Enumeration of CQL data types and the corresponding Java types. */ -public enum DataTypeEnum { +public enum DataTypeEnum implements VersionedMetadata { /** * {@code ascii} CQL type (type {@value DataType#ASCII} in CQL native protocol) mapped to {@link String} Java type. @@ -170,7 +174,7 @@ public enum DataTypeEnum { * {@code vector} CQL type (type {@value DataType#LIST} in CQL native protocol) mapped to {@link CqlVector} Java * type. */ - VECTOR(DataType.LIST, CqlVector.class, "Vector"); + VECTOR(DataType.LIST, CqlVector.class, "Vector", CASSANDRA_5, null); static final String VECTOR_CLASSNAME = "org.apache.cassandra.db.marshal.VectorType"; @@ -186,6 +190,8 @@ public enum DataTypeEnum { public final String cqlType; final int protocolId; + final Semver validFrom; + final Semver invalidFrom; static { CQL_DATATYPE_TO_DATATYPE = new HashMap<>(); @@ -197,6 +203,43 @@ public enum DataTypeEnum { /** * Constructs a {@code DataTypeEnum} item. * + * @param protocolId The type ID as defined in CQL binary protocol. (see + * + * CQL binary protocol definition and {@link DataType}). + * @param javaType The corresponding Java type. + * @param cqlType The CQL type name. + * @param validFrom The minimal Cassandra version from which the CQL type exists. 
If {@code null}, we consider the + * type exists in any version of the Cassandra database. + * @param invalidFrom The first Cassandra version in which the CQL type does not exist anymore. If {@code null}, + * we consider the type exists in any version of the Cassandra database greater than + * {@code validFrom}. + */ + DataTypeEnum(final int protocolId, final Class javaType, final String cqlType, final String validFrom, + final String invalidFrom) { + this.protocolId = protocolId; + this.javaType = javaType; + this.cqlType = cqlType; + this.validFrom = Semver.coerce(validFrom); + this.invalidFrom = Semver.coerce(invalidFrom); + } + + /** + * Constructs a {@code DataTypeEnum} item valid from the specified version of Cassandra. + * + * @param protocolId The type ID as defined in CQL binary protocol. (see + * + * CQL binary protocol definition and {@link DataType}). + * @param javaType The corresponding Java type. + * @param cqlType The CQL type name. + * @param validFrom The minimal Cassandra version from which the CQL type exists. + */ + DataTypeEnum(final int protocolId, final Class javaType, final String cqlType, final String validFrom) { + this(protocolId, javaType, cqlType, validFrom, null); + } + + /** + * Constructs a {@code DataTypeEnum} item valid in any version of Cassandra. + * * @param protocolId The type ID as defined in CQL binary protocol. (see * * CQL binary protocol definition and {@link DataType}). @@ -204,9 +247,7 @@ public enum DataTypeEnum { * @param cqlType The CQL type name. */ DataTypeEnum(final int protocolId, final Class javaType, final String cqlType) { - this.protocolId = protocolId; - this.javaType = javaType; - this.cqlType = cqlType; + this(protocolId, javaType, cqlType, null); } /** @@ -328,6 +369,21 @@ public String toString() { return super.toString().toLowerCase(); } + @Override + public String getName() { + return this.cqlType; + } + + @Override + public Semver isValidFrom() { + return this.validFrom; + } + + @Override + public Semver isInvalidFrom() { + return this.invalidFrom; + } + /** * Gets the CQL name from a given {@link com.datastax.oss.driver.api.core.type.DataType} instance. * For vectors, dataType.asCql returns looks like 'org.apache.cassandra.db.marshal.VectorType(n)' where n is diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcList.java b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcList.java new file mode 100644 index 0000000..679ac5d --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcList.java @@ -0,0 +1,60 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.types; + +import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; + +import java.util.List; + +/** + * JDBC description of {@code LIST} CQL type (corresponding Java type: {@link List}). + *

<p>
+ * CQL type description: a collection of one or more ordered elements.
+ * </p>
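For illustration, the JDBC-level description provided by this class could be exercised as follows (a minimal sketch: the wrapper class name and the sample list content are arbitrary, only the JdbcList.INSTANCE accessors shown in this patch are used):

import com.ing.data.cassandra.jdbc.types.JdbcList;
import java.sql.Types;
import java.util.Arrays;
import java.util.List;

public class JdbcListDescriptionSketch {
    public static void main(String[] args) {
        // Collection types are reported to JDBC clients as java.sql.Types.OTHER.
        System.out.println(JdbcList.INSTANCE.getJdbcType() == Types.OTHER); // true
        // The corresponding Java type is java.util.List.
        System.out.println(JdbcList.INSTANCE.getType());                    // interface java.util.List
        // toString(List) delegates to Guava's Iterables.toString().
        List<Integer> sample = Arrays.asList(1, 2, 3);
        System.out.println(JdbcList.INSTANCE.toString(sample));             // [1, 2, 3]
    }
}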

+ */ +@SuppressWarnings("rawtypes") +public class JdbcList extends AbstractJdbcCollection { + + /** + * Gets a {@code JdbcList} instance. + */ + public static final JdbcList INSTANCE = new JdbcList(); + + JdbcList() { + } + + @Override + public String toString(final List obj) { + return Iterables.toString(obj); + } + + @Override + public Class getType() { + return List.class; + } + + @Override + public List compose(final Object obj) { + if (obj != null && obj.getClass().isAssignableFrom(List.class)) { + return (List) obj; + } + return null; + } + + @Override + public Object decompose(final List value) { + return value; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcMap.java b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcMap.java new file mode 100644 index 0000000..a2a6480 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcMap.java @@ -0,0 +1,70 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.types; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Map; + +/** + * JDBC description of {@code MAP} CQL type (corresponding Java type: {@link Map}). + *

<p>
+ * CQL type description: a JSON-style array of literals.
+ * </p>

+ */ +@SuppressWarnings("rawtypes") +public class JdbcMap extends AbstractJdbcCollection { + + /** + * Gets a {@code JdbcMap} instance. + */ + public static final JdbcMap INSTANCE = new JdbcMap(); + + private static final Logger LOG = LoggerFactory.getLogger(JdbcMap.class); + + JdbcMap() { + } + + @Override + public String toString(final Map obj) { + try { + return new ObjectMapper().writeValueAsString(obj); + } catch (final JsonProcessingException e) { + LOG.warn("Unable to format map [{}] as string", obj.toString()); + return null; + } + } + + @Override + public Class getType() { + return Map.class; + } + + @Override + public Map compose(final Object obj) { + if (obj != null && obj.getClass().isAssignableFrom(Map.class)) { + return (Map) obj; + } + return null; + } + + @Override + public Object decompose(final Map value) { + return value; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcSet.java b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcSet.java new file mode 100644 index 0000000..0919de6 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcSet.java @@ -0,0 +1,60 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.types; + +import com.datastax.oss.driver.shaded.guava.common.collect.Iterables; + +import java.util.Set; + +/** + * JDBC description of {@code SET} CQL type (corresponding Java type: {@link Set}). + *

<p>
+ * CQL type description: a collection of one or more elements.
+ * </p>

+ */ +@SuppressWarnings("rawtypes") +public class JdbcSet extends AbstractJdbcCollection { + + /** + * Gets a {@code JdbcSet} instance. + */ + public static final JdbcSet INSTANCE = new JdbcSet(); + + JdbcSet() { + } + + @Override + public String toString(final Set obj) { + return Iterables.toString(obj); + } + + @Override + public Class getType() { + return Set.class; + } + + @Override + public Set compose(final Object obj) { + if (obj != null && obj.getClass().isAssignableFrom(Set.class)) { + return (Set) obj; + } + return null; + } + + @Override + public Object decompose(final Set value) { + return value; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcVector.java b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcVector.java new file mode 100644 index 0000000..c9af233 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/JdbcVector.java @@ -0,0 +1,58 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.types; + +import com.datastax.oss.driver.api.core.data.CqlVector; + +/** + * JDBC description of {@code VECTOR} CQL type (corresponding Java type: {@link CqlVector}). + *

<p>
+ * CQL type description: an n-dimensional vector.
+ * </p>

+ */ +@SuppressWarnings("rawtypes") +public class JdbcVector extends AbstractJdbcCollection { + + /** + * Gets a {@code JdbcVector} instance. + */ + public static final JdbcVector INSTANCE = new JdbcVector(); + + JdbcVector() { + } + + @Override + public String toString(final CqlVector obj) { + return obj.toString(); + } + + @Override + public Class getType() { + return CqlVector.class; + } + + @Override + public CqlVector compose(final Object obj) { + if (obj != null && obj.getClass().isAssignableFrom(CqlVector.class)) { + return (CqlVector) obj; + } + return null; + } + + @Override + public Object decompose(final CqlVector value) { + return value; + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/types/TypesMap.java b/src/main/java/com/ing/data/cassandra/jdbc/types/TypesMap.java index 8512cf3..039a9c8 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/types/TypesMap.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/types/TypesMap.java @@ -40,6 +40,9 @@ public final class TypesMap { TYPES_MAP.put("org.apache.cassandra.db.marshal.Int32Type", JdbcInt32.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.LexicalUUIDType", JdbcLexicalUUID.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.LongType", JdbcLong.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.ListType", JdbcList.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.MapType", JdbcMap.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.SetType", JdbcSet.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.ShortType", JdbcShort.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.TimeType", JdbcTime.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.TimestampType", JdbcTimestamp.INSTANCE); @@ -48,6 +51,7 @@ public final class TypesMap { TYPES_MAP.put("org.apache.cassandra.db.marshal.UserType", JdbcUdt.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.UTF8Type", JdbcUTF8.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.UUIDType", JdbcUUID.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.VectorType", JdbcVector.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.ascii", JdbcAscii.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.bigint", JdbcLong.INSTANCE); @@ -61,6 +65,9 @@ public final class TypesMap { TYPES_MAP.put("org.apache.cassandra.db.marshal.float", JdbcFloat.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.inet", JdbcInetAddress.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.int", JdbcInt32.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.list", JdbcList.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.map", JdbcList.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.set", JdbcList.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.smallint", JdbcShort.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.text", JdbcUTF8.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.time", JdbcTime.INSTANCE); @@ -72,6 +79,7 @@ public final class TypesMap { TYPES_MAP.put("org.apache.cassandra.db.marshal.uuid", JdbcUUID.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.varchar", JdbcUTF8.INSTANCE); TYPES_MAP.put("org.apache.cassandra.db.marshal.varint", JdbcInteger.INSTANCE); + TYPES_MAP.put("org.apache.cassandra.db.marshal.vector", JdbcVector.INSTANCE); } private TypesMap() { diff --git a/src/main/java/com/ing/data/cassandra/jdbc/utils/ContactPoint.java b/src/main/java/com/ing/data/cassandra/jdbc/utils/ContactPoint.java new file mode 100644 
index 0000000..58b5491 --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/utils/ContactPoint.java @@ -0,0 +1,102 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.utils; + +import java.net.InetSocketAddress; +import java.util.Objects; + +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.DEFAULT_PORT; + +/** + * The representation of contact point in a Cassandra cluster. + *

<p>
+ * This class is used to parse the JDBC URL and extract the hosts and ports of the contact points.
+ * </p>

+ */ +public final class ContactPoint { + + private final String host; + private final int port; + + private ContactPoint(final String host, final int port) { + this.host = host; + this.port = port; + } + + /** + * Instantiates a contact point from the host and port. + * + * @param host The hostname. + * @param port The port. If {@code null}, the default Cassandra port ({@value JdbcUrlUtil#DEFAULT_PORT}) is used. + * @return The contact point representation. + */ + public static ContactPoint of(final String host, final Integer port) { + if (port == null) { + return new ContactPoint(host, DEFAULT_PORT); + } + return new ContactPoint(host, port); + } + + /** + * Gets the hostname of the contact point. + * + * @return The hostname of the contact point. + */ + public String getHost() { + return this.host; + } + + /** + * Gets the port of the contact point. + * + * @return The port of the contact point. + */ + public Integer getPort() { + return this.port; + } + + /** + * Converts the contact point into a socket address usable to instantiate a connection to a Cassandra cluster. + * + * @return The socket address corresponding to the contact point. + */ + public InetSocketAddress toInetSocketAddress() { + return InetSocketAddress.createUnresolved(this.host, this.port); + } + + @Override + public String toString() { + return String.format("%s:%d", this.host, this.port); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ContactPoint that = (ContactPoint) o; + return this.port == that.getPort() && Objects.equals(this.host, that.getHost()); + } + + @Override + public int hashCode() { + return Objects.hash(this.host, this.port); + } + +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/utils/ConversionsUtil.java b/src/main/java/com/ing/data/cassandra/jdbc/utils/ConversionsUtil.java new file mode 100644 index 0000000..69e3a0d --- /dev/null +++ b/src/main/java/com/ing/data/cassandra/jdbc/utils/ConversionsUtil.java @@ -0,0 +1,176 @@ +/* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ing.data.cassandra.jdbc.utils; + +import org.apache.commons.io.IOUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneOffset; +import java.util.Calendar; + +/** + * A set of static utility methods for types conversions. 
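As an illustration of the new contact point handling, a data source targeting two nodes listening on different ports might be built as follows (a minimal sketch: host names, ports, keyspace, credentials and consistency level are placeholders, and the five-argument CassandraDataSource constructor is the one exercised by the test suite later in this patch):

import com.ing.data.cassandra.jdbc.CassandraDataSource;
import com.ing.data.cassandra.jdbc.utils.ContactPoint;
import java.sql.Connection;
import java.util.Arrays;
import java.util.List;

public class MultipleContactPointsSketch {
    public static void main(String[] args) throws Exception {
        // Roughly equivalent JDBC URL, using the '--' host separator introduced by this change:
        // jdbc:cassandra://host1:9042--host2:9043/my_keyspace
        List<ContactPoint> contactPoints = Arrays.asList(
            ContactPoint.of("host1", 9042),
            ContactPoint.of("host2", 9043));
        CassandraDataSource ds = new CassandraDataSource(
            contactPoints, "my_keyspace", "user", "secret", "LOCAL_QUORUM");
        try (Connection connection = ds.getConnection()) {
            System.out.println(connection.isClosed()); // false while the connection is open
        }
    }
}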
+ */ +public final class ConversionsUtil { + + static final Logger LOG = LoggerFactory.getLogger(ConversionsUtil.class); + + static final String BINARY_FAILED_CONVERSION = "Unable to convert %s object to byte array."; + + private ConversionsUtil() { + // Private constructor to hide the public one. + } + + /** + * Converts an object of one these types to a byte array for storage in a column of type {@code blob}: + *
    + *
<ul>
+ *     <li>{@code byte[]}</li>
+ *     <li>{@link ByteArrayInputStream}</li>
+ *     <li>{@link Blob}</li>
+ *     <li>{@link Clob}</li>
+ *     <li>{@link NClob} (this is handled as {@link Clob} since it's just a superinterface of this one)</li>
+ * </ul>
+ * + * @param x The object to convert. + * @return The byte array resulting of the object conversion. An empty array is returned if the type is valid but + * the conversion failed for some reason (see the logged error for further details). + * @throws SQLException when the type of the object to convert is not supported. + */ + @SuppressWarnings("ResultOfMethodCallIgnored") + public static byte[] convertToByteArray(final Object x) throws SQLException { + byte[] array = new byte[0]; + if (x instanceof ByteArrayInputStream) { + array = new byte[((ByteArrayInputStream) x).available()]; + try { + ((ByteArrayInputStream) x).read(array); + } catch (final IOException e) { + LOG.warn(String.format(BINARY_FAILED_CONVERSION, x.getClass()), e); + } + } else if (x instanceof byte[]) { + array = (byte[]) x; + } else if (x instanceof Blob) { + try { + final InputStream stream = ((Blob) x).getBinaryStream(); + array = new byte[stream.available()]; + stream.read(array); + } catch (final IOException | SQLException e) { + LOG.warn(String.format(BINARY_FAILED_CONVERSION, x.getClass()), e); + } + } else if (x instanceof Clob) { + try (Reader reader = ((Clob) x).getCharacterStream()) { + array = IOUtils.toByteArray(reader, StandardCharsets.UTF_8); + } catch (final IOException | SQLException e) { + LOG.warn(String.format(BINARY_FAILED_CONVERSION, x.getClass()), e); + } + } else { + throw new SQLException(String.format(ErrorConstants.UNSUPPORTED_PARAMETER_TYPE, x.getClass())); + } + return array; + } + + /** + * Converts an object of one these types to a {@link LocalDate} for storage in a column of type {@code date}: + *
    + *
<ul>
+ *     <li>{@link LocalDate}</li>
+ *     <li>{@link Date}</li>
+ * </ul>
+ * + * @param x The object to convert. + * @return The {@link LocalDate} instance resulting from the object conversion.
+ * @throws SQLException when the type of the object to convert is not supported. + */ + public static LocalDate convertToLocalDate(final Object x) throws SQLException { + if (x instanceof LocalDate) { + return (LocalDate) x; + } else if (x instanceof java.sql.Date) { + return ((Date) x).toLocalDate(); + } else { + throw new SQLException(String.format(ErrorConstants.UNSUPPORTED_PARAMETER_TYPE, x.getClass())); + } + } + + /**
+ * Converts an object of one of these types to a {@link LocalTime} for storage in a column of type {@code time}: + *
    + *
<ul>
+ *     <li>{@link LocalTime}</li>
+ *     <li>{@link Time}</li>
+ *     <li>{@link OffsetTime}</li>
+ * </ul>
+ * + * @param x The object to convert. + * @return The {@link LocalTime} instance resulting from the object conversion.
+ * @throws SQLException when the type of the object to convert is not supported. + */ + public static LocalTime convertToLocalTime(final Object x) throws SQLException { + if (x instanceof LocalTime) { + return (LocalTime) x; + } else if (x instanceof java.sql.Time) { + return ((Time) x).toLocalTime(); + } else if (x instanceof OffsetTime) { + return ((OffsetTime) x).toLocalTime(); + } else { + throw new SQLException(String.format(ErrorConstants.UNSUPPORTED_PARAMETER_TYPE, x.getClass())); + } + } + + /**
+ * Converts an object of one of these types to an {@link Instant} for storage in a column of type {@code timestamp}: + *
    + *
<ul>
+ *     <li>{@link LocalDateTime}</li>
+ *     <li>{@link Timestamp}</li>
+ *     <li>{@link java.util.Date}</li>
+ *     <li>{@link Calendar}</li>
+ *     <li>{@link OffsetDateTime}</li>
+ * </ul>
+ * + * @param x The object to convert. + * @return The {@link LocalTime} instance resulting of the object conversion. + * @throws SQLException when the type of the object to convert is not supported. + */ + public static Instant convertToInstant(final Object x) throws SQLException { + if (x instanceof LocalDateTime) { + return ((LocalDateTime) x).atZone(ZoneOffset.systemDefault()).toInstant(); + } else if (x instanceof java.sql.Timestamp) { + return ((Timestamp) x).toInstant(); + } else if (x instanceof java.util.Date) { + return ((java.util.Date) x).toInstant(); + } else if (x instanceof Calendar) { + return ((Calendar) x).toInstant(); + } else if (x instanceof OffsetDateTime) { + return ((OffsetDateTime) x).toInstant(); + } else { + throw new SQLException(String.format(ErrorConstants.UNSUPPORTED_PARAMETER_TYPE, x.getClass())); + } + } +} diff --git a/src/main/java/com/ing/data/cassandra/jdbc/utils/DriverUtil.java b/src/main/java/com/ing/data/cassandra/jdbc/utils/DriverUtil.java index 1d5cfbb..d850ede 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/utils/DriverUtil.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/utils/DriverUtil.java @@ -15,13 +15,18 @@ package com.ing.data.cassandra.jdbc.utils; +import com.ing.data.cassandra.jdbc.metadata.VersionedMetadata; import org.apache.commons.lang3.StringUtils; +import org.semver4j.RangesExpression; +import org.semver4j.Semver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.InputStream; +import java.util.List; import java.util.Properties; +import java.util.stream.Collectors; /** * A set of static utility methods and constants used by the JDBC driver. @@ -63,6 +68,16 @@ public final class DriverUtil { */ public static final String NULL_KEYWORD = "NULL"; + /** + * Cassandra version 5.0. Used for types and built-in functions versioning. + */ + public static final String CASSANDRA_5 = "5.0"; + + /** + * Cassandra version 4.0. Used for types and built-in functions versioning. + */ + public static final String CASSANDRA_4 = "4.0"; + static final Logger LOG = LoggerFactory.getLogger(DriverUtil.class); private DriverUtil() { @@ -88,27 +103,64 @@ public static String getDriverProperty(final String name) { } /** - * Gets a part of a version string. + * Gets the {@link Semver} representation of a version string. *

<p>
 * It uses the dot character as a separator to parse the different parts of a version (major, minor, patch).
 * </p>

* * @param version The version string (for example X.Y.Z). - * @param part The part of the version to extract (for the semantic versioning, use 0 for the major version, 1 for - * the minor and 2 for the patch). - * @return The requested part of the version, or 0 if the requested part cannot be parsed correctly. + * @return The parsed version, or {@link Semver#ZERO} if the string cannot be parsed correctly. */ - public static int parseVersion(final String version, final int part) { - if (StringUtils.isBlank(version) || StringUtils.countMatches(version, ".") < part || part < 0) { - return 0; + public static Semver safeParseVersion(final String version) { + if (StringUtils.isBlank(version)) { + return Semver.ZERO; } else { - try { - return Integer.parseInt(version.split("\\.")[part]); - } catch (final NumberFormatException ex) { - LOG.error("Unable to parse version: {}", version); - return 0; + final Semver parsedVersion = Semver.coerce(version); + if (parsedVersion == null) { + return Semver.ZERO; } + return parsedVersion; } } + /** + * Checks whether the database metadata (CQL type or built-in function) exists in the current database version. + * + * @param dbVersion The version of the Cassandra database the driver is currently connected to. + * @param versionedMetadata The database metadata to check. + * @return {@code true} if the database metadata exists in the current database version, {@code false} otherwise. + */ + public static boolean existsInDatabaseVersion(final String dbVersion, + final VersionedMetadata versionedMetadata) { + final Semver parseDatabaseVersion = Semver.coerce(dbVersion); + if (parseDatabaseVersion == null) { + return false; + } + Semver minVersion = Semver.ZERO; + if (versionedMetadata.isValidFrom() != null) { + minVersion = versionedMetadata.isValidFrom(); + } + final RangesExpression validRange = RangesExpression.greaterOrEqual(minVersion); + if (versionedMetadata.isInvalidFrom() != null) { + validRange.and(RangesExpression.less(versionedMetadata.isInvalidFrom())); + } + return parseDatabaseVersion.satisfies(validRange); + } + + /** + * Builds an alphabetically sorted and comma-separated list of metadata (such as built-in functions or CQL + * keywords) existing in the specified Cassandra version. + * + * @param metadataList The list of possible metadata to format. + * @param dbVersion The version of the Cassandra database the driver is currently connected to. + * @return The formatted list of metadata. 
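A short sketch of how these versioning helpers combine (assuming the ASCII and VECTOR constants of DataTypeEnum; the boolean results follow from VECTOR being declared valid from Cassandra 5.0 in this patch):

import static com.ing.data.cassandra.jdbc.utils.DriverUtil.buildMetadataList;
import static com.ing.data.cassandra.jdbc.utils.DriverUtil.existsInDatabaseVersion;
import static com.ing.data.cassandra.jdbc.utils.DriverUtil.safeParseVersion;

import com.ing.data.cassandra.jdbc.metadata.VersionedMetadata;
import com.ing.data.cassandra.jdbc.types.DataTypeEnum;
import java.util.Arrays;
import java.util.List;

public class VersionedMetadataSketch {
    public static void main(String[] args) {
        // safeParseVersion() never returns null: "4.1.3" is parsed as-is, a blank input would give Semver.ZERO.
        System.out.println(safeParseVersion("4.1.3"));                              // 4.1.3
        // The 'vector' CQL type only exists from Cassandra 5.0 onwards.
        System.out.println(existsInDatabaseVersion("4.1.3", DataTypeEnum.VECTOR));  // false
        System.out.println(existsInDatabaseVersion("5.0.0", DataTypeEnum.VECTOR));  // true
        // buildMetadataList() keeps only the entries existing in the given version, sorted by name.
        List<VersionedMetadata> candidates = Arrays.asList(DataTypeEnum.VECTOR, DataTypeEnum.ASCII);
        System.out.println(buildMetadataList(candidates, "4.1.3"));
    }
}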
+ */ + public static String buildMetadataList(final List metadataList, final String dbVersion) { + return metadataList.stream() + .filter(metadata -> existsInDatabaseVersion(dbVersion, metadata)) + .map(VersionedMetadata::getName) + .sorted() + .collect(Collectors.joining(",")); + } + } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/utils/ErrorConstants.java b/src/main/java/com/ing/data/cassandra/jdbc/utils/ErrorConstants.java index 084dab3..0ba99cf 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/utils/ErrorConstants.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/utils/ErrorConstants.java @@ -15,7 +15,9 @@ package com.ing.data.cassandra.jdbc.utils; +import com.ing.data.cassandra.jdbc.CassandraPreparedStatement; import com.ing.data.cassandra.jdbc.CassandraResultSet; +import com.ing.data.cassandra.jdbc.metadata.MetadataRow; import java.net.URI; import java.sql.Connection; @@ -88,7 +90,7 @@ public final class ErrorConstants { /** * Error message used in any SQL exception thrown because a {@code null} result set has been returned by the - * DataStax driver when a query is executed. + * Java Driver for Apache Cassandra® when a query is executed. */ public static final String NO_RESULT_SET = "No ResultSet returned from the CQL statement passed in an 'executeQuery()' method."; @@ -169,6 +171,13 @@ public final class ErrorConstants { public static final String HOST_IN_URL = "Connection url must specify a host, e.g. jdbc:cassandra://localhost:9042/keyspace"; + /** + * Error message used in any SQL exception thrown when the contact points in the JDBC URL cannot be parsed. This + * message is a template expecting the value of the invalid contact point as placeholder (example: + * {@code String.format(INVALID_CONTACT_POINT, "invalid:host")}). + */ + public static final String INVALID_CONTACT_POINT = "Invalid contact point: %s"; + /** * Error message used in any SQL exception thrown when a connection cannot be established due to a missing host * name. @@ -223,6 +232,52 @@ public final class ErrorConstants { */ public static final String VECTOR_ELEMENTS_NOT_NUMBERS = "Vector elements are not numbers."; + /** + * Error message used in any SQL exception thrown when the target JDBC type specified in the method + * {@link CassandraPreparedStatement#setObject(int, Object, int)} and its variants is not supported. + */ + public static final String UNSUPPORTED_JDBC_TYPE = "Unsupported JDBC type: %s"; + + /** + * Error message used in any SQL exception thrown when the conversion of the specified object in the method + * {@link CassandraPreparedStatement#setObject(int, Object, int)} and its variants is not supported. + */ + public static final String UNSUPPORTED_PARAMETER_TYPE = "Unsupported parameter type: %s"; + + /** + * Error message used in any SQL exception thrown when the conversion to the specified type in the methods + * {@link CassandraResultSet#getObject(int, Class)} and {@link CassandraResultSet#getObject(String, Class)} is not + * supported. + */ + public static final String UNSUPPORTED_TYPE_CONVERSION = "Conversion to type %s not supported."; + + /** + * Error message used in any SQL exception thrown when the conversion to a specific type in a getter method of + * {@link CassandraResultSet} failed. 
+ */ + public static final String UNABLE_TO_READ_VALUE = "Unable to read value as %s."; + + /** + * Error message used in any SQL exception thrown when the conversion to the specified type in the methods + * {@link CassandraResultSet#getObjectFromJson(int, Class)}, + * {@link CassandraResultSet#getObjectFromJson(String, Class)} and + * {@link CassandraResultSet#getObjectFromJson(Class)} is not supported. + */ + public static final String UNSUPPORTED_JSON_TYPE_CONVERSION = + "Unable to convert the column of index %d to an instance of %s"; + + /** + * Error message used in any SQL exception thrown when it is not possible to retrieve some metadata of any + * {@link ResultSet}. + */ + public static final String UNABLE_TO_RETRIEVE_METADATA = "Unable to retrieve metadata for result set."; + + /** + * Error message used in any runtime exception thrown when populating a {@link MetadataRow} failed due to a mismatch + * between the number of provided values and the number of columns in the row. + */ + public static final String UNABLE_TO_POPULATE_METADATA_ROW = "Unable to populate a metadata row."; + private ErrorConstants() { // Private constructor to hide the public one. } diff --git a/src/main/java/com/ing/data/cassandra/jdbc/utils/JdbcUrlUtil.java b/src/main/java/com/ing/data/cassandra/jdbc/utils/JdbcUrlUtil.java index c67577b..5e99177 100644 --- a/src/main/java/com/ing/data/cassandra/jdbc/utils/JdbcUrlUtil.java +++ b/src/main/java/com/ing/data/cassandra/jdbc/utils/JdbcUrlUtil.java @@ -31,15 +31,20 @@ import java.sql.SQLNonTransientConnectionException; import java.sql.SQLSyntaxErrorException; import java.time.Duration; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.BAD_KEYSPACE; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.HOST_IN_URL; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.HOST_REQUIRED; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.INVALID_CONTACT_POINT; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.SECURECONENCTBUNDLE_REQUIRED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.URI_IS_SIMPLE; @@ -66,20 +71,29 @@ public final class JdbcUrlUtil { /** * JDBC URL parameter key for the CQL version. + * @deprecated For removal. */ + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public static final String KEY_VERSION = "version"; /** * Property name used to retrieve the active CQL version when the connection to Cassandra is established. This * property is mapped from the JDBC URL parameter {@code version} or from the default value defined in the * property {@code database.defaultCqlVersion} of the resource file 'jdbc-driver.properties'. + * @deprecated For removal, because {@link #KEY_VERSION} and {@link #TAG_CQL_VERSION} are deprecated. */ + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public static final String TAG_ACTIVE_CQL_VERSION = "activeCqlVersion"; /** * Property name used to retrieve the active CQL version when the connection to Cassandra is established. This * property is mapped from the JDBC URL parameter {@code version}. + * @deprecated For removal. 
*/ + @Deprecated + @SuppressWarnings("DeprecatedIsStillUsed") public static final String TAG_CQL_VERSION = "cqlVersion"; /** @@ -301,16 +315,12 @@ public final class JdbcUrlUtil { * Property name used to retrieve the contact points when the connection to Cassandra is established. This property * is mapped from the JDBC URL host. */ - public static final String TAG_SERVER_NAME = "serverName"; - - /** - * Property name used to retrieve the port used when the connection to Cassandra is established. This property - * is mapped from the JDBC URL port. - */ - public static final String TAG_PORT_NUMBER = "portNumber"; + public static final String TAG_CONTACT_POINTS = "contactPoints"; static final Logger LOG = LoggerFactory.getLogger(JdbcUrlUtil.class); + private static final String HOST_SEPARATOR = "--"; + private JdbcUrlUtil() { // Private constructor to hide the public one. } @@ -335,7 +345,6 @@ public static Properties parseURL(final String url) throws SQLException { final Properties props = new Properties(); if (url != null) { - props.setProperty(TAG_PORT_NUMBER, String.valueOf(DEFAULT_PORT)); boolean isDbaasConnection = false; int uriStartIndex = PROTOCOL.length(); if (url.startsWith(PROTOCOL_DBAAS)) { @@ -351,17 +360,18 @@ public static Properties parseURL(final String url) throws SQLException { } if (!isDbaasConnection) { - final String host = uri.getHost(); - if (host == null) { - throw new SQLNonTransientConnectionException(HOST_IN_URL); - } - props.setProperty(TAG_SERVER_NAME, host); - - int port = DEFAULT_PORT; - if (uri.getPort() >= 0) { - port = uri.getPort(); + try { + if (StringUtils.isBlank(uri.getAuthority())) { + throw new SQLNonTransientConnectionException(HOST_IN_URL); + } + final List contactPoints = parseContactPoints(uri.getAuthority()); + if (contactPoints.isEmpty()) { + throw new SQLNonTransientConnectionException(HOST_IN_URL); + } + props.put(TAG_CONTACT_POINTS, contactPoints); + } catch (final RuntimeException e) { + throw new SQLNonTransientConnectionException(e.getMessage()); } - props.setProperty(TAG_PORT_NUMBER, String.valueOf(port)); } String keyspace = uri.getPath(); @@ -456,14 +466,45 @@ public static Properties parseURL(final String url) throws SQLException { return props; } + private static List parseContactPoints(final String toParse) { + // Check whether the value to parse ends with a port. If yes, we'll use this port as the common port for all + // the hosts except if another port is specified for the host. When no port is specified at all, use the default + // Cassandra port. + final AtomicInteger defaultPort = new AtomicInteger(DEFAULT_PORT); + final Pattern endWithPort = Pattern.compile(":(\\d{1,5})$"); + final Matcher endWithPortMatcher = endWithPort.matcher(toParse); + if (endWithPortMatcher.find()) { + final String portValue = endWithPortMatcher.group(1); + if (portValue != null) { + defaultPort.set(Integer.parseInt(portValue)); + } + } + + return Arrays.stream(toParse.split(HOST_SEPARATOR)) + .map(part -> { + try { + int port = defaultPort.get(); + final String[] splitPart = part.split(":"); + if (splitPart.length > 1) { + port = Integer.parseInt(splitPart[1]); + } + return ContactPoint.of(splitPart[0], port); + } catch (final Exception e) { + throw new RuntimeException(String.format(INVALID_CONTACT_POINT, part)); + } + }) + .collect(Collectors.toList()); + } + /** * Creates a "sub-name" portion of a JDBC URL from properties. * * @param props A {@link Properties} instance containing all the properties to be considered. 
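To make the contact point parsing concrete, the following sketch shows how a URL using the '--' host separator is expected to split (host names, keyspace and datacenter are placeholders; the trailing port acts as the default for hosts that do not declare their own):

import com.ing.data.cassandra.jdbc.utils.ContactPoint;
import com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil;
import java.sql.SQLException;
import java.util.List;
import java.util.Properties;

public class ContactPointsParsingSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws SQLException {
        final Properties props =
            JdbcUrlUtil.parseURL("jdbc:cassandra://host1--host2:9043/my_keyspace?localdatacenter=DC1");
        final List<ContactPoint> contactPoints =
            (List<ContactPoint>) props.get(JdbcUrlUtil.TAG_CONTACT_POINTS);
        // Expected output: host1:9043 and host2:9043 (the trailing port is shared by both hosts).
        contactPoints.forEach(System.out::println);
    }
}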
- * @return A "sub-name" portion of a JDBC URL (for example: //myhost:9160/Test1?version=3.0.0). + * @return A "sub-name" portion of a JDBC URL (for example: //myhost:9160/Test1?localdatacenter=DC1). * @throws SQLException when something went wrong during the "sub-name" creation. * @throws SQLNonTransientConnectionException when the host name is missing. */ + @SuppressWarnings("unchecked") public static String createSubName(final Properties props) throws SQLException { // Make the keyspace always start with a "/" for URI. String keyspace = props.getProperty(TAG_DATABASE_NAME); @@ -471,19 +512,21 @@ public static String createSubName(final Properties props) throws SQLException { keyspace = StringUtils.prependIfMissing(keyspace, "/"); } - final String host = props.getProperty(TAG_SERVER_NAME); - if (host == null) { + String hostsAndPorts = null; + final List contactPoints = (List) props.get(TAG_CONTACT_POINTS); + if (contactPoints != null && !contactPoints.isEmpty()) { + hostsAndPorts = contactPoints.stream() + .map(ContactPoint::toString) + .collect(Collectors.joining(HOST_SEPARATOR)); + } + if (hostsAndPorts == null) { throw new SQLNonTransientConnectionException(HOST_REQUIRED); } // Build a valid URI from parts. final URI uri; - int port = DEFAULT_PORT; - if (StringUtils.isNotBlank(props.getProperty(TAG_PORT_NUMBER))) { - port = Integer.parseInt(props.getProperty(TAG_PORT_NUMBER)); - } try { - uri = new URI(null, null, host, port, keyspace, makeQueryString(props), null); + uri = new URI(null, hostsAndPorts, keyspace, makeQueryString(props), null); } catch (final Exception e) { throw new SQLNonTransientConnectionException(e); } @@ -496,26 +539,18 @@ public static String createSubName(final Properties props) throws SQLException { } /** - * Builds the URI part containing the query parameters "consistency" and "version" from properties. + * Builds the URI part containing the query parameter "consistency" from properties. * * @param props A {@link Properties} instance containing all the properties to be considered. - * @return The URI part containing the query parameters (for example: "consistency=ONE&version=3.0.0") or - * {@code null} if neither version nor consistency are defined in the provided properties. + * @return The URI part containing the query parameter "consistency" (for example: "consistency=ONE") or + * {@code null} if consistency is not defined in the provided properties. */ static String makeQueryString(final Properties props) { final StringBuilder sb = new StringBuilder(); - final String version = props.getProperty(TAG_CQL_VERSION); final String consistency = props.getProperty(TAG_CONSISTENCY_LEVEL); if (StringUtils.isNotBlank(consistency)) { sb.append(KEY_CONSISTENCY).append("=").append(consistency); } - if (StringUtils.isNotBlank(version)) { - if (sb.length() != 0) { - sb.append("&"); - } - sb.append(KEY_VERSION).append("=").append(version); - } - if (sb.length() > 0) { return sb.toString().trim(); } else { diff --git a/src/main/resources/jdbc-driver.properties b/src/main/resources/jdbc-driver.properties index b4372f4..c1ee1eb 100644 --- a/src/main/resources/jdbc-driver.properties +++ b/src/main/resources/jdbc-driver.properties @@ -19,4 +19,5 @@ driver.name=Cassandra JDBC Driver driver.jdbcVersion=4.0 database.productName=Cassandra +# TODO: 'version' parameter is deprecated, this property has to be removed. 
database.defaultCqlVersion=3.0.0 diff --git a/src/test/java/com/ing/data/cassandra/jdbc/BatchStatementsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/BatchStatementsUnitTest.java index f11dd29..aa6c87a 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/BatchStatementsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/BatchStatementsUnitTest.java @@ -43,8 +43,8 @@ class BatchStatementsUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); - sqlConnection2 = newConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); + sqlConnection2 = newConnection(KEYSPACE, "localdatacenter=datacenter1"); } @AfterAll diff --git a/src/test/java/com/ing/data/cassandra/jdbc/CollectionsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/CollectionsUnitTest.java index f4ade4a..14b4019 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/CollectionsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/CollectionsUnitTest.java @@ -51,7 +51,7 @@ class CollectionsUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test diff --git a/src/test/java/com/ing/data/cassandra/jdbc/DataSourceUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/DataSourceUnitTest.java index aeabe3c..8923b8b 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/DataSourceUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/DataSourceUnitTest.java @@ -13,12 +13,14 @@ */ package com.ing.data.cassandra.jdbc; +import com.ing.data.cassandra.jdbc.utils.ContactPoint; import org.junit.jupiter.api.Test; import javax.sql.DataSource; import java.sql.SQLException; +import java.util.Collections; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CQL_VERSION; +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONSISTENCY_LEVEL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -30,22 +32,24 @@ class DataSourceUnitTest extends UsingCassandraContainerTest { private static final String KEYSPACE = "test_keyspace"; private static final String USER = "testuser"; private static final String PASSWORD = "secret"; - private static final String VERSION = "3.0.0"; private static final String CONSISTENCY = "ONE"; @Test void givenParameters_whenConstructDataSource_returnCassandraDataSource() throws Exception { final CassandraDataSource cds = new CassandraDataSource( - "localhost", 9042, KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY); - assertEquals("localhost", cds.getServerName()); - assertEquals(9042, cds.getPortNumber()); + Collections.singletonList(ContactPoint.of("localhost", 9042)), KEYSPACE, USER, PASSWORD, CONSISTENCY); + assertNotNull(cds.getContactPoints()); + assertEquals(1, cds.getContactPoints().size()); + final ContactPoint dsContactPoint = cds.getContactPoints().get(0); + assertEquals("localhost", dsContactPoint.getHost()); + assertEquals(9042, dsContactPoint.getPort()); assertEquals(KEYSPACE, cds.getDatabaseName()); assertEquals(USER, cds.getUser()); assertEquals(PASSWORD, cds.getPassword()); - assertEquals(VERSION, cds.getVersion()); - final 
DataSource ds = new CassandraDataSource(cassandraContainer.getContactPoint().getHostName(), - cassandraContainer.getContactPoint().getPort(), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY); + final DataSource ds = new CassandraDataSource(Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), + KEYSPACE, USER, PASSWORD, CONSISTENCY); assertNotNull(ds); // null username and password @@ -58,16 +62,15 @@ void givenParameters_whenConstructDataSource_returnCassandraDataSource() throws cnx = ds.getConnection(); assertFalse(cnx.isClosed()); ds.setLoginTimeout(5); - assertEquals(VERSION, ((CassandraConnection) cnx).getConnectionProperties().get(TAG_CQL_VERSION)); + assertEquals(CONSISTENCY, ((CassandraConnection) cnx).getConnectionProperties().get(TAG_CONSISTENCY_LEVEL)); assertEquals(5, ds.getLoginTimeout()); } - @Test void givenCassandraDataSource_whenIsWrapperFor_returnExpectedValue() throws Exception { - final DataSource ds = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), - KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY); + final DataSource ds =new CassandraDataSource(Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), + KEYSPACE, USER, PASSWORD, CONSISTENCY); // Assert it is a wrapper for DataSource. assertTrue(ds.isWrapperFor(DataSource.class)); @@ -78,17 +81,17 @@ void givenCassandraDataSource_whenIsWrapperFor_returnExpectedValue() throws Exce @Test void givenCassandraDataSource_whenUnwrap_returnUnwrappedDatasource() throws Exception { - final DataSource ds = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), - KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY); + final DataSource ds =new CassandraDataSource(Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), + KEYSPACE, USER, PASSWORD, CONSISTENCY); assertNotNull(ds.unwrap(DataSource.class)); } @Test void givenCassandraDataSource_whenUnwrapToInvalidInterface_throwException() { - final DataSource ds = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), - KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY); + final DataSource ds = new CassandraDataSource(Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), + KEYSPACE, USER, PASSWORD, CONSISTENCY); assertThrows(SQLException.class, () -> ds.unwrap(this.getClass())); } } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/JdbcRegressionUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/JdbcRegressionUnitTest.java index e087684..459146f 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/JdbcRegressionUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/JdbcRegressionUnitTest.java @@ -71,7 +71,7 @@ class JdbcRegressionUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); // Update cluster name according to the configured name. 
/* try (final Statement statement = sqlConnection.createStatement()) { diff --git a/src/test/java/com/ing/data/cassandra/jdbc/JsonSupportUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/JsonSupportUnitTest.java index 5536cdc..bd84b78 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/JsonSupportUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/JsonSupportUnitTest.java @@ -13,7 +13,7 @@ */ package com.ing.data.cassandra.jdbc; -import com.datastax.driver.core.utils.UUIDs; +import com.datastax.oss.driver.api.core.uuid.Uuids; import com.ing.data.cassandra.jdbc.utils.CustomObject; import com.ing.data.cassandra.jdbc.utils.CustomObjectStringOnly; import com.ing.data.cassandra.jdbc.utils.JsonResult; @@ -59,7 +59,7 @@ class JsonSupportUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test @@ -117,11 +117,11 @@ void givenInsertStatementUsingFromJsonFunction_whenExecute_insertExpectedValues( }}) .textValue("example text") .timeValue(nowTime) - .timeuuidValue(UUIDs.timeBased()) + .timeuuidValue(Uuids.timeBased()) .tsValue(nowDateTime) .tinyintValue((byte) 12) .tupleValue(Arrays.asList("10", "ten")) - .uuidValue(UUIDs.random()) + .uuidValue(Uuids.random()) .varcharValue("varchar example") .varintValue(BigInteger.valueOf(987123L)) .build()); diff --git a/src/test/java/com/ing/data/cassandra/jdbc/MetadataResultSetsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/MetadataResultSetsUnitTest.java index f2a49db..c31afda 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/MetadataResultSetsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/MetadataResultSetsUnitTest.java @@ -23,6 +23,9 @@ import com.ing.data.cassandra.jdbc.types.DataTypeEnum; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,8 +37,11 @@ import java.sql.Types; import java.util.ArrayList; import java.util.List; +import java.util.stream.Stream; import static com.ing.data.cassandra.jdbc.types.DataTypeEnum.VECTOR; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_4; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_5; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsIterableContaining.hasItem; @@ -52,10 +58,11 @@ class MetadataResultSetsUnitTest extends UsingCassandraContainerTest { private static final String KEYSPACE = "test_keyspace"; private static final String ANOTHER_KEYSPACE = "test_keyspace2"; + private static final String VECTORS_KEYSPACE = "test_keyspace_vect"; @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } /* @@ -463,6 +470,24 @@ void givenStatement_whenBuildUDTsWithNonJavaObjectTypes_returnEmptyResultSet() t assertFalse(result.next()); } + @Test + void givenStatement_whenBuildUDTsNotSpecifyingTypeNamePattern_returnExpectedResultSet() throws SQLException { + final CassandraStatement statement = (CassandraStatement) sqlConnection.createStatement(); + final ResultSet result = new 
TypeMetadataResultSetBuilder(statement).buildUDTs(KEYSPACE, null, + new int[]{Types.JAVA_OBJECT}); + assertNotNull(result); + final List foundColumns = new ArrayList<>(); + int resultSize = 0; + while (result.next()) { + ++resultSize; + foundColumns.add(String.join(";", result.getString(2), result.getString(3))); + } + assertEquals(3, resultSize); + assertEquals(KEYSPACE.concat(";customtype1"), foundColumns.get(0)); + assertEquals(KEYSPACE.concat(";customtype2"), foundColumns.get(1)); + assertEquals(KEYSPACE.concat(";type_in_different_ks"), foundColumns.get(2)); + } + @Test void givenStatement_whenBuildUDTsNotSpecifyingSchemaPattern_returnExpectedResultSet() throws SQLException { final CassandraStatement statement = (CassandraStatement) sqlConnection.createStatement(); @@ -481,10 +506,19 @@ void givenStatement_whenBuildUDTsNotSpecifyingSchemaPattern_returnExpectedResult assertEquals(ANOTHER_KEYSPACE.concat(";type_in_different_ks"), foundColumns.get(1)); } - @Test - void givenStatement_whenBuildTypes_returnExpectedResultSet() throws SQLException { + static Stream buildCqlTypesTestCases() { + return Stream.of( + Arguments.of(CASSANDRA_5, DataTypeEnum.values().length), + Arguments.of(CASSANDRA_4, DataTypeEnum.values().length - 1) // Type VECTOR appears in Cassandra 5.0 + ); + } + + @ParameterizedTest + @MethodSource("buildCqlTypesTestCases") + void givenStatement_whenBuildTypes_returnExpectedResultSet(final String dbVersion, final int expectedNbOfTypes) + throws SQLException { final CassandraStatement statement = (CassandraStatement) sqlConnection.createStatement(); - final ResultSet result = new TypeMetadataResultSetBuilder(statement).buildTypes(); + final ResultSet result = new TypeMetadataResultSetBuilder(statement).buildTypes(dbVersion); assertNotNull(result); assertEquals(18, result.getMetaData().getColumnCount()); assertEquals("TYPE_NAME", result.getMetaData().getColumnName(1)); @@ -515,7 +549,7 @@ void givenStatement_whenBuildTypes_returnExpectedResultSet() throws SQLException } foundColumns.add(String.join(";", results)); } - assertEquals(DataTypeEnum.values().length, resultSize); + assertEquals(expectedNbOfTypes, resultSize); assertEquals("tinyint;-6;4;null;null;null;1;false;2;false;true;false;null;0;0;null;null;4", foundColumns.get(0)); assertEquals("bigint;-5;20;null;null;null;1;false;2;false;true;false;null;0;0;null;null;20", @@ -556,11 +590,11 @@ void givenStatement_whenBuildTypes_returnExpectedResultSet() throws SQLException foundColumns.get(18)); assertEquals("inet;1111;39;null;null;null;1;false;2;false;true;false;null;0;0;null;null;39", foundColumns.get(19)); - assertEquals("list;1111;-1;';';null;1;true;2;true;true;false;null;0;0;null;null;-1", + assertEquals("list;1111;-1;null;null;null;1;false;2;true;true;false;null;0;0;null;null;-1", foundColumns.get(20)); - assertEquals("map;1111;-1;';';null;1;true;2;true;true;false;null;0;0;null;null;-1", + assertEquals("map;1111;-1;null;null;null;1;false;2;true;true;false;null;0;0;null;null;-1", foundColumns.get(21)); - assertEquals("set;1111;-1;';';null;1;true;2;true;true;false;null;0;0;null;null;-1", + assertEquals("set;1111;-1;null;null;null;1;false;2;true;true;false;null;0;0;null;null;-1", foundColumns.get(22)); assertEquals("timeuuid;1111;36;null;null;null;1;false;2;true;true;false;null;0;0;null;null;36", foundColumns.get(23)); @@ -570,8 +604,11 @@ void givenStatement_whenBuildTypes_returnExpectedResultSet() throws SQLException foundColumns.get(25)); 
assertEquals("uuid;1111;36;null;null;null;1;false;2;true;true;false;null;0;0;null;null;36", foundColumns.get(26)); - assertEquals(VECTOR.cqlType.concat(";1111;-1;';';null;1;true;2;true;true;false;null;0;0;null;null;-1"), - foundColumns.get(27)); + if (CASSANDRA_5.equals(dbVersion)) { + assertEquals(VECTOR.cqlType + .concat(";1111;-1;null;null;null;1;false;2;true;true;false;null;0;0;null;null;-1"), + foundColumns.get(27)); + } } @Test @@ -688,4 +725,52 @@ void givenStatement_whenBuildFunctionColumns_returnExpectedResultSet() throws SQ foundColumns.get(2)); } + /* + * Indexes metadata + */ + + @Test + void givenStatement_whenBuildIndexes_returnExpectedResultSet() throws SQLException { + final CassandraStatement statement = (CassandraStatement) sqlConnection.createStatement(); + ResultSet result = new TableMetadataResultSetBuilder(statement) + .buildIndexes(VECTORS_KEYSPACE, "pet_supply_vectors", false, false); + assertNotNull(result); + assertEquals(13, result.getMetaData().getColumnCount()); + assertEquals("TABLE_CAT", result.getMetaData().getColumnName(1)); + assertEquals("TABLE_SCHEM", result.getMetaData().getColumnName(2)); + assertEquals("TABLE_NAME", result.getMetaData().getColumnName(3)); + assertEquals("NON_UNIQUE", result.getMetaData().getColumnName(4)); + assertEquals("INDEX_QUALIFIER", result.getMetaData().getColumnName(5)); + assertEquals("INDEX_NAME", result.getMetaData().getColumnName(6)); + assertEquals("TYPE", result.getMetaData().getColumnName(7)); + assertEquals("ORDINAL_POSITION", result.getMetaData().getColumnName(8)); + assertEquals("COLUMN_NAME", result.getMetaData().getColumnName(9)); + assertEquals("ASC_OR_DESC", result.getMetaData().getColumnName(10)); + assertEquals("CARDINALITY", result.getMetaData().getColumnName(11)); + assertEquals("PAGES", result.getMetaData().getColumnName(12)); + assertEquals("FILTER_CONDITION", result.getMetaData().getColumnName(13)); + final List foundColumns = new ArrayList<>(); + int resultSize = 0; + while (result.next()) { + ++resultSize; + foundColumns.add(String.join(";", result.getString(2), result.getString(3), result.getString(4), + result.getString(6), result.getString(7), result.getString(8), result.getString(9))); + } + assertEquals(1, resultSize); + assertThat(foundColumns, + hasItem(is(VECTORS_KEYSPACE.concat(";pet_supply_vectors;true;idx_vector;3;1;product_vector")))); + + result = new TableMetadataResultSetBuilder(statement).buildIndexes(ANOTHER_KEYSPACE, "cf_test2", false, false); + assertNotNull(result); + foundColumns.clear(); + resultSize = 0; + while (result.next()) { + ++resultSize; + foundColumns.add(String.join(";", result.getString(2), result.getString(3), result.getString(4), + result.getString(6), result.getString(7), result.getString(8), result.getString(9))); + } + assertEquals(1, resultSize); + assertThat(foundColumns, hasItem(is(ANOTHER_KEYSPACE.concat(";cf_test2;true;int_values_idx;3;1;t2ivalue")))); + } + } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/PooledUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/PooledUnitTest.java index e101911..8b2f538 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/PooledUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/PooledUnitTest.java @@ -13,6 +13,7 @@ */ package com.ing.data.cassandra.jdbc; +import com.ing.data.cassandra.jdbc.utils.ContactPoint; import org.junit.jupiter.api.Test; import javax.sql.DataSource; @@ -21,6 +22,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import 
java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -40,7 +42,8 @@ class PooledUnitTest extends UsingCassandraContainerTest { @Test void givenPooledDataSource_whenGetAndCloseConnection2MillionTimes_manageConnectionsProperly() throws Exception { final CassandraDataSource connectionPoolDataSource = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), + Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY, LOCAL_DATACENTER); final DataSource pooledCassandraDataSource = new PooledCassandraDataSource(connectionPoolDataSource); @@ -55,7 +58,8 @@ void givenPooledDataSource_whenGetAndCloseConnection2MillionTimes_manageConnecti @Test void givenPooledDataSource_whenExecute5ThousandsPreparedStatements_getExpectedResults() throws Exception { final CassandraDataSource connectionPoolDataSource = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), + Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY, LOCAL_DATACENTER); final DataSource pooledCassandraDataSource = new PooledCassandraDataSource(connectionPoolDataSource); final Connection connection = pooledCassandraDataSource.getConnection(); @@ -79,7 +83,8 @@ void givenPooledDataSource_whenExecute5ThousandsPreparedStatements_getExpectedRe @Test void givenPooledDataSource_whenExecuteStatement_getExpectedResults() throws Exception { final CassandraDataSource connectionPoolDataSource = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), + Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY, LOCAL_DATACENTER); final DataSource pooledCassandraDataSource = new PooledCassandraDataSource(connectionPoolDataSource); final Connection connection = pooledCassandraDataSource.getConnection(); @@ -97,7 +102,8 @@ void givenPooledDataSource_whenExecuteStatement_getExpectedResults() throws Exce @Test void givenPooledCassandraDataSource_whenUnwrap_returnUnwrappedDataSource() throws Exception { final CassandraDataSource connectionPoolDataSource = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), + Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort())), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY, LOCAL_DATACENTER); assertNotNull(connectionPoolDataSource.unwrap(DataSource.class)); } @@ -105,7 +111,8 @@ void givenPooledCassandraDataSource_whenUnwrap_returnUnwrappedDataSource() throw @Test void givenPooledCassandraDataSource_whenUnwrapToInvalidInterface_throwException() { final CassandraDataSource connectionPoolDataSource = new CassandraDataSource( - cassandraContainer.getContactPoint().getHostName(), cassandraContainer.getContactPoint().getPort(), + Collections.singletonList(ContactPoint.of( + cassandraContainer.getContactPoint().getHostName(), 
cassandraContainer.getContactPoint().getPort())), KEYSPACE, USER, PASSWORD, VERSION, CONSISTENCY, LOCAL_DATACENTER); assertThrows(SQLException.class, () -> connectionPoolDataSource.unwrap(this.getClass())); } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/PreparedStatementsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/PreparedStatementsUnitTest.java index 7baad38..1488ffc 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/PreparedStatementsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/PreparedStatementsUnitTest.java @@ -13,31 +13,47 @@ */ package com.ing.data.cassandra.jdbc; -import com.datastax.driver.core.utils.UUIDs; import com.datastax.oss.driver.api.core.data.CqlDuration; import com.datastax.oss.driver.api.core.data.TupleValue; import com.datastax.oss.driver.api.core.type.DataTypes; +import com.datastax.oss.driver.api.core.uuid.Uuids; import com.ing.data.cassandra.jdbc.types.DataTypeEnum; import com.ing.data.cassandra.jdbc.types.JdbcAscii; import com.ing.data.cassandra.jdbc.types.JdbcBoolean; import com.ing.data.cassandra.jdbc.types.JdbcInt32; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import javax.sql.rowset.serial.SerialBlob; +import javax.sql.rowset.serial.SerialClob; import java.net.InetAddress; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Date; import java.sql.ParameterMetaData; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; import java.sql.Types; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.time.Month; +import java.time.OffsetDateTime; +import java.time.OffsetTime; +import java.time.ZoneId; +import java.util.Calendar; import java.util.UUID; +import static java.time.ZoneOffset.UTC; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; class PreparedStatementsUnitTest extends UsingCassandraContainerTest { @@ -45,7 +61,7 @@ class PreparedStatementsUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test @@ -124,7 +140,7 @@ void givenPreparedStatement_whenExecute_insertExpectedValues() throws Exception preparedStatement.setObject(2, aTuple, Types.OTHER); preparedStatement.setObject(3, InetAddress.getByName("127.0.0.1"), Types.OTHER); preparedStatement.setObject(4, CqlDuration.from("15s"), Types.OTHER); - final UUID generatedUuid = UUIDs.random(); + final UUID generatedUuid = Uuids.random(); preparedStatement.setObject(5, generatedUuid, Types.OTHER); preparedStatement.execute(); @@ -136,4 +152,163 @@ void givenPreparedStatement_whenExecute_insertExpectedValues() throws Exception assertEquals(CqlDuration.from("15s"), resultSet.getObject("col_duration")); assertEquals(generatedUuid, resultSet.getObject("col_uuid")); } + + @SuppressWarnings("ResultOfMethodCallIgnored") + @Test + void givenPreparedStatementWithBlobs_whenExecute_insertExpectedValues() throws Exception { + final Statement statement = sqlConnection.createStatement(); 
+ final String insertQuery = "INSERT INTO test_ps_blobs (col_key, col_blob) VALUES (?, ?);"; + final PreparedStatement preparedStatement = sqlConnection.prepareStatement(insertQuery); + preparedStatement.setString(1, "key1"); + preparedStatement.setObject(2, new SerialBlob("testJavaSqlBlob".getBytes(StandardCharsets.UTF_8))); + preparedStatement.execute(); + + preparedStatement.setString(1, "key2"); + preparedStatement.setObject(2, new SerialClob("testJavaSqlClob-with accents: Äèéî".toCharArray())); + preparedStatement.execute(); + + preparedStatement.setString(1, "key3"); + preparedStatement.setObject(2, new SerialClob("testJavaSqlNClob".toCharArray()), Types.NCLOB); + preparedStatement.execute(); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM test_ps_blobs WHERE col_key = 'key1';"); + assertTrue(resultSet.next()); + byte[] array = new byte[resultSet.getBinaryStream("col_blob").available()]; + resultSet.getBinaryStream("col_blob").read(array); + assertEquals("testJavaSqlBlob", new String(array, StandardCharsets.UTF_8)); + + resultSet = statement.executeQuery("SELECT * FROM test_ps_blobs WHERE col_key = 'key2';"); + assertTrue(resultSet.next()); + array = new byte[resultSet.getBinaryStream("col_blob").available()]; + resultSet.getBinaryStream("col_blob").read(array); + assertEquals("testJavaSqlClob-with accents: Äèéî", new String(array, StandardCharsets.UTF_8)); + + resultSet = statement.executeQuery("SELECT * FROM test_ps_blobs WHERE col_key = 'key3';"); + assertTrue(resultSet.next()); + array = new byte[resultSet.getBinaryStream("col_blob").available()]; + resultSet.getBinaryStream("col_blob").read(array); + assertEquals("testJavaSqlNClob", new String(array, StandardCharsets.UTF_8)); + } + + @Test + void givenPreparedStatementWithUrlAndVarcharTypes_whenExecute_insertExpectedValues() throws Exception { + final Statement statement = sqlConnection.createStatement(); + final String insertQuery = "INSERT INTO test_ps_texts (col_key, col_text) VALUES (?, ?);"; + final PreparedStatement preparedStatement = sqlConnection.prepareStatement(insertQuery); + preparedStatement.setObject(1, "key1"); + preparedStatement.setObject(2, new URL("https://cassandra.apache.org/")); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key2"); + preparedStatement.setObject(2, "longvarchar", Types.LONGVARCHAR); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key3"); + preparedStatement.setObject(2, "c", Types.NCHAR); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key4"); + preparedStatement.setObject(2, "nvarchar", Types.NVARCHAR); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key5"); + preparedStatement.setObject(2, "longnvarchar", Types.LONGNVARCHAR); + preparedStatement.execute(); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM test_ps_texts WHERE col_key = 'key1';"); + assertTrue(resultSet.next()); + assertEquals("https://cassandra.apache.org/", resultSet.getString("col_text")); + resultSet = statement.executeQuery("SELECT * FROM test_ps_texts WHERE col_key = 'key2';"); + assertTrue(resultSet.next()); + assertEquals("longvarchar", resultSet.getString("col_text")); + resultSet = statement.executeQuery("SELECT * FROM test_ps_texts WHERE col_key = 'key3';"); + assertTrue(resultSet.next()); + assertEquals("c", resultSet.getString("col_text")); + resultSet = statement.executeQuery("SELECT * FROM test_ps_texts WHERE col_key = 'key4';"); + assertTrue(resultSet.next()); + assertEquals("nvarchar", 
resultSet.getString("col_text")); + resultSet = statement.executeQuery("SELECT * FROM test_ps_texts WHERE col_key = 'key5';"); + assertTrue(resultSet.next()); + assertEquals("longnvarchar", resultSet.getString("col_text")); + } + + @Test + void givenPreparedStatementWithBitType_whenExecute_insertExpectedValues() throws Exception { + final Statement statement = sqlConnection.createStatement(); + final String insertQuery = "INSERT INTO cf_test_ps (keyname, t1bValue) VALUES (?, ?);"; + final PreparedStatement preparedStatement = sqlConnection.prepareStatement(insertQuery); + preparedStatement.setObject(1, "key1"); + preparedStatement.setObject(2, false, Types.BIT); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key2"); + preparedStatement.setObject(2, true, Types.BIT); + preparedStatement.execute(); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM cf_test_ps WHERE keyname = 'key1';"); + assertTrue(resultSet.next()); + assertFalse(resultSet.getBoolean("t1bValue")); + + resultSet = statement.executeQuery("SELECT * FROM cf_test_ps WHERE keyname = 'key2';"); + assertTrue(resultSet.next()); + assertTrue(resultSet.getBoolean("t1bValue")); + } + + @Test + void givenPreparedStatementWithDateTimes_whenExecute_insertExpectedValues() throws Exception { + final Statement statement = sqlConnection.createStatement(); + final String insertQuery = "INSERT INTO test_ps_datetimes (col_key, col_time, col_date, col_ts) " + + "VALUES (?, ?, ?, ?);"; + final PreparedStatement preparedStatement = sqlConnection.prepareStatement(insertQuery); + preparedStatement.setObject(1, "key1"); + preparedStatement.setObject(2, LocalTime.of(15, 35, 40, 123456789)); + preparedStatement.setObject(3, LocalDate.of(2023, Month.OCTOBER, 31)); + preparedStatement.setObject(4, LocalDateTime.of(2023, Month.OCTOBER, 31, 16, 40, 25, 123456789)); + preparedStatement.execute(); + + final OffsetDateTime testOffsetDateTime = OffsetDateTime.of(2023, 10, 31, 16, 40, 25, 123456789, UTC); + final long testDateTimeInMillis = testOffsetDateTime.toInstant().toEpochMilli(); + preparedStatement.setObject(1, "key2"); + preparedStatement.setObject(2, OffsetTime.of(15, 35, 40, 123456789, UTC)); + preparedStatement.setObject(3, LocalDate.now()); + preparedStatement.setObject(4, testOffsetDateTime); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key3"); + preparedStatement.setObject(2, LocalTime.now()); + preparedStatement.setObject(3, LocalDate.now()); + preparedStatement.setObject(4, new java.util.Date(testDateTimeInMillis)); + preparedStatement.execute(); + + preparedStatement.setObject(1, "key4"); + preparedStatement.setObject(2, LocalTime.now()); + preparedStatement.setObject(3, LocalDate.now()); + preparedStatement.setObject(4, new Calendar.Builder().setInstant(testDateTimeInMillis).build()); + preparedStatement.execute(); + + ResultSet resultSet = statement.executeQuery("SELECT * FROM test_ps_datetimes WHERE col_key = 'key1';"); + assertTrue(resultSet.next()); + // Note: Cassandra max precision for timestamps is milliseconds, not nanoseconds + assertEquals(Time.valueOf(LocalTime.of(15, 35, 40, 123000000)), resultSet.getTime("col_time")); + assertEquals(Date.valueOf(LocalDate.of(2023, Month.OCTOBER, 31)), resultSet.getDate("col_date")); + assertEquals(Timestamp.valueOf(LocalDateTime.of(2023, Month.OCTOBER, 31, 16, 40, 25, 123000000)), + resultSet.getTimestamp("col_ts")); + + final Timestamp expectedTimestamp = Timestamp.valueOf( + OffsetDateTime.of(2023, 10, 31, 16, 40, 25, 123000000, UTC) + 
.atZoneSameInstant(ZoneId.systemDefault()).toLocalDateTime()); + resultSet = statement.executeQuery("SELECT * FROM test_ps_datetimes WHERE col_key = 'key2';"); + assertTrue(resultSet.next()); + assertEquals(Time.valueOf(OffsetTime.of(15, 35, 40, 123000000, UTC).toLocalTime()), + resultSet.getTime("col_time")); + assertEquals(expectedTimestamp, resultSet.getTimestamp("col_ts")); + + resultSet = statement.executeQuery("SELECT * FROM test_ps_datetimes WHERE col_key = 'key3';"); + assertTrue(resultSet.next()); + assertEquals(expectedTimestamp, resultSet.getTimestamp("col_ts")); + + resultSet = statement.executeQuery("SELECT * FROM test_ps_datetimes WHERE col_key = 'key4';"); + assertTrue(resultSet.next()); + assertEquals(expectedTimestamp, resultSet.getTimestamp("col_ts")); + } } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/ResultSetUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/ResultSetUnitTest.java index 5abcd4a..2052ea4 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/ResultSetUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/ResultSetUnitTest.java @@ -14,18 +14,24 @@ package com.ing.data.cassandra.jdbc; import com.datastax.oss.driver.api.core.cql.ExecutionInfo; +import org.apache.commons.io.IOUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import java.nio.charset.StandardCharsets; import java.sql.ResultSet; import java.sql.SQLSyntaxErrorException; import java.sql.SQLWarning; import java.sql.Statement; +import java.time.OffsetDateTime; import java.util.Arrays; +import java.util.Calendar; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -39,7 +45,7 @@ class ResultSetUnitTest extends UsingCassandraContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test @@ -92,4 +98,46 @@ void givenSelectStatementGeneratingWarning_whenGetWarnings_returnExpectedWarning assertEquals("Second warning message", nextWarning.getMessage()); } + @Test + void givenResultSetWithRows_whenGetObjectAsCalendar_returnExpectedValue() throws Exception { + final String cql = "SELECT col_ts FROM tbl_test_timestamps WHERE keyname = 'key1'"; + final Statement statement = sqlConnection.createStatement(); + final ResultSet rs = statement.executeQuery(cql); + assertTrue(rs.next()); + assertEquals(new Calendar.Builder() + .setInstant(OffsetDateTime.parse("2023-11-01T11:30:25.789+01:00").toEpochSecond()) + .build(), rs.getObject("col_ts", Calendar.class)); + } + + @Test + void givenResultSetWithRows_whenGetClob_returnExpectedValue() throws Exception { + final String cql = "SELECT col_blob FROM tbl_test_blobs WHERE keyname = 'key1'"; + final Statement statement = sqlConnection.createStatement(); + final ResultSet rs = statement.executeQuery(cql); + assertTrue(rs.next()); + final byte[] byteArray = IOUtils.toByteArray(rs.getClob("col_blob").getCharacterStream(), + StandardCharsets.UTF_8); + assertArrayEquals("testValueAsClobInUtf8 with accents: Äîéè".getBytes(StandardCharsets.UTF_8), byteArray); + } + + @Test + 
void givenResultSetWithRows_whenGetAsciiStream_returnExpectedValue() throws Exception { + final String cql = "SELECT col_ascii FROM tbl_test_texts WHERE keyname = 'key1'"; + final Statement statement = sqlConnection.createStatement(); + final ResultSet rs = statement.executeQuery(cql); + assertTrue(rs.next()); + final byte[] byteArray = IOUtils.toByteArray(rs.getAsciiStream("col_ascii")); + assertArrayEquals("testValueAscii".getBytes(StandardCharsets.US_ASCII), byteArray); + } + + @Test + void givenResultSetWithRows_whenGetCharacterStream_returnExpectedValue() throws Exception { + final String cql = "SELECT col_blob FROM tbl_test_blobs WHERE keyname = 'key1'"; + final Statement statement = sqlConnection.createStatement(); + final ResultSet rs = statement.executeQuery(cql); + assertTrue(rs.next()); + final byte[] byteArray = IOUtils.toByteArray(rs.getCharacterStream("col_blob"), StandardCharsets.UTF_8); + assertArrayEquals("testValueAsClobInUtf8 with accents: Äîéè".getBytes(StandardCharsets.UTF_8), byteArray); + } + } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/UsingCassandraContainerTest.java b/src/test/java/com/ing/data/cassandra/jdbc/UsingCassandraContainerTest.java index b3cad8b..20dc31b 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/UsingCassandraContainerTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/UsingCassandraContainerTest.java @@ -28,13 +28,14 @@ abstract class UsingCassandraContainerTest { // For the official Cassandra image, see here: https://hub.docker.com/_/cassandra - static final DockerImageName CASSANDRA_IMAGE = DockerImageName.parse("cassandra:4.1.3"); + static final DockerImageName CASSANDRA_IMAGE = DockerImageName.parse("cassandra:5.0"); static CassandraConnection sqlConnection = null; // Using @Container annotation restarts a new container for each test of the class, so as it takes ~20/30 sec. to // start a Cassandra container, we just want to have one container instance for all the tests of the class. 
See: // https://www.testcontainers.org/test_framework_integration/manual_lifecycle_control/#singleton-containers + @SuppressWarnings("resource") static final CassandraContainer cassandraContainer = new CassandraContainer<>(CASSANDRA_IMAGE) .withEnv("CASSANDRA_DC", "datacenter1") .withEnv("CASSANDRA_CLUSTER_NAME", "embedded_test_cluster") diff --git a/src/test/java/com/ing/data/cassandra/jdbc/UtilsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/UtilsUnitTest.java index aec67aa..e8cc669 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/UtilsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/UtilsUnitTest.java @@ -15,120 +15,120 @@ import com.datastax.oss.driver.api.core.config.DefaultDriverOption; import com.datastax.oss.driver.api.core.config.DriverOption; +import com.ing.data.cassandra.jdbc.metadata.BasicVersionedMetadata; +import com.ing.data.cassandra.jdbc.utils.ContactPoint; import org.apache.commons.lang3.StringUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; +import org.semver4j.Semver; import java.sql.SQLException; import java.sql.SQLNonTransientConnectionException; import java.sql.SQLSyntaxErrorException; import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.stream.Stream; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_4; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.CASSANDRA_5; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.buildMetadataList; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.existsInDatabaseVersion; import static com.ing.data.cassandra.jdbc.utils.DriverUtil.getDriverProperty; -import static com.ing.data.cassandra.jdbc.utils.DriverUtil.parseVersion; +import static com.ing.data.cassandra.jdbc.utils.DriverUtil.safeParseVersion; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.BAD_KEYSPACE; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.HOST_IN_URL; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.HOST_REQUIRED; +import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.INVALID_CONTACT_POINT; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.SECURECONENCTBUNDLE_REQUIRED; import static com.ing.data.cassandra.jdbc.utils.ErrorConstants.URI_IS_SIMPLE; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.DEFAULT_PORT; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.PROTOCOL; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CLOUD_SECURE_CONNECT_BUNDLE; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONNECTION_RETRIES; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONSISTENCY_LEVEL; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CQL_VERSION; +import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_CONTACT_POINTS; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_DATABASE_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_DEBUG; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_LOAD_BALANCING_POLICY; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_LOCAL_DATACENTER; import static 
com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PASSWORD; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_PORT_NUMBER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_RECONNECT_POLICY; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_REQUEST_TIMEOUT; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_RETRY_POLICY; -import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_SERVER_NAME; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.TAG_USER; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.createSubName; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.parseReconnectionPolicy; import static com.ing.data.cassandra.jdbc.utils.JdbcUrlUtil.parseURL; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; class UtilsUnitTest { static Stream buildUrlParsingTestCases() { return Stream.of( Arguments.of("jdbc:cassandra://localhost:9042/astra?secureconnectbundle=/path/to/location/filename.extn&user=user1&password=password1", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, "astra"); put(TAG_CLOUD_SECURE_CONNECT_BUNDLE, "/path/to/location/filename.extn"); put(TAG_USER, "user1"); put(TAG_PASSWORD, "password1"); }}), Arguments.of("jdbc:cassandra:dbaas:///astra?secureconnectbundle=/path/to/location/filename.extn&user=user1&password=password1", - new HashMap() {{ - put(TAG_SERVER_NAME, null); - put(TAG_PORT_NUMBER, String.valueOf(DEFAULT_PORT)); + new HashMap() {{ + put(TAG_CONTACT_POINTS, null); put(TAG_DATABASE_NAME, "astra"); put(TAG_CLOUD_SECURE_CONNECT_BUNDLE, "/path/to/location/filename.extn"); put(TAG_USER, "user1"); put(TAG_PASSWORD, "password1"); }}), - Arguments.of("jdbc:cassandra://localhost:9042/Keyspace1?version=3.0.0&consistency=QUORUM", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + Arguments.of("jdbc:cassandra://localhost:9042/Keyspace1?consistency=QUORUM", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, "Keyspace1"); - put(TAG_CQL_VERSION, "3.0.0"); put(TAG_CONSISTENCY_LEVEL, "QUORUM"); }}), Arguments.of("jdbc:cassandra://localhost/Keyspace1?consistency=QUORUM", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, "Keyspace1"); - put(TAG_CQL_VERSION, null); put(TAG_CONSISTENCY_LEVEL, "QUORUM"); }}), - Arguments.of("jdbc:cassandra://localhost/Keyspace1?version=2.0.0", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + Arguments.of("jdbc:cassandra://localhost/Keyspace1", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, "Keyspace1"); - put(TAG_CQL_VERSION, "2.0.0"); put(TAG_CONSISTENCY_LEVEL, null); }}), Arguments.of("jdbc:cassandra://localhost", - new 
HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, null); - put(TAG_CQL_VERSION, null); put(TAG_CONSISTENCY_LEVEL, null); }}), Arguments.of("jdbc:cassandra://localhost/Keyspace1?localdatacenter=DC1", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("localhost", 9042))); put(TAG_DATABASE_NAME, "Keyspace1"); put(TAG_LOCAL_DATACENTER, "DC1"); }}), - Arguments.of("jdbc:cassandra://localhost/Keyspace1?localdatacenter=DC1&debug=true" + Arguments.of("jdbc:cassandra://127.0.0.1/Keyspace1?localdatacenter=DC1&debug=true" + "&retries=5&requesttimeout=3000&loadbalancing=com.company.package.CustomLBPolicy" + "&retry=com.company.package.CustomRetryPolicy&reconnection=ConstantReconnectionPolicy()", - new HashMap() {{ - put(TAG_SERVER_NAME, "localhost"); - put(TAG_PORT_NUMBER, "9042"); + new HashMap() {{ + put(TAG_CONTACT_POINTS, Collections.singletonList(ContactPoint.of("127.0.0.1", 9042))); put(TAG_DATABASE_NAME, "Keyspace1"); put(TAG_LOCAL_DATACENTER, "DC1"); put(TAG_DEBUG, "true"); @@ -137,17 +137,46 @@ static Stream buildUrlParsingTestCases() { put(TAG_RETRY_POLICY, "com.company.package.CustomRetryPolicy"); put(TAG_RECONNECT_POLICY, "ConstantReconnectionPolicy()"); put(TAG_REQUEST_TIMEOUT, "3000"); + }}), + Arguments.of("jdbc:cassandra://host1--host2", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Arrays.asList(ContactPoint.of("host1", 9042), + ContactPoint.of("host2", 9042))); + }}), + Arguments.of("jdbc:cassandra://host1--host2:9043", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Arrays.asList(ContactPoint.of("host1", 9043), + ContactPoint.of("host2", 9043))); + }}), + Arguments.of("jdbc:cassandra://host1:9042--host2:9043", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Arrays.asList(ContactPoint.of("host1", 9042), + ContactPoint.of("host2", 9043))); + }}), + Arguments.of("jdbc:cassandra://host1:9042--host2--host3:9043", + new HashMap() {{ + put(TAG_CONTACT_POINTS, Arrays.asList(ContactPoint.of("host1", 9042), + ContactPoint.of("host2", 9043), ContactPoint.of("host3", 9043))); }}) ); } + @SuppressWarnings("unchecked") @ParameterizedTest @MethodSource("buildUrlParsingTestCases") void givenJdbcUrl_whenParseUrl_returnExpectedProperties(final String jdbcUrl, - final Map expectedProperties) + final Map expectedProperties) throws SQLException { final Properties result = parseURL(jdbcUrl); - expectedProperties.forEach((key, value) -> assertEquals(value, result.getProperty(key))); + expectedProperties.forEach((key, value) -> { + if (TAG_CONTACT_POINTS.equals(key) && value instanceof List) { + final List expectedContactPoints = (List) value; + assertThat((List) result.get(key), + containsInAnyOrder(expectedContactPoints.toArray(new ContactPoint[0]))); + } else { + assertEquals(value, result.getProperty(key)); + } + }); } static Stream buildReconnectionPolicyParsingTestCases() { @@ -194,12 +223,21 @@ void givenReconnectionPolicyString_whenParsePolicy_returnExpectedOptions( @Test void testCreateSubName() throws Exception { - final String jdbcUrl = "jdbc:cassandra://localhost:9042/Keyspace1?consistency=QUORUM&version=3.0.0"; + final String jdbcUrl = "jdbc:cassandra://localhost:9042/Keyspace1?consistency=QUORUM"; final Properties props = parseURL(jdbcUrl); final String result = createSubName(props); assertEquals(jdbcUrl, PROTOCOL + 
result); } + @Test + void testCreateSubNameWithMultipleContactPoints() throws Exception { + final String jdbcUrl = "jdbc:cassandra://host1:9042--host2--host3:9043/Keyspace1?consistency=QUORUM"; + final Properties props = parseURL(jdbcUrl); + final String result = createSubName(props); + assertEquals("jdbc:cassandra://host1:9042--host2:9043--host3:9043/Keyspace1?consistency=QUORUM", + PROTOCOL + result); + } + @Test void testCreateSubNameWithoutParams() throws Exception { final String jdbcUrl = "jdbc:cassandra://localhost:9042/Keyspace1"; @@ -220,6 +258,13 @@ void testNullHost() { assertEquals(HOST_IN_URL, exception.getMessage()); } + @Test + void testInvalidPort() { + final SQLNonTransientConnectionException exception = assertThrows(SQLNonTransientConnectionException.class, + () -> parseURL("jdbc:cassandra://localhost:badPort")); + assertEquals(String.format(INVALID_CONTACT_POINT, "localhost:badPort"), exception.getMessage()); + } + @Test void testInvalidKeyspaceName() { final String invalidKeyspaceName = "bad-keyspace"; @@ -236,23 +281,15 @@ void testNotNullUserInfo() { } @Test - void testCreateSubNameWithoutHost() throws Exception { + void testCreateSubNameWithoutContactPoints() throws Exception { final String jdbcUrl = "jdbc:cassandra://localhost:9042/Keyspace1"; final Properties props = parseURL(jdbcUrl); - props.remove(TAG_SERVER_NAME); + props.remove(TAG_CONTACT_POINTS); final SQLNonTransientConnectionException exception = assertThrows(SQLNonTransientConnectionException.class, () -> createSubName(props)); assertEquals(HOST_REQUIRED, exception.getMessage()); } - @Test - void testCreateSubNameWithInvalidPortNumber() throws Exception { - final String jdbcUrl = "jdbc:cassandra://localhost/Keyspace1"; - final Properties props = parseURL(jdbcUrl); - props.put(TAG_PORT_NUMBER, "-9042"); - assertThrows(SQLNonTransientConnectionException.class, () -> createSubName(props)); - } - @ParameterizedTest @ValueSource(strings = {"jdbc:cassandra:dbaas:///astra", "jdbc:cassandra:dbaas:///astra?user=User1"}) void testMissingSecureConnectBundleOnDbaasConenctionString(final String jdbcUrl) { @@ -268,13 +305,29 @@ void testGetDriverProperty() { } @Test - void testParseVersion() { - assertEquals(0, parseVersion(StringUtils.EMPTY, 0)); - assertEquals(0, parseVersion("1.0.0", 3)); - assertEquals(0, parseVersion("1.0.0", -1)); - assertEquals(1, parseVersion("1.2.3", 0)); - assertEquals(2, parseVersion("1.2.3", 1)); - assertEquals(3, parseVersion("1.2.3", 2)); - assertEquals(0, parseVersion("1.a", 1)); + void testSafeParseVersion() { + assertEquals(Semver.ZERO, safeParseVersion(StringUtils.EMPTY)); + assertEquals(Semver.ZERO, safeParseVersion("alpha")); + assertEquals(Semver.parse("1.0.0"), safeParseVersion("1")); + assertEquals(Semver.parse("1.0.0"), safeParseVersion("1.0")); + assertEquals(Semver.parse("1.2.3"), safeParseVersion("1.2.3")); + } + + @Test + void testExistsInDatabaseVersion() { + assertTrue(existsInDatabaseVersion(CASSANDRA_4, new BasicVersionedMetadata("TEST"))); + assertTrue(existsInDatabaseVersion(CASSANDRA_5, new BasicVersionedMetadata("TEST", CASSANDRA_4))); + assertFalse(existsInDatabaseVersion(CASSANDRA_5, new BasicVersionedMetadata("TEST", null, CASSANDRA_5))); + assertFalse(existsInDatabaseVersion(CASSANDRA_4, new BasicVersionedMetadata("TEST", CASSANDRA_5))); + } + + @Test + void testBuildMetadataList() { + assertEquals("a,b,d", buildMetadataList(Arrays.asList( + new BasicVersionedMetadata("a"), + new BasicVersionedMetadata("d", CASSANDRA_4), + new BasicVersionedMetadata("b", null, 
CASSANDRA_5), + new BasicVersionedMetadata("c", CASSANDRA_5) + ), CASSANDRA_4)); } } diff --git a/src/test/java/com/ing/data/cassandra/jdbc/VectorsDseContainerTest.java b/src/test/java/com/ing/data/cassandra/jdbc/VectorsDseContainerTest.java index 6bc816c..fe2f607 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/VectorsDseContainerTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/VectorsDseContainerTest.java @@ -27,7 +27,7 @@ class VectorsDseContainerTest extends UsingDseContainerTest { @BeforeAll static void finalizeSetUpTests() throws Exception { initializeContainer(); - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test diff --git a/src/test/java/com/ing/data/cassandra/jdbc/VectorsUnitTest.java b/src/test/java/com/ing/data/cassandra/jdbc/VectorsUnitTest.java index 2e76e29..df87675 100644 --- a/src/test/java/com/ing/data/cassandra/jdbc/VectorsUnitTest.java +++ b/src/test/java/com/ing/data/cassandra/jdbc/VectorsUnitTest.java @@ -14,8 +14,8 @@ package com.ing.data.cassandra.jdbc; import com.datastax.oss.driver.api.core.data.CqlVector; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.sql.ResultSet; @@ -29,15 +29,13 @@ /** * Test CQL Vector data type */ -// FIXME: Implement vector testing when Cassandra 5.0 is available. -@Disabled class VectorsUnitTest extends UsingCassandraContainerTest { private static final String KEYSPACE = "test_keyspace_vect"; @BeforeAll static void finalizeSetUpTests() throws Exception { - initConnection(KEYSPACE, "version=3.0.0", "localdatacenter=datacenter1"); + initConnection(KEYSPACE, "localdatacenter=datacenter1"); } @Test @@ -61,12 +59,25 @@ void givenVectorInsertStatement_whenExecute_insertExpectedValues() throws Except assertEquals(8, intsVector.get(2)); final CqlVector floatsVector = ((CassandraResultSet) resultSet).getVector(2); assertEquals(4, floatsVector.size()); - assertEquals(2.1, floatsVector.get(0)); - assertEquals(3.7, floatsVector.get(1)); - assertEquals(9.0, floatsVector.get(2)); - assertEquals(5.5, floatsVector.get(2)); + assertEquals(2.1f, floatsVector.get(0)); + assertEquals(3.7f, floatsVector.get(1)); + assertEquals(9.0f, floatsVector.get(2)); + assertEquals(5.5f, floatsVector.get(3)); statement.close(); } + @Test + void givenVectorTable_whenSimilaritySearch_shouldReturnResults() throws Exception { + final CassandraPreparedStatement prepStatement = sqlConnection.prepareStatement( + "SELECT product_id, product_vector," + + "similarity_dot_product(product_vector,[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]) as similarity " + + "FROM pet_supply_vectors ORDER BY product_vector ANN OF [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] " + + "LIMIT 2"); + java.sql.ResultSet rs = prepStatement.executeQuery(); + Assertions.assertTrue(rs.next()); + Assertions.assertNotNull(rs.getObject("product_vector")); + Assertions.assertEquals(3.0d, rs.getDouble("similarity")); + } + } diff --git a/src/test/resources/initEmbeddedCassandra.cql b/src/test/resources/initEmbeddedCassandra.cql index 0a8d1a6..ed96d90 100644 --- a/src/test/resources/initEmbeddedCassandra.cql +++ b/src/test/resources/initEmbeddedCassandra.cql @@ -27,6 +27,24 @@ t3iValue int, PRIMARY KEY(keyname, t3iValue)) WITH comment = 'Third table in the keyspace'; +CREATE TABLE tbl_test_timestamps ( +keyname text PRIMARY KEY, +col_ts timestamp); + +INSERT INTO tbl_test_timestamps (keyname, 
col_ts) VALUES('key1', '2023-11-01T11:30:25.789+0100');
+
+CREATE TABLE tbl_test_blobs (
+keyname text PRIMARY KEY,
+col_blob blob);
+
+INSERT INTO tbl_test_blobs (keyname, col_blob) VALUES('key1', textAsBlob('testValueAsClobInUtf8 with accents: Äîéè'));
+
+CREATE TABLE tbl_test_texts (
+keyname text PRIMARY KEY,
+col_ascii ascii);
+
+INSERT INTO tbl_test_texts (keyname, col_ascii) VALUES('key1', 'testValueAscii');
+
 CREATE TYPE CustomType1 (
 key1 int,
 value1 text,
@@ -60,6 +78,9 @@ t2bValue boolean,
 t2iValue int)
 WITH comment = 'Second table in the keyspace';
+CREATE INDEX IF NOT EXISTS int_values_idx
+ON cf_test2 (t2iValue);
+
 CREATE TYPE type_in_different_ks (
 t_key int,
 t_value text,
@@ -156,6 +177,20 @@ col_inet inet,
 col_duration duration,
 col_uuid uuid);
+CREATE TABLE test_ps_blobs (
+col_key text PRIMARY KEY,
+col_blob blob);
+
+CREATE TABLE test_ps_texts (
+col_key text PRIMARY KEY,
+col_text text);
+
+CREATE TABLE test_ps_datetimes (
+col_key text PRIMARY KEY,
+col_time time,
+col_date date,
+col_ts timestamp);
+
 /* Init keyspaces, UDTs and tables for JsonSupportUnitTest */
 DROP KEYSPACE IF EXISTS test_json_support;
 CREATE KEYSPACE "test_json_support" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
@@ -222,10 +257,37 @@ varintValue: 4321
 DROP KEYSPACE IF EXISTS test_keyspace_vect;
 CREATE KEYSPACE "test_keyspace_vect" WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
-/* FIXME: Uncomment this script part when Cassandra 5.0 is available.
 USE test_keyspace_vect;
 CREATE TABLE vectors_test (
 keyValue int PRIMARY KEY,
 intsVector vector<int, 3>,
 floatsVector vector<float, 4>);
-*/
+
+CREATE TABLE IF NOT EXISTS pet_supply_vectors (
+    product_id TEXT PRIMARY KEY,
+    product_name TEXT,
+    product_vector vector<float, 14>
+);
+
+/* No similarity search without the SAI INDEX. */
+CREATE CUSTOM INDEX IF NOT EXISTS idx_vector
+ON pet_supply_vectors(product_vector)
+USING 'StorageAttachedIndex';
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pf1843','HealthyFresh - Chicken raw dog food',[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pf1844','HealthyFresh - Beef raw dog food',[1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]);
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pt0021','Dog Tennis Ball Toy',[0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]);
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pt0041','Dog Ring Chew Toy',[0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0]);
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pf7043','PupperSausage Bacon dog Treats',[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1]);
+
+INSERT INTO pet_supply_vectors (product_id, product_name, product_vector)
+VALUES ('pf7044','PupperSausage Beef dog Treats',[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0]);
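
A minimal usage sketch (not itself part of the patch above): the URL parsing tests in UtilsUnitTest exercise the new multi-contact-point syntax added for feature request #41, and the snippet below illustrates how such a URL could be used through the plain JDBC API. The host names, keyspace and datacenter are placeholders, and the port fallback described in the comments is inferred from the expectations in buildUrlParsingTestCases, so treat it as an assumption rather than documented behaviour.

// Hypothetical example, assuming the cassandra-jdbc-wrapper JAR is on the classpath.
// Contact points are separated by "--"; a host given without a port appears to take
// the next explicitly specified port (host2 resolves to 9043 below), or 9042 by default.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class MultiContactPointsExample {
    public static void main(final String[] args) throws Exception {
        final String url = "jdbc:cassandra://host1:9042--host2--host3:9043/my_keyspace"
            + "?localdatacenter=DC1&consistency=QUORUM";
        try (final Connection connection = DriverManager.getConnection(url);
             final Statement statement = connection.createStatement();
             final ResultSet rs = statement.executeQuery("SELECT release_version FROM system.local")) {
            if (rs.next()) {
                System.out.println("Connected to Cassandra " + rs.getString("release_version"));
            }
        }
    }
}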