From 580cdc1c7028de84db712e5ba006d350e81ff689 Mon Sep 17 00:00:00 2001
From: Leo Cai
Date: Sun, 5 Nov 2023 15:09:01 +0800
Subject: [PATCH 1/5] Add type tests and fix all tests on both Windows and
 Linux

---
 .clang-format                                 |   4 +-
 .gitignore                                    |   5 +
 CMakeSettings.json                            |  39 ++++
 driver/api/odbc.cpp                           | 100 ++++-----
 driver/escaping/escape_sequences.cpp          |  48 ++--
 driver/escaping/function_declare.h            |  34 +--
 driver/statement.cpp                          |   6 +-
 driver/test/column_bindings_it.cpp            |  24 +-
 driver/test/common_utils.h                    |   2 +-
 driver/test/datetime_it.cpp                   |  51 ++---
 driver/test/escape_sequences_ut.cpp           | 152 ++++++-------
 driver/test/misc_it.cpp                       |  14 +-
 driver/test/nano_it.cpp                       |  59 ++---
 driver/test/performance_it.cpp                |  16 +-
 .../test/statement_parameter_bindings_it.cpp  |  46 ++--
 driver/test/statement_parameters_it.cpp       |  10 +-
 driver/utils/type_info.cpp                    | 212 +++++++++---------
 driver/utils/type_parser.cpp                  |  10 +-
 packaging/RegConfig.patch.wxs                 |   4 +-
 test/CMakeLists.txt                           |   3 +-
 test/mssql.linked.server.sql                  |  38 +++-
 test/parameterized/parameterized/datatypes.py | 110 ++++-----
 .../parameterized/parameterized/funcvalues.py |  12 +-
 test/parameterized/parameterized/sanity.py    |  34 +--
 test/test.sh                                  | 197 ++++++++--------
 tests-pyodbc/__init__.py                      |   0
 tests-pyodbc/config.yaml                      |   6 +
 tests-pyodbc/requirements.txt                 |   3 +
 tests-pyodbc/test_suites/conftest.py          |  10 +
 tests-pyodbc/test_suites/test_types.py        |  25 +++
 tests-pyodbc/test_suites/type_testsuites.py   |  68 ++++++
 tests-pyodbc/test_suites/utils.py             |   6 +
 32 files changed, 775 insertions(+), 573 deletions(-)
 create mode 100644 tests-pyodbc/__init__.py
 create mode 100644 tests-pyodbc/config.yaml
 create mode 100644 tests-pyodbc/requirements.txt
 create mode 100644 tests-pyodbc/test_suites/conftest.py
 create mode 100644 tests-pyodbc/test_suites/test_types.py
 create mode 100644 tests-pyodbc/test_suites/type_testsuites.py
 create mode 100644 tests-pyodbc/test_suites/utils.py

diff --git a/.clang-format b/.clang-format
index c0e4e7b..f67cb3d 100644
--- a/.clang-format
+++ b/.clang-format
@@ -21,11 +21,11 @@ BreakConstructorInitializersBeforeComma: false
 Cpp11BracedListStyle: true
 ColumnLimit: 140
 ConstructorInitializerAllOnOneLineOrOnePerLine: true
-ConstructorInitializerIndentWidth: 60
+#ConstructorInitializerIndentWidth: 60
 ExperimentalAutoDetectBinPacking: true
 UseTab: Never
 TabWidth: 4
-IndentWidth: 4
+#IndentWidth: 4
 Standard: Cpp11
 PointerAlignment: Middle
 MaxEmptyLinesToKeep: 2
diff --git a/.gitignore b/.gitignore
index ea8cc43..35a53e2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,14 @@
 /build-*

 .vs/
+**/.vs/
 CMakeFiles/
 CMakeCache.txt

 __pycache__/
 *.py[cod]
+
+.idea/
+**/.idea/
+.vscode/
+**/.vscode/
diff --git a/CMakeSettings.json b/CMakeSettings.json
index fbf8000..746e53f 100644
--- a/CMakeSettings.json
+++ b/CMakeSettings.json
@@ -1,5 +1,44 @@
 {
     "configurations": [
+        {
+            "name": "vs2022-x64-Debug",
+            "generator": "Visual Studio 17 2022 Win64",
+            "configurationType": "Debug",
+            "buildRoot": "${projectDir}\\build-win-${name}",
+            "installRoot": "${projectDir}\\build-win-${name}\\prefix",
+            "cmakeCommandArgs": "",
+            "buildCommandArgs": "-v:d",
+            "ctestCommandArgs": "",
+            "inheritEnvironments": [
+                "msvc_x64_x64"
+            ]
+        },
+        {
+            "name": "vs2022-x64-Release",
+            "generator": "Visual Studio 17 2022 Win64",
+            "configurationType": "Release",
+            "buildRoot": "${projectDir}\\build-win-${name}",
+            "installRoot": "${projectDir}\\build-win-${name}\\prefix",
+            "cmakeCommandArgs": "",
+            "buildCommandArgs": "-v:d",
+            "ctestCommandArgs": "",
+            "inheritEnvironments": [
+                "msvc_x64_x64"
+            ]
+
}, + { + "name": "vs2022-x64-RelWithDebInfo", + "generator": "Visual Studio 17 2022 Win64", + "configurationType": "RelWithDebInfo", + "buildRoot": "${projectDir}\\build-win-${name}", + "installRoot": "${projectDir}\\build-win-${name}\\prefix", + "cmakeCommandArgs": "", + "buildCommandArgs": "-v:d", + "ctestCommandArgs": "", + "inheritEnvironments": [ + "msvc_x64_x64" + ] + }, { "name": "vs2019-x64-Debug", "generator": "Visual Studio 16 2019 Win64", diff --git a/driver/api/odbc.cpp b/driver/api/odbc.cpp index 4b6d9c0..b863708 100755 --- a/driver/api/odbc.cpp +++ b/driver/api/odbc.cpp @@ -858,37 +858,37 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLTables)( // Get a list of all databases. if (catalog == SQL_ALL_CATALOGS && schema.empty() && table.empty()) { - query << " CAST(name, 'Nullable(String)') AS TABLE_CAT,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_SCHEM,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_NAME,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_TYPE,"; - query << " CAST(NULL, 'Nullable(String)') AS REMARKS"; + query << " cast(name, 'nullable(string)') AS TABLE_CAT,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_SCHEM,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_NAME,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_TYPE,"; + query << " cast(NULL, 'nullable(string)') AS REMARKS"; query << " FROM system.databases"; } // Get a list of all schemas (currently, just an empty list). else if (catalog.empty() && schema == SQL_ALL_SCHEMAS && table.empty()) { - query << " CAST(NULL, 'Nullable(String)') AS TABLE_CAT,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_SCHEM,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_NAME,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_TYPE,"; - query << " CAST(NULL, 'Nullable(String)') AS REMARKS"; + query << " cast(NULL, 'nullable(string)') AS TABLE_CAT,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_SCHEM,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_NAME,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_TYPE,"; + query << " cast(NULL, 'nullable(string)') AS REMARKS"; query << " WHERE (1 == 0)"; } // Get a list of all valid table types (currently, 'TABLE' only.) else if (catalog.empty() && schema.empty() && table.empty() && table_type_list == SQL_ALL_TABLE_TYPES) { - query << " CAST(NULL, 'Nullable(String)') AS TABLE_CAT,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_SCHEM,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_NAME,"; - query << " CAST('TABLE', 'Nullable(String)') AS TABLE_TYPE,"; - query << " CAST(NULL, 'Nullable(String)') AS REMARKS"; + query << " cast(NULL, 'nullable(string)') AS TABLE_CAT,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_SCHEM,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_NAME,"; + query << " cast('TABLE', 'nullable(string)') AS TABLE_TYPE,"; + query << " cast(NULL, 'nullable(string)') AS REMARKS"; } // Get a list of tables matching all criteria. 
else { - query << " CAST(database, 'Nullable(String)') AS TABLE_CAT,"; - query << " CAST(NULL, 'Nullable(String)') AS TABLE_SCHEM,"; - query << " CAST(name, 'Nullable(String)') AS TABLE_NAME,"; - query << " CAST('TABLE', 'Nullable(String)') AS TABLE_TYPE,"; - query << " CAST(NULL, 'Nullable(String)') AS REMARKS"; + query << " cast(database, 'nullable(string)') AS TABLE_CAT,"; + query << " cast(NULL, 'nullable(string)') AS TABLE_SCHEM,"; + query << " cast(name, 'nullable(string)') AS TABLE_NAME,"; + query << " cast('TABLE', 'nullable(string)') AS TABLE_TYPE,"; + query << " cast(NULL, 'nullable(string)') AS REMARKS"; query << " FROM system.tables"; query << " WHERE (1 == 1)"; @@ -905,28 +905,28 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLTables)( // Note, that 'catalog' variable will be set to "%" above (or to the connected database name), even if CatalogName == nullptr. if (is_pattern && !is_odbc_v2) { if (!isMatchAnythingCatalogFnPatternArg(catalog)) - query << " AND isNotNull(TABLE_CAT) AND coalesce(TABLE_CAT, '') LIKE '" << escapeForSQL(catalog) << "'"; + query << " AND is_not_null(TABLE_CAT) AND coalesce(TABLE_CAT, '') LIKE '" << escapeForSQL(catalog) << "'"; } else if (CatalogName) { - query << " AND isNotNull(TABLE_CAT) AND coalesce(TABLE_CAT, '') == '" << escapeForSQL(catalog) << "'"; + query << " AND is_not_null(TABLE_CAT) AND coalesce(TABLE_CAT, '') == '" << escapeForSQL(catalog) << "'"; } // Note, that 'schema' variable will be set to "%" above, even if SchemaName == nullptr. if (is_pattern) { if (!isMatchAnythingCatalogFnPatternArg(schema)) - query << " AND isNotNull(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') LIKE '" << escapeForSQL(schema) << "'"; + query << " AND is_not_null(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') LIKE '" << escapeForSQL(schema) << "'"; } else if (SchemaName) { - query << " AND isNotNull(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') == '" << escapeForSQL(schema) << "'"; + query << " AND is_not_null(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') == '" << escapeForSQL(schema) << "'"; } // Note, that 'table' variable will be set to "%" above, even if TableName == nullptr. if (is_pattern) { if (!isMatchAnythingCatalogFnPatternArg(table)) - query << " AND isNotNull(TABLE_NAME) AND coalesce(TABLE_NAME, '') LIKE '" << escapeForSQL(table) << "'"; + query << " AND is_not_null(TABLE_NAME) AND coalesce(TABLE_NAME, '') LIKE '" << escapeForSQL(table) << "'"; } else if (TableName) { - query << " AND isNotNull(TABLE_NAME) AND coalesce(TABLE_NAME, '') == '" << escapeForSQL(table) << "'"; + query << " AND is_not_null(TABLE_NAME) AND coalesce(TABLE_NAME, '') == '" << escapeForSQL(table) << "'"; } // Table type list is not affected by the value of SQL_ATTR_METADATA_ID, so we always treat it as a list of patterns. @@ -936,7 +936,7 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLTables)( has_match_anything = has_match_anything || isMatchAnythingCatalogFnPatternArg(table_type); } if (!has_match_anything) { - query << " AND isNotNull(TABLE_TYPE) AND (1 == 0"; + query << " AND is_not_null(TABLE_TYPE) AND (1 == 0"; for (const auto & table_type : table_types) { query << " OR coalesce(TABLE_TYPE, '') LIKE '" << escapeForSQL(table_type) << "'"; } @@ -989,12 +989,12 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLColumns)( if (convertUnparametrizedTypeNameToTypeId(tmp_column_info.type_without_parameters) == DataSourceTypeId::Unknown) { // Interpret all unknown types as String. 
- tmp_column_info.type_without_parameters = "String"; + tmp_column_info.type_without_parameters = "string"; } } else { // Interpret all unparsable types as String. - tmp_column_info.type_without_parameters = "String"; + tmp_column_info.type_without_parameters = "string"; } tmp_column_info.updateTypeInfo(); @@ -1060,19 +1060,19 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLColumns)( // Note, that 'catalog' variable will be set to "%" above (or to the connected database name), even if CatalogName == nullptr. if (is_pattern) { if (!isMatchAnythingCatalogFnPatternArg(catalog)) - query << " AND isNotNull(TABLE_CAT) AND coalesce(TABLE_CAT, '') LIKE '" << escapeForSQL(catalog) << "'"; + query << " AND is_not_null(TABLE_CAT) AND coalesce(TABLE_CAT, '') LIKE '" << escapeForSQL(catalog) << "'"; } else if (CatalogName) { - query << " AND isNotNull(TABLE_CAT) AND coalesce(TABLE_CAT, '') == '" << escapeForSQL(catalog) << "'"; + query << " AND is_not_null(TABLE_CAT) AND coalesce(TABLE_CAT, '') == '" << escapeForSQL(catalog) << "'"; } // Note, that 'schema' variable will be set to "%" above, even if SchemaName == nullptr. if (is_pattern) { if (!isMatchAnythingCatalogFnPatternArg(schema)) - query << " AND isNotNull(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') LIKE '" << escapeForSQL(schema) << "'"; + query << " AND is_not_null(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') LIKE '" << escapeForSQL(schema) << "'"; } else if (SchemaName) { - query << " AND isNotNull(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') == '" << escapeForSQL(schema) << "'"; + query << " AND is_not_null(TABLE_SCHEM) AND coalesce(TABLE_SCHEM, '') == '" << escapeForSQL(schema) << "'"; } // Note, that 'table' variable will be set to "%" above, even if TableName == nullptr. @@ -1126,40 +1126,40 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLGetTypeInfo)( " '" << info.sql_type_name << "' AS TYPE_NAME" - ", toInt16(" + ", to_int16(" << info.sql_type << ") AS DATA_TYPE" - ", toInt32(" + ", to_int32(" << info.column_size << ") AS COLUMN_SIZE" ", '' AS LITERAL_PREFIX" ", '' AS LITERAL_SUFFIX" ", '' AS CREATE_PARAMS" /// TODO - ", toInt16(" + ", to_int16(" << SQL_NO_NULLS << ") AS NULLABLE" - ", toInt16(" + ", to_int16(" << SQL_TRUE << ") AS CASE_SENSITIVE" - ", toInt16(" + ", to_int16(" << SQL_SEARCHABLE << ") AS SEARCHABLE" - ", toInt16(" + ", to_int16(" << info.is_unsigned << ") AS UNSIGNED_ATTRIBUTE" - ", toInt16(" + ", to_int16(" << SQL_FALSE << ") AS FIXED_PREC_SCALE" - ", toInt16(" + ", to_int16(" << SQL_FALSE << ") AS AUTO_UNIQUE_VALUE" ", TYPE_NAME AS LOCAL_TYPE_NAME" - ", toInt16(0) AS MINIMUM_SCALE" - ", toInt16(0) AS MAXIMUM_SCALE" + ", to_int16(0) AS MINIMUM_SCALE" + ", to_int16(0) AS MAXIMUM_SCALE" ", DATA_TYPE AS SQL_DATA_TYPE" - ", toInt16(0) AS SQL_DATETIME_SUB" - ", toInt32(10) AS NUM_PREC_RADIX" /// TODO - ", toInt16(0) AS INTERVAL_PRECISION"; + ", to_int16(0) AS SQL_DATETIME_SUB" + ", to_int32(10) AS NUM_PREC_RADIX" /// TODO + ", to_int16(0) AS INTERVAL_PRECISION"; }; for (const auto & name_info : types_g) { @@ -1172,15 +1172,15 @@ SQLRETURN SQL_API EXPORTED_FUNCTION_MAYBE_W(SQLGetTypeInfo)( // are SQL_TYPE_DATE, SQL_TYPE_TIME, and SQL_TYPE_TIMESTAMP, respectively; // in ODBC 2.x, the data types are SQL_DATE, SQL_TIME, and SQL_TIMESTAMP. 
{ - auto info = statement.getTypeInfo("Date", "Date"); + auto info = statement.getTypeInfo("date", "date"); info.sql_type = SQL_DATE; - add_query_for_type("Date", info); + add_query_for_type("date", info); } { - auto info = statement.getTypeInfo("DateTime", "DateTime"); + auto info = statement.getTypeInfo("datetime", "datetime"); info.sql_type = SQL_TIMESTAMP; - add_query_for_type("DateTime", info); + add_query_for_type("datetime", info); } query << ") ORDER BY DATA_TYPE"; diff --git a/driver/escaping/escape_sequences.cpp b/driver/escaping/escape_sequences.cpp index 1bc19d7..5c0430e 100644 --- a/driver/escaping/escape_sequences.cpp +++ b/driver/escaping/escape_sequences.cpp @@ -16,17 +16,17 @@ using namespace std; namespace { const std::map fn_convert_map { - {"SQL_TINYINT", "toUInt8"}, - {"SQL_SMALLINT", "toUInt16"}, - {"SQL_INTEGER", "toInt32"}, - {"SQL_BIGINT", "toInt64"}, - {"SQL_REAL", "toFloat32"}, - {"SQL_DOUBLE", "toFloat64"}, - {"SQL_VARCHAR", "toString"}, - {"SQL_DATE", "toDate"}, - {"SQL_TYPE_DATE", "toDate"}, - {"SQL_TIMESTAMP", "toDateTime"}, - {"SQL_TYPE_TIMESTAMP", "toDateTime"}, + {"SQL_TINYINT", "to_uint8"}, + {"SQL_SMALLINT", "to_uint16"}, + {"SQL_INTEGER", "to_int32"}, + {"SQL_BIGINT", "to_int64"}, + {"SQL_REAL", "to_float32"}, + {"SQL_DOUBLE", "to_float64"}, + {"SQL_VARCHAR", "to_string"}, + {"SQL_DATE", "to_date"}, + {"SQL_TYPE_DATE", "to_date"}, + {"SQL_TIMESTAMP", "to_datetime"}, + {"SQL_TYPE_TIMESTAMP", "to_datetime"}, }; #define DECLARE2(TOKEN, NAME) \ @@ -56,14 +56,14 @@ const std::map literal_map { const std::map timeadd_func_map { // {Token::SQL_TSI_FRAC_SECOND, ""}, - {Token::SQL_TSI_SECOND, "addSeconds"}, - {Token::SQL_TSI_MINUTE, "addMinutes"}, - {Token::SQL_TSI_HOUR, "addHours"}, - {Token::SQL_TSI_DAY, "addDays"}, - {Token::SQL_TSI_WEEK, "addWeeks"}, - {Token::SQL_TSI_MONTH, "addMonths"}, - {Token::SQL_TSI_QUARTER, "addQuarters"}, - {Token::SQL_TSI_YEAR, "addYears"}, + {Token::SQL_TSI_SECOND, "add_seconds"}, + {Token::SQL_TSI_MINUTE, "add_minutes"}, + {Token::SQL_TSI_HOUR, "add_hours"}, + {Token::SQL_TSI_DAY, "add_days"}, + {Token::SQL_TSI_WEEK, "add_weeks"}, + {Token::SQL_TSI_MONTH, "add_months"}, + {Token::SQL_TSI_QUARTER, "add_quarters"}, + {Token::SQL_TSI_YEAR, "add_years"}, }; @@ -248,7 +248,7 @@ string processFunction(const StringView seq, Lexer & lex) { if (param.empty()) return seq.to_string(); lex.Consume(); - return "replaceRegexpOne(" + param + ", '^\\\\s+', '')"; + return "replace_regexp_one(" + param + ", '^\\\\s+', '')"; } else if (fn.type == Token::DAYOFWEEK) { if (!lex.Match(Token::LPARENT)) @@ -258,7 +258,7 @@ string processFunction(const StringView seq, Lexer & lex) { if (param.empty()) return seq.to_string(); lex.Consume(); - return "if(toDayOfWeek(" + param + ") = 7, 1, toDayOfWeek(" + param + ") + 1)"; + return "if(to_day_of_week(" + param + ") = 7, 1, to_day_of_week(" + param + ") + 1)"; /* } else if (fn.type == Token::DAYOFYEAR) { // Supported by ClickHouse since 18.13.0 if (!lex.Match(Token::LPARENT)) @@ -268,7 +268,7 @@ string processFunction(const StringView seq, Lexer & lex) { if (param.empty()) return seq.to_string(); lex.Consume(); - return "( toRelativeDayNum(" + param + ") - toRelativeDayNum(toStartOfYear(" + param + ")) + 1 )"; + return "( to_relative_day_num(" + param + ") - to_relative_day_num(to_start_of_year(" + param + ")) + 1 )"; */ } else if (function_map_strip_params.find(fn.type) != function_map_strip_params.end()) { string result = function_map_strip_params.at(fn.type); @@ -316,7 +316,7 @@ string processDate(const 
StringView seq, Lexer & lex) { if (data.isInvalid()) { return seq.to_string(); } else { - return string("toDate(") + data.literal.to_string() + ")"; + return string("to_date(") + data.literal.to_string() + ")"; } } @@ -357,7 +357,7 @@ string processDateTime(const StringView seq, Lexer & lex) { if (data.isInvalid()) { return seq.to_string(); } else { - return string("toDateTime(") + removeMilliseconds(data.literal) + ")"; + return string("to_datetime(") + removeMilliseconds(data.literal) + ")"; } } diff --git a/driver/escaping/function_declare.h b/driver/escaping/function_declare.h index 3174e2a..823d421 100644 --- a/driver/escaping/function_declare.h +++ b/driver/escaping/function_declare.h @@ -34,13 +34,13 @@ // ASCII // BIT_LENGTH // CHAR - DECLARE2(CHAR_LENGTH, "lengthUTF8"), - DECLARE2(CHARACTER_LENGTH, "lengthUTF8"), + DECLARE2(CHAR_LENGTH, "length_utf8"), + DECLARE2(CHARACTER_LENGTH, "length_utf8"), DECLARE2(CONCAT, "concat"), // DIFFERENCE // INSERT - DECLARE2(LCASE, "lowerUTF8"), - DECLARE2(LOWER, "lowerUTF8"), + DECLARE2(LCASE, "lower_utf8"), + DECLARE2(LOWER, "lower_utf8"), // LEFT substring(s, 0, length) DECLARE2(LENGTH, "lengthUTF8"), DECLARE2(LOCATE, "" /* "position" */), // special handling @@ -49,37 +49,37 @@ DECLARE2(OCTET_LENGTH, "length"), // POSITION // REPEAT - DECLARE2(REPLACE, "replaceAll"), + DECLARE2(REPLACE, "replace_all"), // RIGHT // RTRIM // SOUNDEX // SPACE DECLARE2(SUBSTRING, "substringUTF8"), - DECLARE2(UCASE, "upperUTF8"), - DECLARE2(UPPER, "upperUTF8"), + DECLARE2(UCASE, "upper_utf8"), + DECLARE2(UPPER, "upper_utf8"), // Date DECLARE2(CURRENT_TIMESTAMP, ""), // special handling DECLARE2(CURDATE, "today"), DECLARE2(CURRENT_DATE, "today"), - DECLARE2(DAYOFMONTH, "toDayOfMonth"), + DECLARE2(DAYOFMONTH, "to_day_of_month"), DECLARE2(DAYOFWEEK, "" /* "toDayOfWeek" */), // special handling - DECLARE2(DAYOFYEAR, " toDayOfYear"), // Supported by ClickHouse since 18.13.0 + DECLARE2(DAYOFYEAR, " to_day_of_year"), // Supported by ClickHouse since 18.13.0 DECLARE2(EXTRACT, "EXTRACT"), // Do not touch extract inside {fn ... } - DECLARE2(HOUR, "toHour"), - DECLARE2(MINUTE, "toMinute"), - DECLARE2(MONTH, "toMonth"), + DECLARE2(HOUR, "to_hour"), + DECLARE2(MINUTE, "to_minute"), + DECLARE2(MONTH, "to_month"), DECLARE2(NOW, "now"), - DECLARE2(SECOND, "toSecond"), + DECLARE2(SECOND, "to_second"), DECLARE2(TIMESTAMPADD, ""), // special handling - DECLARE2(TIMESTAMPDIFF, "dateDiff"), + DECLARE2(TIMESTAMPDIFF, "date_diff"), DECLARE2(WEEK, "toISOWeek"), - DECLARE2(SQL_TSI_QUARTER, "toQuarter"), - DECLARE2(YEAR, "toYear"), + DECLARE2(SQL_TSI_QUARTER, "to_quarter"), + DECLARE2(YEAR, "to_year"), // DECLARE2(DATABASE, ""), - DECLARE2(IFNULL, "ifNull"), + DECLARE2(IFNULL, "if_null"), // DECLARE2(USER, ""), // TODO. 
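
Taken together, the escape_sequences.cpp and function_declare.h changes above retarget the ODBC escape-sequence rewriter from ClickHouse's camelCase built-ins (toInt64, addYears, ...) to proton's snake_case equivalents (to_int64, add_years, ...). A minimal sketch of the observable behavior, assuming replaceEscapeSequences() is declared in driver/escaping/escape_sequences.h (the same entry point the unit tests later in this patch call); the expected strings are copied verbatim from those tests:

    #include <cassert>
    #include <string>
    #include "driver/escaping/escape_sequences.h"

    int main() {
        // {fn CONVERT(..., SQL_BIGINT)} now rewrites to to_int64 instead of toInt64.
        assert(replaceEscapeSequences("SELECT {fn CONVERT(1, SQL_BIGINT)}") == "SELECT to_int64(1)");
        // {d ...} date literals rewrite to to_date.
        assert(replaceEscapeSequences("SELECT {d '2017-01-01'}") == "SELECT to_date('2017-01-01')");
        // TIMESTAMPADD with SQL_TSI_YEAR rewrites to the add_* family, here add_years.
        assert(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_YEAR, 1, {fn CURDATE()})}")
            == "SELECT add_years(today(), 1)");
        return 0;
    }
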
diff --git a/driver/statement.cpp b/driver/statement.cpp index fd9fac6..b77debe 100755 --- a/driver/statement.cpp +++ b/driver/statement.cpp @@ -169,8 +169,8 @@ void Statement::requestNextPackOfResultSets(std::unique_ptr && mu } result_reader = make_result_reader( - response->get("X-ClickHouse-Format", connection.default_format), - response->get("X-ClickHouse-Timezone", Poco::Timezone::name()), + response->get("X-proton-Format", connection.default_format), + response->get("X-proton-Timezone", Poco::Timezone::name()), *in, std::move(mutator) ); @@ -292,7 +292,7 @@ std::string Statement::buildFinalQuery(const std::vector& para std::string param_type; if (param_bindings.size() <= i) { - param_type = "Nullable(Nothing)"; + param_type = "nullable(nothing)"; } else { const auto & binding_info = param_bindings[i]; diff --git a/driver/test/column_bindings_it.cpp b/driver/test/column_bindings_it.cpp index b86fa3c..ae458f6 100755 --- a/driver/test/column_bindings_it.cpp +++ b/driver/test/column_bindings_it.cpp @@ -34,12 +34,12 @@ TEST_P(ColumnArrayBindingsTest, ColumnWise) { const std::size_t total_rows_expected = std::get<1>(GetParam()); const std::string query_orig = R"SQL( SELECT - CAST(number, 'Int32') AS col1, - CAST(CAST(number, 'String'), 'FixedString(30)') AS col2, - CAST(number, 'Float64') AS col3, - CAST(if((number % 8) = 3, NULL, repeat('x', number % 41)), 'Nullable(String)') AS col4, - CAST(number, 'UInt64') AS col5, - CAST(number, 'Float32') AS col6 + cast(number, 'int32') AS col1, + cast(cast(number, 'string'), 'fixed_string(30)') AS col2, + cast(number, 'float64') AS col3, + cast(if((number % 8) = 3, NULL, repeat('x', number % 41)), 'nullable(string)') AS col4, + cast(number, 'uint64') AS col5, + cast(number, 'float32') AS col6 FROM numbers( )SQL" + std::to_string(total_rows_expected) + ")"; @@ -279,12 +279,12 @@ TEST_P(ColumnArrayBindingsTest, RowWise) { const std::size_t total_rows_expected = std::get<1>(GetParam()); const std::string query_orig = R"SQL( SELECT - CAST(number, 'Int32') AS col1, - CAST(CAST(number, 'String'), 'FixedString(30)') AS col2, - CAST(number, 'Float64') AS col3, - CAST(if((number % 8) = 3, NULL, repeat('x', number % 41)), 'Nullable(String)') AS col4, - CAST(number, 'UInt64') AS col5, - CAST(number, 'Float32') AS col6 + cast(number, 'int32') AS col1, + cast(cast(number, 'string'), 'fixed_string(30)') AS col2, + cast(number, 'float64') AS col3, + cast(if((number % 8) = 3, NULL, repeat('x', number % 41)), 'nullable(string)') AS col4, + cast(number, 'uint64') AS col5, + cast(number, 'float32') AS col6 FROM numbers( )SQL" + std::to_string(total_rows_expected) + ")"; diff --git a/driver/test/common_utils.h b/driver/test/common_utils.h index 69c07e7..9dce2b8 100644 --- a/driver/test/common_utils.h +++ b/driver/test/common_utils.h @@ -49,7 +49,7 @@ inline std::optional get_env_var(const std::string & name) { if (Poco::Environment::has(name)) { - return Poco::Environment::get("TZ"); + return Poco::Environment::get(name); } return {}; diff --git a/driver/test/datetime_it.cpp b/driver/test/datetime_it.cpp index 0f4059c..0413911 100755 --- a/driver/test/datetime_it.cpp +++ b/driver/test/datetime_it.cpp @@ -170,54 +170,55 @@ INSTANTIATE_TEST_SUITE_P( MiscellaneousTest, DateTime, ::testing::Values( - DateTimeParams{"Date", "ODBCDriver2", "Europe/Moscow", - "toDate('2020-03-25')", SQL_TYPE_DATE, + DateTimeParams{"Date", "ODBCDriver2", "UTC-3", + "to_date('2020-03-25')", SQL_TYPE_DATE, "2020-03-25", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 0, 0, 0, 0} }, - 
DateTimeParams{"DateTime", "ODBCDriver2", "Europe/Moscow", - "toDateTime('2020-03-25 12:11:22')", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime", "ODBCDriver2", "UTC-3", + "to_datetime('2020-03-25 12:11:22')", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 0} }, - DateTimeParams{"DateTime_TZ", "ODBCDriver2", "Europe/Moscow", - "toDateTime('2020-03-25 12:11:22', 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime_TZ", "ODBCDriver2", "UTC-3", + "to_datetime('2020-03-25 12:11:22', 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 0} }, - DateTimeParams{"DateTime64_0", "ODBCDriver2", "Europe/Moscow", - "toDateTime64('2020-03-25 12:11:22.123456789', 0)", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime64_0", "ODBCDriver2", "UTC-3", + "to_datetime64('2020-03-25 12:11:22.123456789', 0)", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 0} }, - DateTimeParams{"DateTime64_4", "ODBCDriver2", "Europe/Moscow", - "toDateTime64('2020-03-25 12:11:22.123456789', 4)", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime64_4", "ODBCDriver2", "UTC-3", + "to_datetime64('2020-03-25 12:11:22.123456789', 4)", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22.1234", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 123400000} }, - DateTimeParams{"DateTime64_9", "ODBCDriver2", "Europe/Moscow", - "toDateTime64('2020-03-25 12:11:22.123456789', 9)", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime64_9", "ODBCDriver2", "UTC-3", + "to_datetime64('2020-03-25 12:11:22.123456789', 9)", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22.123456789", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 123456789} }, - DateTimeParams{"DateTime64_9_TZ", "ODBCDriver2", "Europe/Moscow", - "toDateTime64('2020-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP, + DateTimeParams{"DateTime64_9_TZ", "ODBCDriver2", "UTC-3", + "to_datetime64('2020-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP, "2020-03-25 12:11:22.123456789", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 12, 11, 22, 123456789} }, - // TODO: remove this once the formats behave identically. 
-
-        DateTimeParams{"Date", "RowBinaryWithNamesAndTypes", "Europe/Moscow",
-            "toDate('2020-03-25')", SQL_TYPE_DATE,
+        // Windows does not support Europe/Moscow, but both Windows and Linux support UTC-3.
+        DateTimeParams{"Date", "RowBinaryWithNamesAndTypes", "UTC-3",
+            "to_date('2020-03-25')", SQL_TYPE_DATE,
             "2020-03-25", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 0, 0, 0, 0}
         },
-        DateTimeParams{"DateTime_TZ", "RowBinaryWithNamesAndTypes", "Europe/Moscow",
-            "toDateTime('2020-03-25 12:11:22', 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
+        DateTimeParams{"DateTime_TZ", "RowBinaryWithNamesAndTypes", "UTC-3",
+            "to_datetime('2020-03-25 12:11:22', 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
             "2020-03-25 09:26:22", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 9, 26, 22, 0}
         },
-        DateTimeParams{"DateTime64_9_TZ", "RowBinaryWithNamesAndTypes", "Europe/Moscow",
-            "toDateTime64('2020-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
+        DateTimeParams{"DateTime64_9_TZ", "RowBinaryWithNamesAndTypes", "UTC-3",
+            "to_datetime64('2020-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
             "2020-03-25 09:26:22.123456789", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 9, 26, 22, 123456789}
-        }/*,
+        }
+
+        /*,
         // TODO: uncomment once the target ClickHouse server is 21.4+
-        DateTimeParams{"DateTime64_9_TZ_pre_epoch", "RowBinaryWithNamesAndTypes", "Europe/Moscow",
-            "toDateTime64('1955-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
+        DateTimeParams{"DateTime64_9_TZ_pre_epoch", "RowBinaryWithNamesAndTypes", "UTC-3",
+            "to_datetime64('1955-03-25 12:11:22.123456789', 9, 'Asia/Kathmandu')", SQL_TYPE_TIMESTAMP,
             "1955-03-25 09:26:22.123456789", SQL_TIMESTAMP_STRUCT{1955, 3, 25, 9, 26, 22, 123456789}
         }
         */
diff --git a/driver/test/escape_sequences_ut.cpp b/driver/test/escape_sequences_ut.cpp
index c415779..897d5a4 100644
--- a/driver/test/escape_sequences_ut.cpp
+++ b/driver/test/escape_sequences_ut.cpp
@@ -3,90 +3,90 @@
 #include

 TEST(EscapeSequencesCase, ParseIdent1) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(abc, SQL_BIGINT)})"),
-        "SELECT SUM(toInt64(abc))");
+    ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(abc, SQL_BIGINT)})"),
+        "SELECT sum(to_int64(abc))");
 }

 TEST(EscapeSequencesCase, ParseIdent2) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`abc`, SQL_BIGINT)})"),
-        "SELECT SUM(toInt64(`abc`))");
+    ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`abc`, SQL_BIGINT)})"),
+        "SELECT sum(to_int64(`abc`))");
 }

 TEST(EscapeSequencesCase, ParseIdent3) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`0 a b $ c`, SQL_BIGINT)})"),
-        "SELECT SUM(toInt64(`0 a b $ c`))");
+    ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`0 a b $ c`, SQL_BIGINT)})"),
+        "SELECT sum(to_int64(`0 a b $ c`))");
 }

 TEST(EscapeSequencesCase, ParseIdent4) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(abc.`0 a b $ c`, SQL_BIGINT)})"),
-        "SELECT SUM(toInt64(abc.`0 a b $ c`))");
+    ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(abc.`0 a b $ c`, SQL_BIGINT)})"),
+        "SELECT sum(to_int64(abc.`0 a b $ c`))");
 }

 TEST(EscapeSequencesCase, ParseIdent5) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`0 a b $ c`.abc, SQL_BIGINT)})"),
-        "SELECT SUM(toInt64(`0 a b $ c`.abc))");
+    ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`0 a b $ c`.abc, SQL_BIGINT)})"),
+        "SELECT sum(to_int64(`0 a b $ c`.abc))");
 }

 TEST(EscapeSequencesCase, ParseIdent6) {
-    ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(abc.`0 a b $ c`.abc,
SQL_BIGINT)})"), - "SELECT SUM(toInt64(abc.`0 a b $ c`.abc))"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(abc.`0 a b $ c`.abc, SQL_BIGINT)})"), + "SELECT sum(to_int64(abc.`0 a b $ c`.abc))"); } TEST(EscapeSequencesCase, ParseIdent7) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`0 a b $ c`.abc.`0 a b $ c`, SQL_BIGINT)})"), - "SELECT SUM(toInt64(`0 a b $ c`.abc.`0 a b $ c`))"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`0 a b $ c`.abc.`0 a b $ c`, SQL_BIGINT)})"), + "SELECT sum(to_int64(`0 a b $ c`.abc.`0 a b $ c`))"); } TEST(EscapeSequencesCase, ParseIdent_Negative1) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(0 a b $ c, SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(0 a b $ c, SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(0 a b $ c, SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(0 a b $ c, SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseIdent_Negative2) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(.abc, SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(.abc, SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(.abc, SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(.abc, SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseIdent_Negative3) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(.`abc`, SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(.`abc`, SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(.`abc`, SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(.`abc`, SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseIdent_Negative4) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(abc., SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(abc., SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(abc., SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(abc., SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseIdent_Negative5) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`abc`., SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(`abc`., SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`abc`., SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(`abc`., SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseIdent_Negative6) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(abc..abc, SQL_BIGINT)})"), - "SELECT SUM({fn CONVERT(abc..abc, SQL_BIGINT)})"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(abc..abc, SQL_BIGINT)})"), + "SELECT sum({fn CONVERT(abc..abc, SQL_BIGINT)})"); } TEST(EscapeSequencesCase, ParseConvert1) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT(1, SQL_BIGINT)}"), "SELECT toInt64(1)"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT(1, SQL_BIGINT)}"), "SELECT to_int64(1)"); } TEST(EscapeSequencesCase, ParseConvert2) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT(-1.2, SQL_BIGINT)}"), "SELECT toInt64(-1.2)"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT(-1.2, SQL_BIGINT)}"), "SELECT to_int64(-1.2)"); } TEST(EscapeSequencesCase, ParseConvert3) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(amount, SQL_BIGINT)})"), "SELECT SUM(toInt64(amount))"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(amount, SQL_BIGINT)})"), "SELECT sum(to_int64(amount))"); } TEST(EscapeSequencesCase, ParseConvert4) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)})"), - "SELECT SUM(toInt64(Custom_SQL_Query.amount))"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn 
CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)})"), + "SELECT sum(to_int64(Custom_SQL_Query.amount))"); } TEST(EscapeSequencesCase, ParseConvert5) { - ASSERT_EQ(replaceEscapeSequences("SELECT SUM({fn CONVERT(`Custom_SQL_Query`.`amount`, SQL_BIGINT)})"), - "SELECT SUM(toInt64(`Custom_SQL_Query`.`amount`))"); + ASSERT_EQ(replaceEscapeSequences("SELECT sum({fn CONVERT(`Custom_SQL_Query`.`amount`, SQL_BIGINT)})"), + "SELECT sum(to_int64(`Custom_SQL_Query`.`amount`))"); } TEST(EscapeSequencesCase, ParseConvert6_0) { @@ -94,12 +94,12 @@ TEST(EscapeSequencesCase, ParseConvert6_0) { } TEST(EscapeSequencesCase, ParseConvert6) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT({fn ROUND(1.1 + 2.4, 1)}, SQL_BIGINT)}"), "SELECT toInt64(round(1.1 + 2.4, 1))"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT({fn ROUND(1.1 + 2.4, 1)}, SQL_BIGINT)}"), "SELECT to_int64(round(1.1 + 2.4, 1))"); } TEST(EscapeSequencesCase, ParseConvert6_1) { ASSERT_EQ(replaceEscapeSequences("SELECT {fn CONVERT( {fn ROUND( 1.1 + 2.4 , 1 ) } , SQL_BIGINT ) }"), - "SELECT toInt64(round( 1.1 + 2.4 , 1 ) )"); + "SELECT to_int64(round( 1.1 + 2.4 , 1 ) )"); } @@ -154,10 +154,10 @@ TEST(EscapeSequencesCase, ParseAbs4) { } TEST(EscapeSequencesCase, ParseTruncate) { - ASSERT_EQ(replaceEscapeSequences("SELECT CAST({fn TRUNCATE(1.1 + 2.4, 1)} AS INTEGER) AS `yr_date_ok`"), - "SELECT CAST(trunc(1.1 + 2.4, 1) AS INTEGER) AS `yr_date_ok`"); + ASSERT_EQ(replaceEscapeSequences("SELECT cast({fn TRUNCATE(1.1 + 2.4, 1)} AS INTEGER) AS `yr_date_ok`"), + "SELECT cast(trunc(1.1 + 2.4, 1) AS INTEGER) AS `yr_date_ok`"); //ASSERT_EQ( - // replaceEscapeSequences("SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0)} AS INTEGER) AS `yr_date_ok`"), + // replaceEscapeSequences("SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0)} AS INTEGER) AS `yr_date_ok`"), // "TODO: convert extract() function" //); } @@ -168,53 +168,53 @@ TEST(EscapeSequencesCase, ParseCurdate1) { TEST(EscapeSequencesCase, ParseTimestampdiff2) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,CAST(`test`.`odbc1`.`datetime` AS DATE),{fn CURDATE()} )}"), - "SELECT dateDiff('day',CAST(`test`.`odbc1`.`datetime` AS DATE),today() )"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,cast(`test`.`odbc1`.`datetime` AS DATE),{fn CURDATE()} )}"), + "SELECT date_diff('day',cast(`test`.`odbc1`.`datetime` AS DATE),today() )"); } TEST(EscapeSequencesCase, Parsetimestampdiff) { ASSERT_EQ(replaceEscapeSequences( - "SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,CAST(`activity`.`min_activation_yabrowser` AS DATE),CAST(`activity`.`date` AS " - "DATE))} AS `Calculation_503558746242125826`, SUM({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`"), - "SELECT dateDiff('day',CAST(`activity`.`min_activation_yabrowser` AS DATE),CAST(`activity`.`date` AS DATE)) AS " - "`Calculation_503558746242125826`, SUM(toInt64(1)) AS `sum_Number_of_Records_ok`"); + "SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,cast(`activity`.`min_activation_yabrowser` AS DATE),cast(`activity`.`date` AS " + "DATE))} AS `Calculation_503558746242125826`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`"), + "SELECT date_diff('day',cast(`activity`.`min_activation_yabrowser` AS DATE),cast(`activity`.`date` AS DATE)) AS " + "`Calculation_503558746242125826`, sum(to_int64(1)) AS `sum_Number_of_Records_ok`"); } TEST(EscapeSequencesCase, ParseTimestampadd1) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_YEAR, 1, {fn 
CURDATE()})}"), "SELECT addYears(today(), 1)"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_YEAR, 1, {fn CURDATE()})}"), "SELECT add_years(today(), 1)"); } TEST(EscapeSequencesCase, ParseTimestampadd2) { ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD( SQL_TSI_YEAR , 1 , {fn CURDATE() } ) }"), - "SELECT addYears(today() , 1)"); + "SELECT add_years(today() , 1)"); } TEST(EscapeSequencesCase, ParseTimestampadd3) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,1,CAST({fn CURRENT_TIMESTAMP(0)} AS DATE))}"), - "SELECT addDays(CAST(now() AS DATE), 1)"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,1,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}"), + "SELECT add_days(cast(now() AS DATE), 1)"); } TEST(EscapeSequencesCase, ParseTimestampadd4) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD( SQL_TSI_DAY , 1 , CAST( {fn CURRENT_TIMESTAMP( 0 ) } AS DATE ) ) } "), - "SELECT addDays(CAST( now() AS DATE ), 1) "); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD( SQL_TSI_DAY , 1 , cast( {fn CURRENT_TIMESTAMP( 0 ) } AS DATE ) ) } "), + "SELECT add_days(cast( now() AS DATE ), 1) "); } TEST(EscapeSequencesCase, ParseTimestampadd5) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_DAY, CAST(CAST(1 AS DATE) AS DATE), 1)}"), - "SELECT addDays(1, CAST(CAST(1 AS DATE) AS DATE))"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_DAY, cast(cast(1 AS DATE) AS DATE), 1)}"), + "SELECT add_days(1, cast(cast(1 AS DATE) AS DATE))"); } TEST(EscapeSequencesCase, ParseTimestampadd6) { ASSERT_EQ( replaceEscapeSequences( - "SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(({fn MOD(({fn DAYOFWEEK(CAST(`publishers_report`.`install_date` AS DATE))}), 7)})),1)}"), - "SELECT addDays(1, ((modulo((if(toDayOfWeek(CAST(`publishers_report`.`install_date` AS DATE)) = 7, 1, " - "toDayOfWeek(CAST(`publishers_report`.`install_date` AS DATE)) + 1)), 7))))"); + "SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(({fn MOD(({fn DAYOFWEEK(cast(`publishers_report`.`install_date` AS DATE))}), 7)})),1)}"), + "SELECT add_days(1, ((modulo((if(to_day_of_week(cast(`publishers_report`.`install_date` AS DATE)) = 7, 1, " + "to_day_of_week(cast(`publishers_report`.`install_date` AS DATE)) + 1)), 7))))"); } TEST(EscapeSequencesCase, ParseTimestampadd7) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_HOUR,EXTRACT(HOUR FROM `abc`.`xyz`),CAST(`klm`.`nop` AS DATE))}"), - "SELECT addHours(CAST(`klm`.`nop` AS DATE), EXTRACT(HOUR FROM `abc`.`xyz`))"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn TIMESTAMPADD(SQL_TSI_HOUR,EXTRACT(HOUR FROM `abc`.`xyz`),cast(`klm`.`nop` AS DATE))}"), + "SELECT add_hours(cast(`klm`.`nop` AS DATE), EXTRACT(HOUR FROM `abc`.`xyz`))"); } TEST(EscapeSequencesCase, ParseCurrentTimestamp1) { @@ -226,31 +226,31 @@ TEST(EscapeSequencesCase, ParseCurrentTimestamp2) { TEST(EscapeSequencesCase, ParseExtract1) { ASSERT_EQ(replaceEscapeSequences( - "SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`"), - "SELECT CAST(trunc(EXTRACT(YEAR FROM `odbc1`.`date`),0) AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`"); + "SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`"), + "SELECT cast(trunc(EXTRACT(YEAR FROM `odbc1`.`date`),0) AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`"); } TEST(EscapeSequencesCase, ParseExtract2) { ASSERT_EQ( - replaceEscapeSequences("SELECT CAST({fn 
TRUNCATE(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0)} AS INTEGER) AS `yr_date_ok`"), - "SELECT CAST(trunc(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0) AS INTEGER) AS `yr_date_ok`" - //"SELECT CAST(trunc(toYear(`Custom_SQL_Query`.`date`), 0) AS INTEGER) AS `yr_date_ok`" + replaceEscapeSequences("SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0)} AS INTEGER) AS `yr_date_ok`"), + "SELECT cast(trunc(EXTRACT(YEAR FROM `Custom_SQL_Query`.`date`),0) AS INTEGER) AS `yr_date_ok`" + //"SELECT cast(trunc(toYear(`Custom_SQL_Query`.`date`), 0) AS INTEGER) AS `yr_date_ok`" ); } TEST(EscapeSequencesCase, ParseQuarter) { ASSERT_EQ(replaceEscapeSequences("SELECT {fn QUARTER(`Custom_SQL_Query`.`date`)} AS `qr_sentDate_ok`"), - "SELECT toQuarter(`Custom_SQL_Query`.`date`) AS `qr_sentDate_ok`"); + "SELECT to_quarter(`Custom_SQL_Query`.`date`) AS `qr_sentDate_ok`"); } TEST(EscapeSequencesCase, ParseDayOfWeek1) { ASSERT_EQ(replaceEscapeSequences("SELECT {fn DAYOFWEEK(`Custom_SQL_Query`.`date`)} AS `dw_sentDate_ok`"), - "SELECT if(toDayOfWeek(`Custom_SQL_Query`.`date`) = 7, 1, toDayOfWeek(`Custom_SQL_Query`.`date`) + 1) AS `dw_sentDate_ok`"); + "SELECT if(to_day_of_week(`Custom_SQL_Query`.`date`) = 7, 1, to_day_of_week(`Custom_SQL_Query`.`date`) + 1) AS `dw_sentDate_ok`"); } TEST(EscapeSequencesCase, ParseDayOfWeek2) { - ASSERT_EQ(replaceEscapeSequences("SELECT {fn DAYOFWEEK(CAST('2018-04-15' AS DATE))}, 1, 'sun'"), - "SELECT if(toDayOfWeek(CAST('2018-04-15' AS DATE)) = 7, 1, toDayOfWeek(CAST('2018-04-15' AS DATE)) + 1), 1, 'sun'"); + ASSERT_EQ(replaceEscapeSequences("SELECT {fn DAYOFWEEK(cast('2018-04-15' AS DATE))}, 1, 'sun'"), + "SELECT if(to_day_of_week(cast('2018-04-15' AS DATE)) = 7, 1, to_day_of_week(cast('2018-04-15' AS DATE)) + 1), 1, 'sun'"); } @@ -263,30 +263,30 @@ TEST(EscapeSequencesCase, ParseCurrentTimestamp) { TEST(EscapeSequencesCase, ParseComplexDateExpr) { ASSERT_EQ( replaceEscapeSequences( - "SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),CAST(`EmailDataD`.`sentDate` AS DATE))}),0)} AS INTEGER) AS `yr_Calculation_ok`"), - "SELECT CAST(trunc(toYear(addDays(CAST(`EmailDataD`.`sentDate` AS DATE), (-1 *(toDayOfWeek(`EmailDataD`.`sentDate`) - 1)))), 0) AS INTEGER) AS `yr_Calculation_ok`" + "SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),cast(`EmailDataD`.`sentDate` AS DATE))}),0)} AS INTEGER) AS `yr_Calculation_ok`"), + "SELECT cast(trunc(toYear(add_days(cast(`EmailDataD`.`sentDate` AS DATE), (-1 *(to_day_of_week(`EmailDataD`.`sentDate`) - 1)))), 0) AS INTEGER) AS `yr_Calculation_ok`" ); } TEST(EscapeSequencesCase, ParseComplexDateExpr2) { ASSERT_EQ( replaceEscapeSequences( - "WHERE (({fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),CAST(`EmailDataD`.`sentDate` AS DATE))} >= {ts '2017-01-01 00:00:00'}) AND ({fn TIMESTAMPADD(SQL_TSI_DAY,(-1 *({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),CAST(`EmailDataD`.`sentDate` AS DATE))} < {ts '2018-01-01 00:00:00'}))"), - "WHERE ((addDays(CAST(`EmailDataD`.`sentDate` AS DATE), (-1 *(toDayOfWeek(`EmailDataD`.`sentDate`) - 1))) >= toDateTime('2017-01-01 00:00:00')) AND (addDays(CAST(`EmailDataD`.`sentDate` AS DATE), (-1 *(toDayOfWeek(`EmailDataD`.`sentDate`) - 1))) < toDateTime('2018-01-01 00:00:00')))" + "WHERE (({fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),cast(`EmailDataD`.`sentDate` AS DATE))} >= {ts 
'2017-01-01 00:00:00'}) AND ({fn TIMESTAMPADD(SQL_TSI_DAY,(-1 *({fn DAYOFWEEK(`EmailDataD`.`sentDate`)} - 1)),cast(`EmailDataD`.`sentDate` AS DATE))} < {ts '2018-01-01 00:00:00'}))"), + "WHERE ((add_days(cast(`EmailDataD`.`sentDate` AS DATE), (-1 *(to_day_of_week(`EmailDataD`.`sentDate`) - 1))) >= to_dateTime('2017-01-01 00:00:00')) AND (add_days(cast(`EmailDataD`.`sentDate` AS DATE), (-1 *(to_day_of_week(`EmailDataD`.`sentDate`) - 1))) < to_dateTime('2018-01-01 00:00:00')))" ); } */ TEST(EscapeSequencesCase, DateTime) { - ASSERT_EQ(replaceEscapeSequences("SELECT {d '2017-01-01'}"), "SELECT toDate('2017-01-01')"); + ASSERT_EQ(replaceEscapeSequences("SELECT {d '2017-01-01'}"), "SELECT to_date('2017-01-01')"); - ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017-01-01 10:01:01'}"), "SELECT toDateTime('2017-01-01 10:01:01')"); + ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017-01-01 10:01:01'}"), "SELECT to_datetime('2017-01-01 10:01:01')"); // We cutting off milliseconds from timestamp because CH server // doesn't support them. - ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017-01-01 10:01:01.555'}"), "SELECT toDateTime('2017-01-01 10:01:01')"); + ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017-01-01 10:01:01.555'}"), "SELECT to_datetime('2017-01-01 10:01:01')"); // Strange date format. Symbols after last dot shouldn't be cutted off. - ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017.01.01 10:01:01'}"), "SELECT toDateTime('2017.01.01 10:01:01')"); + ASSERT_EQ(replaceEscapeSequences("SELECT {ts '2017.01.01 10:01:01'}"), "SELECT to_datetime('2017.01.01 10:01:01')"); } TEST(EscapeSequencesCase, LOCATE) { @@ -295,10 +295,10 @@ TEST(EscapeSequencesCase, LOCATE) { } TEST(EscapeSequencesCase, LCASE) { - ASSERT_EQ(replaceEscapeSequences("{fn LCASE(`dm_ExperimentsData`.`Campaign`)}"), "lowerUTF8(`dm_ExperimentsData`.`Campaign`)"); + ASSERT_EQ(replaceEscapeSequences("{fn LCASE(`dm_ExperimentsData`.`Campaign`)}"), "lower_utf8(`dm_ExperimentsData`.`Campaign`)"); } TEST(EscapeSequencesCase, LTRIM) { ASSERT_EQ(replaceEscapeSequences("{fn LTRIM(`dm_ExperimentsData`.`Campaign`)}"), - "replaceRegexpOne(`dm_ExperimentsData`.`Campaign`, '^\\\\s+', '')"); + "replace_regexp_one(`dm_ExperimentsData`.`Campaign`, '^\\\\s+', '')"); } diff --git a/driver/test/misc_it.cpp b/driver/test/misc_it.cpp index 2263b80..1d3a636 100755 --- a/driver/test/misc_it.cpp +++ b/driver/test/misc_it.cpp @@ -19,9 +19,11 @@ TEST_F(MiscellaneousTest, RowArraySizeAttribute) { rc = ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetStmtAttr(hstmt, SQL_ATTR_ROW_ARRAY_SIZE, &size, sizeof(size), 0)); ASSERT_EQ(size, 1); } - + + // Microsoft ODBC will throw "HY090 Invalid string or buffer length" when setting SQL_ATTR_ROW_ARRAY_SIZE to 0. 
+ { - size = 0; + size = 3; rc = ODBC_CALL_ON_STMT_THROW(hstmt, SQLSetStmtAttr(hstmt, SQL_ATTR_ROW_ARRAY_SIZE, (SQLPOINTER)size, 0)); ASSERT_EQ(rc, SQL_SUCCESS); } @@ -29,7 +31,7 @@ TEST_F(MiscellaneousTest, RowArraySizeAttribute) { { size = 123; rc = ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetStmtAttr(hstmt, SQL_ATTR_ROW_ARRAY_SIZE, &size, sizeof(size), 0)); - ASSERT_EQ(size, 0); + ASSERT_EQ(size, 3); } { @@ -59,7 +61,7 @@ TEST_F(MiscellaneousTest, RowArraySizeAttribute) { TEST_F(MiscellaneousTest, SQLGetData_ZeroOutputBufferSize) { const std::string col_str = "1234567890"; - const std::string query_orig = "SELECT CAST('" + col_str + "', 'String') AS col"; + const std::string query_orig = "SELECT cast('" + col_str + "', 'string') AS col"; const auto query = fromUTF8(query_orig); auto * query_wptr = const_cast(query.c_str()); @@ -274,7 +276,7 @@ TEST_P(HugeIntTypeReporting, Check) { const auto cs = "DSN=" + dsn + ";" + cs_extras; connect(cs); - const auto query_orig = "SELECT CAST('0', '" + type + "') AS col"; + const auto query_orig = "SELECT cast('0', '" + type + "') AS col"; const auto query = fromUTF8(query_orig); auto * query_wptr = const_cast(query.c_str()); @@ -293,7 +295,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values( // TODO: uncomment each type once its support is implemented. - "UInt64"/*, "Int128", "UInt128", "Int256", "UInt256"*/ + "uint64"/*, "Int128", "UInt128", "Int256", "UInt256"*/ ), ::testing::Values( diff --git a/driver/test/nano_it.cpp b/driver/test/nano_it.cpp index 7006764..047a9ff 100644 --- a/driver/test/nano_it.cpp +++ b/driver/test/nano_it.cpp @@ -121,54 +121,54 @@ void run_test(nanodbc::string const & connection_string) { show(results); } - execute(connection, NANODBC_TEXT("drop table if exists simple_test;")); - execute(connection, NANODBC_TEXT("create table simple_test (a int) engine Log;")); - execute(connection, NANODBC_TEXT("insert into simple_test values (1);")); - execute(connection, NANODBC_TEXT("insert into simple_test values (2);")); + execute(connection, NANODBC_TEXT("drop stream if exists simple_test;")); + execute(connection, NANODBC_TEXT("create stream simple_test (a int);")); + execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (1);")); + execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (2);")); { - auto results = execute(connection, NANODBC_TEXT("select * from simple_test;")); + auto results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test where _tp_time > earliest_ts() limit 2;")); show(results); } - execute(connection, NANODBC_TEXT("CREATE DATABASE IF NOT EXISTS test;")); - execute(connection, NANODBC_TEXT("DROP TABLE IF EXISTS test.strings;")); - execute(connection, NANODBC_TEXT("CREATE TABLE test.strings (id UInt64, str String, dt DateTime DEFAULT now()) engine = Log;")); - execute(connection, NANODBC_TEXT("INSERT INTO test.strings SELECT number, hex(number+100000), 1 FROM system.numbers LIMIT 100;")); + execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS default.strings;")); + execute(connection, NANODBC_TEXT("CREATE STREAM default.strings (id uint64, str string, dt datetime DEFAULT now());")); + execute(connection, NANODBC_TEXT("INSERT INTO default.strings (* except _tp_time) SELECT number, hex(number+100000), 1 FROM system.numbers LIMIT 100;")); { - auto results = execute(connection, NANODBC_TEXT("SELECT COUNT(*) FROM test.strings;")); + auto results = execute(connection, NANODBC_TEXT("SELECT count(*) FROM default.strings where _tp_time > 
earliest_ts() limit 1;")); show(results); } { - auto results = execute(connection, NANODBC_TEXT("SELECT * FROM test.strings LIMIT 100;")); + auto results = execute(connection, NANODBC_TEXT("SELECT (* except _tp_time) FROM default.strings where _tp_time > earliest_ts() LIMIT 100;")); show(results); } { auto results = execute(connection, - NANODBC_TEXT("SELECT `test`.`strings`.`str` AS `platform`, SUM(`test`.`strings`.`id`) AS `sum_installs_ok` FROM " - "`test`.`strings` GROUP BY `str`")); + NANODBC_TEXT("SELECT `default`.`strings`.`str` AS `platform`, sum(`default`.`strings`.`id`) AS `sum_installs_ok` FROM " + "`default`.`strings` WHERE _tp_time > earliest_ts() GROUP BY `str` LIMIT 100;")); show(results); } - execute(connection, NANODBC_TEXT("DROP TABLE IF EXISTS test.strings;")); + execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS default.strings;")); } // Setup - execute(connection, NANODBC_TEXT("drop table if exists simple_test;")); - execute(connection, NANODBC_TEXT("create table simple_test (a int, b varchar) engine Log;")); + execute(connection, NANODBC_TEXT("drop stream if exists simple_test;")); + execute(connection, NANODBC_TEXT("create stream simple_test (a int, b varchar);")); // Direct execution { - execute(connection, NANODBC_TEXT("insert into simple_test values (1, 'one');")); - execute(connection, NANODBC_TEXT("insert into simple_test values (2, 'two');")); - execute(connection, NANODBC_TEXT("insert into simple_test values (3, 'tri');")); + execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (1, 'one');")); + execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (2, 'two');")); + execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (3, 'tri');")); execute(connection, NANODBC_TEXT("insert into simple_test (b) values ('z');")); - nanodbc::result results = execute(connection, NANODBC_TEXT("select * from simple_test;")); + nanodbc::result results + = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test where _tp_time > earliest_ts() limit 4;")); show(results); } // Accessing results by name, or column number { - nanodbc::result results = execute(connection, NANODBC_TEXT("select a as first, b as second from simple_test where a = 1;")); + nanodbc::result results = execute(connection, NANODBC_TEXT("select a as first, b as second from simple_test where a = 1 and _tp_time > earliest_ts() limit 1;")); results.next(); auto const value = results.get(1); cout << endl << results.get(NANODBC_TEXT("first")) << ", " << convert(value) << endl; @@ -276,7 +276,7 @@ void run_test(nanodbc::string const & connection_string) { { nanodbc::statement statement(connection); execute(connection, NANODBC_TEXT("drop table if exists batch_test;")); - execute(connection, NANODBC_TEXT("create table batch_test (x varchar, y int, z float) engine Log;")); + execute(connection, NANODBC_TEXT("create table batch_test (x varchar, y int, z float);")); prepare(statement, NANODBC_TEXT("insert into batch_test (x, x2, y, z) values (?, ?, ?, ?);")); const size_t elements = 4; @@ -304,21 +304,22 @@ void run_test(nanodbc::string const & connection_string) { // Dates and Times { - execute(connection, NANODBC_TEXT("drop table if exists date_test;")); - execute(connection, NANODBC_TEXT("create table date_test (x datetime) engine Log;")); + execute(connection, NANODBC_TEXT("drop stream if exists date_test;")); + execute(connection, NANODBC_TEXT("create stream date_test (x datetime);")); 
//execute(connection, NANODBC_TEXT("insert into date_test values (current_timestamp);")); - execute(connection, NANODBC_TEXT("insert into date_test values ({fn current_timestamp});")); + execute(connection, NANODBC_TEXT("insert into date_test (* except _tp_time) values ({fn current_timestamp});")); - nanodbc::result results = execute(connection, NANODBC_TEXT("select * from date_test;")); + nanodbc::result results + = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test where _tp_time > earliest_ts() limit 1;")); results.next(); nanodbc::date date = results.get(0); cout << endl << date.year << "-" << date.month << "-" << date.day << endl; - results = execute(connection, NANODBC_TEXT("select * from date_test;")); + results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test where _tp_time > earliest_ts() limit 1;")); show(results); - execute(connection, NANODBC_TEXT("drop table if exists date_test;")); + execute(connection, NANODBC_TEXT("drop stream if exists date_test;")); } // Inserting NULL values with a sentry @@ -364,7 +365,7 @@ void run_test(nanodbc::string const & connection_string) { } // Cleanup - execute(connection, NANODBC_TEXT("drop table if exists simple_test;")); + execute(connection, NANODBC_TEXT("drop stream if exists simple_test;")); } void show(nanodbc::result & results) { diff --git a/driver/test/performance_it.cpp b/driver/test/performance_it.cpp index 872e3bf..c278d91 100755 --- a/driver/test/performance_it.cpp +++ b/driver/test/performance_it.cpp @@ -114,7 +114,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(NoOpAPICallOverhead)) { TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchNoExtractMultiType)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('some not very long text', 'String') AS col1, CAST('12345', 'Int') AS col2, CAST('12.345', 'Float32') AS col3, CAST('-123.456789012345678', 'Float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('some not very long text', 'string') AS col1, cast('12345', 'int') AS col2, cast('12.345', 'float32') AS col3, cast('-123.456789012345678', 'float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -154,7 +154,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchNoExtractMultiType TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchGetDataMultiType)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('some not very long text', 'String') AS col1, CAST('12345', 'Int') AS col2, CAST('12.345', 'Float32') AS col3, CAST('-123.456789012345678', 'Float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('some not very long text', 'string') AS col1, cast('12345', 'int') AS col2, cast('12.345', 'float32') AS col3, cast('-123.456789012345678', 'float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -249,7 +249,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchGetDataMultiType)) TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColMultiType)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('some not very long text', 'String') AS col1, 
CAST('12345', 'Int') AS col2, CAST('12.345', 'Float32') AS col3, CAST('-123.456789012345678', 'Float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('some not very long text', 'string') AS col1, cast('12345', 'int') AS col2, cast('12.345', 'float32') AS col3, cast('-123.456789012345678', 'float64') AS col4 FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -344,7 +344,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColMultiType)) TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_ANSI_String)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('some not very long text', 'String') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('some not very long text', 'string') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -397,7 +397,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_Unicode_String)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('some not very long text', 'String') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('some not very long text', 'string') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -450,7 +450,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_Int)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('12345', 'Int') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('12345', 'int') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -503,7 +503,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_Float64)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('-123.456789012345678', 'Float64') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('-123.456789012345678', 'float64') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; @@ -556,7 +556,7 @@ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchBindColSingleType_ TEST_F(PerformanceTest, ENABLE_FOR_OPTIMIZED_BUILDS_ONLY(FetchArrayBindColSingleType_Int)) { constexpr std::size_t total_rows_expected = 10'000'000; - const std::string query_orig = "SELECT CAST('12345', 'Int') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; + const std::string query_orig = "SELECT cast('12345', 'int') AS col FROM numbers(" + std::to_string(total_rows_expected) + ")"; std::cout << "Executing query:\n\t" << query_orig << std::endl; diff --git a/driver/test/statement_parameter_bindings_it.cpp 
b/driver/test/statement_parameter_bindings_it.cpp index 1f31501..d1485c5 100755 --- a/driver/test/statement_parameter_bindings_it.cpp +++ b/driver/test/statement_parameter_bindings_it.cpp @@ -10,7 +10,7 @@ class StatementParameterBindingsTest }; TEST_F(StatementParameterBindingsTest, Missing) { - const auto query = fromUTF8("SELECT isNull(?)"); + const auto query = fromUTF8("SELECT is_null(?)"); auto * query_wptr = const_cast<SQLTCHAR *>(query.c_str()); ODBC_CALL_ON_STMT_THROW(hstmt, SQLPrepare(hstmt, query_wptr, SQL_NTS)); @@ -26,14 +26,14 @@ TEST_F(StatementParameterBindingsTest, Missing) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLINTEGER col = 0; + SQLCHAR col[256] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetData( hstmt, 1, - getCTypeFor<SQLINTEGER>(), + SQL_C_CHAR, &col, sizeof(col), &col_ind @@ -41,13 +41,15 @@ TEST_F(StatementParameterBindingsTest, Missing) { ); ASSERT_TRUE(col_ind >= 0 || col_ind == SQL_NTS); - ASSERT_EQ(col, 1); + char * col_ptr = reinterpret_cast<char *>(col); + const auto resulting_str = std::string{col_ptr, static_cast<std::size_t>(col_ind)}; + ASSERT_STRCASEEQ(resulting_str.c_str(), "true"); ASSERT_EQ(SQLFetch(hstmt), SQL_NO_DATA); } TEST_F(StatementParameterBindingsTest, NoBuffer) { - const auto query = fromUTF8("SELECT isNull(?)"); + const auto query = fromUTF8("SELECT is_null(?)"); auto * query_wptr = const_cast<SQLTCHAR *>(query.c_str()); SQLINTEGER param = 0; @@ -80,14 +82,14 @@ TEST_F(StatementParameterBindingsTest, NoBuffer) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLINTEGER col = 0; + SQLCHAR col[256] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetData( hstmt, 1, - getCTypeFor<SQLINTEGER>(), + SQL_C_CHAR, &col, sizeof(col), &col_ind @@ -95,13 +97,15 @@ TEST_F(StatementParameterBindingsTest, NoBuffer) { ); ASSERT_TRUE(col_ind >= 0 || col_ind == SQL_NTS); - ASSERT_EQ(col, 1); + char * col_ptr = reinterpret_cast<char *>(col); + const auto resulting_str = std::string{col_ptr, static_cast<std::size_t>(col_ind)}; + ASSERT_STRCASEEQ(resulting_str.c_str(), "true"); ASSERT_EQ(SQLFetch(hstmt), SQL_NO_DATA); } -TEST_F(StatementParameterBindingsTest, NullStringValueForInteger) { - const auto query = fromUTF8("SELECT isNull(?)"); +TEST_F(StatementParameterBindingsTest, DISABLED_NullStringValueForInteger) { + const auto query = fromUTF8("SELECT is_null(?)"); auto * query_wptr = const_cast<SQLTCHAR *>(query.c_str()); #if defined(_IODBCUNIX_H) @@ -146,7 +150,7 @@ TEST_F(StatementParameterBindingsTest, NullStringValueForInteger) { SQLHDESC hdesc = 0; ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetStmtAttr(hstmt, SQL_ATTR_IMP_PARAM_DESC, &hdesc, 0, NULL)); ODBC_CALL_ON_DESC_THROW(hdesc, SQLSetDescField(hdesc, 1, SQL_DESC_NULLABLE, reinterpret_cast<SQLPOINTER>(SQL_NULLABLE), 0)); - + // SQL_DESC_NULLABLE might be a read-only field in the Microsoft ODBC Driver Manager.
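// NOTE (editorial sketch, not part of the patch itself): every assertion rewritten in this file
// follows the same recipe -- the server now renders booleans as the text "true"/"false" rather
// than a UInt8 1/0, so the tests fetch the result column as SQL_C_CHAR and compare it
// case-insensitively. In isolation the step looks like this, assuming `hstmt` already holds a
// fetched row from a query such as "SELECT is_null(NULL)":
//
//     SQLCHAR col[256] = {};
//     SQLLEN col_ind = 0;
//     // Read column 1 of the current row as a narrow character string.
//     ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetData(hstmt, 1, SQL_C_CHAR, &col, sizeof(col), &col_ind));
//     // col_ind is the byte length of the returned text; build a string and compare ignoring case.
//     const std::string value{reinterpret_cast<char *>(col), static_cast<std::size_t>(col_ind)};
//     ASSERT_STRCASEEQ(value.c_str(), "true");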
ODBC_CALL_ON_STMT_THROW(hstmt, SQLExecute(hstmt)); SQLRETURN rc = SQLFetch(hstmt); @@ -159,14 +163,14 @@ TEST_F(StatementParameterBindingsTest, NullStringValueForInteger) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLINTEGER col = 0; + SQLCHAR col[256] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetData( hstmt, 1, - getCTypeFor<SQLINTEGER>(), + SQL_C_CHAR, &col, sizeof(col), &col_ind @@ -174,13 +178,15 @@ TEST_F(StatementParameterBindingsTest, NullStringValueForInteger) { ); ASSERT_TRUE(col_ind >= 0 || col_ind == SQL_NTS); - ASSERT_EQ(col, 1); + char * col_ptr = reinterpret_cast<char *>(col); + const auto resulting_str = std::string{col_ptr, static_cast<std::size_t>(col_ind)}; + ASSERT_STRCASEEQ(resulting_str.c_str(), "true"); ASSERT_EQ(SQLFetch(hstmt), SQL_NO_DATA); } -TEST_F(StatementParameterBindingsTest, NullStringValueForString) { - const auto query = fromUTF8("SELECT isNull(?)"); +TEST_F(StatementParameterBindingsTest, DISABLED_NullStringValueForString) { + const auto query = fromUTF8("SELECT is_null(?)"); auto * query_wptr = const_cast<SQLTCHAR *>(query.c_str()); #if defined(_IODBCUNIX_H) @@ -238,14 +244,14 @@ TEST_F(StatementParameterBindingsTest, NullStringValueForString) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLINTEGER col = 0; + SQLCHAR col[256] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, SQLGetData( hstmt, 1, - getCTypeFor<SQLINTEGER>(), + SQL_C_CHAR, &col, sizeof(col), &col_ind @@ -253,7 +259,9 @@ TEST_F(StatementParameterBindingsTest, NullStringValueForString) { ); ASSERT_TRUE(col_ind >= 0 || col_ind == SQL_NTS); - ASSERT_EQ(col, 1); + char * col_ptr = reinterpret_cast<char *>(col); + const auto resulting_str = std::string{col_ptr, static_cast<std::size_t>(col_ind)}; + ASSERT_STRCASEEQ(resulting_str.c_str(), "true"); ASSERT_EQ(SQLFetch(hstmt), SQL_NO_DATA); } diff --git a/driver/test/statement_parameters_it.cpp b/driver/test/statement_parameters_it.cpp index cfc2691..9c7acea 100755 --- a/driver/test/statement_parameters_it.cpp +++ b/driver/test/statement_parameters_it.cpp @@ -285,7 +285,7 @@ class ParameterColumnRoundTripAsymmetric using DISABLED_ParameterColumnRoundTripGUIDSymmetric = ParameterColumnRoundTripSymmetric; TEST_P(DISABLED_ParameterColumnRoundTripGUIDSymmetric, Execute) { - execute(GetParam(), GetParam(), type_info_for("UUID"), false/* case_sensitive */); + execute(GetParam(), GetParam(), type_info_for("uuid"), false/* case_sensitive */); } INSTANTIATE_TEST_SUITE_P(TypeConversion, DISABLED_ParameterColumnRoundTripGUIDSymmetric, @@ -301,7 +301,7 @@ INSTANTIATE_TEST_SUITE_P(TypeConversion, DISABLED_ParameterColumnRoundTripGUIDSy using ParameterColumnRoundTripNumericSymmetric = ParameterColumnRoundTripSymmetric; TEST_P(ParameterColumnRoundTripNumericSymmetric, Execute) { - execute(GetParam(), GetParam(), type_info_for("Decimal")); + execute(GetParam(), GetParam(), type_info_for("decimal")); } INSTANTIATE_TEST_SUITE_P(TypeConversion, ParameterColumnRoundTripNumericSymmetric, @@ -328,7 +328,7 @@ INSTANTIATE_TEST_SUITE_P(TypeConversion, ParameterColumnRoundTripNumericSymmetri using ParameterColumnRoundTripNumericAsymmetric = ParameterColumnRoundTripAsymmetric; TEST_P(ParameterColumnRoundTripNumericAsymmetric, Execute) { - execute(std::get<0>(GetParam()), std::get<1>(GetParam()), type_info_for("Decimal")); + execute(std::get<0>(GetParam()), std::get<1>(GetParam()), type_info_for("decimal")); } INSTANTIATE_TEST_SUITE_P(TypeConversion, ParameterColumnRoundTripNumericAsymmetric, @@ -361,8 +361,8 @@
INSTANTIATE_TEST_SUITE_P(TypeConversion, ParameterColumnRoundTripDecimalAsString "-12345", "12345.6789", "-12345.6789", - "12345.000000000000", - "12345.001002003000", + "12345", + "12345.001002003", "100000000000000000", "-100000000000000000", "1.00000000000000001", diff --git a/driver/utils/type_info.cpp b/driver/utils/type_info.cpp index 66448bd..a045973 100644 --- a/driver/utils/type_info.cpp +++ b/driver/utils/type_info.cpp @@ -5,54 +5,56 @@ #include const std::map<std::string, TypeInfo> types_g = { - {"UInt8", TypeInfo {"TINYINT", true, SQL_TINYINT, 3, 1}}, - {"UInt16", TypeInfo {"SMALLINT", true, SQL_SMALLINT, 5, 2}}, - {"UInt32", TypeInfo {"INT", true, SQL_BIGINT /* was SQL_INTEGER */, 10, 4}}, // With perl, python ODBC drivers INT is uint32 and it cant store values bigger than 2147483647: 2147483648 -> -2147483648 4294967295 -> -1 - {"UInt32", TypeInfo {"INT", true, SQL_INTEGER, 10, 4}}, - {"UInt64", TypeInfo {"BIGINT", true, SQL_BIGINT, 20, 8}}, - {"Int8", TypeInfo {"TINYINT", false, SQL_TINYINT, 1 + 3, 1}}, // one char for sign - {"Int16", TypeInfo {"SMALLINT", false, SQL_SMALLINT, 1 + 5, 2}}, - {"Int32", TypeInfo {"INT", false, SQL_INTEGER, 1 + 10, 4}}, - {"Int64", TypeInfo {"BIGINT", false, SQL_BIGINT, 1 + 19, 8}}, - {"Float32", TypeInfo {"REAL", false, SQL_REAL, 7, 4}}, - {"Float64", TypeInfo {"DOUBLE", false, SQL_DOUBLE, 15, 8}}, - {"Decimal", TypeInfo {"DECIMAL", false, SQL_DECIMAL, 1 + 2 + 38, 16}}, // -0. - {"UUID", TypeInfo {"GUID", false, SQL_GUID, 8 + 1 + 4 + 1 + 4 + 1 + 4 + 12, sizeof(SQLGUID)}}, - {"String", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, - {"FixedString", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, - {"Date", TypeInfo {"DATE", true, SQL_TYPE_DATE, 10, 6}}, - {"DateTime", TypeInfo {"TIMESTAMP", true, SQL_TYPE_TIMESTAMP, 19, 16}}, - {"DateTime64", TypeInfo {"TIMESTAMP", true, SQL_TYPE_TIMESTAMP, 29, 16}}, - {"Array", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, - {"Nothing", TypeInfo {"NULL", true, SQL_TYPE_NULL, 1, 1}}, + {"uint8", TypeInfo {"TINYINT", true, SQL_TINYINT, 3, 1}}, + {"uint16", TypeInfo {"SMALLINT", true, SQL_SMALLINT, 5, 2}}, + {"uint32", TypeInfo {"INT", true, SQL_BIGINT /* was SQL_INTEGER */, 10, 4}}, // With perl, python ODBC drivers INT is uint32 and it can't store values bigger than 2147483647: 2147483648 -> -2147483648 4294967295 -> -1 + {"uint32", TypeInfo {"INT", true, SQL_INTEGER, 10, 4}}, + {"uint64", TypeInfo {"BIGINT", true, SQL_BIGINT, 20, 8}}, + {"int8", TypeInfo {"TINYINT", false, SQL_TINYINT, 1 + 3, 1}}, // one char for sign + {"int16", TypeInfo {"SMALLINT", false, SQL_SMALLINT, 1 + 5, 2}}, + {"int32", TypeInfo {"INT", false, SQL_INTEGER, 1 + 10, 4}}, + {"int64", TypeInfo {"BIGINT", false, SQL_BIGINT, 1 + 19, 8}}, + {"float32", TypeInfo {"REAL", false, SQL_REAL, 7, 4}}, + {"float64", TypeInfo {"DOUBLE", false, SQL_DOUBLE, 15, 8}}, + {"decimal", TypeInfo {"DECIMAL", false, SQL_DECIMAL, 1 + 2 + 38, 16}}, // -0.
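// (Editorial sketch, not part of the patch): the keys of types_g are now the lowercase
// data-source type names, and std::map lookups are case-sensitive, so for example:
//
//     types_g.find("decimal") != types_g.end(); // true
//     types_g.find("Decimal") != types_g.end(); // false -- only the lowercase spelling is registered
//
// Case-insensitive matching remains available through convertUnparametrizedTypeNameToTypeId()
// below, which compares names with Poco::icompare().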
+ {"uuid", TypeInfo {"GUID", false, SQL_GUID, 8 + 1 + 4 + 1 + 4 + 1 + 4 + 12, sizeof(SQLGUID)}}, + {"string", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, + {"fixed_string", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, + {"date", TypeInfo {"DATE", true, SQL_TYPE_DATE, 10, 6}}, + {"date32", TypeInfo {"DATE", true, SQL_TYPE_DATE, 10, 6}}, + {"datetime", TypeInfo {"TIMESTAMP", true, SQL_TYPE_TIMESTAMP, 19, 16}}, + {"datetime64", TypeInfo {"TIMESTAMP", true, SQL_TYPE_TIMESTAMP, 29, 16}}, + {"array", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, + {"nothing", TypeInfo {"NULL", true, SQL_TYPE_NULL, 1, 1}}, // TODO: remove these. - {"LowCardinality(String)", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, - {"LowCardinality(FixedString)", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}} + {"low_cardinality(string)", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}}, + {"low_cardinality(fixed_string)", TypeInfo {"TEXT", true, SQL_VARCHAR, TypeInfo::string_max_size, TypeInfo::string_max_size}} }; DataSourceTypeId convertUnparametrizedTypeNameToTypeId(const std::string & type_name) { - if (Poco::icompare(type_name, "Date") == 0) return DataSourceTypeId::Date; - else if (Poco::icompare(type_name, "DateTime") == 0) return DataSourceTypeId::DateTime; - else if (Poco::icompare(type_name, "DateTime64") == 0) return DataSourceTypeId::DateTime64; - else if (Poco::icompare(type_name, "Decimal") == 0) return DataSourceTypeId::Decimal; - else if (Poco::icompare(type_name, "Decimal32") == 0) return DataSourceTypeId::Decimal32; - else if (Poco::icompare(type_name, "Decimal64") == 0) return DataSourceTypeId::Decimal64; - else if (Poco::icompare(type_name, "Decimal128") == 0) return DataSourceTypeId::Decimal128; - else if (Poco::icompare(type_name, "FixedString") == 0) return DataSourceTypeId::FixedString; - else if (Poco::icompare(type_name, "Float32") == 0) return DataSourceTypeId::Float32; - else if (Poco::icompare(type_name, "Float64") == 0) return DataSourceTypeId::Float64; - else if (Poco::icompare(type_name, "Int8") == 0) return DataSourceTypeId::Int8; - else if (Poco::icompare(type_name, "Int16") == 0) return DataSourceTypeId::Int16; - else if (Poco::icompare(type_name, "Int32") == 0) return DataSourceTypeId::Int32; - else if (Poco::icompare(type_name, "Int64") == 0) return DataSourceTypeId::Int64; - else if (Poco::icompare(type_name, "Nothing") == 0) return DataSourceTypeId::Nothing; - else if (Poco::icompare(type_name, "String") == 0) return DataSourceTypeId::String; - else if (Poco::icompare(type_name, "UInt8") == 0) return DataSourceTypeId::UInt8; - else if (Poco::icompare(type_name, "UInt16") == 0) return DataSourceTypeId::UInt16; - else if (Poco::icompare(type_name, "UInt32") == 0) return DataSourceTypeId::UInt32; - else if (Poco::icompare(type_name, "UInt64") == 0) return DataSourceTypeId::UInt64; - else if (Poco::icompare(type_name, "UUID") == 0) return DataSourceTypeId::UUID; + if (Poco::icompare(type_name, "date") == 0) return DataSourceTypeId::Date; + else if (Poco::icompare(type_name, "date32") == 0) return DataSourceTypeId::Date; + else if (Poco::icompare(type_name, "datetime") == 0) return DataSourceTypeId::DateTime; + else if (Poco::icompare(type_name, "datetime64") == 0) return DataSourceTypeId::DateTime64; + else if 
(Poco::icompare(type_name, "decimal") == 0) return DataSourceTypeId::Decimal; + else if (Poco::icompare(type_name, "decimal32") == 0) return DataSourceTypeId::Decimal32; + else if (Poco::icompare(type_name, "decimal64") == 0) return DataSourceTypeId::Decimal64; + else if (Poco::icompare(type_name, "decimal128") == 0) return DataSourceTypeId::Decimal128; + else if (Poco::icompare(type_name, "fixed_string") == 0) return DataSourceTypeId::FixedString; + else if (Poco::icompare(type_name, "float32") == 0) return DataSourceTypeId::Float32; + else if (Poco::icompare(type_name, "float64") == 0) return DataSourceTypeId::Float64; + else if (Poco::icompare(type_name, "int8") == 0) return DataSourceTypeId::Int8; + else if (Poco::icompare(type_name, "int16") == 0) return DataSourceTypeId::Int16; + else if (Poco::icompare(type_name, "int32") == 0) return DataSourceTypeId::Int32; + else if (Poco::icompare(type_name, "int64") == 0) return DataSourceTypeId::Int64; + else if (Poco::icompare(type_name, "nothing") == 0) return DataSourceTypeId::Nothing; + else if (Poco::icompare(type_name, "string") == 0) return DataSourceTypeId::String; + else if (Poco::icompare(type_name, "uint8") == 0) return DataSourceTypeId::UInt8; + else if (Poco::icompare(type_name, "uint16") == 0) return DataSourceTypeId::UInt16; + else if (Poco::icompare(type_name, "uint32") == 0) return DataSourceTypeId::UInt32; + else if (Poco::icompare(type_name, "uint64") == 0) return DataSourceTypeId::UInt64; + else if (Poco::icompare(type_name, "uuid") == 0) return DataSourceTypeId::UUID; else if (Poco::icompare(type_name, "TINYINT") == 0) return DataSourceTypeId::Int8; else if (Poco::icompare(type_name, "SMALLINT") == 0) return DataSourceTypeId::Int16; @@ -70,27 +72,27 @@ DataSourceTypeId convertUnparametrizedTypeNameToTypeId(const std::string & type_ std::string convertTypeIdToUnparametrizedCanonicalTypeName(DataSourceTypeId type_id) { switch (type_id) { - case DataSourceTypeId::Date: return "Date"; - case DataSourceTypeId::DateTime: return "DateTime"; - case DataSourceTypeId::DateTime64: return "DateTime64"; - case DataSourceTypeId::Decimal: return "Decimal"; - case DataSourceTypeId::Decimal32: return "Decimal32"; - case DataSourceTypeId::Decimal64: return "Decimal64"; - case DataSourceTypeId::Decimal128: return "Decimal128"; - case DataSourceTypeId::FixedString: return "FixedString"; - case DataSourceTypeId::Float32: return "Float32"; - case DataSourceTypeId::Float64: return "Float64"; - case DataSourceTypeId::Int8: return "Int8"; - case DataSourceTypeId::Int16: return "Int16"; - case DataSourceTypeId::Int32: return "Int32"; - case DataSourceTypeId::Int64: return "Int64"; - case DataSourceTypeId::Nothing: return "Nothing"; - case DataSourceTypeId::String: return "String"; - case DataSourceTypeId::UInt8: return "UInt8"; - case DataSourceTypeId::UInt16: return "UInt16"; - case DataSourceTypeId::UInt32: return "UInt32"; - case DataSourceTypeId::UInt64: return "UInt64"; - case DataSourceTypeId::UUID: return "UUID"; + case DataSourceTypeId::Date: return "date"; + case DataSourceTypeId::DateTime: return "datetime"; + case DataSourceTypeId::DateTime64: return "datetime64"; + case DataSourceTypeId::Decimal: return "decimal"; + case DataSourceTypeId::Decimal32: return "decimal32"; + case DataSourceTypeId::Decimal64: return "decimal64"; + case DataSourceTypeId::Decimal128: return "decimal128"; + case DataSourceTypeId::FixedString: return "fixed_string"; + case DataSourceTypeId::Float32: return "float32"; + case DataSourceTypeId::Float64: return 
"float64"; + case DataSourceTypeId::Int8: return "int8"; + case DataSourceTypeId::Int16: return "int16"; + case DataSourceTypeId::Int32: return "int32"; + case DataSourceTypeId::Int64: return "int64"; + case DataSourceTypeId::Nothing: return "nothing"; + case DataSourceTypeId::String: return "string"; + case DataSourceTypeId::UInt8: return "uint8"; + case DataSourceTypeId::UInt16: return "uint16"; + case DataSourceTypeId::UInt32: return "uint32"; + case DataSourceTypeId::UInt64: return "uint64"; + case DataSourceTypeId::UUID: return "uuid"; default: throw std::runtime_error("unknown type id"); @@ -327,7 +329,7 @@ bool isStreamParam(SQLSMALLINT param_io_type) noexcept { std::string convertCTypeToDataSourceType(const BoundTypeInfo & type_info) { const auto set_nullability = [is_nullable = type_info.is_nullable] (const std::string & type_name) { - return (is_nullable ? "Nullable(" + type_name + ")" : type_name); + return (is_nullable ? "nullable(" + type_name + ")" : type_name); }; std::string type_name; @@ -335,66 +337,66 @@ std::string convertCTypeToDataSourceType(const BoundTypeInfo & type_info) { switch (type_info.c_type) { case SQL_C_WCHAR: case SQL_C_CHAR: - type_name = set_nullability("String"); + type_name = set_nullability("string"); break; case SQL_C_BIT: - type_name = set_nullability("UInt8"); + type_name = set_nullability("uint8"); break; case SQL_C_TINYINT: case SQL_C_STINYINT: - type_name = set_nullability("Int8"); + type_name = set_nullability("int8"); break; case SQL_C_UTINYINT: - type_name = set_nullability("UInt8"); + type_name = set_nullability("uint8"); break; case SQL_C_SHORT: case SQL_C_SSHORT: - type_name = set_nullability("Int16"); + type_name = set_nullability("int16"); break; case SQL_C_USHORT: - type_name = set_nullability("UInt16"); + type_name = set_nullability("uint16"); break; case SQL_C_LONG: case SQL_C_SLONG: - type_name = set_nullability("Int32"); + type_name = set_nullability("int32"); break; case SQL_C_ULONG: - type_name = set_nullability("UInt32"); + type_name = set_nullability("uint32"); break; case SQL_C_SBIGINT: - type_name = set_nullability("Int64"); + type_name = set_nullability("int64"); break; case SQL_C_UBIGINT: - type_name = set_nullability("UInt64"); + type_name = set_nullability("uint64"); break; case SQL_C_FLOAT: - type_name = set_nullability("Float32"); + type_name = set_nullability("float32"); break; case SQL_C_DOUBLE: - type_name = set_nullability("Float64"); + type_name = set_nullability("float64"); break; case SQL_C_NUMERIC: - type_name = set_nullability("Decimal(" + std::to_string(type_info.precision) + ", " + std::to_string(type_info.scale) + ")"); + type_name = set_nullability("decimal(" + std::to_string(type_info.precision) + ", " + std::to_string(type_info.scale) + ")"); break; case SQL_C_BINARY: - type_name = set_nullability(type_info.value_max_size > 0 ? ("FixedString(" + std::to_string(type_info.value_max_size) + ")") : "String"); + type_name = set_nullability(type_info.value_max_size > 0 ? 
("fixed_string(" + std::to_string(type_info.value_max_size) + ")") : "string"); break; case SQL_C_GUID: - type_name = set_nullability("UUID"); + type_name = set_nullability("uuid"); break; // case SQL_C_BOOKMARK: @@ -402,17 +404,17 @@ std::string convertCTypeToDataSourceType(const BoundTypeInfo & type_info) { case SQL_C_DATE: case SQL_C_TYPE_DATE: - type_name = set_nullability("Date"); + type_name = set_nullability("date"); break; case SQL_C_TIME: case SQL_C_TYPE_TIME: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; case SQL_C_TIMESTAMP: case SQL_C_TYPE_TIMESTAMP: - type_name = set_nullability("DateTime"); + type_name = set_nullability("datetime"); break; case SQL_C_INTERVAL_YEAR: @@ -428,7 +430,7 @@ std::string convertCTypeToDataSourceType(const BoundTypeInfo & type_info) { case SQL_C_INTERVAL_HOUR_TO_MINUTE: case SQL_C_INTERVAL_HOUR_TO_SECOND: case SQL_C_INTERVAL_MINUTE_TO_SECOND: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; } @@ -440,91 +442,91 @@ std::string convertCTypeToDataSourceType(const BoundTypeInfo & type_info) { std::string convertSQLTypeToDataSourceType(const BoundTypeInfo & type_info) { const auto set_nullability = [is_nullable = type_info.is_nullable] (const std::string & type_name) { - return (is_nullable ? "Nullable(" + type_name + ")" : type_name); + return (is_nullable ? "nullable(" + type_name + ")" : type_name); }; std::string type_name; switch (type_info.sql_type) { case SQL_TYPE_NULL: - type_name = set_nullability("Nothing"); + type_name = set_nullability("nothing"); break; case SQL_WCHAR: case SQL_CHAR: - type_name = set_nullability("String"); + type_name = set_nullability("string"); break; case SQL_WVARCHAR: case SQL_VARCHAR: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; case SQL_WLONGVARCHAR: case SQL_LONGVARCHAR: - type_name = set_nullability("String"); + type_name = set_nullability("string"); break; case SQL_BIT: - type_name = set_nullability("UInt8"); + type_name = set_nullability("uint8"); break; case SQL_TINYINT: - type_name = set_nullability("Int8"); + type_name = set_nullability("int8"); break; case SQL_SMALLINT: - type_name = set_nullability("Int16"); + type_name = set_nullability("int16"); break; case SQL_INTEGER: - type_name = set_nullability("Int32"); + type_name = set_nullability("int32"); break; case SQL_BIGINT: - type_name = set_nullability("Int64"); + type_name = set_nullability("int64"); break; case SQL_REAL: - type_name = set_nullability("Float32"); + type_name = set_nullability("float32"); break; case SQL_FLOAT: case SQL_DOUBLE: - type_name = set_nullability("Float64"); + type_name = set_nullability("float64"); break; case SQL_DECIMAL: case SQL_NUMERIC: - type_name = set_nullability("Decimal(" + std::to_string(type_info.precision) + ", " + std::to_string(type_info.scale) + ")"); + type_name = set_nullability("decimal(" + std::to_string(type_info.precision) + ", " + std::to_string(type_info.scale) + ")"); break; case SQL_BINARY: - type_name = set_nullability(type_info.value_max_size > 0 ? ("FixedString(" + std::to_string(type_info.value_max_size) + ")") : "String"); + type_name = set_nullability(type_info.value_max_size > 0 ? 
("fixed_string(" + std::to_string(type_info.value_max_size) + ")") : "string"); break; case SQL_VARBINARY: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; case SQL_LONGVARBINARY: - type_name = set_nullability("String"); + type_name = set_nullability("string"); break; case SQL_GUID: - type_name = set_nullability("UUID"); + type_name = set_nullability("uuid"); break; case SQL_TYPE_DATE: - type_name = set_nullability("Date"); + type_name = set_nullability("date"); break; case SQL_TYPE_TIME: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; case SQL_TYPE_TIMESTAMP: - type_name = set_nullability("DateTime"); + type_name = set_nullability("datetime"); break; case SQL_INTERVAL_MONTH: @@ -540,7 +542,7 @@ std::string convertSQLTypeToDataSourceType(const BoundTypeInfo & type_info) { case SQL_INTERVAL_HOUR_TO_MINUTE: case SQL_INTERVAL_HOUR_TO_SECOND: case SQL_INTERVAL_MINUTE_TO_SECOND: - type_name = "LowCardinality(" + set_nullability("String") + ")"; + type_name = "low_cardinality(" + set_nullability("string") + ")"; break; } diff --git a/driver/utils/type_parser.cpp b/driver/utils/type_parser.cpp index 08bf5b4..ec4e50d 100644 --- a/driver/utils/type_parser.cpp +++ b/driver/utils/type_parser.cpp @@ -4,23 +4,23 @@ #include static TypeAst::Meta getTypeMeta(const std::string & name) { - if (name == "Array") { + if (name == "array") { return TypeAst::Array; } - if (name == "Null") { + if (name == "null") { return TypeAst::Null; } - if (name == "Nullable") { + if (name == "nullable") { return TypeAst::Nullable; } - if (name == "Tuple") { + if (name == "tuple") { return TypeAst::Tuple; } - if (name == "LowCardinality") { + if (name == "low_cardinality") { return TypeAst::LowCardinality; } diff --git a/packaging/RegConfig.patch.wxs b/packaging/RegConfig.patch.wxs index 2120ac9..c56e57c 100644 --- a/packaging/RegConfig.patch.wxs +++ b/packaging/RegConfig.patch.wxs @@ -16,7 +16,7 @@ - + @@ -39,7 +39,7 @@ - + diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 12a59c5..96b2787 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -152,5 +152,6 @@ endforeach () if(SQLCMD) # MS SQL server need change server in file: - add_test(NAME "sqlcmd" COMMAND ${SQLCMD} -i ${CMAKE_CURRENT_SOURCE_DIR}/mssql.linked.server.sql) + string(REPLACE "/" "\\" CMAKE_CURRENT_SOURCE_DIR_FOR_WINDOWS ${CMAKE_CURRENT_SOURCE_DIR}) + add_test(NAME "sqlcmd" COMMAND ${SQLCMD} -i ${CMAKE_CURRENT_SOURCE_DIR_FOR_WINDOWS}\\mssql.linked.server.sql) endif() diff --git a/test/mssql.linked.server.sql b/test/mssql.linked.server.sql index 0b79632..7b6c031 100644 --- a/test/mssql.linked.server.sql +++ b/test/mssql.linked.server.sql @@ -6,24 +6,42 @@ EXEC master.dbo.sp_addlinkedserver @server = N'clickhouse_link_test' ,@srvproduct=N'Clickhouse' ,@provider=N'MSDASQL' - ,@provstr=N'Driver={ClickHouse ODBC Driver (Unicode)};SERVER=localhost;DATABASE=system;stringmaxlength=8000;' - + ,@provstr=N'Driver={ClickHouse ODBC Driver (Unicode)};Url=http://example:3218;Database=default;Uid=default;Pwd=;stringmaxlength=1500;' +go EXEC sp_serveroption 'clickhouse_link_test','rpc','true'; EXEC sp_serveroption 'clickhouse_link_test','rpc out','true'; - +go EXEC('select * from system.numbers limit 10;') at [clickhouse_link_test]; +go select count(*) as cnt from OPENQUERY(clickhouse_link_test, 'select * from system.numbers limit 10;') - +go EXEC('select ''Just string''') 
at [clickhouse_link_test]; +go EXEC('select name from system.databases;') at [clickhouse_link_test]; +go EXEC('select * from system.build_options;') at [clickhouse_link_test]; +go - -exec('CREATE TABLE IF NOT EXISTS default.fixedstring ( xx FixedString(100)) ENGINE = Memory;') at [clickhouse_link_test]; -exec(N'INSERT INTO default.fixedstring VALUES (''a''), (''abcdefg''), (''абвгдеёжзийклмнопрстуфх'');') at [clickhouse_link_test]; +exec('CREATE STREAM IF NOT EXISTS default.fixedstring ( xx fixed_string(100))') at [clickhouse_link_test]; +go +exec(N'INSERT INTO default.fixedstring (* except _tp_time) VALUES (''a''), (''abcdefg''), (''абвгдеёжзийклмнопрстуфх'');') at [clickhouse_link_test]; +go --exec('INSERT INTO test.fixedstring VALUES (''a''),(''abcdefg'');') at [clickhouse_link_test]; -exec('select xx as x from default.fixedstring;') at [clickhouse_link_test]; -exec('DROP TABLE default.fixedstring;') at [clickhouse_link_test]; - +--go +exec('select xx as x from default.fixedstring where _tp_time > earliest_ts() limit 3;') at [clickhouse_link_test]; +go +exec('DROP STREAM default.fixedstring;') at [clickhouse_link_test]; +go exec('SELECT -127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,32767,32768,65535,65536,65537,-2147483647,-2147483648,-2147483649,2147483646,2147483647,2147483648,4294967295,4294967296,4294967297,-9223372036854775807,-9223372036854775808,-9223372036854775809,9223372036854775806,9223372036854775807,9223372036854775808,18446744073709551615,18446744073709551616,18446744073709551617;') at [clickhouse_link_test]; +go exec('SELECT *, (CASE WHEN (number == 1) THEN ''o'' WHEN (number == 2) THEN ''two long string'' WHEN (number == 3) THEN ''r'' WHEN (number == 4) THEN NULL ELSE ''-'' END) FROM system.numbers LIMIT 6') at [clickhouse_link_test]; +go + +exec('CREATE STREAM IF NOT EXISTS default.number (a int64, b float64)') at [clickhouse_link_test]; +go +exec(N'INSERT INTO default.number (* except _tp_time) VALUES (1000, 1.1), (1200, 100.19), (-1000, -99.1);') at [clickhouse_link_test]; +go +exec('select (* except _tp_time) from default.number where _tp_time > earliest_ts() limit 3;') at [clickhouse_link_test]; +go +exec('DROP STREAM default.number;') at [clickhouse_link_test]; +go diff --git a/test/parameterized/parameterized/datatypes.py b/test/parameterized/parameterized/datatypes.py index cc698e4..520e4cf 100755 --- a/test/parameterized/parameterized/datatypes.py +++ b/test/parameterized/parameterized/datatypes.py @@ -22,7 +22,7 @@ def check_datatype(connection, datatype, values, nullable=False, quote=False, re expected = dict() if nullable: - datatype = f"Nullable({datatype})" + datatype = f"nullable({datatype})" values.append(NULL) if expected: @@ -36,20 +36,20 @@ def check_datatype(connection, datatype, values, nullable=False, quote=False, re """, format_description=False): with Given(f"table with a column of data type {datatype}"): - connection.query("DROP TABLE IF EXISTS ps", fetch=False) - connection.query(f"CREATE TABLE ps (v {datatype}) ENGINE = Memory", fetch=False) + connection.query("DROP STREAM IF EXISTS ps", fetch=False) + connection.query(f"CREATE STREAM ps (v {datatype})", fetch=False) try: connection.connection.setencoding(encoding=encoding) for v in values: with When(f"I insert value {repr(v)}", flags=TE, format_name=False): # connection.query("INSERT INTO ps VALUES (?)", [v], fetch=False) if quote: - connection.query(f"INSERT INTO ps VALUES ('{repr(v)}')", fetch=False) + connection.query(f"INSERT INTO ps (* except _tp_time) VALUES 
('{repr(v)}')", fetch=False) else: - connection.query(f"INSERT INTO ps VALUES ({repr(v)})", fetch=False) + connection.query(f"INSERT INTO ps (* except _tp_time) VALUES ({repr(v)})", fetch=False) with When("I select all values", flags=TE): - rows = connection.query("SELECT * FROM ps ORDER BY v") + rows = connection.query(f"SELECT (* except _tp_time) FROM ps ORDER BY v WHERE _tp_time > earliest_ts() LIMIT{len(values)}") if expected.get("all") is not None: with Then(f"the result is {expected.get('all')}", flags=TE, format_name=False): assert repr(rows) == expected.get("all"), error("result did not match") @@ -60,13 +60,13 @@ def check_datatype(connection, datatype, values, nullable=False, quote=False, re # comparing to NULL is not valid in SQL continue with When(f"I select value {repr(v)}", flags=TE, format_name=False): - rows = connection.query("SELECT * FROM ps WHERE v = ? ORDER BY v", [v]) + rows = connection.query("SELECT (* except _tp_time) FROM ps WHERE v = ? ORDER BY v", [v]) if expected.get(v) is not None: with Then(f"the result is {repr(expected.get(v))}", flags=TE, format_name=False): assert repr(rows) == expected.get(v), error("result did not match") finally: connection.connection.setencoding(encoding=connection.encoding) - connection.query("DROP TABLE ps", fetch=False) + connection.query("DROP STREAM ps", fetch=False) @TestScenario def sanity_check(self, connection): @@ -84,7 +84,7 @@ def sanity_check(self, connection): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Int8("1.0")) def Int8(self, connection, nullable=False): """Verify support for Int8 data type.""" - check_datatype(connection, "Int8", [-128, 0, 127], expected={ + check_datatype(connection, "int8", [-128, 0, 127], expected={ "all": "[(-128, ), (0, ), (127, )]", -128: "[(-128, )]", 0: "[(0, )]", @@ -95,7 +95,7 @@ def Int8(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Int16("1.0")) def Int16(self, connection, nullable=False): """Verify support for Int16 data type.""" - check_datatype(connection, "Int16", [-32768, 0, 32767], expected={ + check_datatype(connection, "int16", [-32768, 0, 32767], expected={ "all": "[(-32768, ), (0, ), (32767, )]", -32768: "[(-32768, )]", 0: "[(0, )]", @@ -106,7 +106,7 @@ def Int16(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Int32("1.0")) def Int32(self, connection, nullable=False): """Verify support for Int32 data type.""" - check_datatype(connection, "Int32", [-2147483648, 0, 2147483647], expected={ + check_datatype(connection, "int32", [-2147483648, 0, 2147483647], expected={ "all": "[(-2147483648, ), (0, ), (2147483647, )]", -2147483648: "[(-2147483648, )]", 0: "[(0, )]", @@ -117,7 +117,7 @@ def Int32(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Int64("1.0")) def Int64(self, connection, nullable=False): """Verify support for Int64 data type.""" - check_datatype(connection, "Int64", [-9223372036854775808, 0, 9223372036854775807], expected={ + check_datatype(connection, "int64", [-9223372036854775808, 0, 9223372036854775807], expected={ "all": "[(-9223372036854775808, ), (0, ), (9223372036854775807, )]", -9223372036854775808: "[(-9223372036854775808, )]", 0: "[(0, )]", @@ -128,7 +128,7 @@ def Int64(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_UInt8("1.0")) def UInt8(self, connection, nullable=False): """Verify support for UInt8 data type.""" - check_datatype(connection, 
"UInt8", [0, 255], expected={ + check_datatype(connection, "uint8", [0, 255], expected={ "all": "[(0, ), (255, )]", 0: "[(0, )]", 255: "[(255, )]" @@ -138,7 +138,7 @@ def UInt8(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_UInt16("1.0")) def UInt16(self, connection, nullable=False): """Verify support for UInt16 data type.""" - check_datatype(connection, "UInt16", [0, 65535], expected={ + check_datatype(connection, "uint16", [0, 65535], expected={ "all": "[(0, ), (65535, )]", 0: "[(0, )]", 65535: "[(65535, )]" @@ -148,7 +148,7 @@ def UInt16(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_UInt32("1.0")) def UInt32(self, connection, nullable=False): """Verify support for UInt32 data type.""" - check_datatype(connection, "UInt32", [0, 4294967295], expected={ + check_datatype(connection, "uint32", [0, 4294967295], expected={ "all": "[(0, ), (4294967295, )]", 0: "[(0, )]", 4294967295: "[(4294967295, )]" @@ -158,7 +158,7 @@ def UInt32(self, connection, nullable=False): @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_UInt64("1.0")) def UInt64(self, connection, nullable=False): """Verify support for UInt64 data type.""" - check_datatype(connection, "UInt64", [0, 18446744073709551615], expected={ + check_datatype(connection, "uint64", [0, 18446744073709551615], expected={ "all": "[(0, ), (18446744073709551615, )]", 0: "[(0, )]", 18446744073709551615: "[(18446744073709551615, )]" @@ -172,7 +172,7 @@ def UInt64(self, connection, nullable=False): ) def Float32(self, connection, nullable=False): """Verify support for Float32 data type.""" - check_datatype(connection, "Float32", [-1, 0, float("inf"), float("-inf"), float("nan"), 13.26], expected={ + check_datatype(connection, "float32", [-1, 0, float("inf"), float("-inf"), float("nan"), 13.26], expected={ "all": "[(-inf, ), (-1.0, ), (0.0, ), (13.26, ), (inf, ), (nan, )]", 0: "[(0.0, )]", -1: "[(-1.0, )]", @@ -190,7 +190,7 @@ def Float32(self, connection, nullable=False): ) def Float64(self, connection, nullable=False): """Verify support for Float64 data type.""" - check_datatype(connection, "Float64", [-1, 0, float("inf"), 13.26, float("-inf"), float("nan")], expected={ + check_datatype(connection, "float64", [-1, 0, float("inf"), 13.26, float("-inf"), float("nan")], expected={ "all": "[(-inf, ), (-1.0, ), (0.0, ), (13.26, ), (inf, ), (nan, )]", 0: "[(0.0, )]", -1: "[(-1.0, )]", @@ -211,7 +211,7 @@ def Decimal32(self, connection, nullable=False): decimal.Decimal('99999.9999'): "[(Decimal('99999.9999'), )]" } - check_datatype(connection, "Decimal32(4)", [ + check_datatype(connection, "decimal32(4)", [ decimal.Decimal('-99999.9999'), decimal.Decimal('10.1234'), decimal.Decimal('99999.9999') @@ -228,7 +228,7 @@ def Decimal64(self, connection, nullable=False): decimal.Decimal('99999999999999.9999'): "[(Decimal('99999999999999.9999'), )]" } - check_datatype(connection, "Decimal64(4)", [ + check_datatype(connection, "decimal64(4)", [ decimal.Decimal('-99999999999999.9999'), decimal.Decimal('10.1234'), decimal.Decimal('99999999999999.9999') @@ -245,7 +245,7 @@ def Decimal128(self, connection, nullable=False): decimal.Decimal('9999999999999999999999999999999999.9999'): "[(Decimal('9999999999999999999999999999999999.9999'), )]" } - check_datatype(connection, "Decimal128(4)", [ + check_datatype(connection, "decimal128(4)", [ decimal.Decimal('-9999999999999999999999999999999999.9999'), decimal.Decimal('10.1234'), 
decimal.Decimal('9999999999999999999999999999999999.9999') @@ -268,7 +268,7 @@ def String(self, connection, nullable=False): values[0]: f"[('{values[0]}', ), ('{values[1]}', )]", values[1]: f"[('{values[0]}', ), ('{values[1]}', )]" } - check_datatype(connection, "String", values=values, expected=expected, + check_datatype(connection, "string", values=values, expected=expected, encoding="utf-8", quote=True, nullable=nullable) with Scenario("ascii", flags=TE, description="ASCII encoding."): @@ -278,7 +278,7 @@ def String(self, connection, nullable=False): values[0]: f"[('{values[0]}', ), ('{values[1]}', )]", values[1]: f"[('{values[0]}', ), ('{values[1]}', )]" } - check_datatype(connection, "String", values=values, expected=expected, + check_datatype(connection, "string", values=values, expected=expected, encoding="ascii", quote=True, nullable=nullable) with Scenario("utf8", @@ -311,7 +311,7 @@ def String(self, connection, nullable=False): values[0]: f"[('{values[0]}', )]", values[1]: f"[('{values[1]}', )]" } - check_datatype(connection, "String", values=values, expected=expected, + check_datatype(connection, "string", values=values, expected=expected, encoding="utf-8", quote=True, nullable=nullable) with Scenario("ascii", @@ -328,7 +328,7 @@ def String(self, connection, nullable=False): values[0]: f"[('{values[0]}', )]", values[1]: f"[('{values[1]}', )]" } - check_datatype(connection, "String", values=values, expected=expected, + check_datatype(connection, "string", values=values, expected=expected, encoding="ascii", quote=True, nullable=nullable) with Scenario("binary", @@ -343,7 +343,7 @@ def String(self, connection, nullable=False): "all": f"[('{values[0]}', )]", values[0]: f"[('{values[0]}', )]", } - check_datatype(connection, "String", values=values, expected=expected, encoding="ascii", quote=False, nullable=nullable) + check_datatype(connection, "string", values=values, expected=expected, encoding="ascii", quote=False, nullable=nullable) @TestScenario @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_FixedString("1.0")) @@ -361,7 +361,7 @@ def FixedString(self, connection, nullable=False): values[1]: "[('hello\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', )]", values[2]: f"[('{values[2]}\\x00', )]" } - check_datatype(connection, "FixedString(16)", values=values, expected=expected, + check_datatype(connection, "fixed_string(16)", values=values, expected=expected, encoding="utf-8", quote=True, nullable=nullable) with Scenario("ascii", flags=TE, description="ASCII encoding."): @@ -376,7 +376,7 @@ def FixedString(self, connection, nullable=False): values[1]: "[('hello\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00', )]", values[2]: "[('ABCDEFGHIJKLMN\\x00\\x00', )]" } - check_datatype(connection, "FixedString(16)", values=values, expected=expected, + check_datatype(connection, "fixed_string(16)", values=values, expected=expected, encoding="ascii", quote=True, nullable=nullable) @TestScenario @@ -394,7 +394,7 @@ def Date(self, connection, nullable=False): values[1]: "[(datetime.date(2000, 12, 31), )]", values[2]: "[(datetime.date(2024, 5, 5), )]" } - check_datatype(connection, "Date", values=values, expected=expected, quote=True, nullable=nullable) + check_datatype(connection, "date", values=values, expected=expected, quote=True, nullable=nullable) @TestScenario @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_DateTime("1.0")) @@ -411,7 +411,7 @@ def DateTime(self, connection, nullable=False): values[1]: "[(datetime.datetime(2000, 12, 31, 23, 59, 
59), )]", values[2]: "[(datetime.datetime(2024, 5, 5, 13, 31, 32), )]" } - check_datatype(connection, "DateTime", values=values, expected=expected, quote=True, nullable=nullable) + check_datatype(connection, "datetime", values=values, expected=expected, quote=True, nullable=nullable) @TestScenario @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Enum("1.0")) @@ -427,7 +427,7 @@ def Enum(self, connection, nullable=False): }, encoding="utf-8", quote=True, nullable=nullable) with Scenario("ascii", flags=TE, description="ASCII encoding"): - check_datatype(connection, "Enum('hello' = 1, 'world' = 2)", ["hello", "world"], expected={ + check_datatype(connection, "enum('hello' = 1, 'world' = 2)", ["hello", "world"], expected={ "all": "[('hello', ), ('world', )]", "hello": "[('hello', )]", "world": "[('world', )]" @@ -448,7 +448,7 @@ def UUID(self, connection, nullable=False): uuid1: f"[('{uuid1}', )]", uuid2: f"[('{uuid2}', )]" } - check_datatype(connection, "UUID", values=values, expected=expected, quote=True, nullable=nullable) + check_datatype(connection, "uuid", values=values, expected=expected, quote=True, nullable=nullable) @TestScenario @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_IPv4("1.0")) @@ -463,7 +463,7 @@ def IPv4(self, connection, nullable=False): ipv40: f"[('{ipv40}', )]", ipv41: f"[('{ipv41}', )]" } - check_datatype(connection, "IPv4", values=values, expected=expected, quote=True, nullable=nullable) + check_datatype(connection, "ipv4", values=values, expected=expected, quote=True, nullable=nullable) @TestScenario @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_IPv6("1.0")) @@ -478,7 +478,7 @@ def IPv6(self, connection, nullable=False): ipv60: f"[('{ipv60}', )]", ipv61: f"[('{ipv61}', )]" } - check_datatype(connection, "IPv6", values=values, expected=expected, quote=True, nullable=nullable) + check_datatype(connection, "ipv6", values=values, expected=expected, quote=True, nullable=nullable) @TestFeature @Requirements(RQ_SRS_003_ParameterizedQueries_DataType_Select_Nullable("1.0")) @@ -496,24 +496,24 @@ def datatypes(self, nullable=False): args = {"connection": connection, "nullable": nullable} Scenario("Sanity check", run=sanity_check, args={"connection": connection}) - Scenario("Int8", run=Int8, args=args, flags=TE) - Scenario("Int16", run=Int16, args=args, flags=TE) - Scenario("Int32", run=Int32, args=args, flags=TE) - Scenario("Int64", run=Int64, args=args, flags=TE) - Scenario("UInt8", run=UInt8, args=args, flags=TE) - Scenario("UInt16", run=UInt16, args=args, flags=TE) - Scenario("UInt32", run=UInt32, args=args, flags=TE) - Scenario("UInt64", run=UInt64, args=args, flags=TE) - Scenario("Float32", run=Float32, args=args, flags=TE) - Scenario("Float64", run=Float64, args=args, flags=TE) - Scenario("Decimal32", run=Decimal32, args=args, flags=TE) - Scenario("Decimal64", run=Decimal64, args=args, flags=TE) - Scenario("Decimal128", run=Decimal128, args=args, flags=TE) - Scenario("String", run=String, args=args, flags=TE) - Scenario("FixedString", run=FixedString, args=args, flags=TE) - Scenario("Date", run=Date, args=args, flags=TE) - Scenario("DateTime", run=DateTime, args=args, flags=TE) - Scenario("Enum", run=Enum, args=args, flags=TE) - Scenario("UUID", run=UUID, args=args, flags=TE) - Scenario("IPv4", run=IPv4, args=args, flags=TE) - Scenario("IPv6", run=IPv6, args=args, flags=TE) + Scenario("int8", run=Int8, args=args, flags=TE) + Scenario("int16", run=Int16, args=args, flags=TE) + Scenario("int32", run=Int32, args=args, flags=TE) 
+ Scenario("int64", run=Int64, args=args, flags=TE) + Scenario("uint8", run=UInt8, args=args, flags=TE) + Scenario("uint16", run=UInt16, args=args, flags=TE) + Scenario("uint32", run=UInt32, args=args, flags=TE) + Scenario("uint64", run=UInt64, args=args, flags=TE) + Scenario("float32", run=Float32, args=args, flags=TE) + Scenario("float64", run=Float64, args=args, flags=TE) + Scenario("decimal32", run=Decimal32, args=args, flags=TE) + Scenario("decimal64", run=Decimal64, args=args, flags=TE) + Scenario("decimal128", run=Decimal128, args=args, flags=TE) + Scenario("string", run=String, args=args, flags=TE) + Scenario("fixed_string", run=FixedString, args=args, flags=TE) + Scenario("date", run=Date, args=args, flags=TE) + Scenario("datetime", run=DateTime, args=args, flags=TE) + Scenario("enum", run=Enum, args=args, flags=TE) + Scenario("uuid", run=UUID, args=args, flags=TE) + Scenario("ipv4", run=IPv4, args=args, flags=TE) + Scenario("ipv6", run=IPv6, args=args, flags=TE) diff --git a/test/parameterized/parameterized/funcvalues.py b/test/parameterized/parameterized/funcvalues.py index 4993829..7d84724 100644 --- a/test/parameterized/parameterized/funcvalues.py +++ b/test/parameterized/parameterized/funcvalues.py @@ -19,7 +19,7 @@ def isNull(self, connection): ] with Given("PyODBC connection"): for value in values: - query = "SELECT isNull(?)" + query = "SELECT is_null(?)" with When(f"I run '{query}' with {repr(value)} parameter"): rows = connection.query(query, [value]) expected = "[(0, )]" @@ -31,21 +31,21 @@ def isNull(self, connection): def Null(self, connection): """Verify support for handling NULL value.""" with Given("PyODBC connection"): - query = "SELECT isNull(?)" + query = "SELECT is_null(?)" with When(f"I run '{query}' with [None] parameter", flags=TE): rows = connection.query(query, [None]) expected = "[(1, )]" with Then(f"the result is {expected}", flags=TE): assert repr(rows) == expected, error("result did not match") - query = "SELECT arrayReduce('count', [?, ?])" + query = "SELECT array_reduce('count', [?, ?])" with When(f"I run '{query}' with [None, None] parameter", flags=TE): rows = connection.query(query, [None, None]) expected = "[(0, )]" with Then(f"the result is {expected}", flags=TE): assert repr(rows) == expected, error("result did not match") - query = "SELECT arrayReduce('count', [1, ?, ?])" + query = "SELECT array_reduce('count', [1, ?, ?])" with When(f"I run '{query}' with [1, None, None])", flags=TE): rows = connection.query(query, [1, None, None]) expected = "[(1, )]" @@ -61,5 +61,5 @@ def funcvalues(self, nullable=False): with Logs() as logs, PyODBCConnection(logs=logs) as connection: args = {"connection": connection} - Scenario("isNull", run=isNull, args=args, flags=TE) - Scenario("Null", run=Null, args=args, flags=TE) + Scenario("is_null", run=isNull, args=args, flags=TE) + Scenario("null", run=Null, args=args, flags=TE) diff --git a/test/parameterized/parameterized/sanity.py b/test/parameterized/parameterized/sanity.py index 7227cda..65b26a7 100755 --- a/test/parameterized/parameterized/sanity.py +++ b/test/parameterized/parameterized/sanity.py @@ -25,50 +25,50 @@ def query(query, *args, **kwargs): query("SELECT 1") table_schema = ( - "CREATE TABLE ps (i UInt8, ni Nullable(UInt8), s String, d Date, dt DateTime, " - "f Float32, dc Decimal32(3), fs FixedString(8)) ENGINE = Memory" + "CREATE STREAM ps (i uint8, ni nullable(uint8), s string, d date, dt datetime, " + "f float32, dc decimal32(3), fs fixed_string(8))" ) with Given("table", description=f"Table 
schema {table_schema}", format_description=False): - query("DROP TABLE IF EXISTS ps", fetch=False) + query("DROP STREAM IF EXISTS ps", fetch=False) query(table_schema, fetch=False) try: with When("I want to insert a couple of rows"): - query("INSERT INTO ps VALUES (1, NULL, 'Hello, world', '2005-05-05', '2005-05-05 05:05:05', " + query("INSERT INTO ps (* except _tp_time) VALUES (1, NULL, 'Hello, world', '2005-05-05', '2005-05-05 05:05:05', " "1.333, 10.123, 'fstring0')", fetch=False) - query("INSERT INTO ps VALUES (2, NULL, 'test', '2019-05-25', '2019-05-25 15:00:00', " + query("INSERT INTO ps (* except _tp_time) VALUES (2, NULL, 'test', '2019-05-25', '2019-05-25 15:00:00', " "1.433, 11.124, 'fstring1')", fetch=False) - query("SELECT * FROM ps") + query("SELECT (* except _tp_time) FROM ps where _tp_time > earliest_ts() limit 2") with When("I want to select using parameter of type UInt8", flags=TE): - query("SELECT * FROM ps WHERE i = ? ORDER BY i, s, d", [1]) + query("SELECT (* except _tp_time) FROM ps WHERE i = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [1]) with When("I want to select using parameter of type Nullable(UInt8)", flags=TE): - query("SELECT * FROM ps WHERE ni = ? ORDER BY i, s, d", [None]) + query("SELECT (* except _tp_time) FROM ps WHERE ni = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [None]) with When("I want to select using parameter of type String", flags=TE): - query("SELECT * FROM ps WHERE s = ? ORDER BY i, s, d", ["Hello, world"]) + query("SELECT (* except _tp_time) FROM ps WHERE s = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", ["Hello, world"]) with When("I want to select using parameter of type Date", flags=TE): - query("SELECT * FROM ps WHERE d = ? ORDER BY i, s, d", [datetime.date(2019,5,25)]) + query("SELECT (* except _tp_time) FROM ps WHERE d = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [datetime.date(2019,5,25)]) with When("I want to select using parameter of type DateTime", flags=TE): - query("SELECT * FROM ps WHERE dt = ? ORDER BY i, s, d", [datetime.datetime(2005, 5, 5, 5, 5, 5)]) + query("SELECT (* except _tp_time) FROM ps WHERE dt = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [datetime.datetime(2005, 5, 5, 5, 5, 5)]) with When("I want to select using parameter of type Float32", flags=TE): - query("SELECT * FROM ps WHERE f = ? ORDER BY i, s, d", [1.333]) + query("SELECT (* except _tp_time) FROM ps WHERE f = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [1.333]) with When("I want to select using parameter of type Decimal32(3)", flags=TE): - query("SELECT * FROM ps WHERE dc = ? ORDER BY i, s, d", [decimal.Decimal('10.123')]) + query("SELECT (* except _tp_time) FROM ps WHERE dc = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [decimal.Decimal('10.123')]) with When("I want to select using parameter of type FixedString(8)", flags=TE): - query("SELECT * FROM ps WHERE fs = ? ORDER BY i, s, d", [u"fstring0"]) + query("SELECT (* except _tp_time) FROM ps WHERE fs = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [u"fstring0"]) with When("I want to select using parameters of type UInt8 and String", flags=TE): - query("SELECT * FROM ps WHERE i = ? and s = ? ORDER BY i, s, d", [2, "test"]) + query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? and _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [2, "test"]) with When("I want to select using parameters of type UInt8, String, and Date", flags=TE): - query("SELECT * FROM ps WHERE i = ? and s = ? and d = ? 
ORDER BY i, s, d", + query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? and d = ? and _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [2, "test", datetime.date(2019,5,25)]) finally: - query("DROP TABLE ps", fetch=False) + query("DROP STREAM ps", fetch=False) diff --git a/test/test.sh b/test/test.sh index d956ada..09a16ce 100755 --- a/test/test.sh +++ b/test/test.sh @@ -32,102 +32,106 @@ function q { } q "SELECT * FROM system.build_options;" -q "CREATE DATABASE IF NOT EXISTS test;" -q "DROP TABLE IF EXISTS test.odbc1;" -q "CREATE TABLE test.odbc1 (ui64 UInt64, string String, date Date, datetime DateTime) ENGINE = Memory;" -q "INSERT INTO test.odbc1 VALUES (1, '2', 3, 4);" -q "INSERT INTO test.odbc1 VALUES (10, '20', 30, 40);" -q "INSERT INTO test.odbc1 VALUES (100, '200', 300, 400);" -q "SELECT * FROM test.odbc1 WHERE ui64=1;" +# q "CREATE DATABASE IF NOT EXISTS test;" +q "DROP STREAM IF EXISTS odbc1;" +q "CREATE STREAM odbc1 (ui64 uint64, String string, Date date, DateTime datetime)" +q "INSERT INTO odbc1 (* except _tp_time) VALUES (1, '2', 3, 4);" +q "INSERT INTO odbc1 (* except _tp_time) VALUES (10, '20', 30, 40);" +q "INSERT INTO odbc1 (* except _tp_time) VALUES (100, '200', 300, 400);" +sleep 2s +q "SELECT (* except _tp_time) FROM table(odbc1) WHERE ui64=1" q 'SELECT {fn CONVERT(1, SQL_BIGINT)}' q "SELECT {fn CONVERT(100000, SQL_TINYINT)}" q "SELECT {fn CONCAT('a', 'b')}" -q 'SELECT CAST({fn TRUNCATE(1.1 + 2.4, 1)} AS INTEGER) AS `yr_date_ok`' +q 'SELECT cast({fn TRUNCATE(1.1 + 2.4, 1)} AS INTEGER) AS `yr_date_ok`' -q $'SELECT COUNT({fn ABS(`test`.`odbc1`.`ui64`)}) FROM test.odbc1' +q $'SELECT count({fn ABS(`ui64`)}) FROM table(odbc1)' -q $'SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,CAST(`test`.`odbc1`.`datetime` AS DATE),CAST(`test`.`odbc1`.`date` AS DATE))} AS `Calculation_503558746242125826`, SUM({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` WHERE (CAST(`test`.`odbc1`.`datetime` AS DATE) <> {d \'1970-01-01\'}) GROUP BY `Calculation_503558746242125826`' +q $'SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,cast(`DateTime` AS DATE),cast(`Date` AS DATE))} AS `Calculation_503558746242125826`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` WHERE (cast(`DateTime` AS DATE) <> {d \'1970-01-01\'}) GROUP BY `Calculation_503558746242125826`' -q $'SELECT COUNT({fn ABS(`test`.`odbc1`.`ui64`)}) AS `TEMP_Calculation_559572257702191122__2716881070__0_`, SUM({fn ABS(`test`.`odbc1`.`ui64`)}) AS `TEMP_Calculation_559572257702191122__3054398615__0_` FROM test.odbc1;' +q $'SELECT count({fn ABS(`ui64`)}) AS `TEMP_Calculation_559572257702191122__2716881070__0_`, sum({fn ABS(`ui64`)}) AS `TEMP_Calculation_559572257702191122__3054398615__0_` FROM table(odbc1) as `odbc1`;' -q $'SELECT SUM((CASE WHEN (`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`) < 0 THEN NULL ELSE {fn SQRT((`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`))} END)) AS `TEMP_Calculation_559572257701634065__1464080195__0_`, COUNT((CASE WHEN (`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`) < 0 THEN NULL ELSE {fn SQRT((`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`))} END)) AS `TEMP_Calculation_559572257701634065__2225718044__0_` FROM test.odbc1;' +q $'SELECT sum((CASE WHEN (`ui64` * `ui64`) < 0 THEN NULL ELSE {fn SQRT((`ui64` * `ui64`))} END)) AS `TEMP_Calculation_559572257701634065__1464080195__0_`, count((CASE WHEN (`ui64` * `ui64`) < 0 THEN NULL ELSE {fn SQRT((`ui64` * `ui64`))} END)) AS `TEMP_Calculation_559572257701634065__2225718044__0_` FROM table(odbc1) as 
`odbc1`;' -# SELECT (CASE WHEN (NOT = 'True') OR (`test`.`odbc1`.`string` = 'True') OR (`test`.`odbc1`.`string2` = 'True') THEN 1 WHEN NOT (NOT = 'True') OR (`test`.`odbc1`.`string` = 'True') OR (`test`.`odbc1`.`string` = 'True') OR (`test`.`odbc1`.`string2` = 'True') THEN 0 ELSE NULL END) AS `Calculation_597289912116125696`, -# SUM({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` GROUP BY `Calculation_597289912116125696`, `string`, `ui64` +# SELECT (CASE WHEN (NOT = 'True') OR (`string` = 'True') OR (`string2` = 'True') THEN 1 WHEN NOT (NOT = 'True') OR (`string` = 'True') OR (`string` = 'True') OR (`string2` = 'True') THEN 0 ELSE NULL END) AS `Calculation_597289912116125696`, +# sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `Calculation_597289912116125696`, `string`, `ui64` -q "DROP TABLE IF EXISTS test.purchase_stat;" -q "CREATE TABLE test.purchase_stat (purchase_id UInt64, purchase_date DateTime, offer_category UInt64, amount UInt64) ENGINE = Memory;" -q $'SELECT SUM({fn CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)}) AS sum_amount FROM (SELECT purchase_date, offer_category, SUM(amount) AS amount, COUNT(DISTINCT purchase_id) AS purchase_id FROM test.purchase_stat WHERE (offer_category = 1) GROUP BY purchase_date, offer_category) Custom_SQL_Query HAVING (COUNT(1) > 0)' -q $'SELECT (CASE WHEN (`test`.`odbc1`.`ui64` > 0) THEN 1 WHEN NOT (`test`.`odbc1`.`ui64` > 0) THEN 0 ELSE NULL END) AS `Calculation_162692564973015040`, SUM({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` GROUP BY (CASE WHEN (`test`.`odbc1`.`ui64` > 0) THEN 1 WHEN NOT (`test`.`odbc1`.`ui64` > 0) THEN 0 ELSE NULL END)' +q "DROP STREAM IF EXISTS purchase_stat;" +q "CREATE STREAM purchase_stat (purchase_id uint64, purchase_date datetime, offer_category uint64, amount uint64)" +# q $'SELECT sum({fn CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)}) AS sum_amount FROM (SELECT purchase_date, offer_category, sum(amount) AS amount, count(DISTINCT purchase_id) AS purchase_id FROM table(purchase_stat) as purchase_stat WHERE (offer_category = 1) GROUP BY purchase_date, offer_category) Custom_SQL_Query HAVING (count(1) > 0)' +q $'SELECT (CASE WHEN (`ui64` > 0) THEN 1 WHEN NOT (`ui64` > 0) THEN 0 ELSE NULL END) AS `Calculation_162692564973015040`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` GROUP BY (CASE WHEN (`ui64` > 0) THEN 1 WHEN NOT (`ui64` > 0) THEN 0 ELSE NULL END)' q $"SELECT {d '2017-08-30'}" -q 'SELECT CAST(CAST(`odbc1`.`date` AS DATE) AS DATE) AS `tdy_Calculation_687361904651595777_ok` FROM `test`.`odbc1`' +q 'SELECT cast(cast(`Date` AS DATE) AS DATE) AS `tdy_Calculation_687361904651595777_ok` FROM table(`odbc1`)' q 'SELECT {fn CURDATE()}' -q $'SELECT `test`.`odbc1`.`ui64` AS `bannerid`, SUM((CASE WHEN `test`.`odbc1`.`ui64` = 0 THEN NULL ELSE `test`.`odbc1`.`ui64` / `test`.`odbc1`.`ui64` END)) AS `sum_Calculation_582934706662502402_ok`, SUM(`test`.`odbc1`.`ui64`) AS `sum_clicks_ok`, SUM(`test`.`odbc1`.`ui64`) AS `sum_shows_ok`, SUM(`test`.`odbc1`.`ui64`) AS `sum_true_installs_ok`, CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE) AS `tdy_Calculation_582934706642255872_ok` FROM `test`.`odbc1` WHERE (`test`.`odbc1`.`string` = \'YandexBrowser\') GROUP BY `test`.`odbc1`.`ui64`, CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE)' +q $'SELECT `ui64` AS `bannerid`, sum((CASE WHEN `ui64` = 0 THEN NULL ELSE `ui64` / `ui64` END)) AS `sum_Calculation_582934706662502402_ok`, 
sum(`ui64`) AS `sum_clicks_ok`, sum(`ui64`) AS `sum_shows_ok`, sum(`ui64`) AS `sum_true_installs_ok`, cast(cast(`Date` AS DATE) AS DATE) AS `tdy_Calculation_582934706642255872_ok` FROM table(`odbc1`) as `odbc1` WHERE (`String` = \'YandexBrowser\') GROUP BY `ui64`, cast(cast(`Date` AS DATE) AS DATE)' -q $'SELECT test.odbc1.ui64 AS BannerID, SUM((CASE WHEN test.odbc1.ui64 = 0 THEN NULL ELSE test.odbc1.ui64 / test.odbc1.ui64 END)) AS sum_Calculation_500744014152380416_ok, SUM(test.odbc1.ui64) AS sum_ch_installs_ok, SUM(test.odbc1.ui64) AS sum_goodshows_ok FROM test.odbc1 GROUP BY test.odbc1.ui64' -q $'SELECT test.odbc1.ui64 AS BannerID, SUM((CASE WHEN test.odbc1.ui64 > 0 THEN NULL ELSE test.odbc1.ui64 / test.odbc1.ui64 END)) AS sum_Calculation_500744014152380416_ok, SUM(test.odbc1.ui64) AS sum_ch_installs_ok, SUM(test.odbc1.ui64) AS sum_goodshows_ok FROM test.odbc1 GROUP BY test.odbc1.ui64' +q $'SELECT ui64 AS BannerID, sum((CASE WHEN ui64 = 0 THEN NULL ELSE ui64 / ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(ui64) AS sum_ch_installs_ok, sum(ui64) AS sum_goodshows_ok FROM table(odbc1) as `odbc1` GROUP BY ui64' +q $'SELECT ui64 AS BannerID, sum((CASE WHEN ui64 > 0 THEN NULL ELSE ui64 / ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(ui64) AS sum_ch_installs_ok, sum(ui64) AS sum_goodshows_ok FROM table(odbc1) as `odbc1` GROUP BY ui64' -q "DROP TABLE IF EXISTS test.test_tableau;" -q "create table test.test_tableau (country String, clicks UInt64, shows UInt64) engine Log" -q "insert into test.test_tableau values ('ru',10000,100500),('ua',1000,6000),('by',2000,6500),('tr',100,500)" -q "insert into test.test_tableau values ('undefined',0,2)" -q "insert into test.test_tableau values ('injected',1,0)" -q 'SELECT test.test_tableau.country AS country, SUM((CASE WHEN test.test_tableau.shows = 0 THEN NULL ELSE CAST(test.test_tableau.clicks AS FLOAT) / test.test_tableau.shows END)) AS sum_Calculation_920986154656493569_ok, SUM({fn POWER(CAST(test.test_tableau.clicks AS FLOAT),2)}) AS sum_Calculation_920986154656579587_ok FROM test.test_tableau GROUP BY test.test_tableau.country;' -q "DROP TABLE test.test_tableau;" +q "DROP STREAM IF EXISTS test_tableau;" +q "create stream test_tableau (country string, clicks uint64, shows uint64)" +q "insert into test_tableau (* except _tp_time) values ('ru',10000,100500)('ua',1000,6000)('by',2000,6500)('tr',100,500)" +q "insert into test_tableau (* except _tp_time) values ('undefined',0,2)" +q "insert into test_tableau (* except _tp_time) values ('injected',1,0)" +sleep 2s +q 'SELECT test_tableau.country AS country, sum((CASE WHEN test_tableau.shows = 0 THEN NULL ELSE cast(test_tableau.clicks AS FLOAT) / test_tableau.shows END)) AS sum_Calculation_920986154656493569_ok, sum({fn POWER(cast(test_tableau.clicks AS FLOAT),2)}) AS sum_Calculation_920986154656579587_ok FROM table(test_tableau) as test_tableau GROUP BY test_tableau.country;' +q "DROP STREAM test_tableau;" q 'SELECT NULL' q 'SELECT [NULL]' -q "DROP TABLE IF EXISTS test.adv_watch;" -q "create table test.adv_watch (rocket_date Date, rocket_datetime dateTime, ivi_id UInt64) engine Log" -q "insert into test.adv_watch values (1,2,3)" -q "insert into test.adv_watch values (1, {fn TIMESTAMPADD(SQL_TSI_DAY,-8,CAST({fn CURRENT_TIMESTAMP(0)} AS DATE))}, 3)" -q 'SELECT `test`.`adv_watch`.`rocket_date` AS `rocket_date`, COUNT(DISTINCT `test`.`adv_watch`.`ivi_id`) AS `usr_Calculation_683139814283419648_ok` FROM `test`.`adv_watch` WHERE ((`adv_watch`.`rocket_datetime` >= {fn 
TIMESTAMPADD(SQL_TSI_DAY,-9,CAST({fn CURRENT_TIMESTAMP(0)} AS DATE))}) AND (`test`.`adv_watch`.`rocket_datetime` < {fn TIMESTAMPADD(SQL_TSI_DAY,1,CAST({fn CURRENT_TIMESTAMP(0)} AS DATE))})) GROUP BY `test`.`adv_watch`.`rocket_date`' -q 'SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`adv_watch`.`rocket_date`),0)} AS INTEGER) AS `yr_rocket_date_ok` FROM `test`.`adv_watch` GROUP BY CAST({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`adv_watch`.`rocket_date`),0)} AS INTEGER)' -q "DROP TABLE test.adv_watch;" +q "DROP STREAM IF EXISTS adv_watch;" +q "create stream adv_watch (rocket_date date, rocket_datetime datetime, ivi_id uint64)" +q "insert into adv_watch (* except _tp_time) values (1,2,3)" +q "insert into adv_watch (* except _tp_time) values (1, {fn TIMESTAMPADD(SQL_TSI_DAY,-8,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}, 3)" +sleep 2s +q 'SELECT `adv_watch`.`rocket_date` AS `rocket_date`, count(DISTINCT `adv_watch`.`ivi_id`) AS `usr_Calculation_683139814283419648_ok` FROM table(`adv_watch`) as `adv_watch` WHERE ((`adv_watch`.`rocket_datetime` >= {fn TIMESTAMPADD(SQL_TSI_DAY,-9,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}) AND (`adv_watch`.`rocket_datetime` < {fn TIMESTAMPADD(SQL_TSI_DAY,1,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))})) GROUP BY `adv_watch`.`rocket_date`' +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `adv_watch`.`rocket_date`),0)} AS INTEGER) AS `yr_rocket_date_ok` FROM table(`adv_watch`) as `adv_watch` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `adv_watch`.`rocket_date`),0)} AS INTEGER)' +q "DROP STREAM adv_watch;" # https://github.com/yandex/clickhouse-odbc/issues/43 -q 'DROP TABLE IF EXISTS test.gamoraparams;' -q 'CREATE TABLE test.gamoraparams ( user_id Int64, date Date, dt DateTime, p1 Nullable(Int32), platforms Nullable(Int32), max_position Nullable(Int32), vv Nullable(Int32), city Nullable(String), third_party Nullable(Int8), mobile_tablet Nullable(Int8), mobile_phone Nullable(Int8), desktop Nullable(Int8), web_mobile Nullable(Int8), tv_attach Nullable(Int8), smart_tv Nullable(Int8), subsite_id Nullable(Int32), view_in_second Nullable(Int32), view_in_second_presto Nullable(Int32)) ENGINE = MergeTree(date, user_id, 8192)' -q 'insert into test.gamoraparams values (1, {fn CURRENT_TIMESTAMP }, CAST({fn CURRENT_TIMESTAMP(0)} AS DATE), Null, Null,Null,Null,Null, Null,Null,Null,Null,Null,Null,Null,Null,Null,Null);' -q 'SELECT `Custom_SQL_Query`.`platforms` AS `platforms` FROM (select platforms from test.gamoraparams where platforms is null limit 1) `Custom_SQL_Query` GROUP BY `platforms`' -q 'SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`gamoraparams`.`dt`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`gamoraparams` GROUP BY `yr_date_ok`'; -q 'DROP TABLE test.gamoraparams;' - -q $'SELECT CAST(EXTRACT(YEAR FROM `odbc1`.`date`) AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`' -q $'SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`' -q $'SELECT SUM({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`, CAST({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1` GROUP BY CAST({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date`),0)} AS INTEGER)' - -q 'SELECT CAST({fn TRUNCATE(EXTRACT(YEAR FROM CAST(`test`.`odbc1`.`date` AS DATE)),0)} AS INTEGER) AS `yr_Calculation_860750537261912064_ok` FROM `test`.`odbc1` GROUP BY `yr_Calculation_860750537261912064_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,CAST({fn TRUNCATE((-1 * ({fn DAYOFYEAR(`test`.`odbc1`.`date`)} - 1)),0)} AS 
INTEGER),CAST(`test`.`odbc1`.`date` AS DATE))} AS `tyr__date_ok` FROM `test`.`odbc1` GROUP BY `tyr__date_ok`' - -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn MOD((7 + {fn DAYOFWEEK(CAST(`test`.`odbc1`.`date` AS DATE))} - 2), 7)})),CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE))} AS `twk_date_ok` FROM `test`.`odbc1` GROUP BY `twk_date_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,CAST({fn TRUNCATE((-1 * ({fn DAYOFYEAR(CAST(`test`.`odbc1`.`date` AS DATE))} - 1)),0)} AS INTEGER),CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE))} AS `tyr_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tyr_Calculation_681450978608578560_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_MONTH,CAST({fn TRUNCATE((3 * (CAST({fn TRUNCATE({fn QUARTER(CAST(`test`.`odbc1`.`date` AS DATE))},0)} AS INTEGER) - 1)),0)} AS INTEGER),{fn TIMESTAMPADD(SQL_TSI_DAY,CAST({fn TRUNCATE((-1 * ({fn DAYOFYEAR(CAST(`test`.`odbc1`.`date` AS DATE))} - 1)),0)} AS INTEGER),CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE))})} AS `tqr_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tqr_Calculation_681450978608578560_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,CAST({fn TRUNCATE((-1 * (EXTRACT(DAY FROM CAST(`test`.`odbc1`.`date` AS DATE)) - 1)),0)} AS INTEGER),CAST(CAST(`test`.`odbc1`.`date` AS DATE) AS DATE))} AS `tmn_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tmn_Calculation_681450978608578560_ok`' - -q $'SELECT (CASE WHEN (`test`.`odbc1`.`ui64` < 5) THEN replaceRegexpOne(toString(`test`.`odbc1`.`ui64`), \'^\\s+\', \'\') WHEN (`test`.`odbc1`.`ui64` < 10) THEN \'5-9\' WHEN (`test`.`odbc1`.`ui64` < 20) THEN \'10-19\' WHEN (`test`.`odbc1`.`ui64` >= 20) THEN \'20+\' ELSE NULL END) AS `Calculation_582653228063055875`, SUM(`test`.`odbc1`.`ui64`) AS `sum_traf_se_ok` FROM `test`.`odbc1` GROUP BY `Calculation_582653228063055875` ORDER BY `Calculation_582653228063055875`' +q 'DROP STREAM IF EXISTS gamoraparams;' +q 'CREATE STREAM gamoraparams ( user_id int64, Date date, dt datetime, p1 nullable(int32), platforms nullable(int32), max_position nullable(int32), vv nullable(int32), city nullable(string), third_party nullable(int8), mobile_tablet nullable(int8), mobile_phone nullable(int8), desktop nullable(int8), web_mobile nullable(int8), tv_attach nullable(int8), smart_tv nullable(int8), subsite_id nullable(int32), view_in_second nullable(int32), view_in_second_presto nullable(int32))' +q 'insert into gamoraparams (* except _tp_time) values (1, {fn CURRENT_TIMESTAMP }, cast({fn CURRENT_TIMESTAMP(0)} AS DATE), Null, Null,Null,Null,Null, Null,Null,Null,Null,Null,Null,Null,Null,Null,Null);' +sleep 2s +# q 'SELECT `Custom_SQL_Query`.`platforms` AS `platforms` FROM (select platforms from gamoraparams where platforms is null limit 1) `Custom_SQL_Query` GROUP BY `platforms`' +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `gamoraparams`.`dt`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`gamoraparams`) as `gamoraparams` GROUP BY `yr_date_ok`'; +q 'DROP STREAM gamoraparams;' + +q $'SELECT cast(EXTRACT(YEAR FROM `Date`) AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1`' +q $'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1`' +q $'SELECT sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`, cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER)' + +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM cast(`Date` AS 
DATE)),0)} AS INTEGER) AS `yr_Calculation_860750537261912064_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `yr_Calculation_860750537261912064_ok`'
+q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(`Date`)} - 1)),0)} AS INTEGER),cast(`Date` AS DATE))} AS `tyr__date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tyr__date_ok`'
+
+q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn MOD((7 + {fn DAYOFWEEK(cast(`Date` AS DATE))} - 2), 7)})),cast(cast(`Date` AS DATE) AS DATE))} AS `twk_date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `twk_date_ok`'
+q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`Date` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))} AS `tyr_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tyr_Calculation_681450978608578560_ok`'
+q 'SELECT {fn TIMESTAMPADD(SQL_TSI_MONTH,cast({fn TRUNCATE((3 * (cast({fn TRUNCATE({fn QUARTER(cast(`Date` AS DATE))},0)} AS INTEGER) - 1)),0)} AS INTEGER),{fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`Date` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))})} AS `tqr_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tqr_Calculation_681450978608578560_ok`'
+q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * (EXTRACT(DAY FROM cast(`Date` AS DATE)) - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))} AS `tmn_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tmn_Calculation_681450978608578560_ok`'
+
+q $'SELECT (CASE WHEN (`ui64` < 5) THEN replaceRegexpOne(toString(`ui64`), \'^\\s+\', \'\') WHEN (`ui64` < 10) THEN \'5-9\' WHEN (`ui64` < 20) THEN \'10-19\' WHEN (`ui64` >= 20) THEN \'20+\' ELSE NULL END) AS `Calculation_582653228063055875`, sum(`ui64`) AS `sum_traf_se_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `Calculation_582653228063055875` ORDER BY `Calculation_582653228063055875`'

q $"SELECT *, (CASE WHEN (number == 1) THEN 'o' WHEN (number == 2) THEN 'two long string' WHEN (number == 3) THEN 'r' WHEN (number == 4) THEN NULL ELSE '-' END) FROM system.numbers LIMIT 6"

# todo: test with fail on comparison:
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-16' AS DATE))}, 7, 'sat'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-15' AS DATE))}, 1, 'sun'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-16' AS DATE))}, 2, 'mon'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-17' AS DATE))}, 3, 'thu'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-18' AS DATE))}, 4, 'wed'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-19' AS DATE))}, 5, 'thu'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-20' AS DATE))}, 6, 'fri'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-21' AS DATE))}, 7, 'sat'"
-q $"SELECT {fn DAYOFWEEK(CAST('2018-04-22' AS DATE))}, 1, 'sun'"
-
-q $"SELECT {fn DAYOFYEAR(CAST('2018-01-01' AS DATE))}, 1"
-q $"SELECT {fn DAYOFYEAR(CAST('2018-04-20' AS DATE))}, 110"
-q $"SELECT {fn DAYOFYEAR(CAST('2018-12-31' AS DATE))}, 365"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-16' AS DATE))}, 7, 'sat'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-15' AS DATE))}, 1, 'sun'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-16' AS DATE))}, 2, 'mon'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-17' AS DATE))}, 3, 'tue'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-18' AS DATE))}, 4, 'wed'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-19' AS DATE))}, 5, 'thu'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-20' AS DATE))}, 6, 'fri'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-21' AS DATE))}, 7, 'sat'"
+q $"SELECT {fn DAYOFWEEK(cast('2018-04-22' AS 
DATE))}, 1, 'sun'" + +q $"SELECT {fn DAYOFYEAR(cast('2018-01-01' AS DATE))}, 1" +q $"SELECT {fn DAYOFYEAR(cast('2018-04-20' AS DATE))}, 110" +q $"SELECT {fn DAYOFYEAR(cast('2018-12-31' AS DATE))}, 365" q $'SELECT name, {fn REPLACE(`name`, \'E\',\'!\')} AS `r1` FROM system.build_options' q $'SELECT {fn REPLACE(\'ABCDABCD\' , \'B\',\'E\')} AS `r1`' @@ -137,7 +141,7 @@ q $'SELECT {fn REPLACE(\'ABCDEFGHIJKLMNOPQRSTUVWXYZ\', \'E\',\'!\')} AS `r1`' q $"SELECT 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'" -q "SELECT toNullable(42), toNullable('abc'), NULL" +q "SELECT to_nullable(42), to_nullable('abc'), NULL" q "SELECT 1, 'string', NULL" q "SELECT 1, NULL, 2, 3, NULL, 4" q "SELECT 'stringlong', NULL, 2, NULL" @@ -145,45 +149,48 @@ q "SELECT 'stringlong', NULL, 2, NULL" q $"SELECT -127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,32767,32768,65535,65536,65537,-2147483647,-2147483648,-2147483649,2147483646,2147483647,2147483648,4294967295,4294967296,4294967297,-9223372036854775807,-9223372036854775808,-9223372036854775809,9223372036854775806,9223372036854775807,9223372036854775808,18446744073709551615,18446744073709551616,18446744073709551617" q $"SELECT 2147483647, 2147483648, 2147483647+1, 2147483647+10, 4294967295" +q "DROP STREAM if exists fixedstring;" +q "CREATE STREAM IF NOT EXISTS fixedstring ( xx fixed_string(100))" +q "INSERT INTO fixedstring (* except _tp_time) VALUES ('a'), ('abcdefg'), ('абвгдеёжзийклмнопрстуфхцч')"; +sleep 2s +q "select xx as x from table(fixedstring) as fixedstring;" +q "DROP STREAM fixedstring;" -q "CREATE TABLE IF NOT EXISTS test.fixedstring ( xx FixedString(100)) ENGINE = Memory;" -q "INSERT INTO test.fixedstring VALUES ('a'), ('abcdefg'), ('абвгдеёжзийклмнопрстуфхцч')"; -q "select xx as x from test.fixedstring;" -q "DROP TABLE test.fixedstring;" - -q 'DROP TABLE IF EXISTS test.increment;' -q 'CREATE TABLE test.increment (n UInt64) engine Log;' +q 'DROP STREAM IF EXISTS increment;' +q 'CREATE STREAM increment (n uint64);' NUM=${NUM=100} for i in `seq 1 ${NUM}`; do - q "insert into test.increment values ($i);" > /dev/null - q 'select * from test.increment;' > /dev/null + q "insert into increment (* except _tp_time) values ($i);" > /dev/null + q 'select * from table(increment) as increment;' > /dev/null done - -q 'select * from test.increment;' +sleep 2s +q 'select * from table(increment) as increment;' echo "should be ${NUM}:" -q 'select count(*) from test.increment;' +q 'select count(*) from table(increment) as increment;' -q 'DROP TABLE test.increment;' +q 'DROP STREAM increment;' -q "DROP TABLE IF EXISTS test.decimal;" -q "CREATE TABLE IF NOT EXISTS test.decimal (a DECIMAL(9,0), b DECIMAL(18,0), c DECIMAL(38,0), d DECIMAL(9, 9), e Decimal64(18), f Decimal128(38), g Decimal32(5), h Decimal64(9), i Decimal128(18), j dec(4,2)) ENGINE = Memory;" -q "INSERT INTO test.decimal (a, b, c, d, e, f, g, h, i, j) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42);" -q "INSERT INTO test.decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42);" -q "SELECT * FROM test.decimal;" +q "DROP STREAM IF EXISTS decimal;" +q "CREATE STREAM IF NOT EXISTS decimal (a DECIMAL(9,0), b DECIMAL(18,0), c DECIMAL(38,0), d DECIMAL(9, 9), e Decimal64(18), f Decimal128(38), g Decimal32(5), h Decimal64(9), i Decimal128(18), j decimal(4,2))" +q "INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42);" +q "INSERT INTO decimal 
(a, b, c, d, e, f, g, h, i, j) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42);" +sleep 2s +q "SELECT * FROM table(decimal) as decimal;" -q "drop table if exists test.lc;" -q "create table test.lc (b LowCardinality(String)) engine=MergeTree order by b;" -q "insert into test.lc select '0123456789' from numbers(100);" -q "select count(), b from test.lc group by b;" -q "select * from test.lc limit 10;" -q "drop table test.lc;" +q "drop stream if exists lc;" +q "create stream lc (b low_cardinality(string)) order by b;" +q "insert into lc (* except _tp_time) select '0123456789' from numbers(100);" +sleep 2s +q "select count(), b from table(lc) as lc group by b;" +q "select * from table(lc) as lc limit 10;" +q "drop stream lc;" # These queries can only be executed within session q "SET max_threads=10;" -q "CREATE TEMPORARY TABLE increment (n UInt64);" +q "CREATE TEMPORARY STREAM increment (n uint64) ENGINE = Memory;" # q "SELECT number, toString(number), toDate(number) FROM system.numbers LIMIT 10000;" diff --git a/tests-pyodbc/__init__.py b/tests-pyodbc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests-pyodbc/config.yaml b/tests-pyodbc/config.yaml new file mode 100644 index 0000000..3b64999 --- /dev/null +++ b/tests-pyodbc/config.yaml @@ -0,0 +1,6 @@ +data_source: + driver: 'ClickHouse ODBC Driver (Unicode)' + url: 'http://localhost:3218' + database: 'default' + uid: 'default' + pwd: '' diff --git a/tests-pyodbc/requirements.txt b/tests-pyodbc/requirements.txt new file mode 100644 index 0000000..8b3cb75 --- /dev/null +++ b/tests-pyodbc/requirements.txt @@ -0,0 +1,3 @@ +pyodbc>=5.0.1 +pytest>=7.4.3 +pyyaml diff --git a/tests-pyodbc/test_suites/conftest.py b/tests-pyodbc/test_suites/conftest.py new file mode 100644 index 0000000..973d2c6 --- /dev/null +++ b/tests-pyodbc/test_suites/conftest.py @@ -0,0 +1,10 @@ +import pyodbc +import pytest +import yaml + + +@pytest.fixture(autouse=True) +def get_connection(): + with open('../config.yaml', 'r', encoding='utf-8') as f: + cfg = yaml.full_load(f)['data_source'] + return pyodbc.connect(';'.join([f'{k}={v}' for k, v in cfg.items()]), autocommit=True) diff --git a/tests-pyodbc/test_suites/test_types.py b/tests-pyodbc/test_suites/test_types.py new file mode 100644 index 0000000..9232588 --- /dev/null +++ b/tests-pyodbc/test_suites/test_types.py @@ -0,0 +1,25 @@ +import time + +import pyodbc +import pytest + +import type_testsuites as type_t +import utils + + +class TestType: + @pytest.mark.parametrize(type_t.arg_name, type_t.args, ids=type_t.ids) + def test_type(self, + get_connection: pyodbc.Connection, + stream_suffix: str, + type_name: str, + input_list: list, + expect_output: list): + with get_connection as conn: + with conn.cursor() as cursor: + cursor.execute(f"drop stream if exists `test_{stream_suffix}`") + cursor.execute(f"create stream `test_{stream_suffix}` (`x` {type_name})") + cursor.executemany(f"insert into `test_{stream_suffix}` (`x`) values (?)", input_list) + result = cursor.execute(f"select x from `test_{stream_suffix}` where _tp_time > earliest_ts() limit {len(input_list)}").fetchall() + #cursor.execute(f"drop stream if exists `test_{stream_suffix}`") + utils.assert_eq2d(expect_output, result) diff --git a/tests-pyodbc/test_suites/type_testsuites.py b/tests-pyodbc/test_suites/type_testsuites.py new file mode 100644 index 0000000..9c52f4b --- /dev/null +++ b/tests-pyodbc/test_suites/type_testsuites.py @@ -0,0 +1,68 @@ +import datetime +import decimal +import uuid + + +def 
type_test_paramize(stream_suffix: str, type_name: str, input_list: list, expect_output: list = None) -> list: + input_list = [(item,) for item in input_list] + if expect_output is None: + expect_output = input_list + else: + expect_output = [(item,) for item in expect_output] + return [stream_suffix, type_name, input_list, expect_output] + + +arg_name = ["stream_suffix", "type_name", "input_list", "expect_output"] +args = [ + type_test_paramize("uint8", "uint8", [0, 1, (1 << 7) - 1, (1 << 8) - 1]), + type_test_paramize("uint16", "uint16", [0, 2, (1 << 15) - 1, (1 << 16) - 1]), + type_test_paramize("uint32", "uint32", [0, 3, (1 << 31) - 1, (1 << 32) - 1]), + type_test_paramize("uint64", "uint64", [0, 4, (1 << 63) - 1]), + type_test_paramize("int8", "int8", [0, 1, (1 << 7) - 1, -(1 << 7)]), + type_test_paramize("int16", "int16", [0, 2, (1 << 15) - 1, -(1 << 15)]), + type_test_paramize("int32", "int32", [0, 3, (1 << 31) - 1, -(1 << 31)]), + type_test_paramize("int64", "int64", [0, 4, (1 << 63) - 1, -(1 << 63)]), + type_test_paramize("float32", "float32", [0, 1, 3.141592, 1e5 + .1, - (1e5 + .1)]), # only support 6 decimal digits + type_test_paramize("float64", "float64", [0, 1, 3.141592, 1e10 + .1, - (1e10 + .1)]), + # only support 6 decimal digits + type_test_paramize("date", "date", [ + datetime.date(2023, 1, 1), + datetime.date(2149, 6, 6) + ]), + type_test_paramize("date32", "date32", [ + datetime.date(2023, 1, 1), + datetime.date(2149, 6, 6) + ]), + type_test_paramize("datetime", "datetime", [ + datetime.datetime(2023, 1, 1, 2, 30, 11), + datetime.datetime(2106, 2, 7, 6, 28, 15) + ]), + type_test_paramize("datetime64", "datetime64", [ + datetime.datetime(2023, 1, 1, 2, 30, 11), + datetime.datetime(2106, 2, 7, 6, 28, 15), + ]), + type_test_paramize("decimal", "decimal(10,1)", [ + decimal.Decimal("100000000.1"), + decimal.Decimal("199999999.9") + ]), + type_test_paramize("decimal32", "decimal32(9)", [ + decimal.Decimal("0." + "0" * 8 + "1"), + decimal.Decimal("0." + "9" * 9) + ]), + type_test_paramize("decimal64", "decimal64(18)", [ + decimal.Decimal("0." + "0" * 17 + "1"), + decimal.Decimal("0." + "9" * 18) + ]), + type_test_paramize("decimal128", "decimal128(37)", [ + decimal.Decimal("0." + "0" * 36 + "1"), + decimal.Decimal("0." 
+ "9" * 37) + ]), + type_test_paramize("fixed_string", "fixed_string(9)", ["123456789"]), + type_test_paramize("string", "string", ["1234567890abcdefghijklmnopqrstuvwxyz", "你好,世界!"]), + type_test_paramize("uuid", "uuid", [uuid.uuid5(uuid.NAMESPACE_URL, "www.baidu.com")], + [uuid.uuid5(uuid.NAMESPACE_URL, "www.baidu.com").__str__()]), + type_test_paramize("array", "array(string)", ["['1','2','3']"]), + type_test_paramize("array", "array(int32)", ["[1,2,3]"]), + +] +ids = [f"type_test_{param[1]}" for param in args] diff --git a/tests-pyodbc/test_suites/utils.py b/tests-pyodbc/test_suites/utils.py new file mode 100644 index 0000000..5b53708 --- /dev/null +++ b/tests-pyodbc/test_suites/utils.py @@ -0,0 +1,6 @@ +def assert_eq2d(a: list, b: list): + assert len(a) == len(b), f"list length neq: {len(a)} != {len(b)}" + for i in range(len(a)): + assert len(a[i]) == len(b[i]), f" At index {i} diff: {a[i]} != {b[i]}" + for j, x, y in zip(range(len(a[i])), a[i], b[i]): + assert x == y, f" At index {i}, {j} diff: {x} != {y}" From f899b05ec2042ea609b9fc35d1abf72cf0aa9c1a Mon Sep 17 00:00:00 2001 From: Leo Cai Date: Mon, 6 Nov 2023 10:20:23 +0800 Subject: [PATCH 2/5] add comment to explain a bugfix about TimeZone --- driver/test/datetime_it.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/driver/test/datetime_it.cpp b/driver/test/datetime_it.cpp index 0413911..db26b73 100755 --- a/driver/test/datetime_it.cpp +++ b/driver/test/datetime_it.cpp @@ -170,6 +170,8 @@ INSTANTIATE_TEST_SUITE_P( MiscellaneousTest, DateTime, ::testing::Values( + // in Windows, environment variable should be in format of 'tzn [+|-]hh[:mm[:ss] ][dzn]' + // reference: https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/tzset DateTimeParams{"Date", "ODBCDriver2", "UTC-3", "to_date('2020-03-25')", SQL_TYPE_DATE, "2020-03-25", SQL_TIMESTAMP_STRUCT{2020, 3, 25, 0, 0, 0, 0} From 50a363174cbcbad2e59e29733209000bf8221e41 Mon Sep 17 00:00:00 2001 From: Leo Cai Date: Mon, 6 Nov 2023 14:04:20 +0800 Subject: [PATCH 3/5] update README.md --- README.md | 66 +++++++++++++++++-------------------------------------- 1 file changed, 20 insertions(+), 46 deletions(-) diff --git a/README.md b/README.md index bdcbc4a..cece6d8 100644 --- a/README.md +++ b/README.md @@ -322,7 +322,7 @@ Comprehensive explanations (possibly, with some irrelevant vendor-specific detai - [ODBC Troubleshooting: How to Enable Driver-manager Tracing](https://www.simba.com/blog/odbc-troubleshooting-tracing/) - [Tracing Application Behavior](http://www.iodbc.org/dataspace/doc/iodbc/wiki/iodbcWiki/FAQ#Tracing%20Application%20Behavior) -### Building from sources: Windows +### Building from sources: Windows (Recommend) #### Build-time dependencies @@ -330,55 +330,29 @@ CMake bundled with the recent versions of Visual Studio can be used. An SDK required for building the ODBC driver is included in Windows SDK, which in its turn is also bundled with Visual Studio. -You will need to install WiX toolset to be able to generate `.msi` packages. You can download and install it from [WiX toolset home page](https://wixtoolset.org/). +You will need to install WiXv3 toolset to be able to generate `.msi` packages. You can download and install it from [WiX toolset home page](https://wixtoolset.org/). 
#### Build steps

-All of the following commands have to be issued in Visual Studio Command Prompt:
-
-- use `x86 Native Tools Command Prompt for VS 2019` or equivalent for 32-bit builds
-- use `x64 Native Tools Command Prompt for VS 2019` or equivalent for 64-bit builds
-
Clone the repo with submodules:

-```sh
-git clone --recursive git@github.com:ClickHouse/clickhouse-odbc.git
-```
-
-Enter the cloned source tree, create a temporary build folder, and generate the solution and project files in it:
-
-```sh
-cd clickhouse-odbc
-mkdir build
-cd build
-
-# Configuration options for the project can be specified in the next command in a form of '-Dopt=val'
-
-# Use the following command for 32-bit build only.
-cmake -A Win32 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
-
-# Use the following command for 64-bit build only.
-cmake -A x64 -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
+```shell
+# clone the repository
+git clone --recursive https://github.com/timeplus-io/proton-odbc.git
+cd proton-odbc
```

-Build the generated solution in-place:
+Open it with Visual Studio and select the "vs2022-x64-RelWithDebInfo" configuration.

-```sh
-cmake --build . --config RelWithDebInfo
-cmake --build . --config RelWithDebInfo --target package
-```
+Then, from the main menu, select Build > Build All. Make sure that a CMake target is already selected in the Startup Item dropdown in the toolbar.

-...and, optionally, run tests (note, that for non-unit tests, preconfigured driver and DSN entries must exist, that point to the binaries generated in this build folder):
+After that, from the main menu, select View > Terminal. In the terminal, run the following command:

-```sh
-cmake --build . --config RelWithDebInfo --target test
+```bash
+cmake --build build-win-vs2022-x64-RelWithDebInfo --target package --config RelWithDebInfo
```

-...or open the IDE and build `all`, `package`, and `test` targets manually from there:
-
-```sh
-cmake --open .
-```
+You can find the `.msi` package in the `build-win-vs2022-x64-RelWithDebInfo` directory. Double-click it to install the driver.

### Building from sources: macOS

#### Build-time dependencies

@@ -426,13 +400,13 @@ brew install git cmake make poco openssl icu4c unixodbc

Clone the repo recursively with submodules:

```sh
-git clone --recursive git@github.com:ClickHouse/clickhouse-odbc.git
+git clone --recursive https://github.com/timeplus-io/proton-odbc.git
```

Enter the cloned source tree, create a temporary build folder, and generate a Makefile for the project in it:

```sh
-cd clickhouse-odbc
+cd proton-odbc
mkdir build
cd build

@@ -498,13 +472,13 @@ scl enable devtoolset-8 -- bash

Clone the repo with submodules:

```sh
-git clone --recursive git@github.com:ClickHouse/clickhouse-odbc.git
+git clone --recursive https://github.com/timeplus-io/proton-odbc.git
```

Enter the cloned source tree, create a temporary build folder, and generate a Makefile for the project in it:

```sh
-cd clickhouse-odbc
+cd proton-odbc
mkdir build
cd build

@@ -526,9 +500,9 @@ cmake3 --build . --config RelWithDebInfo --target package
cmake3 --build . 
--config RelWithDebInfo --target test
```

-### Building from sources: Debian/Ubuntu
+### Building from sources: Debian/Ubuntu (Recommended)

-#### Build-time dependencies: UnixODBC
+#### Build-time dependencies: UnixODBC (Recommended)

Execute the following in the terminal:

@@ -556,13 +530,13 @@ If the version of `cmake` is not recent enough, you can install a newer version

Clone the repo with submodules:

```sh
-git clone --recursive git@github.com:ClickHouse/clickhouse-odbc.git
+git clone --recursive https://github.com/timeplus-io/proton-odbc.git
```

Enter the cloned source tree, create a temporary build folder, and generate a Makefile for the project in it:

```sh
-cd clickhouse-odbc
+cd proton-odbc
mkdir build
cd build

From 217afc9dbeeb04dd6860ee3d7d458ac38b908e37 Mon Sep 17 00:00:00 2001
From: Leo Cai
Date: Thu, 9 Nov 2023 16:15:29 +0800
Subject: [PATCH 4/5] fix in Linux

---
 .clang-format                                 |   4 +-
 .github/workflows/Linux.yml                   |  34 ++--
 .github/workflows/Windows.yml                 |  34 ++--
 .github/workflows/macOS.yml                   |  34 ++--
 .travis.yml                                   |  34 ++--
 CMakeLists.txt                                |  22 +--
 README.md                                     |  12 +-
 driver/driver.cpp                             |   2 +-
 driver/platform/win/resource.rc               |   2 +-
 driver/test/nano_it.cpp                       |  20 ++-
 .../test/statement_parameter_bindings_it.cpp  |   8 +-
 packaging/RegConfig.patch.wxs                 |  32 ++--
 packaging/clickhouse-odbc.tdc.sample          |   4 +-
 packaging/debian/control                      |   2 +-
 packaging/odbc.ini.sample                     |  18 +--
 packaging/odbcinst.ini.sample                 |  12 +-
 test/docker/Dockerfile                        |  34 ++--
 test/docker/Dockerfile.centos7                |   2 +-
 test/mssql.linked.server.sql                  |  46 +++---
 test/parameterized/parameterized/datatypes.py |   4 +-
 test/parameterized/parameterized/sanity.py    |  24 +--
 test/parameterized/regression.py              |   2 +-
 test/parameterized/utils/utils.py             |   4 +-
 test/test.py                                  |   2 +-
 test/test.sh                                  | 146 +++++++++---------
 tests-pyodbc/config.yaml                      |  12 +-
 tests-pyodbc/test_suites/conftest.py          |  10 +-
 tests-pyodbc/test_suites/test_grant.py        |  11 ++
 tests-pyodbc/test_suites/test_types.py        |   9 +-
 29 files changed, 310 insertions(+), 270 deletions(-)
 create mode 100644 tests-pyodbc/test_suites/test_grant.py

diff --git a/.clang-format b/.clang-format
index f67cb3d..c0e4e7b 100644
--- a/.clang-format
+++ b/.clang-format
@@ -21,11 +21,11 @@ BreakConstructorInitializersBeforeComma: false
Cpp11BracedListStyle: true
ColumnLimit: 140
ConstructorInitializerAllOnOneLineOrOnePerLine: true
-#ConstructorInitializerIndentWidth: 60
+ConstructorInitializerIndentWidth: 60
ExperimentalAutoDetectBinPacking: true
UseTab: Never
TabWidth: 4
-#IndentWidth: 4
+IndentWidth: 4
Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2

diff --git a/.github/workflows/Linux.yml b/.github/workflows/Linux.yml
index a177641..f00ecbd 100644
--- a/.github/workflows/Linux.yml
+++ b/.github/workflows/Linux.yml
@@ -114,7 +114,7 @@ jobs:
            -DODBC_PROVIDER=${{ matrix.odbc_provider }}
            -DCH_ODBC_RUNTIME_LINK_STATIC=${{ fromJSON('{"static-runtime": "ON", "dynamic-runtime": "OFF"}')[matrix.runtime_link] }}
            -DCH_ODBC_PREFER_BUNDLED_THIRD_PARTIES=${{ fromJSON('{"bundled-third-parties": "ON", "system-third-parties": "OFF"}')[matrix.third_parties] }}
-            -DTEST_DSN_LIST="ClickHouse DSN (ANSI);ClickHouse DSN (Unicode);ClickHouse DSN (ANSI, RBWNAT)"
+            -DTEST_DSN_LIST="Proton DSN (ANSI);Proton DSN (Unicode);Proton DSN (ANSI, RBWNAT)"

      - name: Build
        run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }}
@@ -169,15 +169,15 @@ jobs:
          DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log

          [ODBC Drivers]
-          ClickHouse ODBC Driver (ANSI) = Installed
-          ClickHouse ODBC Driver (Unicode) 
= Installed + Proton ODBC Driver (ANSI) = Installed + Proton ODBC Driver (Unicode) = Installed - [ClickHouse ODBC Driver (ANSI)] + [Proton ODBC Driver (ANSI)] Driver = ${{ github.workspace }}/build/driver/libclickhouseodbc.so Setup = ${{ github.workspace }}/build/driver/libclickhouseodbc.so UsageCount = 1 - [ClickHouse ODBC Driver (Unicode)] + [Proton ODBC Driver (Unicode)] Driver = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so Setup = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so UsageCount = 1 @@ -191,27 +191,27 @@ jobs: DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log [ODBC Data Sources] - ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) - ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) - ClickHouse DSN (ANSI, RBWNAT) = ClickHouse ODBC Driver (ANSI) + Proton DSN (ANSI) = Proton ODBC Driver (ANSI) + Proton DSN (Unicode) = Proton ODBC Driver (Unicode) + Proton DSN (ANSI, RBWNAT) = Proton ODBC Driver (ANSI) - [ClickHouse DSN (ANSI)] - Driver = ClickHouse ODBC Driver (ANSI) - Description = Test DSN for ClickHouse ODBC Driver (ANSI) + [Proton DSN (ANSI)] + Driver = Proton ODBC Driver (ANSI) + Description = Test DSN for Proton ODBC Driver (ANSI) Url = http://${CLICKHOUSE_SERVER_IP} DriverLog = yes DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log - [ClickHouse DSN (Unicode)] - Driver = ClickHouse ODBC Driver (Unicode) - Description = Test DSN for ClickHouse ODBC Driver (Unicode) + [Proton DSN (Unicode)] + Driver = Proton ODBC Driver (Unicode) + Description = Test DSN for Proton ODBC Driver (Unicode) Url = http://${CLICKHOUSE_SERVER_IP} DriverLog = yes DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver-w.log - [ClickHouse DSN (ANSI, RBWNAT)] - Driver = ClickHouse ODBC Driver (ANSI) - Description = Test DSN for ClickHouse ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format + [Proton DSN (ANSI, RBWNAT)] + Driver = Proton ODBC Driver (ANSI) + Description = Test DSN for Proton ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format Url = http://${CLICKHOUSE_SERVER_IP}/query?default_format=RowBinaryWithNamesAndTypes DriverLog = yes DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log diff --git a/.github/workflows/Windows.yml b/.github/workflows/Windows.yml index e299091..9968075 100644 --- a/.github/workflows/Windows.yml +++ b/.github/workflows/Windows.yml @@ -77,7 +77,7 @@ jobs: -DODBC_PROVIDER=${{ matrix.odbc_provider }} -DCH_ODBC_RUNTIME_LINK_STATIC=${{ fromJSON('{"static-runtime": "ON", "dynamic-runtime": "OFF"}')[matrix.runtime_link] }} -DCH_ODBC_PREFER_BUNDLED_THIRD_PARTIES=${{ fromJSON('{"bundled-third-parties": "ON", "system-third-parties": "OFF"}')[matrix.third_parties] }} - -DTEST_DSN_LIST="ClickHouse DSN (ANSI);ClickHouse DSN (Unicode);ClickHouse DSN (ANSI, RBWNAT)" + -DTEST_DSN_LIST="Proton DSN (ANSI);Proton DSN (Unicode);Proton DSN (ANSI, RBWNAT)" - name: Build run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }} @@ -131,15 +131,15 @@ jobs: # DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log # [ODBC Drivers] - # ClickHouse ODBC Driver (ANSI) = Installed - # ClickHouse ODBC Driver (Unicode) = Installed + # Proton ODBC Driver (ANSI) = Installed + # Proton ODBC Driver (Unicode) = Installed - # [ClickHouse ODBC Driver (ANSI)] + # [Proton ODBC Driver (ANSI)] # Driver = ${{ github.workspace }}/build/driver/libclickhouseodbc.so # Setup 
= ${{ github.workspace }}/build/driver/libclickhouseodbc.so # UsageCount = 1 - # [ClickHouse ODBC Driver (Unicode)] + # [Proton ODBC Driver (Unicode)] # Driver = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so # Setup = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so # UsageCount = 1 @@ -153,27 +153,27 @@ jobs: # DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log # [ODBC Data Sources] - # ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) - # ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) - # ClickHouse DSN (ANSI, RBWNAT) = ClickHouse ODBC Driver (ANSI) + # Proton DSN (ANSI) = Proton ODBC Driver (ANSI) + # Proton DSN (Unicode) = Proton ODBC Driver (Unicode) + # Proton DSN (ANSI, RBWNAT) = Proton ODBC Driver (ANSI) - # [ClickHouse DSN (ANSI)] - # Driver = ClickHouse ODBC Driver (ANSI) - # Description = Test DSN for ClickHouse ODBC Driver (ANSI) + # [Proton DSN (ANSI)] + # Driver = Proton ODBC Driver (ANSI) + # Description = Test DSN for Proton ODBC Driver (ANSI) # Url = http://${CLICKHOUSE_SERVER_IP} # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log - # [ClickHouse DSN (Unicode)] - # Driver = ClickHouse ODBC Driver (Unicode) - # Description = Test DSN for ClickHouse ODBC Driver (Unicode) + # [Proton DSN (Unicode)] + # Driver = Proton ODBC Driver (Unicode) + # Description = Test DSN for Proton ODBC Driver (Unicode) # Url = http://${CLICKHOUSE_SERVER_IP} # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver-w.log - # [ClickHouse DSN (ANSI, RBWNAT)] - # Driver = ClickHouse ODBC Driver (ANSI) - # Description = Test DSN for ClickHouse ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format + # [Proton DSN (ANSI, RBWNAT)] + # Driver = Proton ODBC Driver (ANSI) + # Description = Test DSN for Proton ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format # Url = http://${CLICKHOUSE_SERVER_IP}/query?default_format=RowBinaryWithNamesAndTypes # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log diff --git a/.github/workflows/macOS.yml b/.github/workflows/macOS.yml index 4e8474e..cedb330 100644 --- a/.github/workflows/macOS.yml +++ b/.github/workflows/macOS.yml @@ -104,7 +104,7 @@ jobs: -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl -DCH_ODBC_RUNTIME_LINK_STATIC=${{ fromJSON('{"static-runtime": "ON", "dynamic-runtime": "OFF"}')[matrix.runtime_link] }} -DCH_ODBC_PREFER_BUNDLED_THIRD_PARTIES=${{ fromJSON('{"bundled-third-parties": "ON", "system-third-parties": "OFF"}')[matrix.third_parties] }} - -DTEST_DSN_LIST="ClickHouse DSN (ANSI);ClickHouse DSN (Unicode);ClickHouse DSN (ANSI, RBWNAT)" + -DTEST_DSN_LIST="Proton DSN (ANSI);Proton DSN (Unicode);Proton DSN (ANSI, RBWNAT)" - name: Build run: cmake --build ${{ github.workspace }}/build --config ${{ matrix.build_type }} @@ -156,15 +156,15 @@ jobs: # DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log # [ODBC Drivers] - # ClickHouse ODBC Driver (ANSI) = Installed - # ClickHouse ODBC Driver (Unicode) = Installed + # Proton ODBC Driver (ANSI) = Installed + # Proton ODBC Driver (Unicode) = Installed - # [ClickHouse ODBC Driver (ANSI)] + # [Proton ODBC Driver (ANSI)] # Driver = ${{ github.workspace }}/build/driver/libclickhouseodbc.so # Setup = ${{ github.workspace }}/build/driver/libclickhouseodbc.so # UsageCount = 1 - # [ClickHouse ODBC Driver (Unicode)] + # [Proton ODBC Driver (Unicode)] # 
Driver = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so # Setup = ${{ github.workspace }}/build/driver/libclickhouseodbcw.so # UsageCount = 1 @@ -178,27 +178,27 @@ jobs: # DebugFile = ${{ github.workspace }}/run/odbc-driver-manager-debug.log # [ODBC Data Sources] - # ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) - # ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) - # ClickHouse DSN (ANSI, RBWNAT) = ClickHouse ODBC Driver (ANSI) + # Proton DSN (ANSI) = Proton ODBC Driver (ANSI) + # Proton DSN (Unicode) = Proton ODBC Driver (Unicode) + # Proton DSN (ANSI, RBWNAT) = Proton ODBC Driver (ANSI) - # [ClickHouse DSN (ANSI)] - # Driver = ClickHouse ODBC Driver (ANSI) - # Description = Test DSN for ClickHouse ODBC Driver (ANSI) + # [Proton DSN (ANSI)] + # Driver = Proton ODBC Driver (ANSI) + # Description = Test DSN for Proton ODBC Driver (ANSI) # Url = http://${CLICKHOUSE_SERVER_IP} # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log - # [ClickHouse DSN (Unicode)] - # Driver = ClickHouse ODBC Driver (Unicode) - # Description = Test DSN for ClickHouse ODBC Driver (Unicode) + # [Proton DSN (Unicode)] + # Driver = Proton ODBC Driver (Unicode) + # Description = Test DSN for Proton ODBC Driver (Unicode) # Url = http://${CLICKHOUSE_SERVER_IP} # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver-w.log - # [ClickHouse DSN (ANSI, RBWNAT)] - # Driver = ClickHouse ODBC Driver (ANSI) - # Description = Test DSN for ClickHouse ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format + # [Proton DSN (ANSI, RBWNAT)] + # Driver = Proton ODBC Driver (ANSI) + # Description = Test DSN for Proton ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format # Url = http://${CLICKHOUSE_SERVER_IP}/query?default_format=RowBinaryWithNamesAndTypes # DriverLog = yes # DriverLogFile = ${{ github.workspace }}/run/clickhouse-odbc-driver.log diff --git a/.travis.yml b/.travis.yml index 8019037..56631f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -390,15 +390,15 @@ install: |- DebugFile = ${RUN_DIR}/odbc-driver-manager-debug.log [ODBC Drivers] - ClickHouse ODBC Driver (ANSI) = Installed - ClickHouse ODBC Driver (Unicode) = Installed + Proton ODBC Driver (ANSI) = Installed + Proton ODBC Driver (Unicode) = Installed - [ClickHouse ODBC Driver (ANSI)] + [Proton ODBC Driver (ANSI)] Driver = ${ODBC_DRIVER_PATH} Setup = ${ODBC_DRIVER_PATH} UsageCount = 1 - [ClickHouse ODBC Driver (Unicode)] + [Proton ODBC Driver (Unicode)] Driver = ${ODBC_DRIVERW_PATH} Setup = ${ODBC_DRIVERW_PATH} UsageCount = 1 @@ -412,27 +412,27 @@ install: |- DebugFile = ${RUN_DIR}/odbc-driver-manager-debug.log [ODBC Data Sources] - ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) - ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) - ClickHouse DSN (ANSI, RBWNAT) = ClickHouse ODBC Driver (ANSI) + Proton DSN (ANSI) = Proton ODBC Driver (ANSI) + Proton DSN (Unicode) = Proton ODBC Driver (Unicode) + Proton DSN (ANSI, RBWNAT) = Proton ODBC Driver (ANSI) - [ClickHouse DSN (ANSI)] - Driver = ClickHouse ODBC Driver (ANSI) - Description = Test DSN for ClickHouse ODBC Driver (ANSI) + [Proton DSN (ANSI)] + Driver = Proton ODBC Driver (ANSI) + Description = Test DSN for Proton ODBC Driver (ANSI) Url = http://${CLICKHOUSE_SERVER_IP} DriverLog = yes DriverLogFile = ${RUN_DIR}/clickhouse-odbc-driver.log - [ClickHouse DSN (Unicode)] - Driver = ClickHouse ODBC Driver (Unicode) - 
Description = Test DSN for ClickHouse ODBC Driver (Unicode) + [Proton DSN (Unicode)] + Driver = Proton ODBC Driver (Unicode) + Description = Test DSN for Proton ODBC Driver (Unicode) Url = http://${CLICKHOUSE_SERVER_IP} DriverLog = yes DriverLogFile = ${RUN_DIR}/clickhouse-odbc-driver-w.log - [ClickHouse DSN (ANSI, RBWNAT)] - Driver = ClickHouse ODBC Driver (ANSI) - Description = Test DSN for ClickHouse ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format + [Proton DSN (ANSI, RBWNAT)] + Driver = Proton ODBC Driver (ANSI) + Description = Test DSN for Proton ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format Url = http://${CLICKHOUSE_SERVER_IP}/query?default_format=RowBinaryWithNamesAndTypes DriverLog = yes DriverLogFile = ${RUN_DIR}/clickhouse-odbc-driver.log @@ -441,7 +441,7 @@ install: |- fi .configure: &configure |- - CMAKE_CONFIGURE_ARGS="-DTEST_DSN_LIST=\"ClickHouse DSN (ANSI);ClickHouse DSN (Unicode);ClickHouse DSN (ANSI, RBWNAT)\" $CMAKE_CONFIGURE_EXTRA_ARGS" + CMAKE_CONFIGURE_ARGS="-DTEST_DSN_LIST=\"Proton DSN (ANSI);Proton DSN (Unicode);Proton DSN (ANSI, RBWNAT)\" $CMAKE_CONFIGURE_EXTRA_ARGS" if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CMAKE_CONFIGURE_ARGS="-DICU_ROOT=$(brew --prefix)/opt/icu4c $CMAKE_CONFIGURE_ARGS" fi diff --git a/CMakeLists.txt b/CMakeLists.txt index e999a6a..8212e97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,10 +5,10 @@ set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/M include (cmake/prevent_in_source_builds.cmake) project ( - clickhouse-odbc + proton-odbc VERSION 1.2.1.20220905 - DESCRIPTION "The official ODBC driver implementation for accessing ClickHouse as a data source." - HOMEPAGE_URL "https://github.com/ClickHouse/clickhouse-odbc" + DESCRIPTION "The official ODBC driver implementation for accessing Proton as a data source." + HOMEPAGE_URL "https://github.com/timeplus-io/proton-odbc" LANGUAGES C CXX ) @@ -59,8 +59,8 @@ cmake_dependent_option (CH_ODBC_PREFER_BUNDLED_NANODBC "Prefer bundled over syst option (CH_ODBC_RUNTIME_LINK_STATIC "Link with compiler and language runtime statically" OFF) option (CH_ODBC_THIRD_PARTY_LINK_STATIC "Link with third party libraries statically" ON) -set (CH_ODBC_DEFAULT_DSN_ANSI "ClickHouse DSN (ANSI)" CACHE STRING "Default ANSI DSN name") -set (CH_ODBC_DEFAULT_DSN_UNICODE "ClickHouse DSN (Unicode)" CACHE STRING "Default Unicode DSN name") +set (CH_ODBC_DEFAULT_DSN_ANSI "Proton DSN (ANSI)" CACHE STRING "Default ANSI DSN name") +set (CH_ODBC_DEFAULT_DSN_UNICODE "Proton DSN (Unicode)" CACHE STRING "Default Unicode DSN name") if (MSVC) # This default encoding mode will be overriden by UNICODE, in the corresponding cases. 
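Since `CH_ODBC_DEFAULT_DSN_ANSI` and `CH_ODBC_DEFAULT_DSN_UNICODE` are ordinary CMake cache variables, the renamed defaults remain overridable at configure time; a minimal sketch (the DSN names below are illustrative):

```sh
# Override the default DSN names baked into the driver at configure time.
cmake -DCH_ODBC_DEFAULT_DSN_ANSI="My Proton DSN (ANSI)" \
      -DCH_ODBC_DEFAULT_DSN_UNICODE="My Proton DSN (Unicode)" \
      ..
```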
@@ -229,13 +229,13 @@ if (CH_ODBC_ENABLE_INSTALL)
    cpack_add_component_group (ANSIGroup
        DISPLAY_NAME "ANSI ${ARCH_BITS}-bit Driver"
-        DESCRIPTION "ClickHouse ODBC Driver (ANSI, ${ARCH_BITS}-bit)"
+        DESCRIPTION "Proton ODBC Driver (ANSI, ${ARCH_BITS}-bit)"
        EXPANDED
    )

    cpack_add_component (ANSIDriver
        DISPLAY_NAME "Driver"
-        DESCRIPTION "ClickHouse ODBC Driver (ANSI, ${ARCH_BITS}-bit)"
+        DESCRIPTION "Proton ODBC Driver (ANSI, ${ARCH_BITS}-bit)"
        REQUIRED
        GROUP ANSIGroup
    )
@@ -250,13 +250,13 @@ if (CH_ODBC_ENABLE_INSTALL)
    cpack_add_component_group (UnicodeGroup
        DISPLAY_NAME "Unicode ${ARCH_BITS}-bit Driver"
-        DESCRIPTION "ClickHouse ODBC Driver (Unicode, ${ARCH_BITS}-bit)"
+        DESCRIPTION "Proton ODBC Driver (Unicode, ${ARCH_BITS}-bit)"
        EXPANDED
    )

    cpack_add_component (UnicodeDriver
        DISPLAY_NAME "Driver"
-        DESCRIPTION "ClickHouse ODBC Driver (Unicode, ${ARCH_BITS}-bit)"
+        DESCRIPTION "Proton ODBC Driver (Unicode, ${ARCH_BITS}-bit)"
        DEPENDS ANSIDriver
        GROUP UnicodeGroup
    )
@@ -297,7 +297,7 @@ if (CH_ODBC_ENABLE_INSTALL)
    set (CPACK_PACKAGE_VENDOR "Yandex LLC")
    set (CPACK_PACKAGE_DESCRIPTION "The official ODBC driver implementation for accessing ClickHouse as a data source.")
-    set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "ClickHouse ODBC Driver (${ARCH_BITS}-bit)")
+    set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "Proton ODBC Driver (${ARCH_BITS}-bit)")
    set (CPACK_PACKAGE_DESCRIPTION_FILE "${PROJECT_SOURCE_DIR}/packaging/Readme.rtf")
    set (CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/packaging/License.rtf")
    set (CPACK_PACKAGE_CHECKSUM "SHA256")
@@ -321,7 +321,7 @@ if (CH_ODBC_ENABLE_INSTALL)
    set (CPACK_RPM_DEBUGINFO_PACKAGE OFF)
    set (CPACK_RPM_PACKAGE_SOURCES OFF)

-    set (CPACK_WIX_ROOT_FEATURE_TITLE "ClickHouse ODBC Driver")
+    set (CPACK_WIX_ROOT_FEATURE_TITLE "Proton ODBC Driver")
    set (CPACK_WIX_ROOT_FEATURE_DESCRIPTION "${CPACK_PACKAGE_DESCRIPTION_SUMMARY}")
    set (CPACK_WIX_PATCH_FILE "${PROJECT_SOURCE_DIR}/packaging/RegConfig.patch.wxs")
    if ("${ARCH_BITS}" STREQUAL "32")
diff --git a/README.md b/README.md
index cece6d8..b7d7768 100644
--- a/README.md
+++ b/README.md
@@ -170,8 +170,8 @@ The list of configuration options recognized during the CMake generation step is
| `CH_ODBC_PREFER_BUNDLED_NANODBC` | inherits value of `CH_ODBC_PREFER_BUNDLED_THIRD_PARTIES` | Prefer bundled over system variants of nanodbc library |
| `CH_ODBC_RUNTIME_LINK_STATIC` | `OFF` | Link with compiler and language runtime statically |
| `CH_ODBC_THIRD_PARTY_LINK_STATIC` | `ON` | Link with third party libraries statically |
-| `CH_ODBC_DEFAULT_DSN_ANSI` | `ClickHouse DSN (ANSI)` | Default ANSI DSN name |
-| `CH_ODBC_DEFAULT_DSN_UNICODE` | `ClickHouse DSN (Unicode)` | Default Unicode DSN name |
+| `CH_ODBC_DEFAULT_DSN_ANSI` | `Proton DSN (ANSI)` | Default ANSI DSN name |
+| `CH_ODBC_DEFAULT_DSN_UNICODE` | `Proton DSN (Unicode)` | Default Unicode DSN name |
| `TEST_DSN_LIST` | `${CH_ODBC_DEFAULT_DSN_ANSI};${CH_ODBC_DEFAULT_DSN_UNICODE}` | `;`-separated list of DSNs, each test will be executed with each of these DSNs |

Configuration options above can be specified in the first `cmake` command (generation step) in a form of `-Dopt=val`.
@@ -348,12 +348,18 @@ Then, from the main menu, select Build > Build All. Make sure that a CMake targe
After that, from the main menu, select View > Terminal. In the terminal, run the following command:

-```bash
+```sh
cmake --build build-win-vs2022-x64-RelWithDebInfo --target package --config RelWithDebInfo
```

You can find the `.msi` package in the `build-win-vs2022-x64-RelWithDebInfo` directory. 
Double-click it to install the driver.

+...and, optionally, run tests (note that for non-unit tests, preconfigured driver and DSN entries that point to the binaries generated in this build folder must exist):
+
+```sh
+cmake --build build-win-vs2022-x64-RelWithDebInfo --config RelWithDebInfo --target run_tests
+```
+
### Building from sources: macOS

#### Build-time dependencies
diff --git a/driver/driver.cpp b/driver/driver.cpp
index 9ab61aa..0eafdba 100755
--- a/driver/driver.cpp
+++ b/driver/driver.cpp
@@ -107,7 +107,7 @@ void Driver::writeLogSessionStart(std::ostream & stream) {
    }
    stream << " ====================" << std::endl;

-    stream << "ClickHouse ODBC Driver";
+    stream << "Proton ODBC Driver";
    stream << " VERSION=" << VERSION_STRING;
    stream << " SYSTEM=" << SYSTEM_STRING;
    stream << " " << ODBC_PROVIDER;
diff --git a/driver/platform/win/resource.rc b/driver/platform/win/resource.rc
index 7b53d1b..6bfc18f 100755
--- a/driver/platform/win/resource.rc
+++ b/driver/platform/win/resource.rc
@@ -135,7 +135,7 @@ BEGIN
    BEGIN
        VALUE "Comments", ""
        VALUE "CompanyName", "Yandex"
-        VALUE "FileDescription", "ClickHouse ODBC driver"
+        VALUE "FileDescription", "Proton ODBC driver"
        VALUE "FileVersion", VERSION_STRING
        VALUE "InternalName", "clickhouse-odbc"
        VALUE "LegalCopyright", "(c) 2017-2018"
diff --git a/driver/test/nano_it.cpp b/driver/test/nano_it.cpp
index 047a9ff..e41cc40 100644
--- a/driver/test/nano_it.cpp
+++ b/driver/test/nano_it.cpp
@@ -125,26 +125,28 @@ void run_test(nanodbc::string const & connection_string) {
    execute(connection, NANODBC_TEXT("create stream simple_test (a int);"));
    execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (1);"));
    execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (2);"));
+    execute(connection, NANODBC_TEXT("select sleep(2)"));
    {
-        auto results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test where _tp_time > earliest_ts() limit 2;"));
+        auto results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test;"));
        show(results);
    }

    execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS default.strings;"));
    execute(connection, NANODBC_TEXT("CREATE STREAM default.strings (id uint64, str string, dt datetime DEFAULT now());"));
    execute(connection, NANODBC_TEXT("INSERT INTO default.strings (* except _tp_time) SELECT number, hex(number+100000), 1 FROM system.numbers LIMIT 100;"));
+    execute(connection, NANODBC_TEXT("select sleep(2)"));

    {
-        auto results = execute(connection, NANODBC_TEXT("SELECT count(*) FROM default.strings where _tp_time > earliest_ts() limit 1;"));
+        auto results = execute(connection, NANODBC_TEXT("SELECT count(*) FROM default.strings;"));
        show(results);
    }

    {
-        auto results = execute(connection, NANODBC_TEXT("SELECT (* except _tp_time) FROM default.strings where _tp_time > earliest_ts() LIMIT 100;"));
+        auto results = execute(connection, NANODBC_TEXT("SELECT (* except _tp_time) FROM default.strings;"));
        show(results);
    }

    {
        auto results = execute(connection,
            NANODBC_TEXT("SELECT `default`.`strings`.`str` AS `platform`, sum(`default`.`strings`.`id`) AS `sum_installs_ok` FROM "
-                         "`default`.`strings` WHERE _tp_time > earliest_ts() GROUP BY `str` LIMIT 100;"));
+                         "`default`.`strings` GROUP BY `str`;"));
        show(results);
    }
@@ -161,14 +163,15 @@ void run_test(nanodbc::string const & connection_string) {
    execute(connection, NANODBC_TEXT("insert into simple_test (* except _tp_time) values (2, 'two');"));
    execute(connection, 
NANODBC_TEXT("insert into simple_test (* except _tp_time) values (3, 'tri');")); execute(connection, NANODBC_TEXT("insert into simple_test (b) values ('z');")); + execute(connection, NANODBC_TEXT("select sleep(2)")); nanodbc::result results - = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test where _tp_time > earliest_ts() limit 4;")); + = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test;")); show(results); } // Accessing results by name, or column number { - nanodbc::result results = execute(connection, NANODBC_TEXT("select a as first, b as second from simple_test where a = 1 and _tp_time > earliest_ts() limit 1;")); + nanodbc::result results = execute(connection, NANODBC_TEXT("select a as first, b as second from simple_test where a = 1;")); results.next(); auto const value = results.get(1); cout << endl << results.get(NANODBC_TEXT("first")) << ", " << convert(value) << endl; @@ -308,15 +311,16 @@ void run_test(nanodbc::string const & connection_string) { execute(connection, NANODBC_TEXT("create stream date_test (x datetime);")); //execute(connection, NANODBC_TEXT("insert into date_test values (current_timestamp);")); execute(connection, NANODBC_TEXT("insert into date_test (* except _tp_time) values ({fn current_timestamp});")); + execute(connection, NANODBC_TEXT("select sleep(2)")); nanodbc::result results - = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test where _tp_time > earliest_ts() limit 1;")); + = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test;")); results.next(); nanodbc::date date = results.get(0); cout << endl << date.year << "-" << date.month << "-" << date.day << endl; - results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test where _tp_time > earliest_ts() limit 1;")); + results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from date_test;")); show(results); execute(connection, NANODBC_TEXT("drop stream if exists date_test;")); diff --git a/driver/test/statement_parameter_bindings_it.cpp b/driver/test/statement_parameter_bindings_it.cpp index d1485c5..407100d 100755 --- a/driver/test/statement_parameter_bindings_it.cpp +++ b/driver/test/statement_parameter_bindings_it.cpp @@ -26,7 +26,7 @@ TEST_F(StatementParameterBindingsTest, Missing) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLCHAR col[256] = {}; + SQLCHAR col[8] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, @@ -82,7 +82,7 @@ TEST_F(StatementParameterBindingsTest, NoBuffer) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLCHAR col[256] = {}; + SQLCHAR col[8] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, @@ -163,7 +163,7 @@ TEST_F(StatementParameterBindingsTest, DISABLED_NullStringValueForInteger) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLCHAR col[256] = {}; + SQLCHAR col[8] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, @@ -244,7 +244,7 @@ TEST_F(StatementParameterBindingsTest, DISABLED_NullStringValueForString) { if (!SQL_SUCCEEDED(rc)) throw std::runtime_error("SQLFetch return code: " + std::to_string(rc)); - SQLCHAR col[256] = {}; + SQLCHAR col[8] = {}; SQLLEN col_ind = 0; ODBC_CALL_ON_STMT_THROW(hstmt, diff --git a/packaging/RegConfig.patch.wxs b/packaging/RegConfig.patch.wxs index c56e57c..14c7205 100644 --- a/packaging/RegConfig.patch.wxs +++ 
b/packaging/RegConfig.patch.wxs @@ -1,7 +1,7 @@ - - + + @@ -11,20 +11,20 @@ - + - - - - + + + + - + - - + + @@ -34,15 +34,15 @@ - + - - - - + + + + - + diff --git a/packaging/clickhouse-odbc.tdc.sample b/packaging/clickhouse-odbc.tdc.sample index 8e591a5..c8741b7 100644 --- a/packaging/clickhouse-odbc.tdc.sample +++ b/packaging/clickhouse-odbc.tdc.sample @@ -1,7 +1,7 @@ - - + + diff --git a/packaging/debian/control b/packaging/debian/control index f54caa6..3884e16 100644 --- a/packaging/debian/control +++ b/packaging/debian/control @@ -9,5 +9,5 @@ Package: clickhouse-odbc Section: database Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} -Description: Clickhouse odbc driver +Description: Proton ODBC driver . diff --git a/packaging/odbc.ini.sample b/packaging/odbc.ini.sample index abbcbba..3b6162c 100644 --- a/packaging/odbc.ini.sample +++ b/packaging/odbc.ini.sample @@ -1,12 +1,12 @@ # Insert the content of this file into ~/.odbc.ini or /etc/odbc.ini files. [ODBC Data Sources] -ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) -ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) +Proton DSN (ANSI) = Proton ODBC Driver (ANSI) +Proton DSN (Unicode) = Proton ODBC Driver (Unicode) -[ClickHouse DSN (ANSI)] -Driver = ClickHouse ODBC Driver (ANSI) -Description = DSN (localhost) for ClickHouse ODBC Driver (ANSI) +[Proton DSN (ANSI)] +Driver = Proton ODBC Driver (ANSI) +Description = DSN (localhost) for Proton ODBC Driver (ANSI) ### New all-in-one way to specify connection with [optional] settings: # Url = https://default:password@localhost:8443/query?database=default&max_result_bytes=4000000&buffer_size=3000000 @@ -22,7 +22,7 @@ Description = DSN (localhost) for ClickHouse ODBC Driver (ANSI) # Port = 8123 # Proto = http -# Timeout for http queries to ClickHouse server (default is 30 seconds) +# Timeout for http queries to Proton server (default is 30 seconds) # Timeout=60 # SSLMode: @@ -36,7 +36,7 @@ Description = DSN (localhost) for ClickHouse ODBC Driver (ANSI) # DriverLog = yes # DriverLogFile = /tmp/clickhouse-odbc-driver.log -[ClickHouse DSN (Unicode)] -Driver = ClickHouse ODBC Driver (Unicode) -Description = DSN (localhost) for ClickHouse ODBC Driver (Unicode) +[Proton DSN (Unicode)] +Driver = Proton ODBC Driver (Unicode) +Description = DSN (localhost) for Proton ODBC Driver (Unicode) # ... diff --git a/packaging/odbcinst.ini.sample b/packaging/odbcinst.ini.sample index 2231275..c6b4faa 100644 --- a/packaging/odbcinst.ini.sample +++ b/packaging/odbcinst.ini.sample @@ -2,17 +2,17 @@ # and fix the absolute paths to libclickhouseodbc.so and libclickhouseodbcw.so files. 
[ODBC Drivers] -ClickHouse ODBC Driver (ANSI) = Installed -ClickHouse ODBC Driver (Unicode) = Installed +Proton ODBC Driver (ANSI) = Installed +Proton ODBC Driver (Unicode) = Installed -[ClickHouse ODBC Driver (ANSI)] -Description = ODBC Driver (ANSI) for ClickHouse +[Proton ODBC Driver (ANSI)] +Description = ODBC Driver (ANSI) for Proton Driver = /usr/local/lib64/libclickhouseodbc.so Setup = /usr/local/lib64/libclickhouseodbc.so UsageCount = 1 -[ClickHouse ODBC Driver (Unicode)] -Description = ODBC Driver (Unicode) for ClickHouse +[Proton ODBC Driver (Unicode)] +Description = ODBC Driver (Unicode) for Proton Driver = /usr/local/lib64/libclickhouseodbcw.so Setup = /usr/local/lib64/libclickhouseodbcw.so UsageCount = 1 diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index aabda37..ea40b43 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -118,7 +118,7 @@ RUN mkdir -p ${BIN_DIR} \ && cmake \ -G Ninja \ -DODBC_PROVIDER=${CMAKE_ODBC_PROVIDER} \ - -DTEST_DSN_LIST="ClickHouse DSN (ANSI);ClickHouse DSN (Unicode);ClickHouse DSN (ANSI, RBWNAT)" \ + -DTEST_DSN_LIST="Proton DSN (ANSI);Proton DSN (Unicode);Proton DSN (ANSI, RBWNAT)" \ ${CMAKE_ADDITIONAL_FLAGS} \ /clickhouse-odbc/ \ && ninja @@ -163,15 +163,15 @@ Debug = 1 DebugFile = \${LOG_DIR}/odbc-driver-manager-debug.log [ODBC Drivers] -ClickHouse ODBC Driver (ANSI) = Installed -ClickHouse ODBC Driver (Unicode) = Installed +Proton ODBC Driver (ANSI) = Installed +Proton ODBC Driver (Unicode) = Installed -[ClickHouse ODBC Driver (ANSI)] +[Proton ODBC Driver (ANSI)] Driver = ${BIN_DIR}/driver/libclickhouseodbc.so Setup = ${BIN_DIR}/driver/libclickhouseodbc.so UsageCount = 1 -[ClickHouse ODBC Driver (Unicode)] +[Proton ODBC Driver (Unicode)] Driver = ${BIN_DIR}/driver/libclickhouseodbcw.so Setup = ${BIN_DIR}/driver/libclickhouseodbcw.so UsageCount = 1 @@ -185,27 +185,27 @@ Debug = 1 DebugFile = \${LOG_DIR}/odbc-driver-manager-debug.log [ODBC Data Sources] -ClickHouse DSN (ANSI) = ClickHouse ODBC Driver (ANSI) -ClickHouse DSN (Unicode) = ClickHouse ODBC Driver (Unicode) -ClickHouse DSN (ANSI, RBWNAT) = ClickHouse ODBC Driver (ANSI) +Proton DSN (ANSI) = Proton ODBC Driver (ANSI) +Proton DSN (Unicode) = Proton ODBC Driver (Unicode) +Proton DSN (ANSI, RBWNAT) = Proton ODBC Driver (ANSI) -[ClickHouse DSN (ANSI)] -Driver = ClickHouse ODBC Driver (ANSI) -Description = Test DSN for ClickHouse ODBC Driver (ANSI) +[Proton DSN (ANSI)] +Driver = Proton ODBC Driver (ANSI) +Description = Test DSN for Proton ODBC Driver (ANSI) Url = \${CH_SERVER_URL} DriverLog = yes DriverLogFile = \${LOG_DIR}/clickhouse-odbc-driver.log -[ClickHouse DSN (Unicode)] -Driver = ClickHouse ODBC Driver (Unicode) -Description = Test DSN for ClickHouse ODBC Driver (Unicode) +[Proton DSN (Unicode)] +Driver = Proton ODBC Driver (Unicode) +Description = Test DSN for Proton ODBC Driver (Unicode) Url = \${CH_SERVER_URL} DriverLog = yes DriverLogFile = \${LOG_DIR}/clickhouse-odbc-driver-w.log -[ClickHouse DSN (ANSI, RBWNAT)] -Driver = ClickHouse ODBC Driver (ANSI) -Description = Test DSN for ClickHouse ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format +[Proton DSN (ANSI, RBWNAT)] +Driver = Proton ODBC Driver (ANSI) +Description = Test DSN for Proton ODBC Driver (ANSI) that uses RowBinaryWithNamesAndTypes as data source communication default format Url = \${CH_SERVER_URL}/query?default_format=RowBinaryWithNamesAndTypes DriverLog = yes DriverLogFile = \${LOG_DIR}/clickhouse-odbc-driver.log diff --git 
a/test/docker/Dockerfile.centos7 b/test/docker/Dockerfile.centos7 index 42810e9..498bd2a 100644 --- a/test/docker/Dockerfile.centos7 +++ b/test/docker/Dockerfile.centos7 @@ -1,6 +1,6 @@ FROM centos:7 -# image to build & test clickhouse odbc on centos7 & docker +# image to build & test Proton ODBC on centos7 & docker # install all needed build & test dependencies # docker build -f Dockerfile.centos7 . -t clickhouse-odbc-tester-centos7 diff --git a/test/mssql.linked.server.sql b/test/mssql.linked.server.sql index 7b6c031..33bfc00 100644 --- a/test/mssql.linked.server.sql +++ b/test/mssql.linked.server.sql @@ -1,47 +1,49 @@ -- net stop MSSQLSERVER && net start MSSQLSERVER -- sqlcmd -i mssql.linked.server.sql -EXEC master.dbo.sp_dropserver N'clickhouse_link_test'; +EXEC master.dbo.sp_dropserver N'proton_link_test'; EXEC master.dbo.sp_addlinkedserver - @server = N'clickhouse_link_test' - ,@srvproduct=N'Clickhouse' + @server = N'proton_link_test' + ,@srvproduct=N'Proton' ,@provider=N'MSDASQL' - ,@provstr=N'Driver={ClickHouse ODBC Driver (Unicode)};Url=http://example:3218;Database=default;Uid=default;Pwd=;stringmaxlength=1500;' + ,@provstr=N'Driver={Proton ODBC Driver (Unicode)};Url=http://example:3218;Database=default;Uid=default;Pwd=;stringmaxlength=8000;' go -EXEC sp_serveroption 'clickhouse_link_test','rpc','true'; -EXEC sp_serveroption 'clickhouse_link_test','rpc out','true'; +EXEC sp_serveroption 'proton_link_test','rpc','true'; +EXEC sp_serveroption 'proton_link_test','rpc out','true'; go -EXEC('select * from system.numbers limit 10;') at [clickhouse_link_test]; +EXEC('select * from system.numbers limit 10;') at [proton_link_test]; go -select count(*) as cnt from OPENQUERY(clickhouse_link_test, 'select * from system.numbers limit 10;') +select count(*) as cnt from OPENQUERY(proton_link_test, 'select * from system.numbers limit 10;') go -EXEC('select ''Just string''') at [clickhouse_link_test]; +EXEC('select ''Just string''') at [proton_link_test]; go -EXEC('select name from system.databases;') at [clickhouse_link_test]; +EXEC('select name from system.databases;') at [proton_link_test]; go -EXEC('select * from system.build_options;') at [clickhouse_link_test]; +EXEC('select * from system.build_options;') at [proton_link_test]; go -exec('CREATE STREAM IF NOT EXISTS default.fixedstring ( xx fixed_string(100))') at [clickhouse_link_test]; +exec('CREATE STREAM IF NOT EXISTS default.fixedstring ( xx fixed_string(100))') at [proton_link_test]; go -exec(N'INSERT INTO default.fixedstring (* except _tp_time) VALUES (''a''), (''abcdefg''), (''абвгдеёжзийклмнопрстуфх'');') at [clickhouse_link_test]; +exec(N'INSERT INTO default.fixedstring (* except _tp_time) VALUES (''a''), (''abcdefg''), (''абвгдеёжзийклмнопрстуфх'');') at [proton_link_test]; go ---exec('INSERT INTO test.fixedstring VALUES (''a''),(''abcdefg'');') at [clickhouse_link_test]; +--exec('INSERT INTO test.fixedstring VALUES (''a''),(''abcdefg'');') at [proton_link_test]; --go -exec('select xx as x from default.fixedstring where _tp_time > earliest_ts() limit 3;') at [clickhouse_link_test]; +exec('select sleep(2);') at [proton_link_test]; +exec('select xx as x from default.fixedstring;') at [proton_link_test]; go -exec('DROP STREAM default.fixedstring;') at [clickhouse_link_test]; +exec('DROP STREAM default.fixedstring;') at [proton_link_test]; go -exec('SELECT 
-127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,32767,32768,65535,65536,65537,-2147483647,-2147483648,-2147483649,2147483646,2147483647,2147483648,4294967295,4294967296,4294967297,-9223372036854775807,-9223372036854775808,-9223372036854775809,9223372036854775806,9223372036854775807,9223372036854775808,18446744073709551615,18446744073709551616,18446744073709551617;') at [clickhouse_link_test]; +exec('SELECT -127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,32767,32768,65535,65536,65537,-2147483647,-2147483648,-2147483649,2147483646,2147483647,2147483648,4294967295,4294967296,4294967297,-9223372036854775807,-9223372036854775808,-9223372036854775809,9223372036854775806,9223372036854775807,9223372036854775808,18446744073709551615,18446744073709551616,18446744073709551617;') at [proton_link_test]; go -exec('SELECT *, (CASE WHEN (number == 1) THEN ''o'' WHEN (number == 2) THEN ''two long string'' WHEN (number == 3) THEN ''r'' WHEN (number == 4) THEN NULL ELSE ''-'' END) FROM system.numbers LIMIT 6') at [clickhouse_link_test]; +exec('SELECT *, (CASE WHEN (number == 1) THEN ''o'' WHEN (number == 2) THEN ''two long string'' WHEN (number == 3) THEN ''r'' WHEN (number == 4) THEN NULL ELSE ''-'' END) FROM system.numbers LIMIT 6') at [proton_link_test]; go -exec('CREATE STREAM IF NOT EXISTS default.number (a int64, b float64)') at [clickhouse_link_test]; +exec('CREATE STREAM IF NOT EXISTS default.number (a int64, b float64)') at [proton_link_test]; go -exec(N'INSERT INTO default.number (* except _tp_time) VALUES (1000, 1.1), (1200, 100.19), (-1000, -99.1);') at [clickhouse_link_test]; +exec(N'INSERT INTO default.number (* except _tp_time) VALUES (1000, 1.1), (1200, 100.19), (-1000, -99.1);') at [proton_link_test]; go -exec('select (* except _tp_time) from default.number where _tp_time > earliest_ts() limit 3;') at [clickhouse_link_test]; +exec('select sleep(2);') at [proton_link_test]; +exec('select (* except _tp_time) from default.number;') at [proton_link_test]; go -exec('DROP STREAM default.number;') at [clickhouse_link_test]; +exec('DROP STREAM default.number;') at [proton_link_test]; go diff --git a/test/parameterized/parameterized/datatypes.py b/test/parameterized/parameterized/datatypes.py index 520e4cf..0174084 100755 --- a/test/parameterized/parameterized/datatypes.py +++ b/test/parameterized/parameterized/datatypes.py @@ -1,6 +1,7 @@ import datetime import decimal import uuid +import time from testflows.core import TestFeature, TestScenario from testflows.core import Requirements, Feature, Scenario, Given, When, Then, TE @@ -47,9 +48,10 @@ def check_datatype(connection, datatype, values, nullable=False, quote=False, re connection.query(f"INSERT INTO ps (* except _tp_time) VALUES ('{repr(v)}')", fetch=False) else: connection.query(f"INSERT INTO ps (* except _tp_time) VALUES ({repr(v)})", fetch=False) + time.sleep(2) with When("I select all values", flags=TE): - rows = connection.query(f"SELECT (* except _tp_time) FROM ps ORDER BY v WHERE _tp_time > earliest_ts() LIMIT{len(values)}") + rows = connection.query(f"SELECT (* except _tp_time) FROM ps ORDER BY v") if expected.get("all") is not None: with Then(f"the result is {expected.get('all')}", flags=TE, format_name=False): assert repr(rows) == expected.get("all"), error("result did not match") diff --git a/test/parameterized/parameterized/sanity.py b/test/parameterized/parameterized/sanity.py index 65b26a7..e392d46 100755 --- a/test/parameterized/parameterized/sanity.py +++ 
b/test/parameterized/parameterized/sanity.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import datetime import decimal +import time from testflows.core import TestScenario, Given, When, Then from testflows.core import TE @@ -38,37 +39,38 @@ def query(query, *args, **kwargs): "1.333, 10.123, 'fstring0')", fetch=False) query("INSERT INTO ps (* except _tp_time) VALUES (2, NULL, 'test', '2019-05-25', '2019-05-25 15:00:00', " "1.433, 11.124, 'fstring1')", fetch=False) - query("SELECT (* except _tp_time) FROM ps where _tp_time > earliest_ts() limit 2") + time.sleep(2) + query("SELECT (* except _tp_time) FROM ps") with When("I want to select using parameter of type UInt8", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE i = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [1]) + query("SELECT (* except _tp_time) FROM ps WHERE i = ? ORDER BY i, s, d", [1]) with When("I want to select using parameter of type Nullable(UInt8)", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE ni = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [None]) + query("SELECT (* except _tp_time) FROM ps WHERE ni = ? ORDER BY i, s, d", [None]) with When("I want to select using parameter of type String", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE s = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", ["Hello, world"]) + query("SELECT (* except _tp_time) FROM ps WHERE s = ? ORDER BY i, s, d", ["Hello, world"]) with When("I want to select using parameter of type Date", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE d = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [datetime.date(2019,5,25)]) + query("SELECT (* except _tp_time) FROM ps WHERE d = ? ORDER BY i, s, d", [datetime.date(2019,5,25)]) with When("I want to select using parameter of type DateTime", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE dt = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [datetime.datetime(2005, 5, 5, 5, 5, 5)]) + query("SELECT (* except _tp_time) FROM ps WHERE dt = ? ORDER BY i, s, d", [datetime.datetime(2005, 5, 5, 5, 5, 5)]) with When("I want to select using parameter of type Float32", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE f = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [1.333]) + query("SELECT (* except _tp_time) FROM ps WHERE f = ? ORDER BY i, s, d", [1.333]) with When("I want to select using parameter of type Decimal32(3)", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE dc = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [decimal.Decimal('10.123')]) + query("SELECT (* except _tp_time) FROM ps WHERE dc = ? ORDER BY i, s, d", [decimal.Decimal('10.123')]) with When("I want to select using parameter of type FixedString(8)", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE fs = ? AND _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [u"fstring0"]) + query("SELECT (* except _tp_time) FROM ps WHERE fs = ? ORDER BY i, s, d", [u"fstring0"]) with When("I want to select using parameters of type UInt8 and String", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? and _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", [2, "test"]) + query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? ORDER BY i, s, d", [2, "test"]) with When("I want to select using parameters of type UInt8, String, and Date", flags=TE): - query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? and d = ? 
and _tp_time > earliest_ts() ORDER BY i, s, d LIMIT 1", + query("SELECT (* except _tp_time) FROM ps WHERE i = ? and s = ? and d = ? ORDER BY i, s, d", [2, "test", datetime.date(2019,5,25)]) finally: query("DROP STREAM ps", fetch=False) diff --git a/test/parameterized/regression.py b/test/parameterized/regression.py index e6a3eb7..0b0f347 100755 --- a/test/parameterized/regression.py +++ b/test/parameterized/regression.py @@ -15,7 +15,7 @@ def parameterized(self): """Test suite for clickhouse-odbc support of parameterized queries. """ - dsn = os.getenv("DSN", "ClickHouse DSN (ANSI)") + dsn = os.getenv("DSN", "Proton DSN (ANSI)") with Feature(f"{dsn}", flags=TE): Scenario(run=load("parameterized.sanity", test="sanity"), flags=TE) Feature(run=load("parameterized.datatypes", test="datatypes"), flags=TE) diff --git a/test/parameterized/utils/utils.py b/test/parameterized/utils/utils.py index 49b8bfa..13b141e 100644 --- a/test/parameterized/utils/utils.py +++ b/test/parameterized/utils/utils.py @@ -15,7 +15,7 @@ @contextmanager def Logs(): - """ClickHouse and ODBC driver logs context manager. + """Proton and ODBC driver logs context manager. """ class _Logs: def __init__(self, *args): @@ -50,7 +50,7 @@ def read(self, timeout=None): def PyODBCConnection(encoding="utf-8", logs=None): """PyODBC connector context manager. """ - dsn = os.getenv("DSN", "ClickHouse DSN (ANSI)") + dsn = os.getenv("DSN", "Proton DSN (ANSI)") note(f"Using DSN={dsn}") connection = pyodbc.connect(f"DSN={dsn};") try: diff --git a/test/test.py b/test/test.py index 4ddea6c..6e8ea4b 100755 --- a/test/test.py +++ b/test/test.py @@ -17,7 +17,7 @@ is_windows = (os.name == 'nt') def main(): - dsn = 'ClickHouse DSN (ANSI)' + dsn = 'Proton DSN (ANSI)' if len(sys.argv) >= 2: dsn = sys.argv[1] diff --git a/test/test.sh b/test/test.sh index 09a16ce..3a10f5c 100755 --- a/test/test.sh +++ b/test/test.sh @@ -32,90 +32,90 @@ function q { } q "SELECT * FROM system.build_options;" -# q "CREATE DATABASE IF NOT EXISTS test;" -q "DROP STREAM IF EXISTS odbc1;" -q "CREATE STREAM odbc1 (ui64 uint64, String string, Date date, DateTime datetime)" -q "INSERT INTO odbc1 (* except _tp_time) VALUES (1, '2', 3, 4);" -q "INSERT INTO odbc1 (* except _tp_time) VALUES (10, '20', 30, 40);" -q "INSERT INTO odbc1 (* except _tp_time) VALUES (100, '200', 300, 400);" +q "CREATE DATABASE IF NOT EXISTS test;" +q "DROP STREAM IF EXISTS test.odbc1;" +q "CREATE STREAM test.odbc1 (ui64 uint64, str string, date_col Date, datetime_col datetime)" +q "INSERT INTO test.odbc1 (* except _tp_time) VALUES (1, '2', 3, 4);" +q "INSERT INTO test.odbc1 (* except _tp_time) VALUES (10, '20', 30, 40);" +q "INSERT INTO test.odbc1 (* except _tp_time) VALUES (100, '200', 300, 400);" sleep 2s -q "SELECT (* except _tp_time) FROM table(odbc1) WHERE ui64=1" +q "SELECT (* except _tp_time) FROM test.odbc1 WHERE ui64=1;" q 'SELECT {fn CONVERT(1, SQL_BIGINT)}' q "SELECT {fn CONVERT(100000, SQL_TINYINT)}" q "SELECT {fn CONCAT('a', 'b')}" q 'SELECT cast({fn TRUNCATE(1.1 + 2.4, 1)} AS INTEGER) AS `yr_date_ok`' -q $'SELECT count({fn ABS(`ui64`)}) FROM table(odbc1)' +q $'SELECT count({fn ABS(`test`.`odbc1`.`ui64`)}) FROM test.odbc1' -q $'SELECT {fn TIMESTAMPDIFF(SQL_TSI_DAY,cast(`DateTime` AS DATE),cast(`Date` AS DATE))} AS `Calculation_503558746242125826`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` WHERE (cast(`DateTime` AS DATE) <> {d \'1970-01-01\'}) GROUP BY `Calculation_503558746242125826`' +q $'SELECT {fn 
TIMESTAMPDIFF(SQL_TSI_DAY,cast(`test`.`odbc1`.`datetime_col` AS DATE),cast(`test`.`odbc1`.`date_col` AS DATE))} AS `Calculation_503558746242125826`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` WHERE (cast(`test`.`odbc1`.`datetime_col` AS DATE) <> {d \'1970-01-01\'}) GROUP BY `Calculation_503558746242125826`' -q $'SELECT count({fn ABS(`ui64`)}) AS `TEMP_Calculation_559572257702191122__2716881070__0_`, sum({fn ABS(`ui64`)}) AS `TEMP_Calculation_559572257702191122__3054398615__0_` FROM table(odbc1) as `odbc1`;' +q $'SELECT count({fn ABS(`test`.`odbc1`.`ui64`)}) AS `TEMP_Calculation_559572257702191122__2716881070__0_`, sum({fn ABS(`test`.`odbc1`.`ui64`)}) AS `TEMP_Calculation_559572257702191122__3054398615__0_` FROM test.odbc1;' -q $'SELECT sum((CASE WHEN (`ui64` * `ui64`) < 0 THEN NULL ELSE {fn SQRT((`ui64` * `ui64`))} END)) AS `TEMP_Calculation_559572257701634065__1464080195__0_`, count((CASE WHEN (`ui64` * `ui64`) < 0 THEN NULL ELSE {fn SQRT((`ui64` * `ui64`))} END)) AS `TEMP_Calculation_559572257701634065__2225718044__0_` FROM table(odbc1) as `odbc1`;' +q $'SELECT sum((CASE WHEN (`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`) < 0 THEN NULL ELSE {fn SQRT((`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`))} END)) AS `TEMP_Calculation_559572257701634065__1464080195__0_`, count((CASE WHEN (`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`) < 0 THEN NULL ELSE {fn SQRT((`test`.`odbc1`.`ui64` * `test`.`odbc1`.`ui64`))} END)) AS `TEMP_Calculation_559572257701634065__2225718044__0_` FROM test.odbc1;' -# SELECT (CASE WHEN (NOT = 'True') OR (`string` = 'True') OR (`string2` = 'True') THEN 1 WHEN NOT (NOT = 'True') OR (`string` = 'True') OR (`string` = 'True') OR (`string2` = 'True') THEN 0 ELSE NULL END) AS `Calculation_597289912116125696`, -# sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `Calculation_597289912116125696`, `string`, `ui64` +# SELECT (CASE WHEN (NOT = 'True') OR (`test`.`odbc1`.`str` = 'True') OR (`test`.`odbc1`.`string2` = 'True') THEN 1 WHEN NOT (NOT = 'True') OR (`test`.`odbc1`.`str` = 'True') OR (`test`.`odbc1`.`str` = 'True') OR (`test`.`odbc1`.`string2` = 'True') THEN 0 ELSE NULL END) AS `Calculation_597289912116125696`, +# sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` GROUP BY `Calculation_597289912116125696`, `str`, `ui64` -q "DROP STREAM IF EXISTS purchase_stat;" -q "CREATE STREAM purchase_stat (purchase_id uint64, purchase_date datetime, offer_category uint64, amount uint64)" -# q $'SELECT sum({fn CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)}) AS sum_amount FROM (SELECT purchase_date, offer_category, sum(amount) AS amount, count(DISTINCT purchase_id) AS purchase_id FROM table(purchase_stat) as purchase_stat WHERE (offer_category = 1) GROUP BY purchase_date, offer_category) Custom_SQL_Query HAVING (count(1) > 0)' -q $'SELECT (CASE WHEN (`ui64` > 0) THEN 1 WHEN NOT (`ui64` > 0) THEN 0 ELSE NULL END) AS `Calculation_162692564973015040`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM table(`odbc1`) as `odbc1` GROUP BY (CASE WHEN (`ui64` > 0) THEN 1 WHEN NOT (`ui64` > 0) THEN 0 ELSE NULL END)' +q "DROP STREAM IF EXISTS test.purchase_stat;" +q "CREATE STREAM test.purchase_stat (purchase_id uint64, purchase_date datetime, offer_category uint64, amount uint64);" +q $'SELECT sum({fn CONVERT(Custom_SQL_Query.amount, SQL_BIGINT)}) AS sum_amount FROM (SELECT purchase_date, offer_category, sum(amount) AS amount, count(DISTINCT purchase_id) AS 
purchase_id FROM test.purchase_stat WHERE (offer_category = 1) GROUP BY purchase_date, offer_category) Custom_SQL_Query HAVING (count(1) > 0)' +q $'SELECT (CASE WHEN (`test`.`odbc1`.`ui64` > 0) THEN 1 WHEN NOT (`test`.`odbc1`.`ui64` > 0) THEN 0 ELSE NULL END) AS `Calculation_162692564973015040`, sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok` FROM `test`.`odbc1` GROUP BY (CASE WHEN (`test`.`odbc1`.`ui64` > 0) THEN 1 WHEN NOT (`test`.`odbc1`.`ui64` > 0) THEN 0 ELSE NULL END)' q $"SELECT {d '2017-08-30'}" -q 'SELECT cast(cast(`Date` AS DATE) AS DATE) AS `tdy_Calculation_687361904651595777_ok` FROM table(`odbc1`)' +q 'SELECT cast(cast(`odbc1`.`date_col` AS DATE) AS DATE) AS `tdy_Calculation_687361904651595777_ok` FROM `test`.`odbc1`' q 'SELECT {fn CURDATE()}' -q $'SELECT `ui64` AS `bannerid`, sum((CASE WHEN `ui64` = 0 THEN NULL ELSE `ui64` / `ui64` END)) AS `sum_Calculation_582934706662502402_ok`, sum(`ui64`) AS `sum_clicks_ok`, sum(`ui64`) AS `sum_shows_ok`, sum(`ui64`) AS `sum_true_installs_ok`, cast(cast(`Date` AS DATE) AS DATE) AS `tdy_Calculation_582934706642255872_ok` FROM table(`odbc1`) as `odbc1` WHERE (`String` = \'YandexBrowser\') GROUP BY `ui64`, cast(cast(`Date` AS DATE) AS DATE)' +q $'SELECT `test`.`odbc1`.`ui64` AS `bannerid`, sum((CASE WHEN `test`.`odbc1`.`ui64` = 0 THEN NULL ELSE `test`.`odbc1`.`ui64` / `test`.`odbc1`.`ui64` END)) AS `sum_Calculation_582934706662502402_ok`, sum(`test`.`odbc1`.`ui64`) AS `sum_clicks_ok`, sum(`test`.`odbc1`.`ui64`) AS `sum_shows_ok`, sum(`test`.`odbc1`.`ui64`) AS `sum_true_installs_ok`, cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE) AS `tdy_Calculation_582934706642255872_ok` FROM `test`.`odbc1` WHERE (`test`.`odbc1`.`str` = \'YandexBrowser\') GROUP BY `test`.`odbc1`.`ui64`, cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE)' -q $'SELECT ui64 AS BannerID, sum((CASE WHEN ui64 = 0 THEN NULL ELSE ui64 / ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(ui64) AS sum_ch_installs_ok, sum(ui64) AS sum_goodshows_ok FROM table(odbc1) as `odbc1` GROUP BY ui64' -q $'SELECT ui64 AS BannerID, sum((CASE WHEN ui64 > 0 THEN NULL ELSE ui64 / ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(ui64) AS sum_ch_installs_ok, sum(ui64) AS sum_goodshows_ok FROM table(odbc1) as `odbc1` GROUP BY ui64' +q $'SELECT test.odbc1.ui64 AS BannerID, sum((CASE WHEN test.odbc1.ui64 = 0 THEN NULL ELSE test.odbc1.ui64 / test.odbc1.ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(test.odbc1.ui64) AS sum_ch_installs_ok, sum(test.odbc1.ui64) AS sum_goodshows_ok FROM test.odbc1 GROUP BY test.odbc1.ui64' +q $'SELECT test.odbc1.ui64 AS BannerID, sum((CASE WHEN test.odbc1.ui64 > 0 THEN NULL ELSE test.odbc1.ui64 / test.odbc1.ui64 END)) AS sum_Calculation_500744014152380416_ok, sum(test.odbc1.ui64) AS sum_ch_installs_ok, sum(test.odbc1.ui64) AS sum_goodshows_ok FROM test.odbc1 GROUP BY test.odbc1.ui64' -q "DROP STREAM IF EXISTS test_tableau;" -q "create stream test_tableau (country string, clicks uint64, shows uint64)" -q "insert into test_tableau (* except _tp_time) values ('ru',10000,100500)('ua',1000,6000)('by',2000,6500)('tr',100,500)" -q "insert into test_tableau (* except _tp_time) values ('undefined',0,2)" -q "insert into test_tableau (* except _tp_time) values ('injected',1,0)" +q "DROP STREAM IF EXISTS test.test_tableau;" +q "create stream test.test_tableau (country string, clicks uint64, shows uint64)" +q "insert into test.test_tableau (* except _tp_time) values ('ru',10000,100500),('ua',1000,6000),('by',2000,6500),('tr',100,500)" +q 
"insert into test.test_tableau (* except _tp_time) values ('undefined',0,2)" +q "insert into test.test_tableau (* except _tp_time) values ('injected',1,0)" sleep 2s -q 'SELECT test_tableau.country AS country, sum((CASE WHEN test_tableau.shows = 0 THEN NULL ELSE cast(test_tableau.clicks AS FLOAT) / test_tableau.shows END)) AS sum_Calculation_920986154656493569_ok, sum({fn POWER(cast(test_tableau.clicks AS FLOAT),2)}) AS sum_Calculation_920986154656579587_ok FROM table(test_tableau) as test_tableau GROUP BY test_tableau.country;' -q "DROP STREAM test_tableau;" +q 'SELECT test.test_tableau.country AS country, sum((CASE WHEN test.test_tableau.shows = 0 THEN NULL ELSE cast(test.test_tableau.clicks AS FLOAT) / test.test_tableau.shows END)) AS sum_Calculation_920986154656493569_ok, sum({fn POWER(cast(test.test_tableau.clicks AS FLOAT),2)}) AS sum_Calculation_920986154656579587_ok FROM test.test_tableau GROUP BY test.test_tableau.country;' +q "DROP STREAM test.test_tableau;" q 'SELECT NULL' q 'SELECT [NULL]' -q "DROP STREAM IF EXISTS adv_watch;" -q "create stream adv_watch (rocket_date date, rocket_datetime datetime, ivi_id uint64)" -q "insert into adv_watch (* except _tp_time) values (1,2,3)" -q "insert into adv_watch (* except _tp_time) values (1, {fn TIMESTAMPADD(SQL_TSI_DAY,-8,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}, 3)" +q "DROP STREAM IF EXISTS test.adv_watch;" +q "create stream test.adv_watch (rocket_date Date, rocket_datetime dateTime, ivi_id uint64)" +q "insert into test.adv_watch (* except _tp_time) values (1,2,3)" +q "insert into test.adv_watch (* except _tp_time) values (1, {fn TIMESTAMPADD(SQL_TSI_DAY,-8,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}, 3)" sleep 2s -q 'SELECT `adv_watch`.`rocket_date` AS `rocket_date`, count(DISTINCT `adv_watch`.`ivi_id`) AS `usr_Calculation_683139814283419648_ok` FROM table(`adv_watch`) as `adv_watch` WHERE ((`adv_watch`.`rocket_datetime` >= {fn TIMESTAMPADD(SQL_TSI_DAY,-9,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}) AND (`adv_watch`.`rocket_datetime` < {fn TIMESTAMPADD(SQL_TSI_DAY,1,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))})) GROUP BY `adv_watch`.`rocket_date`' -q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `adv_watch`.`rocket_date`),0)} AS INTEGER) AS `yr_rocket_date_ok` FROM table(`adv_watch`) as `adv_watch` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `adv_watch`.`rocket_date`),0)} AS INTEGER)' -q "DROP STREAM adv_watch;" +q 'SELECT `test`.`adv_watch`.`rocket_date` AS `rocket_date`, count(DISTINCT `test`.`adv_watch`.`ivi_id`) AS `usr_Calculation_683139814283419648_ok` FROM `test`.`adv_watch` WHERE ((`adv_watch`.`rocket_datetime` >= {fn TIMESTAMPADD(SQL_TSI_DAY,-9,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))}) AND (`test`.`adv_watch`.`rocket_datetime` < {fn TIMESTAMPADD(SQL_TSI_DAY,1,cast({fn CURRENT_TIMESTAMP(0)} AS DATE))})) GROUP BY `test`.`adv_watch`.`rocket_date`' +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`adv_watch`.`rocket_date`),0)} AS INTEGER) AS `yr_rocket_date_ok` FROM `test`.`adv_watch` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`adv_watch`.`rocket_date`),0)} AS INTEGER)' +q "DROP STREAM test.adv_watch;" # https://github.com/yandex/clickhouse-odbc/issues/43 -q 'DROP STREAM IF EXISTS gamoraparams;' -q 'CREATE STREAM gamoraparams ( user_id int64, Date date, dt datetime, p1 nullable(int32), platforms nullable(int32), max_position nullable(int32), vv nullable(int32), city nullable(string), third_party nullable(int8), mobile_tablet nullable(int8), mobile_phone nullable(int8), desktop nullable(int8), web_mobile nullable(int8), 
tv_attach nullable(int8), smart_tv nullable(int8), subsite_id nullable(int32), view_in_second nullable(int32), view_in_second_presto nullable(int32))' -q 'insert into gamoraparams (* except _tp_time) values (1, {fn CURRENT_TIMESTAMP }, cast({fn CURRENT_TIMESTAMP(0)} AS DATE), Null, Null,Null,Null,Null, Null,Null,Null,Null,Null,Null,Null,Null,Null,Null);' +q 'DROP STREAM IF EXISTS test.gamoraparams;' +q 'CREATE STREAM test.gamoraparams ( user_id int64, date_col Date, dt datetime, p1 nullable(int32), platforms nullable(int32), max_position nullable(int32), vv nullable(int32), city nullable(string), third_party nullable(int8), mobile_tablet nullable(int8), mobile_phone nullable(int8), desktop nullable(int8), web_mobile nullable(int8), tv_attach nullable(int8), smart_tv nullable(int8), subsite_id nullable(int32), view_in_second nullable(int32), view_in_second_presto nullable(int32))' +q 'insert into test.gamoraparams (* except _tp_time) values (1, {fn CURRENT_TIMESTAMP }, cast({fn CURRENT_TIMESTAMP(0)} AS DATE), Null, Null,Null,Null,Null, Null,Null,Null,Null,Null,Null,Null,Null,Null,Null);' sleep 2s -# q 'SELECT `Custom_SQL_Query`.`platforms` AS `platforms` FROM (select platforms from gamoraparams where platforms is null limit 1) `Custom_SQL_Query` GROUP BY `platforms`' -q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `gamoraparams`.`dt`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`gamoraparams`) as `gamoraparams` GROUP BY `yr_date_ok`'; -q 'DROP STREAM gamoraparams;' +q 'SELECT `Custom_SQL_Query`.`platforms` AS `platforms` FROM (select platforms from test.gamoraparams where platforms is null limit 1) `Custom_SQL_Query` GROUP BY `platforms`' +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `test`.`gamoraparams`.`dt`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`gamoraparams` GROUP BY `yr_date_ok`'; +q 'DROP STREAM test.gamoraparams;' -q $'SELECT cast(EXTRACT(YEAR FROM `Date`) AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1`' -q $'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1`' -q $'SELECT sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`, cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER) AS `yr_date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `Date`),0)} AS INTEGER)' +q $'SELECT cast(EXTRACT(YEAR FROM `odbc1`.`date_col`) AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`' +q $'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date_col`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1`' +q $'SELECT sum({fn CONVERT(1, SQL_BIGINT)}) AS `sum_Number_of_Records_ok`, cast({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date_col`),0)} AS INTEGER) AS `yr_date_ok` FROM `test`.`odbc1` GROUP BY cast({fn TRUNCATE(EXTRACT(YEAR FROM `odbc1`.`date_col`),0)} AS INTEGER)' -q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM cast(`Date` AS DATE)),0)} AS INTEGER) AS `yr_Calculation_860750537261912064_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `yr_Calculation_860750537261912064_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(`Date`)} - 1)),0)} AS INTEGER),cast(`Date` AS DATE))} AS `tyr__date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tyr__date_ok`' +q 'SELECT cast({fn TRUNCATE(EXTRACT(YEAR FROM cast(`test`.`odbc1`.`date_col` AS DATE)),0)} AS INTEGER) AS `yr_Calculation_860750537261912064_ok` FROM `test`.`odbc1` GROUP BY `yr_Calculation_860750537261912064_ok`' +q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn 
DAYOFYEAR(`test`.`odbc1`.`date_col`)} - 1)),0)} AS INTEGER),cast(`test`.`odbc1`.`date_col` AS DATE))} AS `tyr__date_ok` FROM `test`.`odbc1` GROUP BY `tyr__date_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn MOD((7 + {fn DAYOFWEEK(cast(`Date` AS DATE))} - 2), 7)})),cast(cast(`Date` AS DATE) AS DATE))} AS `twk_date_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `twk_date_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`Date` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))} AS `tyr_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tyr_Calculation_681450978608578560_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_MONTH,cast({fn TRUNCATE((3 * (cast({fn TRUNCATE({fn QUARTER(cast(`Date` AS DATE))},0)} AS INTEGER) - 1)),0)} AS INTEGER),{fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`Date` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))})} AS `tqr_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tqr_Calculation_681450978608578560_ok`' -q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * (EXTRACT(DAY FROM cast(`Date` AS DATE)) - 1)),0)} AS INTEGER),cast(cast(`Date` AS DATE) AS DATE))} AS `tmn_Calculation_681450978608578560_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `tmn_Calculation_681450978608578560_ok`' +q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,(-1 * ({fn MOD((7 + {fn DAYOFWEEK(cast(`test`.`odbc1`.`date_col` AS DATE))} - 2), 7)})),cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE))} AS `twk_date_ok` FROM `test`.`odbc1` GROUP BY `twk_date_ok`' +q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`test`.`odbc1`.`date_col` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE))} AS `tyr_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tyr_Calculation_681450978608578560_ok`' +q 'SELECT {fn TIMESTAMPADD(SQL_TSI_MONTH,cast({fn TRUNCATE((3 * (cast({fn TRUNCATE({fn QUARTER(cast(`test`.`odbc1`.`date_col` AS DATE))},0)} AS INTEGER) - 1)),0)} AS INTEGER),{fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * ({fn DAYOFYEAR(cast(`test`.`odbc1`.`date_col` AS DATE))} - 1)),0)} AS INTEGER),cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE))})} AS `tqr_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tqr_Calculation_681450978608578560_ok`' +q 'SELECT {fn TIMESTAMPADD(SQL_TSI_DAY,cast({fn TRUNCATE((-1 * (EXTRACT(DAY FROM cast(`test`.`odbc1`.`date_col` AS DATE)) - 1)),0)} AS INTEGER),cast(cast(`test`.`odbc1`.`date_col` AS DATE) AS DATE))} AS `tmn_Calculation_681450978608578560_ok` FROM `test`.`odbc1` GROUP BY `tmn_Calculation_681450978608578560_ok`' -q $'SELECT (CASE WHEN (`ui64` < 5) THEN replaceRegexpOne(toString(`ui64`), \'^\\s+\', \'\') WHEN (`ui64` < 10) THEN \'5-9\' WHEN (`ui64` < 20) THEN \'10-19\' WHEN (`ui64` >= 20) THEN \'20+\' ELSE NULL END) AS `Calculation_582653228063055875`, sum(`ui64`) AS `sum_traf_se_ok` FROM table(`odbc1`) as `odbc1` GROUP BY `Calculation_582653228063055875` ORDER BY `Calculation_582653228063055875`' +q $'SELECT (CASE WHEN (`test`.`odbc1`.`ui64` < 5) THEN replaceRegexpOne(toString(`test`.`odbc1`.`ui64`), \'^\\s+\', \'\') WHEN (`test`.`odbc1`.`ui64` < 10) THEN \'5-9\' WHEN (`test`.`odbc1`.`ui64` < 20) THEN \'10-19\' WHEN (`test`.`odbc1`.`ui64` >= 20) THEN \'20+\' ELSE NULL END) AS `Calculation_582653228063055875`, sum(`test`.`odbc1`.`ui64`) AS `sum_traf_se_ok` FROM `test`.`odbc1` GROUP BY 
`Calculation_582653228063055875` ORDER BY `Calculation_582653228063055875`' q $"SELECT *, (CASE WHEN (number == 1) THEN 'o' WHEN (number == 2) THEN 'two long string' WHEN (number == 3) THEN 'r' WHEN (number == 4) THEN NULL ELSE '-' END) FROM system.numbers LIMIT 6" # todo: test with fail on comparsion: @@ -150,44 +150,44 @@ q $"SELECT -127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,327 q $"SELECT 2147483647, 2147483648, 2147483647+1, 2147483647+10, 4294967295" q "DROP STREAM if exists fixedstring;" -q "CREATE STREAM IF NOT EXISTS fixedstring ( xx fixed_string(100))" -q "INSERT INTO fixedstring (* except _tp_time) VALUES ('a'), ('abcdefg'), ('абвгдеёжзийклмнопрстуфхцч')"; +q "CREATE STREAM IF NOT EXISTS test.fixedstring ( xx fixed_string(100));" +q "INSERT INTO test.fixedstring (* except _tp_time) VALUES ('a'), ('abcdefg'), ('абвгдеёжзийклмнопрстуфхцч')"; sleep 2s -q "select xx as x from table(fixedstring) as fixedstring;" -q "DROP STREAM fixedstring;" +q "select xx as x from test.fixedstring;" +q "DROP STREAM test.fixedstring;" -q 'DROP STREAM IF EXISTS increment;' -q 'CREATE STREAM increment (n uint64);' +q 'DROP STREAM IF EXISTS test.increment;' +q 'CREATE STREAM test.increment (n uint64);' NUM=${NUM=100} for i in `seq 1 ${NUM}`; do - q "insert into increment (* except _tp_time) values ($i);" > /dev/null - q 'select * from table(increment) as increment;' > /dev/null + q "insert into test.increment (* except _tp_time) values ($i);" > /dev/null + q 'select * from test.increment;' > /dev/null done sleep 2s -q 'select * from table(increment) as increment;' +q 'select * from test.increment;' echo "should be ${NUM}:" -q 'select count(*) from table(increment) as increment;' +q 'select count(*) from test.increment;' -q 'DROP STREAM increment;' +q 'DROP STREAM test.increment;' -q "DROP STREAM IF EXISTS decimal;" -q "CREATE STREAM IF NOT EXISTS decimal (a DECIMAL(9,0), b DECIMAL(18,0), c DECIMAL(38,0), d DECIMAL(9, 9), e Decimal64(18), f Decimal128(38), g Decimal32(5), h Decimal64(9), i Decimal128(18), j decimal(4,2))" -q "INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42);" -q "INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42);" +q "DROP STREAM IF EXISTS test.decimal;" +q "CREATE STREAM IF NOT EXISTS test.decimal (a DECIMAL(9,0), b DECIMAL(18,0), c DECIMAL(38,0), d DECIMAL(9, 9), e Decimal64(18), f Decimal128(38), g Decimal32(5), h Decimal64(9), i Decimal128(18), j decimal(4,2));" +q "INSERT INTO test.decimal (a, b, c, d, e, f, g, h, i, j) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42);" +q "INSERT INTO test.decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42);" sleep 2s -q "SELECT * FROM table(decimal) as decimal;" +q "SELECT * FROM test.decimal;" -q "drop stream if exists lc;" -q "create stream lc (b low_cardinality(string)) order by b;" -q "insert into lc (* except _tp_time) select '0123456789' from numbers(100);" +q "drop stream if exists test.lc;" +q "create stream test.lc (b low_cardinality(string)) order by b;" +q "insert into test.lc (* except _tp_time) select '0123456789' from numbers(100);" sleep 2s -q "select count(), b from table(lc) as lc group by b;" -q "select * from table(lc) as lc limit 10;" -q "drop stream lc;" +q "select count(), b from test.lc group by b;" +q "select * from test.lc limit 10;" +q "drop stream test.lc;" # These queries can 
only be executed within session q "SET max_threads=10;" diff --git a/tests-pyodbc/config.yaml b/tests-pyodbc/config.yaml index 3b64999..7d83726 100644 --- a/tests-pyodbc/config.yaml +++ b/tests-pyodbc/config.yaml @@ -1,6 +1,12 @@ -data_source: - driver: 'ClickHouse ODBC Driver (Unicode)' - url: 'http://localhost:3218' +default_data_source: + driver: 'Proton ODBC Driver (Unicode)' + url: 'http://localhost:8123' database: 'default' uid: 'default' pwd: '' +invalid_data_source: + driver: 'Proton ODBC Driver (Unicode)' + url: 'http://localhost:8123' + database: 'system' + uid: 'system' + pwd: 'pasword' diff --git a/tests-pyodbc/test_suites/conftest.py b/tests-pyodbc/test_suites/conftest.py index 973d2c6..4803dad 100644 --- a/tests-pyodbc/test_suites/conftest.py +++ b/tests-pyodbc/test_suites/conftest.py @@ -4,7 +4,13 @@ @pytest.fixture(autouse=True) -def get_connection(): +def get_default_connection(): with open('../config.yaml', 'r', encoding='utf-8') as f: - cfg = yaml.full_load(f)['data_source'] + cfg = yaml.full_load(f)['default_data_source'] + return pyodbc.connect(';'.join([f'{k}={v}' for k, v in cfg.items()]), autocommit=True) + +@pytest.fixture(autouse=True) +def get_invalid_connection(): + with open('../config.yaml', 'r', encoding='utf-8') as f: + cfg = yaml.full_load(f)['invalid_data_source'] return pyodbc.connect(';'.join([f'{k}={v}' for k, v in cfg.items()]), autocommit=True) diff --git a/tests-pyodbc/test_suites/test_grant.py b/tests-pyodbc/test_suites/test_grant.py new file mode 100644 index 0000000..3266399 --- /dev/null +++ b/tests-pyodbc/test_suites/test_grant.py @@ -0,0 +1,11 @@ +import pyodbc +import pytest + + +class TestGrant: + def test_invalid(self, get_invalid_connection: pyodbc.Connection): + with pytest.raises(pyodbc.Error) as auth_err: + with get_invalid_connection as conn: + with conn.cursor() as cursor: + cursor.execute("select 1") + assert 'Authentication failed' in str(auth_err.value) diff --git a/tests-pyodbc/test_suites/test_types.py b/tests-pyodbc/test_suites/test_types.py index 9232588..cec1802 100644 --- a/tests-pyodbc/test_suites/test_types.py +++ b/tests-pyodbc/test_suites/test_types.py @@ -10,16 +10,17 @@ class TestType: @pytest.mark.parametrize(type_t.arg_name, type_t.args, ids=type_t.ids) def test_type(self, - get_connection: pyodbc.Connection, + get_default_connection: pyodbc.Connection, stream_suffix: str, type_name: str, input_list: list, expect_output: list): - with get_connection as conn: + with get_default_connection as conn: with conn.cursor() as cursor: cursor.execute(f"drop stream if exists `test_{stream_suffix}`") cursor.execute(f"create stream `test_{stream_suffix}` (`x` {type_name})") cursor.executemany(f"insert into `test_{stream_suffix}` (`x`) values (?)", input_list) - result = cursor.execute(f"select x from `test_{stream_suffix}` where _tp_time > earliest_ts() limit {len(input_list)}").fetchall() - #cursor.execute(f"drop stream if exists `test_{stream_suffix}`") + time.sleep(3) + result = cursor.execute(f"select x from `test_{stream_suffix}`").fetchall() + cursor.execute(f"drop stream if exists `test_{stream_suffix}`") utils.assert_eq2d(expect_output, result) From b9fe92522571774ca92ac599cac01484aba7fc26 Mon Sep 17 00:00:00 2001 From: Leo Cai Date: Thu, 9 Nov 2023 17:56:23 +0800 Subject: [PATCH 5/5] add a grant test and fix some tests in linux --- driver/test/nano_it.cpp | 16 ++++++++-------- test/mssql.linked.server.sql | 10 ---------- tests-pyodbc/config.yaml | 6 ++++++ tests-pyodbc/test_suites/conftest.py | 6 ++++++ 
tests-pyodbc/test_suites/test_grant.py | 5 +++++ 5 files changed, 25 insertions(+), 18 deletions(-) diff --git a/driver/test/nano_it.cpp b/driver/test/nano_it.cpp index e41cc40..9a860ba 100644 --- a/driver/test/nano_it.cpp +++ b/driver/test/nano_it.cpp @@ -130,27 +130,27 @@ void run_test(nanodbc::string const & connection_string) { auto results = execute(connection, NANODBC_TEXT("select (* except _tp_time) from simple_test;")); show(results); } - execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS default.strings;")); - execute(connection, NANODBC_TEXT("CREATE STREAM default.strings (id uint64, str string, dt datetime DEFAULT now());")); - execute(connection, NANODBC_TEXT("INSERT INTO default.strings (* except _tp_time) SELECT number, hex(number+100000), 1 FROM system.numbers LIMIT 100;")); + execute(connection, NANODBC_TEXT("CREATE DATABASE IF NOT EXISTS test;")); + execute(connection, NANODBC_TEXT("CREATE STREAM test.strings (id uint64, str string, dt datetime DEFAULT now());")); + execute(connection, NANODBC_TEXT("INSERT INTO test.strings (* except _tp_time) SELECT number, hex(number+100000), 1 FROM system.numbers LIMIT 100;")); execute(connection, NANODBC_TEXT("select sleep(2)")); { - auto results = execute(connection, NANODBC_TEXT("SELECT count(*) FROM default.strings;")); + auto results = execute(connection, NANODBC_TEXT("SELECT count(*) FROM test.strings;")); show(results); } { - auto results = execute(connection, NANODBC_TEXT("SELECT (* except _tp_time) FROM default.strings;")); + auto results = execute(connection, NANODBC_TEXT("SELECT (* except _tp_time) FROM test.strings;")); show(results); } { auto results = execute(connection, - NANODBC_TEXT("SELECT `default`.`strings`.`str` AS `platform`, sum(`default`.`strings`.`id`) AS `sum_installs_ok` FROM " - "`default`.`strings` GROUP BY `str`;")); + NANODBC_TEXT("SELECT `test`.`strings`.`str` AS `platform`, sum(`test`.`strings`.`id`) AS `sum_installs_ok` FROM " + "`test`.`strings` GROUP BY `str`;")); show(results); } - execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS default.strings;")); + execute(connection, NANODBC_TEXT("DROP STREAM IF EXISTS test.strings;")); } // Setup diff --git a/test/mssql.linked.server.sql b/test/mssql.linked.server.sql index 33bfc00..34c5502 100644 --- a/test/mssql.linked.server.sql +++ b/test/mssql.linked.server.sql @@ -37,13 +37,3 @@ exec('SELECT -127,-128,-129,126,127,128,255,256,257,-32767,-32768,-32769,32766,3 go exec('SELECT *, (CASE WHEN (number == 1) THEN ''o'' WHEN (number == 2) THEN ''two long string'' WHEN (number == 3) THEN ''r'' WHEN (number == 4) THEN NULL ELSE ''-'' END) FROM system.numbers LIMIT 6') at [proton_link_test]; go - -exec('CREATE STREAM IF NOT EXISTS default.number (a int64, b float64)') at [proton_link_test]; -go -exec(N'INSERT INTO default.number (* except _tp_time) VALUES (1000, 1.1), (1200, 100.19), (-1000, -99.1);') at [proton_link_test]; -go -exec('select sleep(2);') at [proton_link_test]; -exec('select (* except _tp_time) from default.number;') at [proton_link_test]; -go -exec('DROP STREAM default.number;') at [proton_link_test]; -go diff --git a/tests-pyodbc/config.yaml b/tests-pyodbc/config.yaml index 7d83726..73e391e 100644 --- a/tests-pyodbc/config.yaml +++ b/tests-pyodbc/config.yaml @@ -10,3 +10,9 @@ invalid_data_source: database: 'system' uid: 'system' pwd: 'pasword' +valid_data_source: + driver: 'Proton ODBC Driver (Unicode)' + url: 'http://localhost:8123' + database: 'system' + uid: 'system' + pwd: 'sys@t+' diff --git a/tests-pyodbc/test_suites/conftest.py 
b/tests-pyodbc/test_suites/conftest.py index 4803dad..3252c42 100644 --- a/tests-pyodbc/test_suites/conftest.py +++ b/tests-pyodbc/test_suites/conftest.py @@ -14,3 +14,9 @@ def get_invalid_connection(): with open('../config.yaml', 'r', encoding='utf-8') as f: cfg = yaml.full_load(f)['invalid_data_source'] return pyodbc.connect(';'.join([f'{k}={v}' for k, v in cfg.items()]), autocommit=True) + +@pytest.fixture(autouse=True) +def get_valid_connection(): + with open('../config.yaml', 'r', encoding='utf-8') as f: + cfg = yaml.full_load(f)['valid_data_source'] + return pyodbc.connect(';'.join([f'{k}={v}' for k, v in cfg.items()]), autocommit=True) diff --git a/tests-pyodbc/test_suites/test_grant.py b/tests-pyodbc/test_suites/test_grant.py index 3266399..d7c8774 100644 --- a/tests-pyodbc/test_suites/test_grant.py +++ b/tests-pyodbc/test_suites/test_grant.py @@ -9,3 +9,8 @@ def test_invalid(self, get_invalid_connection: pyodbc.Connection): with conn.cursor() as cursor: cursor.execute("select 1") assert 'Authentication failed' in str(auth_err.value) + + def test_valid(self, get_valid_connection: pyodbc.Connection): + with get_valid_connection as conn: + with conn.cursor() as cursor: + cursor.execute("select 1")
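
Taken together, the config and fixture changes above define the pattern every suite in tests-pyodbc now follows: assemble a DSN-less pyodbc connection string from a `config.yaml` mapping, run DDL/DML on an autocommit connection, and sleep briefly between INSERT and SELECT, for the same reason `select sleep(2)` and `sleep 2s` calls were added to nano_it.cpp and test.sh. The following is a minimal sketch of that pattern, not part of the patch; it assumes a local Proton HTTP endpoint and the sample `default_data_source` credentials, and the stream name `test_example` is illustrative:

```python
# Sketch of the tests-pyodbc connection/query pattern. The endpoint,
# credentials, and stream name below are assumptions mirroring config.yaml.
import time

import pyodbc

cfg = {
    "driver": "Proton ODBC Driver (Unicode)",  # must match an odbcinst.ini entry
    "url": "http://localhost:8123",
    "database": "default",
    "uid": "default",
    "pwd": "",
}

# Same DSN-less string construction as conftest.py: "k1=v1;k2=v2;...".
conn = pyodbc.connect(";".join(f"{k}={v}" for k, v in cfg.items()), autocommit=True)

with conn.cursor() as cursor:
    cursor.execute("drop stream if exists `test_example`")
    cursor.execute("create stream `test_example` (`x` int32)")
    cursor.execute("insert into `test_example` (`x`) values (?)", 1)
    time.sleep(2)  # let the historical store catch up with the stream write
    rows = cursor.execute("select x from `test_example`").fetchall()
    assert rows[0][0] == 1
    cursor.execute("drop stream if exists `test_example`")
```

Without the sleep, a SELECT issued immediately after an INSERT can legitimately come back empty; that is what the earlier `where _tp_time > earliest_ts() limit N` predicates worked around, and what these patches replace with explicit waits.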